code stringlengths 118 171k | apis list | extract_api stringlengths 145 164k |
|---|---|---|
import os
import numpy as np
import time
import argparse
import oneflow as flow
from models.networks import Generator, Discriminator
from utils.data_utils import load_facades
from utils.utils import init_logger, to_tensor, to_numpy, save_images, mkdirs
class Pix2Pix:
def __init__(self, args) -> None:
self.lr = args.learning_rate
self.LAMBDA = args.LAMBDA
self.save = args.save
self.batch_size = args.batch_size
self.path = args.path
self.n_epochs = args.epoch_num
self.eval_interval = 10
self.G_image_loss = []
self.G_GAN_loss = []
self.G_total_loss = []
self.D_loss = []
self.netG = Generator().to("cuda")
self.netD = Discriminator().to("cuda")
self.optimizerG = flow.optim.Adam(
self.netG.parameters(), lr=self.lr, betas=(0.5, 0.999)
)
self.optimizerD = flow.optim.Adam(
self.netD.parameters(), lr=self.lr, betas=(0.5, 0.999)
)
self.criterionGAN = flow.nn.BCEWithLogitsLoss()
self.criterionL1 = flow.nn.L1Loss()
self.checkpoint_path = os.path.join(self.path, "checkpoint")
self.test_images_path = os.path.join(self.path, "test_images")
mkdirs(self.checkpoint_path, self.test_images_path)
self.logger = init_logger(os.path.join(self.path, "log.txt"))
def train(self):
# init dataset
x, y = load_facades()
# flow.Tensor() bug in here
x, y = np.ascontiguousarray(x), np.ascontiguousarray(y)
self.fixed_inp = to_tensor(x[: self.batch_size].astype(np.float32))
self.fixed_target = to_tensor(y[: self.batch_size].astype(np.float32))
batch_num = len(x) // self.batch_size
label1 = to_tensor(np.ones((self.batch_size, 1, 30, 30)), dtype=flow.float32)
label0 = to_tensor(np.zeros((self.batch_size, 1, 30, 30)), dtype=flow.float32)
for epoch_idx in range(self.n_epochs):
self.netG.train()
self.netD.train()
start = time.time()
# run every epoch to shuffle
for batch_idx in range(batch_num):
inp = to_tensor(
x[
batch_idx * self.batch_size : (batch_idx + 1) * self.batch_size
].astype(np.float32)
)
target = to_tensor(
y[
batch_idx * self.batch_size : (batch_idx + 1) * self.batch_size
].astype(np.float32)
)
# update D
d_fake_loss, d_real_loss, d_loss = self.train_discriminator(
inp, target, label0, label1
)
# update G
g_gan_loss, g_image_loss, g_total_loss, g_out = self.train_generator(
inp, target, label1
)
self.G_GAN_loss.append(g_gan_loss)
self.G_image_loss.append(g_image_loss)
self.G_total_loss.append(g_total_loss)
self.D_loss.append(d_loss)
if (batch_idx + 1) % self.eval_interval == 0:
self.logger.info(
"{}th epoch, {}th batch, d_fakeloss:{:>8.4f}, d_realloss:{:>8.4f}, ggan_loss:{:>8.4f}, gl1_loss:{:>8.4f}".format(
epoch_idx + 1,
batch_idx + 1,
d_fake_loss,
d_real_loss,
g_gan_loss,
g_image_loss,
)
)
self.logger.info(
"Time for epoch {} is {} sec.".format(
epoch_idx + 1, time.time() - start
)
)
if (epoch_idx + 1) % 2 * self.eval_interval == 0:
# save .train() images
# save .eval() images
self._eval_generator_and_save_images(epoch_idx)
if self.save:
flow.save(
self.netG.state_dict(),
os.path.join(
self.checkpoint_path, "pix2pix_g_{}".format(epoch_idx + 1)
),
)
flow.save(
self.netD.state_dict(),
os.path.join(
self.checkpoint_path, "pix2pix_d_{}".format(epoch_idx + 1)
),
)
# save train loss and val error to plot
np.save(
os.path.join(self.path, "G_image_loss_{}.npy".format(self.n_epochs)),
self.G_image_loss,
)
np.save(
os.path.join(self.path, "G_GAN_loss_{}.npy".format(self.n_epochs)),
self.G_GAN_loss,
)
np.save(
os.path.join(self.path, "G_total_loss_{}.npy".format(self.n_epochs)),
self.G_total_loss,
)
np.save(
os.path.join(self.path, "D_loss_{}.npy".format(self.n_epochs)),
self.D_loss,
)
self.logger.info("*************** Train done ***************** ")
def train_generator(self, input, target, label1):
g_out = self.netG(input)
# First, G(A) should fake the discriminator
fake_AB = flow.cat([input, g_out], 1)
pred_fake = self.netD(fake_AB)
gan_loss = self.criterionGAN(pred_fake, label1)
# Second, G(A) = B
l1_loss = self.criterionL1(g_out, target)
# combine loss and calculate gradients
g_loss = gan_loss + self.LAMBDA * l1_loss
g_loss.backward()
self.optimizerG.step()
self.optimizerG.zero_grad()
return (
to_numpy(gan_loss),
to_numpy(self.LAMBDA * l1_loss),
to_numpy(g_loss),
to_numpy(g_out, False),
)
def train_discriminator(self, input, target, label0, label1):
g_out = self.netG(input)
# Fake; stop backprop to the generator by detaching fake_B
fake_AB = flow.cat([input, g_out.detach()], 1)
pred_fake = self.netD(fake_AB)
d_fake_loss = self.criterionGAN(pred_fake, label0)
# Real
real_AB = flow.cat([input, target], 1)
pred_real = self.netD(real_AB)
d_real_loss = self.criterionGAN(pred_real, label1)
# combine loss and calculate gradients
d_loss = (d_fake_loss + d_real_loss) * 0.5
d_loss.backward()
self.optimizerD.step()
self.optimizerD.zero_grad()
return to_numpy(d_fake_loss), to_numpy(d_real_loss), to_numpy(d_loss)
def _eval_generator_and_save_images(self, epoch_idx):
results = self._eval_generator()
save_images(
results,
to_numpy(self.fixed_inp, False),
to_numpy(self.fixed_target, False),
path=os.path.join(
self.test_images_path, "testimage_{:02d}.png".format(epoch_idx + 1)
),
)
def _eval_generator(self):
self.netG.eval()
with flow.no_grad():
g_out = self.netG(self.fixed_inp)
return to_numpy(g_out, False)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="oneflow PIX2PIX")
parser.add_argument("--path", type=str, default="./of_pix2pix", required=False)
parser.add_argument("-e", "--epoch_num", type=int, default=200, required=False)
parser.add_argument(
"-lr", "--learning_rate", type=float, default=2e-4, required=False
)
parser.add_argument("--LAMBDA", type=float, default=200, required=False)
parser.add_argument("--batch_size", type=int, default=32, required=False)
parser.add_argument(
"--save",
type=bool,
default=True,
required=False,
help="whether to save train_images, train_checkpoint and train_loss",
)
args = parser.parse_args()
pix2pix = Pix2Pix(args)
pix2pix.train()
| [
"oneflow.no_grad",
"oneflow.cat",
"oneflow.nn.BCEWithLogitsLoss",
"oneflow.nn.L1Loss"
] | [((7251, 7305), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""oneflow PIX2PIX"""'}), "(description='oneflow PIX2PIX')\n", (7274, 7305), False, 'import argparse\n'), ((1026, 1053), 'oneflow.nn.BCEWithLogitsLoss', 'flow.nn.BCEWithLogitsLoss', ([], {}), '()\n', (1051, 1053), True, 'import oneflow as flow\n'), ((1081, 1097), 'oneflow.nn.L1Loss', 'flow.nn.L1Loss', ([], {}), '()\n', (1095, 1097), True, 'import oneflow as flow\n'), ((1130, 1167), 'os.path.join', 'os.path.join', (['self.path', '"""checkpoint"""'], {}), "(self.path, 'checkpoint')\n", (1142, 1167), False, 'import os\n'), ((1200, 1238), 'os.path.join', 'os.path.join', (['self.path', '"""test_images"""'], {}), "(self.path, 'test_images')\n", (1212, 1238), False, 'import os\n'), ((1248, 1299), 'utils.utils.mkdirs', 'mkdirs', (['self.checkpoint_path', 'self.test_images_path'], {}), '(self.checkpoint_path, self.test_images_path)\n', (1254, 1299), False, 'from utils.utils import init_logger, to_tensor, to_numpy, save_images, mkdirs\n'), ((1430, 1444), 'utils.data_utils.load_facades', 'load_facades', ([], {}), '()\n', (1442, 1444), False, 'from utils.data_utils import load_facades\n'), ((5351, 5378), 'oneflow.cat', 'flow.cat', (['[input, g_out]', '(1)'], {}), '([input, g_out], 1)\n', (5359, 5378), True, 'import oneflow as flow\n'), ((6266, 6294), 'oneflow.cat', 'flow.cat', (['[input, target]', '(1)'], {}), '([input, target], 1)\n', (6274, 6294), True, 'import oneflow as flow\n'), ((7185, 7207), 'utils.utils.to_numpy', 'to_numpy', (['g_out', '(False)'], {}), '(g_out, False)\n', (7193, 7207), False, 'from utils.utils import init_logger, to_tensor, to_numpy, save_images, mkdirs\n'), ((1334, 1368), 'os.path.join', 'os.path.join', (['self.path', '"""log.txt"""'], {}), "(self.path, 'log.txt')\n", (1346, 1368), False, 'import os\n'), ((1496, 1519), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['x'], {}), '(x)\n', (1516, 1519), True, 'import numpy as np\n'), ((1521, 1544), 
'numpy.ascontiguousarray', 'np.ascontiguousarray', (['y'], {}), '(y)\n', (1541, 1544), True, 'import numpy as np\n'), ((1774, 1811), 'numpy.ones', 'np.ones', (['(self.batch_size, 1, 30, 30)'], {}), '((self.batch_size, 1, 30, 30))\n', (1781, 1811), True, 'import numpy as np\n'), ((1860, 1898), 'numpy.zeros', 'np.zeros', (['(self.batch_size, 1, 30, 30)'], {}), '((self.batch_size, 1, 30, 30))\n', (1868, 1898), True, 'import numpy as np\n'), ((2048, 2059), 'time.time', 'time.time', ([], {}), '()\n', (2057, 2059), False, 'import time\n'), ((5771, 5789), 'utils.utils.to_numpy', 'to_numpy', (['gan_loss'], {}), '(gan_loss)\n', (5779, 5789), False, 'from utils.utils import init_logger, to_tensor, to_numpy, save_images, mkdirs\n'), ((5803, 5834), 'utils.utils.to_numpy', 'to_numpy', (['(self.LAMBDA * l1_loss)'], {}), '(self.LAMBDA * l1_loss)\n', (5811, 5834), False, 'from utils.utils import init_logger, to_tensor, to_numpy, save_images, mkdirs\n'), ((5848, 5864), 'utils.utils.to_numpy', 'to_numpy', (['g_loss'], {}), '(g_loss)\n', (5856, 5864), False, 'from utils.utils import init_logger, to_tensor, to_numpy, save_images, mkdirs\n'), ((5878, 5900), 'utils.utils.to_numpy', 'to_numpy', (['g_out', '(False)'], {}), '(g_out, False)\n', (5886, 5900), False, 'from utils.utils import init_logger, to_tensor, to_numpy, save_images, mkdirs\n'), ((6600, 6621), 'utils.utils.to_numpy', 'to_numpy', (['d_fake_loss'], {}), '(d_fake_loss)\n', (6608, 6621), False, 'from utils.utils import init_logger, to_tensor, to_numpy, save_images, mkdirs\n'), ((6623, 6644), 'utils.utils.to_numpy', 'to_numpy', (['d_real_loss'], {}), '(d_real_loss)\n', (6631, 6644), False, 'from utils.utils import init_logger, to_tensor, to_numpy, save_images, mkdirs\n'), ((6646, 6662), 'utils.utils.to_numpy', 'to_numpy', (['d_loss'], {}), '(d_loss)\n', (6654, 6662), False, 'from utils.utils import init_logger, to_tensor, to_numpy, save_images, mkdirs\n'), ((6817, 6848), 'utils.utils.to_numpy', 'to_numpy', (['self.fixed_inp', 
'(False)'], {}), '(self.fixed_inp, False)\n', (6825, 6848), False, 'from utils.utils import init_logger, to_tensor, to_numpy, save_images, mkdirs\n'), ((6862, 6896), 'utils.utils.to_numpy', 'to_numpy', (['self.fixed_target', '(False)'], {}), '(self.fixed_target, False)\n', (6870, 6896), False, 'from utils.utils import init_logger, to_tensor, to_numpy, save_images, mkdirs\n'), ((7108, 7122), 'oneflow.no_grad', 'flow.no_grad', ([], {}), '()\n', (7120, 7122), True, 'import oneflow as flow\n'), ((688, 699), 'models.networks.Generator', 'Generator', ([], {}), '()\n', (697, 699), False, 'from models.networks import Generator, Discriminator\n'), ((731, 746), 'models.networks.Discriminator', 'Discriminator', ([], {}), '()\n', (744, 746), False, 'from models.networks import Generator, Discriminator\n'), ((3764, 3775), 'time.time', 'time.time', ([], {}), '()\n', (3773, 3775), False, 'import time\n')] |
"""
Copyright 2020 Tianshu AI Platform. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import math
import oneflow as flow
import util.config as configs
from util.util import Snapshot, Summary, InitNodes, Metric, LoadCfg, LoadData
from util.job_function_util import get_train_config, get_val_config
import model.cnn.resnet_model as resnet_model
import model.cnn.vgg_model as vgg_model
import model.cnn.alexnet_model as alexnet_model
import model.cnn.lenet_model as lenet_model
import model.dnn.dnn_model as dnn_model
from util.model_weights import modelWeight
parser = configs.get_parser()
args = parser.parse_args()
configs.print_args(args)
total_device_num = args.num_nodes * args.gpu_num_per_node
train_batch_size = total_device_num * args.batch_size_per_device
val_batch_size = total_device_num * args.val_batch_size_per_device
(C, H, W) = args.image_shape
epoch_size = math.ceil(args.num_examples / train_batch_size)
num_val_steps = int(args.num_val_examples / val_batch_size)
model_dict = {"resnet": resnet_model.resnet50,
"vgg": vgg_model.vgg,
"alexnet": alexnet_model.alexnet,
"alexnet_simple": alexnet_model.alexnet_simple,
"lenet": lenet_model.lenet,
"dnn_2": dnn_model.dnn_2,
"dnn_4": dnn_model.dnn_4,}
flow.config.gpu_device_num(args.gpu_num_per_node)
flow.config.enable_debug_mode(True)
if args.use_boxing_v2:
flow.config.collective_boxing.nccl_fusion_threshold_mb(8)
flow.config.collective_boxing.nccl_fusion_all_reduce_use_buffer(False)
def label_smoothing(labels, classes, eta, dtype):
assert classes > 0
assert eta >= 0.0 and eta < 1.0
return flow.one_hot(labels, depth=classes, dtype=dtype,
on_value=1 - eta + eta / classes, off_value=eta/classes)
@flow.global_function("train", get_train_config(args))
def TrainNet():
cfg = LoadCfg(args=args, model_load_dir=args.model_load_dir, load_type='train')
labels, images = LoadData(args, 'train')
if args.model in ("resnet", "vgg", "alexnet", "alexnet_simple", "lenet"):
logits = model_dict[args.model](images, cfg, optimizer=args.model_update,
need_transpose=False if args.train_data_dir else True,
bn=args.bn)
else:
logits = model_dict[args.model](images, cfg, optimizer=args.model_update)
if args.label_smoothing > 0:
one_hot_labels = label_smoothing(labels, args.num_classes, args.label_smoothing, logits.dtype)
loss = flow.nn.softmax_cross_entropy_with_logits(one_hot_labels, logits, name="softmax_loss")
else:
loss = flow.nn.sparse_softmax_cross_entropy_with_logits(labels, logits, name="softmax_loss")
# lr_scheduler = flow.optimizer.PiecewiseConstantScheduler([], [args.learning_rate])
# flow.optimizer.SGD(lr_scheduler, momentum=args.mom).minimize(loss)
flow.losses.add_loss(loss)
predictions = flow.nn.softmax(logits)
outputs = {"loss": loss, "predictions": predictions, "labels": labels}
# outputs = {"loss": loss, "predictions": predictions, "labels": labels, 'logits':logits}
return outputs
@flow.global_function("predict", get_val_config(args))
def InferenceNet():
cfg = LoadCfg(args=args, model_load_dir=args.model_load_dir, load_type='test')
labels, images = LoadData(args, 'test')
if args.model in ("resnet", "vgg", "alexnet", "alexnet_simple", "lenet"):
logits = model_dict[args.model](images, cfg, optimizer=args.model_update,
need_transpose=False if args.train_data_dir else True,
model_weight=False, bn=args.bn)
else:
logits = model_dict[args.model](images, cfg, optimizer=args.model_update, model_weight=False)
predictions = flow.nn.softmax(logits)
outputs = {"predictions": predictions, "labels": labels}
return outputs
def main():
InitNodes(args)
flow.env.grpc_use_no_signal()
flow.env.log_dir(args.log_dir)
summary = Summary(args.log_dir, args)
snapshot = Snapshot(args.model_save_dir, args.model_load_dir)
#open log file
log_file = open("./log/log_"+args.model+"_"+args.data_type+"_"+args.log_type+".txt", "w")
if not args.before_result_dir:
args.before_result_dir = "./log/before"
if not args.after_result_dir:
args.after_result_dir = "./log/after"
for epoch in range(args.num_epochs):
#config callback func during training
metric = Metric(desc='train', calculate_batches=args.loss_print_every_n_iter,
summary=summary, save_summary_steps=epoch_size,
batch_size=train_batch_size, loss_key='loss')
#training...(epoch times = epoch_size)
for i in range(epoch_size):
TrainNet().async_get(metric.metric_cb(epoch, i))
if args.val_data_dir:
#config callback func during testing
metric = Metric(desc='validation', calculate_batches=num_val_steps, summary=summary,
save_summary_steps=num_val_steps, batch_size=val_batch_size)
#tesing
for i in range(num_val_steps):
InferenceNet().async_get(metric.metric_cb(epoch, i, args=args, log_file=log_file))
if epoch % args.model_save_every_n_epoch == 0:
snapshot.save('epoch_{}'.format(epoch))
flow.sync_default_session()
#save last_snapeshot and model weight
snapshot.save('last')
flow.sync_default_session()
weights_profile_path = os.path.join(args.model_save_dir, "weights_profile_path")
modelWeight.save(weights_profile_path)
if __name__ == "__main__":
os.system("rm -rf {0}".format(args.model_save_dir))
main()
| [
"oneflow.config.collective_boxing.nccl_fusion_threshold_mb",
"oneflow.sync_default_session",
"oneflow.config.collective_boxing.nccl_fusion_all_reduce_use_buffer",
"oneflow.one_hot",
"oneflow.env.log_dir",
"oneflow.config.enable_debug_mode",
"oneflow.nn.softmax_cross_entropy_with_logits",
"oneflow.nn.s... | [((1192, 1212), 'util.config.get_parser', 'configs.get_parser', ([], {}), '()\n', (1210, 1212), True, 'import util.config as configs\n'), ((1240, 1264), 'util.config.print_args', 'configs.print_args', (['args'], {}), '(args)\n', (1258, 1264), True, 'import util.config as configs\n'), ((1499, 1546), 'math.ceil', 'math.ceil', (['(args.num_examples / train_batch_size)'], {}), '(args.num_examples / train_batch_size)\n', (1508, 1546), False, 'import math\n'), ((1926, 1975), 'oneflow.config.gpu_device_num', 'flow.config.gpu_device_num', (['args.gpu_num_per_node'], {}), '(args.gpu_num_per_node)\n', (1952, 1975), True, 'import oneflow as flow\n'), ((1976, 2011), 'oneflow.config.enable_debug_mode', 'flow.config.enable_debug_mode', (['(True)'], {}), '(True)\n', (2005, 2011), True, 'import oneflow as flow\n'), ((2040, 2097), 'oneflow.config.collective_boxing.nccl_fusion_threshold_mb', 'flow.config.collective_boxing.nccl_fusion_threshold_mb', (['(8)'], {}), '(8)\n', (2094, 2097), True, 'import oneflow as flow\n'), ((2102, 2172), 'oneflow.config.collective_boxing.nccl_fusion_all_reduce_use_buffer', 'flow.config.collective_boxing.nccl_fusion_all_reduce_use_buffer', (['(False)'], {}), '(False)\n', (2165, 2172), True, 'import oneflow as flow\n'), ((2296, 2407), 'oneflow.one_hot', 'flow.one_hot', (['labels'], {'depth': 'classes', 'dtype': 'dtype', 'on_value': '(1 - eta + eta / classes)', 'off_value': '(eta / classes)'}), '(labels, depth=classes, dtype=dtype, on_value=1 - eta + eta /\n classes, off_value=eta / classes)\n', (2308, 2407), True, 'import oneflow as flow\n'), ((2508, 2581), 'util.util.LoadCfg', 'LoadCfg', ([], {'args': 'args', 'model_load_dir': 'args.model_load_dir', 'load_type': '"""train"""'}), "(args=args, model_load_dir=args.model_load_dir, load_type='train')\n", (2515, 2581), False, 'from util.util import Snapshot, Summary, InitNodes, Metric, LoadCfg, LoadData\n'), ((2603, 2626), 'util.util.LoadData', 'LoadData', (['args', '"""train"""'], {}), 
"(args, 'train')\n", (2611, 2626), False, 'from util.util import Snapshot, Summary, InitNodes, Metric, LoadCfg, LoadData\n'), ((3544, 3570), 'oneflow.losses.add_loss', 'flow.losses.add_loss', (['loss'], {}), '(loss)\n', (3564, 3570), True, 'import oneflow as flow\n'), ((3589, 3612), 'oneflow.nn.softmax', 'flow.nn.softmax', (['logits'], {}), '(logits)\n', (3604, 3612), True, 'import oneflow as flow\n'), ((2458, 2480), 'util.job_function_util.get_train_config', 'get_train_config', (['args'], {}), '(args)\n', (2474, 2480), False, 'from util.job_function_util import get_train_config, get_val_config\n'), ((3887, 3959), 'util.util.LoadCfg', 'LoadCfg', ([], {'args': 'args', 'model_load_dir': 'args.model_load_dir', 'load_type': '"""test"""'}), "(args=args, model_load_dir=args.model_load_dir, load_type='test')\n", (3894, 3959), False, 'from util.util import Snapshot, Summary, InitNodes, Metric, LoadCfg, LoadData\n'), ((3981, 4003), 'util.util.LoadData', 'LoadData', (['args', '"""test"""'], {}), "(args, 'test')\n", (3989, 4003), False, 'from util.util import Snapshot, Summary, InitNodes, Metric, LoadCfg, LoadData\n'), ((4466, 4489), 'oneflow.nn.softmax', 'flow.nn.softmax', (['logits'], {}), '(logits)\n', (4481, 4489), True, 'import oneflow as flow\n'), ((3835, 3855), 'util.job_function_util.get_val_config', 'get_val_config', (['args'], {}), '(args)\n', (3849, 3855), False, 'from util.job_function_util import get_train_config, get_val_config\n'), ((4588, 4603), 'util.util.InitNodes', 'InitNodes', (['args'], {}), '(args)\n', (4597, 4603), False, 'from util.util import Snapshot, Summary, InitNodes, Metric, LoadCfg, LoadData\n'), ((4609, 4638), 'oneflow.env.grpc_use_no_signal', 'flow.env.grpc_use_no_signal', ([], {}), '()\n', (4636, 4638), True, 'import oneflow as flow\n'), ((4643, 4673), 'oneflow.env.log_dir', 'flow.env.log_dir', (['args.log_dir'], {}), '(args.log_dir)\n', (4659, 4673), True, 'import oneflow as flow\n'), ((4689, 4716), 'util.util.Summary', 'Summary', 
(['args.log_dir', 'args'], {}), '(args.log_dir, args)\n', (4696, 4716), False, 'from util.util import Snapshot, Summary, InitNodes, Metric, LoadCfg, LoadData\n'), ((4734, 4784), 'util.util.Snapshot', 'Snapshot', (['args.model_save_dir', 'args.model_load_dir'], {}), '(args.model_save_dir, args.model_load_dir)\n', (4742, 4784), False, 'from util.util import Snapshot, Summary, InitNodes, Metric, LoadCfg, LoadData\n'), ((6168, 6195), 'oneflow.sync_default_session', 'flow.sync_default_session', ([], {}), '()\n', (6193, 6195), True, 'import oneflow as flow\n'), ((6223, 6280), 'os.path.join', 'os.path.join', (['args.model_save_dir', '"""weights_profile_path"""'], {}), "(args.model_save_dir, 'weights_profile_path')\n", (6235, 6280), False, 'import os\n'), ((6285, 6323), 'util.model_weights.modelWeight.save', 'modelWeight.save', (['weights_profile_path'], {}), '(weights_profile_path)\n', (6301, 6323), False, 'from util.model_weights import modelWeight\n'), ((3177, 3268), 'oneflow.nn.softmax_cross_entropy_with_logits', 'flow.nn.softmax_cross_entropy_with_logits', (['one_hot_labels', 'logits'], {'name': '"""softmax_loss"""'}), "(one_hot_labels, logits, name=\n 'softmax_loss')\n", (3218, 3268), True, 'import oneflow as flow\n'), ((3289, 3379), 'oneflow.nn.sparse_softmax_cross_entropy_with_logits', 'flow.nn.sparse_softmax_cross_entropy_with_logits', (['labels', 'logits'], {'name': '"""softmax_loss"""'}), "(labels, logits, name=\n 'softmax_loss')\n", (3337, 3379), True, 'import oneflow as flow\n'), ((5166, 5337), 'util.util.Metric', 'Metric', ([], {'desc': '"""train"""', 'calculate_batches': 'args.loss_print_every_n_iter', 'summary': 'summary', 'save_summary_steps': 'epoch_size', 'batch_size': 'train_batch_size', 'loss_key': '"""loss"""'}), "(desc='train', calculate_batches=args.loss_print_every_n_iter,\n summary=summary, save_summary_steps=epoch_size, batch_size=\n train_batch_size, loss_key='loss')\n", (5172, 5337), False, 'from util.util import Snapshot, Summary, InitNodes, 
Metric, LoadCfg, LoadData\n'), ((5622, 5762), 'util.util.Metric', 'Metric', ([], {'desc': '"""validation"""', 'calculate_batches': 'num_val_steps', 'summary': 'summary', 'save_summary_steps': 'num_val_steps', 'batch_size': 'val_batch_size'}), "(desc='validation', calculate_batches=num_val_steps, summary=summary,\n save_summary_steps=num_val_steps, batch_size=val_batch_size)\n", (5628, 5762), False, 'from util.util import Snapshot, Summary, InitNodes, Metric, LoadCfg, LoadData\n'), ((6068, 6095), 'oneflow.sync_default_session', 'flow.sync_default_session', ([], {}), '()\n', (6093, 6095), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow as flow
from oneflow.python.nn.module import Module
from oneflow.python.oneflow_export import oneflow_export, experimental_api
from oneflow.python.framework.tensor import register_tensor_op
class Norm(Module):
def __init__(self, ord=None, dim=None, keepdim=False) -> None:
super().__init__()
self.ord = ord
self.dim = dim
self.keepdim = keepdim
def _vector_norm(self, x, ord, dim):
if isinstance(ord, str) and ord in ["fro", "nuc"]:
raise ValueError("Norm order {} is not supported for vectors".format(ord))
elif isinstance(ord, float) and ord in [float("inf"), float("-inf")]:
if ord == float("inf"):
return flow.experimental.max(flow.experimental.abs(x), dim=dim)
else:
return flow.experimental.min(flow.experimental.abs(x), dim=dim)
elif isinstance(ord, int):
if ord == 0:
# TODO: fix error when input are all zero vector
return flow.tensor([flow.experimental.argwhere(x).shape[0]])
else:
return flow.experimental.pow(
flow.experimental.sum(
flow.experimental.pow(flow.experimental.abs(x), ord), dim=dim
),
1.0 / ord,
)
else:
raise ValueError("Invalid norm order: {}".format(ord))
def _matrix_norm(self, x, ord, dim):
if isinstance(ord, str) and ord in ["fro", "nuc"]:
if ord == "nuc":
raise NotImplementedError
else:
return flow.experimental.sqrt(
flow.experimental.sum(flow.experimental.square(x), dim=dim)
)
elif isinstance(ord, float) and ord in [float("inf"), float("-inf")]:
if ord == float("inf"):
return flow.experimental.max(
flow.experimental.sum(flow.experimental.abs(x), dim=1)
)
else:
return flow.experimental.min(
flow.experimental.sum(flow.experimental.abs(x), dim=1)
)
elif isinstance(ord, int):
if ord == 1:
return flow.experimental.max(
flow.experimental.sum(flow.experimental.abs(x), dim=0)
)
elif ord == -1:
return flow.experimental.min(
flow.experimental.sum(flow.experimental.abs(x), dim=0)
)
elif ord == 2:
raise NotImplementedError
elif ord == -2:
raise NotImplementedError
else:
raise ValueError(
"Norm order {} is not supported for matrices".format(ord)
)
else:
raise ValueError("Invalid norm order: {}".format(ord))
def _whether_keepdim(self, x):
if self.keepdim == True and self.dim != None:
return flow.experimental.unsqueeze(x, self.dim)
else:
return x
def forward(self, x):
num_axes = len(x.shape)
if self.dim == None and self.ord == None:
res = self._vector_norm(x.reshape((1, -1))[0], ord=2, dim=self.dim)
elif self.dim == None and self.ord != None:
assert (
num_axes <= 2
), "input must be 1-D or 2-D when dim is None and ord is not None"
res = (
self._vector_norm(x, self.ord, self.dim)
if num_axes == 1
else self._matrix_norm(x, self.ord, self.dim)
)
elif isinstance(self.dim, (int, tuple, list)):
if isinstance(self.dim, int):
self.dim = self.dim if self.dim >= 0 else self.dim + num_axes
assert 0 <= self.dim < num_axes, "dim out of range"
res = self._vector_norm(
x, ord=2 if self.ord == None else self.ord, dim=self.dim
)
else:
temp = list(self.dim) if isinstance(self.dim, tuple) else self.dim
for i in range(len(temp)):
temp[i] = temp[i] if temp[i] >= 0 else temp[i] + num_axes
assert 0 <= temp[i] < num_axes, "dim out of range"
self.dim = temp
res = self._matrix_norm(
x, ord="fro" if self.ord == None else self.ord, dim=self.dim
)
else:
raise ValueError("Invalid dimension: {}".format(self.dim))
return self._whether_keepdim(res)
@oneflow_export("linalg.norm")
@experimental_api
def norm_op(input, ord=None, dim=None, keepdim=False):
r"""linalg.norm(input, ord=None, dim=None, keepdim=False, *, out=None) -> Tensor
Returns the matrix norm or vector norm of a given tensor.
This function can calculate one of eight different types of matrix norms, or one
of an infinite number of vector norms, depending on both the number of reduction
dimensions and the value of the `ord` parameter.
Args:
input (Tensor): The input tensor. If dim is None, input must be 1-D or 2-D, unless :attr:`ord`
is None. If both :attr:`dim` and :attr:`ord` are None, the 2-norm of the input flattened to 1-D
will be returned. Its data type must be either a floating point or complex type. For complex
inputs, the norm is calculated on of the absolute values of each element. If the input is
complex and neither :attr:`dtype` nor :attr:`out` is specified, the result's data type will
be the corresponding floating point type (e.g. float if :attr:`input` is complexfloat).
ord (int, float, inf, -inf, 'fro', 'nuc', optional): The order of norm.
inf refers to :attr:`float('inf')`, numpy's :attr:`inf` object, or any equivalent object.
The following norms can be calculated:
===== ============================ ==========================
ord norm for matrices norm for vectors
===== ============================ ==========================
None Frobenius norm 2-norm
'fro' Frobenius norm -- not supported --
'nuc' -- not supported yet -- -- not supported --
inf max(sum(abs(x), dim=1)) max(abs(x))
-inf min(sum(abs(x), dim=1)) min(abs(x))
0 -- not supported -- sum(x != 0)
1 max(sum(abs(x), dim=0)) as below
-1 min(sum(abs(x), dim=0)) as below
2 -- not supported yet -- as below
-2 -- not supported yet -- as below
other -- not supported -- sum(abs(x)**ord)**(1./ord)
===== ============================ ==========================
Default: ``None``
dim (int, 2-tuple of ints, 2-list of ints, optional): If :attr:`dim` is an int,
vector norm will be calculated over the specified dimension. If :attr:`dim`
is a 2-tuple of ints, matrix norm will be calculated over the specified
dimensions. If :attr:`dim` is None, matrix norm will be calculated
when the input tensor has two dimensions, and vector norm will be
calculated when the input tensor has one dimension. Default: ``None``
keepdim (bool, optional): If set to True, the reduced dimensions are retained
in the result as dimensions with size one. Default: ``False``
out (Tensor, optional): The output tensor.
Examples::
>>> import oneflow.experimental as flow
>>> from oneflow.experimental import linalg as LA
>>> import numpy as np
>>> flow.enable_eager_execution()
>>> a = flow.tensor(np.arange(9, dtype=np.float32) - 4)
>>> a
tensor([-4., -3., -2., -1., 0., 1., 2., 3., 4.], dtype=oneflow.float32)
>>> b = a.reshape((3, 3))
>>> b
tensor([[-4., -3., -2.],
[-1., 0., 1.],
[ 2., 3., 4.]], dtype=oneflow.float32)
>>> LA.norm(a)
tensor([7.746], dtype=oneflow.float32)
>>> LA.norm(b)
tensor([7.746], dtype=oneflow.float32)
>>> LA.norm(b, 'fro')
tensor([7.746], dtype=oneflow.float32)
>>> LA.norm(a, float('inf'))
tensor([4.], dtype=oneflow.float32)
>>> LA.norm(b, float('inf'))
tensor([9.], dtype=oneflow.float32)
>>> LA.norm(a, -float('inf'))
tensor([0.], dtype=oneflow.float32)
>>> LA.norm(b, -float('inf'))
tensor([2.], dtype=oneflow.float32)
>>> LA.norm(a, 1)
tensor([20.], dtype=oneflow.float32)
>>> LA.norm(b, 1)
tensor([7.], dtype=oneflow.float32)
>>> LA.norm(a, -1)
tensor([0.], dtype=oneflow.float32)
>>> LA.norm(b, -1)
tensor([6.], dtype=oneflow.float32)
>>> LA.norm(a, 2)
tensor([7.746], dtype=oneflow.float32)
>>> LA.norm(a, -2)
tensor([0.], dtype=oneflow.float32)
>>> LA.norm(a, 3)
tensor([5.848], dtype=oneflow.float32)
>>> LA.norm(a, -3)
tensor([0.], dtype=oneflow.float32)
Using the :attr:`dim` argument to compute vector norms::
>>> c = flow.tensor([[1., 2., 3.],
... [-1, 1, 4]])
>>> LA.norm(c, dim=0)
tensor([1.4142, 2.2361, 5. ], dtype=oneflow.float32)
>>> LA.norm(c, dim=1, keepdim = True)
tensor([[3.7417],
[4.2426]], dtype=oneflow.float32)
>>> LA.norm(c, ord=1, dim=1)
tensor([6., 6.], dtype=oneflow.float32)
Using the :attr:`dim` argument to compute matrix norms::
>>> m = flow.tensor(np.arange(8, dtype=np.float32)).reshape((2, 2, 2))
>>> LA.norm(m, dim=(1,2))
tensor([ 3.7417, 11.225 ], dtype=oneflow.float32)
"""
return Norm(ord, dim, keepdim)(input)
@register_tensor_op("norm")
@experimental_api
def norm_tensor_op(input, ord=None, dim=None, keepdim=False):
r"""
See :func:`oneflow.experimental.linalg.norm.`
"""
return Norm(ord, dim, keepdim)(input)
# Run this module's doctest examples when executed as a script;
# raise_on_error makes the first failing example abort immediately.
if __name__ == "__main__":
    import doctest

    doctest.testmod(raise_on_error=True)
| [
"oneflow.experimental.unsqueeze",
"oneflow.experimental.abs",
"oneflow.python.framework.tensor.register_tensor_op",
"oneflow.experimental.argwhere",
"oneflow.python.oneflow_export.oneflow_export",
"oneflow.experimental.square"
] | [((5214, 5243), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""linalg.norm"""'], {}), "('linalg.norm')\n", (5228, 5243), False, 'from oneflow.python.oneflow_export import oneflow_export, experimental_api\n'), ((10663, 10689), 'oneflow.python.framework.tensor.register_tensor_op', 'register_tensor_op', (['"""norm"""'], {}), "('norm')\n", (10681, 10689), False, 'from oneflow.python.framework.tensor import register_tensor_op\n'), ((10932, 10968), 'doctest.testmod', 'doctest.testmod', ([], {'raise_on_error': '(True)'}), '(raise_on_error=True)\n', (10947, 10968), False, 'import doctest\n'), ((3607, 3647), 'oneflow.experimental.unsqueeze', 'flow.experimental.unsqueeze', (['x', 'self.dim'], {}), '(x, self.dim)\n', (3634, 3647), True, 'import oneflow as flow\n'), ((1338, 1362), 'oneflow.experimental.abs', 'flow.experimental.abs', (['x'], {}), '(x)\n', (1359, 1362), True, 'import oneflow as flow\n'), ((1436, 1460), 'oneflow.experimental.abs', 'flow.experimental.abs', (['x'], {}), '(x)\n', (1457, 1460), True, 'import oneflow as flow\n'), ((2298, 2325), 'oneflow.experimental.square', 'flow.experimental.square', (['x'], {}), '(x)\n', (2322, 2325), True, 'import oneflow as flow\n'), ((2556, 2580), 'oneflow.experimental.abs', 'flow.experimental.abs', (['x'], {}), '(x)\n', (2577, 2580), True, 'import oneflow as flow\n'), ((2713, 2737), 'oneflow.experimental.abs', 'flow.experimental.abs', (['x'], {}), '(x)\n', (2734, 2737), True, 'import oneflow as flow\n'), ((2912, 2936), 'oneflow.experimental.abs', 'flow.experimental.abs', (['x'], {}), '(x)\n', (2933, 2936), True, 'import oneflow as flow\n'), ((1826, 1850), 'oneflow.experimental.abs', 'flow.experimental.abs', (['x'], {}), '(x)\n', (1847, 1850), True, 'import oneflow as flow\n'), ((3079, 3103), 'oneflow.experimental.abs', 'flow.experimental.abs', (['x'], {}), '(x)\n', (3100, 3103), True, 'import oneflow as flow\n'), ((1632, 1661), 'oneflow.experimental.argwhere', 'flow.experimental.argwhere', (['x'], 
{}), '(x)\n', (1658, 1661), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import oneflow as flow
import oneflow.typing as oft
def test_non_distribute_optimizer(test_case):
    """Smoke-test the non-distributed-optimizer pass on a 2-GPU training job.

    NOTE(review): mutates process-global OneFlow config; the call order
    (config first, then @flow.global_function, then the call) is significant.
    """
    # Request two GPU devices for this test process.
    flow.config.gpu_device_num(2)
    flow.config.enable_debug_mode(True)
    func_config = flow.FunctionConfig()
    # Consistent (single logical) view across the two devices.
    func_config.default_logical_view(flow.scope.consistent_view())
    # Large learning rate so one naive-SGD step visibly changes `w`.
    func_config.train.primary_lr(5)
    func_config.train.model_update_conf(dict(naive_conf={}))
    # The feature under test: keep optimizer state non-distributed.
    func_config.enable_non_distributed_optimizer(True)
    @flow.global_function(func_config)
    def Foo(x: oft.Numpy.Placeholder((2, 10))):
        # Variable broadcast-added to the (2, 10) input; its sum drives the loss.
        w = flow.get_variable("w", (10,), initializer=flow.constant_initializer(100))
        flow.losses.add_loss(x + w)
    # Execute one training step with an all-ones batch.
    Foo(np.ones((2, 10), dtype=np.float32))
def _test_two_job_non_distribute_optimizer(test_case):
    """Like test_non_distribute_optimizer, but with a second (eval) job defined
    first, checking the optimizer pass coexists with a non-training function.

    Leading underscore: currently disabled from test collection.
    """
    flow.config.gpu_device_num(2)
    flow.config.enable_debug_mode(True)
    # Eval job: no train config, just reads the variable back.
    eval_config = flow.FunctionConfig()
    eval_config.default_logical_view(flow.scope.consistent_view())
    @flow.global_function(eval_config)
    def Bar():
        w = flow.get_variable("w", (10,), initializer=flow.constant_initializer(100))
        return w
    # Train job: same configuration as the single-job test above.
    func_config = flow.FunctionConfig()
    func_config.default_logical_view(flow.scope.consistent_view())
    func_config.train.primary_lr(5)
    func_config.train.model_update_conf(dict(naive_conf={}))
    func_config.enable_non_distributed_optimizer(True)
    @flow.global_function(func_config)
    def Foo(x: oft.Numpy.Placeholder((2, 10))):
        # Same "w" name as in Bar: both jobs share one variable.
        w = flow.get_variable("w", (10,), initializer=flow.constant_initializer(100))
        flow.losses.add_loss(x + w)
    Foo(np.ones((2, 10), dtype=np.float32))
def _test_non_distribute_optimizer_var_as_loss(test_case):
    """Edge case: the loss IS the variable itself (no input placeholder).

    Leading underscore: currently disabled from test collection.
    """
    flow.config.gpu_device_num(2)
    flow.config.enable_debug_mode(True)
    func_config = flow.FunctionConfig()
    func_config.default_logical_view(flow.scope.consistent_view())
    func_config.train.primary_lr(5)
    func_config.train.model_update_conf(dict(naive_conf={}))
    func_config.enable_non_distributed_optimizer(True)
    @flow.global_function(func_config)
    def Foo():
        w = flow.get_variable("w", (10,), initializer=flow.constant_initializer(100))
        # Loss depends only on the variable — exercises the gradient path
        # when no job input feeds the loss.
        flow.losses.add_loss(w)
    Foo()
| [
"oneflow.global_function",
"oneflow.typing.Numpy.Placeholder",
"oneflow.constant_initializer",
"oneflow.config.enable_debug_mode",
"oneflow.losses.add_loss",
"oneflow.config.gpu_device_num",
"oneflow.FunctionConfig",
"oneflow.scope.consistent_view"
] | [((713, 742), 'oneflow.config.gpu_device_num', 'flow.config.gpu_device_num', (['(2)'], {}), '(2)\n', (739, 742), True, 'import oneflow as flow\n'), ((747, 782), 'oneflow.config.enable_debug_mode', 'flow.config.enable_debug_mode', (['(True)'], {}), '(True)\n', (776, 782), True, 'import oneflow as flow\n'), ((801, 822), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (820, 822), True, 'import oneflow as flow\n'), ((1048, 1081), 'oneflow.global_function', 'flow.global_function', (['func_config'], {}), '(func_config)\n', (1068, 1081), True, 'import oneflow as flow\n'), ((1358, 1387), 'oneflow.config.gpu_device_num', 'flow.config.gpu_device_num', (['(2)'], {}), '(2)\n', (1384, 1387), True, 'import oneflow as flow\n'), ((1392, 1427), 'oneflow.config.enable_debug_mode', 'flow.config.enable_debug_mode', (['(True)'], {}), '(True)\n', (1421, 1427), True, 'import oneflow as flow\n'), ((1446, 1467), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (1465, 1467), True, 'import oneflow as flow\n'), ((1541, 1574), 'oneflow.global_function', 'flow.global_function', (['eval_config'], {}), '(eval_config)\n', (1561, 1574), True, 'import oneflow as flow\n'), ((1712, 1733), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (1731, 1733), True, 'import oneflow as flow\n'), ((1959, 1992), 'oneflow.global_function', 'flow.global_function', (['func_config'], {}), '(func_config)\n', (1979, 1992), True, 'import oneflow as flow\n'), ((2273, 2302), 'oneflow.config.gpu_device_num', 'flow.config.gpu_device_num', (['(2)'], {}), '(2)\n', (2299, 2302), True, 'import oneflow as flow\n'), ((2307, 2342), 'oneflow.config.enable_debug_mode', 'flow.config.enable_debug_mode', (['(True)'], {}), '(True)\n', (2336, 2342), True, 'import oneflow as flow\n'), ((2361, 2382), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (2380, 2382), True, 'import oneflow as flow\n'), ((2608, 2641), 'oneflow.global_function', 'flow.global_function', 
(['func_config'], {}), '(func_config)\n', (2628, 2641), True, 'import oneflow as flow\n'), ((860, 888), 'oneflow.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (886, 888), True, 'import oneflow as flow\n'), ((1224, 1251), 'oneflow.losses.add_loss', 'flow.losses.add_loss', (['(x + w)'], {}), '(x + w)\n', (1244, 1251), True, 'import oneflow as flow\n'), ((1261, 1295), 'numpy.ones', 'np.ones', (['(2, 10)'], {'dtype': 'np.float32'}), '((2, 10), dtype=np.float32)\n', (1268, 1295), True, 'import numpy as np\n'), ((1505, 1533), 'oneflow.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (1531, 1533), True, 'import oneflow as flow\n'), ((1771, 1799), 'oneflow.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (1797, 1799), True, 'import oneflow as flow\n'), ((2135, 2162), 'oneflow.losses.add_loss', 'flow.losses.add_loss', (['(x + w)'], {}), '(x + w)\n', (2155, 2162), True, 'import oneflow as flow\n'), ((2172, 2206), 'numpy.ones', 'np.ones', (['(2, 10)'], {'dtype': 'np.float32'}), '((2, 10), dtype=np.float32)\n', (2179, 2206), True, 'import numpy as np\n'), ((2420, 2448), 'oneflow.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (2446, 2448), True, 'import oneflow as flow\n'), ((2751, 2774), 'oneflow.losses.add_loss', 'flow.losses.add_loss', (['w'], {}), '(w)\n', (2771, 2774), True, 'import oneflow as flow\n'), ((1097, 1127), 'oneflow.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['(2, 10)'], {}), '((2, 10))\n', (1118, 1127), True, 'import oneflow.typing as oft\n'), ((2008, 2038), 'oneflow.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['(2, 10)'], {}), '((2, 10))\n', (2029, 2038), True, 'import oneflow.typing as oft\n'), ((1184, 1214), 'oneflow.constant_initializer', 'flow.constant_initializer', (['(100)'], {}), '(100)\n', (1209, 1214), True, 'import oneflow as flow\n'), ((1644, 1674), 'oneflow.constant_initializer', 'flow.constant_initializer', (['(100)'], {}), 
'(100)\n', (1669, 1674), True, 'import oneflow as flow\n'), ((2095, 2125), 'oneflow.constant_initializer', 'flow.constant_initializer', (['(100)'], {}), '(100)\n', (2120, 2125), True, 'import oneflow as flow\n'), ((2711, 2741), 'oneflow.constant_initializer', 'flow.constant_initializer', (['(100)'], {}), '(100)\n', (2736, 2741), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import copy
import os
import unittest
from collections import OrderedDict
import numpy as np
import oneflow as flow
import oneflow.unittest
from oneflow.test_utils.automated_test_util import *
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
@flow.unittest.skip_unless_1n1d()
class TestTensor(flow.unittest.TestCase):
@autotest(check_graph=True)
def test_permute_flow_with_random_data(test_case):
device = random_device()
x = random_tensor(ndim=4).to(device)
y = x.permute(
random(0, 4).to(int),
random(0, 4).to(int),
random(0, 4).to(int),
random(0, 4).to(int),
)
return y
@autotest(n=5, check_graph=True)
def test_transpose_tensor_with_random_data(test_case):
device = random_device()
x = random_tensor(ndim=4).to(device)
y = x.transpose(dim0=random(1, 3).to(int), dim1=random(1, 3).to(int))
return y
@autotest(n=5, check_graph=True)
def test_t_tensor_with_random_data(test_case):
device = random_device()
x = random_tensor(
ndim=constant(2).to(int), dim0=random(0, 64), dim1=random(0, 64)
).to(device)
y = x.t()
return y
@autotest(check_graph=True)
def test_T_tensor_with_random_data(test_case):
device = random_device()
x = random_tensor(ndim=random(1, 4)).to(device)
y = x.T
return y
def test_tensor_where(test_case):
x = flow.tensor(
np.array([[-0.462, 0.3139], [0.3898, -0.7197], [0.0478, -0.1657]]),
dtype=flow.float32,
)
y = flow.tensor(np.ones(shape=(3, 2)), dtype=flow.float32)
condition = flow.tensor(np.array([[0, 1], [1, 0], [1, 0]]), dtype=flow.int32)
of_out = condition.where(x, y)
np_out = np.array([[1.0, 0.3139], [0.3898, 1.0], [0.0478, 1.0]])
test_case.assertTrue(np.allclose(of_out.numpy(), np_out))
def test_tensor_equal(test_case):
arr1 = np.random.randint(1, 10, size=(2, 3, 4, 5))
arr2 = np.random.randint(1, 10, size=(2, 3, 4, 5))
input = flow.tensor(arr1, dtype=flow.float32)
other = flow.tensor(arr2, dtype=flow.float32)
of_out = input.eq(other)
np_out = np.equal(arr1, arr2)
test_case.assertTrue(np.allclose(of_out.numpy(), np_out))
def test_tensor_detach(test_case):
shape = (2, 3, 4, 5)
x = flow.tensor(np.random.randn(*shape), dtype=flow.float32, requires_grad=True)
test_case.assertTrue(np.allclose(x.detach().numpy(), x.numpy(), 0.0001, 0.0001))
test_case.assertEqual(x.detach().requires_grad, False)
y = x * 2
z = y.detach()
test_case.assertEqual(z.is_leaf, True)
test_case.assertEqual(z.grad_fn, None)
def _test_cast_tensor_function(test_case):
shape = (2, 3, 4, 5)
np_arr = np.random.randn(*shape).astype(np.float32)
input = flow.tensor(np_arr, dtype=flow.float32)
output = input.cast(flow.int8)
np_out = np_arr.astype(np.int8)
test_case.assertTrue(np.allclose(output.numpy(), np_out))
def _test_sin_tensor_function(test_case, shape, device):
input = flow.Tensor(np.random.randn(2, 3, 4, 5))
of_out = input.sin()
np_out = np.sin(input.numpy())
test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-05, 1e-05))
def test_cos_tensor_function(test_case):
arr = np.random.randn(2, 3, 4, 5)
input = flow.tensor(arr, dtype=flow.float32)
np_out = np.cos(arr)
of_out = input.cos()
test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-05, 1e-05))
def test_std_tensor_function(test_case):
np_arr = np.random.randn(9, 8, 7, 6)
input = flow.Tensor(np_arr)
of_out = input.std(dim=1, unbiased=False, keepdim=False)
np_out = np.std(np_arr, axis=1)
test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-04, 1e-04))
def test_sqrt_tensor_function(test_case):
input_arr = np.random.rand(1, 6, 3, 8)
np_out = np.sqrt(input_arr)
x = flow.Tensor(input_arr)
of_out = x.sqrt()
test_case.assertTrue(
np.allclose(of_out.numpy(), np_out, 1e-05, 1e-05, equal_nan=True)
)
def test_rsqrt_tensor_function(test_case):
np_arr = np.random.rand(3, 2, 5, 7)
np_out = 1 / np.sqrt(np_arr)
x = flow.Tensor(np_arr)
of_out = flow.rsqrt(x)
test_case.assertTrue(
np.allclose(of_out.numpy(), np_out, 1e-05, 1e-05, equal_nan=True)
)
def test_square_tensor_function(test_case):
np_arr = np.random.randn(2, 7, 7, 3)
np_out = np.square(np_arr)
x = flow.Tensor(np_arr)
of_out = x.square()
test_case.assertTrue(
np.allclose(of_out.numpy(), np_out, 1e-05, 1e-05, equal_nan=True)
)
@autotest(check_graph=True)
def test_addmm_tensor_with_random_data(test_case):
device = random_device()
input = random_tensor(ndim=2, dim0=2, dim1=3).to(device)
mat1 = random_tensor(ndim=2, dim0=2, dim1=4).to(device)
mat2 = random_tensor(ndim=2, dim0=4, dim1=3).to(device)
y = input.addmm(
mat1,
mat2,
beta=random().to(float) | nothing(),
alpha=random().to(float) | nothing(),
)
return y
@autotest(check_graph=True)
def test_addmm_broadcast_tensor_with_random_data(test_case):
device = random_device()
input = random_tensor(ndim=2, dim0=1, dim1=1).to(device)
mat1 = random_tensor(ndim=2, dim0=2, dim1=4).to(device)
mat2 = random_tensor(ndim=2, dim0=4, dim1=3).to(device)
y = input.addmm(
mat1,
mat2,
beta=random().to(float) | nothing(),
alpha=random().to(float) | nothing(),
)
return y
@autotest(check_graph=True)
def test_clamp_tensor_with_random_data(test_case):
device = random_device()
input = random_tensor(low=-2, high=2).to(device)
y = input.clamp(
min=random(low=-1, high=-0.5).to(float),
max=random(low=0.5, high=1).to(float),
)
return y
@autotest(check_graph=True)
def test_clamp_inplace_tensor_with_random_data(test_case):
device = random_device()
x = random_tensor(low=-2, high=2).to(device)
y = x + 1
y.clamp_(
min=random(low=-1, high=-0.5).to(float),
max=random(low=0.5, high=1).to(float),
)
return y
@autotest(check_graph=True, auto_backward=False)
def test_clamp_inplace_tensor_no_grad_with_random_data(test_case):
device = random_device()
x = random_tensor(low=-2, high=2).to(device)
y = x + 1
y.clamp_(
min=random(low=-1, high=-0.5).to(float),
max=random(low=0.5, high=1).to(float),
)
return y
@autotest(check_graph=True)
def test_clamp_minnone_tensor_with_random_data(test_case):
device = random_device()
input = random_tensor(low=-2, high=2).to(device)
y = input.clamp(
min=random(low=-1, high=-0.5).to(float) | nothing(),
max=random(low=0.5, high=1).to(float),
)
return y
@flow.unittest.skip_unless_1n1d()
@autotest(check_graph=True, auto_backward=False)
def test_clamp_minnone_tensor_no_grad_with_random_data(test_case):
device = random_device()
input = random_tensor(low=-2, high=2).to(device)
y = input.clamp(
min=random(low=-1, high=-0.5).to(float) | nothing(),
max=random(low=0.5, high=1).to(float),
)
return y
@autotest(check_graph=True)
def test_clamp_inplace_minnone_tensor_with_random_data(test_case):
device = random_device()
x = random_tensor(low=-2, high=2).to(device)
y = x + 1
y.clamp_(
min=random(low=-1, high=-0.5).to(float) | nothing(),
max=random(low=0.5, high=1).to(float),
)
return y
@autotest(check_graph=True, auto_backward=False)
def test_clamp_inplace_minnone_tensor_no_grad_with_random_data(test_case):
device = random_device()
x = random_tensor(low=-2, high=2).to(device)
y = x + 1
y.clamp_(
min=random(low=-1, high=-0.5).to(float) | nothing(),
max=random(low=0.5, high=1).to(float),
)
return y
@autotest(check_graph=True)
def test_clamp_maxnone_tensor_with_random_data(test_case):
device = random_device()
input = random_tensor(low=-2, high=2).to(device)
y = input.clamp(
min=random(low=-1, high=-0.5).to(float),
max=random(low=0.5, high=1).to(float) | nothing(),
)
return y
@autotest(check_graph=True)
def test_clamp_inplace_maxnone_tensor_with_random_data(test_case):
device = random_device()
x = random_tensor(low=-2, high=2).to(device)
y = x + 1
y.clamp_(
min=random(low=-1, high=-0.5).to(float),
max=random(low=0.5, high=1).to(float) | nothing(),
)
return y
@autotest(check_graph=True)
def test_clip_tensor_with_random_data(test_case):
device = random_device()
input = random_tensor(low=-2, high=2).to(device)
y = input.clip(
min=random(low=-1, high=-0.5).to(float),
max=random(low=0.5, high=1).to(float),
)
return y
@autotest(check_graph=True)
def test_clip_inplace_tensor_with_random_data(test_case):
device = random_device()
x = random_tensor(low=-2, high=2).to(device)
y = x + 1
y.clip_(
min=random(low=-1, high=-0.5).to(float),
max=random(low=0.5, high=1).to(float),
)
return y
@autotest(check_graph=True)
def test_clip_minnone_tensor_with_random_data(test_case):
device = random_device()
input = random_tensor(low=-2, high=2).to(device)
y = input.clip(
min=random(low=-1, high=-0.5).to(float) | nothing(),
max=random(low=0.5, high=1).to(float),
)
return y
@autotest(check_graph=True)
def test_clip_inplace_maxnone_tensor_with_random_data(test_case):
device = random_device()
x = random_tensor(low=-2, high=2).to(device)
y = x + 1
y.clip_(
min=random(low=-1, high=-0.5).to(float),
max=random(low=0.5, high=1).to(float) | nothing(),
)
return y
@autotest(check_graph=True)
def test_clip_maxnone_tensor_with_random_data(test_case):
device = random_device()
input = random_tensor().to(device)
y = input.clip(
min=random(low=-1, high=-0.5).to(float),
max=random(low=0.5, high=1).to(float) | nothing(),
)
return y
@autotest(check_graph=True)
def test_clip_inplace_maxnone_tensor_with_random_data(test_case):
device = random_device()
x = random_tensor(low=-2, high=2).to(device)
y = x + 1
y.clip_(
min=random(low=-1, high=-0.5).to(float),
max=random(low=0.5, high=1).to(float) | nothing(),
)
return y
@autotest(check_graph=True)
def test_ceil_tensor_with_random_data(test_case):
device = random_device()
input = random_tensor().to(device)
y = len(input)
return y
@autotest(check_graph=True)
def test_ceil_tensor_with_random_data(test_case):
device = random_device()
input = random_tensor().to(device)
y = input.ceil()
return y
@autotest(check_graph=True)
def test_expm1_tensor_with_random_data(test_case):
device = random_device()
input = random_tensor().to(device)
y = input.expm1()
return y
@autotest(check_graph=True)
def test_floor_tensor_with_random_data(test_case):
device = random_device()
x = random_tensor().to(device)
y = x.floor()
return y
@autotest(check_graph=True)
def test_tensor_var_all_dim_with_random_data(test_case):
device = random_device()
x = random_tensor().to(device)
y = x.var()
return y
# TODO(): 'var backward' is composed of several other ops,
# reducemean doesn't support 0-shape for now
@autotest(n=5, auto_backward=False, check_graph=True)
def test_tensor_var_one_dim_with_random_data(test_case):
device = random_device()
x = random_tensor(ndim=4).to(device)
y = x.var(
dim=random(low=0, high=4).to(int),
unbiased=random().to(bool),
keepdim=random().to(bool),
)
return y
    def test_norm_tensor_function(test_case):
        """Tensor.norm matches np.linalg.norm for fro, ord=2/dim, and inf/keepdim."""
        input = flow.tensor(
            np.array([[-4.0, -3.0, -2.0], [-1.0, 0.0, 1.0], [2.0, 3.0, 4.0]]),
            dtype=flow.float32,
        )
        # Frobenius norm of the whole matrix.
        of_out_1 = input.norm("fro")
        np_out_1 = np.linalg.norm(input.numpy(), "fro")
        # 2-norm reduced along dim 1 (per-row vector norm).
        of_out_2 = input.norm(2, dim=1)
        np_out_2 = np.linalg.norm(input.numpy(), ord=2, axis=1)
        # inf-norm along dim 0, keeping the reduced dimension.
        of_out_3 = input.norm(float("inf"), dim=0, keepdim=True)
        np_out_3 = np.linalg.norm(
            input.numpy(), ord=float("inf"), axis=0, keepdims=True
        )
        test_case.assertTrue(np.allclose(of_out_1.numpy(), np_out_1, 1e-05, 1e-05))
        test_case.assertTrue(np.allclose(of_out_2.numpy(), np_out_2, 1e-05, 1e-05))
        test_case.assertTrue(np.allclose(of_out_3.numpy(), np_out_3, 1e-05, 1e-05))
@autotest(check_graph=True)
def test_pow_tensor_with_random_data(test_case):
device = random_device()
x = random_tensor().to(device)
y = random().to(float)
z = x.pow(y)
return z
@autotest(check_graph=True)
def test_atanh_tensor_with_random_data(test_case):
device = random_device()
x = random_tensor(low=-0.5, high=0.49).to(device)
y = x.atanh()
return y
@autotest(check_graph=True)
def test_acos_tensor_with_random_data(test_case):
device = random_device()
x = random_tensor(low=-0.5, high=0.49).to(device)
y = x.acos()
return y
@autotest(check_graph=True)
def test_acosh_tensor_with_random_data(test_case):
device = random_device()
x = random_tensor(low=2.0, high=3.0).to(device)
y = x.acosh()
return y
@autotest(check_graph=True)
def test_atan_tensor_with_random_data(test_case):
device = random_device()
x = random_tensor().to(device)
y = x.atan()
return y
@autotest(check_graph=True)
def test_arctan_tensor_with_random_data(test_case):
device = random_device()
x = random_tensor().to(device)
y = x.arctan()
return y
@autotest(check_graph=True)
def test_tan_tensor_with_random_data(test_case):
device = random_device()
x = random_tensor().to(device)
y = x.tan()
return y
@autotest(check_graph=True)
def test_tan2_tensor_with_random_data(test_case):
device = random_device()
x = random_tensor(ndim=2, dim1=3).to(device)
y = random_tensor(ndim=2, dim1=3).to(device)
z = x.atan2(y)
return z
@autotest(check_graph=True)
def test_arctanh_tensor_with_random_data(test_case):
device = random_device()
x = random_tensor(low=-0.5, high=0.5).to(device)
y = x.arctanh()
return y
# Not check graph because of one reason:
# Reason 1, lazy tensor cannot call .numpy(). tensor.numpy() is not allowed to called in nn.Graph.build(*args) or called by lazy tensor.
# Please refer to File "python/oneflow/nn/modules/nonzero.py", line 29, in nonzero_op.
@autotest(n=5, auto_backward=False, check_graph="ValidatedFlase")
def test_tensor_nonzero_with_random_data(test_case):
device = random_device()
ndim = random(2, 6).to(int)
x = random_tensor(ndim=ndim).to(device)
y = x.nonzero()
return y
@unittest.skipIf(
not flow.unittest.env.eager_execution_enabled(),
"numpy doesn't work in lazy mode",
)
def test_tensor_fmod(test_case):
x = flow.Tensor(np.random.uniform(-100, 100, (5, 5)))
x.requires_grad = True
y = np.random.uniform(-10, 10)
of_out = x.fmod(y)
np_out = np.sign(x.numpy()) * np.abs(np.fmod(x.numpy(), y))
test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 0.0001, 0.0001))
of_out = of_out.sum()
of_out.backward()
test_case.assertTrue(
np.allclose(x.grad.numpy(), np.ones((5, 5)), 0.0001, 0.0001)
)
@unittest.skipIf(
not flow.unittest.env.eager_execution_enabled(),
"numpy doesn't work in lazy mode",
)
def test_magic_fmod(test_case):
x = flow.Tensor(np.random.uniform(-100, 100, (5, 5)))
x.requires_grad = True
y = np.random.uniform(-10, 10)
of_out = x % y
np_out = np.sign(x.numpy()) * np.abs(np.fmod(x.numpy(), y))
test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 0.0001, 0.0001))
of_out = of_out.sum()
of_out.backward()
test_case.assertTrue(
np.allclose(x.grad.numpy(), np.ones((5, 5)), 0.0001, 0.0001)
)
    def test_tensor_mish(test_case):
        """Tensor.mish forward and backward against a closed-form NumPy reference.

        NOTE(review): an exact duplicate `test_tensor_mish` is defined later in
        this class and shadows this one, so this definition never runs —
        remove one of the two.
        """
        def np_mish(x):
            # mish(x) = x * tanh(softplus(x)); with f = 1 + e^x,
            # tanh(log(f)) = (f*f - 1) / (f*f + 1).
            f = 1 + np.exp(x)
            y = x * ((f * f - 1) / (f * f + 1))
            # Analytic derivative of the expression above w.r.t. x.
            y_grad = (f * f - 1) / (f * f + 1) + x * (4 * f * (f - 1)) / (
                (f * f + 1) * (f * f + 1)
            )
            return [y, y_grad]
        np_input = np.random.randn(2, 4, 5, 6)
        of_input = flow.tensor(np_input, dtype=flow.float32, requires_grad=True)
        of_out = of_input.mish()
        (np_out, np_grad) = np_mish(np_input)
        test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-05, 1e-05))
        # Reduce to a scalar so backward() needs no explicit gradient.
        of_out = of_out.sum()
        of_out.backward()
        test_case.assertTrue(np.allclose(of_input.grad.numpy(), np_grad, 1e-05, 1e-05))
def test_tensor_triu(test_case):
def np_triu(x, diagonal):
y = np.triu(x, diagonal)
y_grad = np.triu(np.ones_like(x), diagonal)
return [y, y_grad]
diagonal_list = [2, -1]
for diagonal in diagonal_list:
np_input = np.random.randn(2, 4, 6)
of_input = flow.tensor(np_input, dtype=flow.float32, requires_grad=True)
of_out = of_input.triu(diagonal)
(np_out, np_grad) = np_triu(np_input, diagonal)
test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-05, 1e-05))
of_out = of_out.sum()
of_out.backward()
test_case.assertTrue(
np.allclose(of_input.grad.numpy(), np_grad, 1e-05, 1e-05)
)
def test_tensor_grad_assignment(test_case):
np_input = np.random.randn(2, 4, 5, 6)
of_input = flow.tensor(np_input, dtype=flow.float32, requires_grad=True)
of_output = 2 * of_input
of_output = of_output.sum()
of_output.backward()
new_grad = flow.tensor(
np.full(np_input.shape, np.random.randn(1)), dtype=flow.float32
)
of_input.grad = new_grad
test_case.assertTrue(
np.allclose(of_input.grad.detach().numpy(), new_grad.numpy(), 1e-05, 1e-05)
)
of_input.grad = None
test_case.assertTrue(of_input.grad is None)
def test_tensor_grad_assignment_sum(test_case):
np_input = np.random.randn(1, 5, 7, 3)
of_input = flow.tensor(np_input, dtype=flow.float32, requires_grad=True)
of_output = of_input.sum()
of_output.backward()
rand_init = np.random.randn(1)
rand_scale = np.random.randn(1)
new_grad = flow.tensor(np.full(np_input.shape, rand_init), dtype=flow.float32)
of_input.grad = new_grad
of_output = flow.tensor(rand_scale, dtype=flow.float32) * of_input
of_output = of_output.sum()
of_output.backward()
test_case.assertTrue(
np.allclose(
of_input.grad.detach().numpy(),
np.full(np_input.shape, rand_init + rand_scale),
1e-05,
1e-05,
)
)
of_input.grad = of_input.grad * 2
test_case.assertTrue(
np.allclose(
of_input.grad.detach().numpy(),
2 * np.full(np_input.shape, rand_init + rand_scale),
1e-05,
1e-05,
)
)
def test_tensor_mish(test_case):
def np_mish(x):
f = 1 + np.exp(x)
y = x * ((f * f - 1) / (f * f + 1))
y_grad = (f * f - 1) / (f * f + 1) + x * (4 * f * (f - 1)) / (
(f * f + 1) * (f * f + 1)
)
return [y, y_grad]
np_input = np.random.randn(2, 4, 5, 6,)
of_input = flow.tensor(np_input, dtype=flow.float32, requires_grad=True)
of_out = of_input.mish()
np_out, np_grad = np_mish(np_input)
test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-5, 1e-5))
of_out = of_out.sum()
of_out.backward()
test_case.assertTrue(np.allclose(of_input.grad.numpy(), np_grad, 1e-5, 1e-5))
def test_tensor_silu(test_case):
def np_silu(x):
_sig = 1 / (1 + np.exp(-x))
y = x * _sig
y_grad = _sig * (1 + x * (1 - _sig))
return [y, y_grad]
np_input = np.random.randn(2, 4, 5, 6,)
of_input = flow.tensor(np_input, dtype=flow.float32, requires_grad=True)
of_out = of_input.silu()
np_out, np_grad = np_silu(np_input)
test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-5, 1e-5))
of_out = of_out.sum()
of_out.backward()
test_case.assertTrue(np.allclose(of_input.grad.numpy(), np_grad, 1e-5, 1e-5))
def test_tensor_selu(test_case):
_scale = 1.0507009873554804934193349852946
_alpha = 1.6732632423543772848170429916717
def np_selu(x):
y = np.where(x < 0, _scale * _alpha * (np.exp(x) - 1), _scale * x)
y_grad = np.where(x < 0, _scale * _alpha * np.exp(x), _scale)
return [y, y_grad]
np_input = np.random.randn(2, 4, 5, 6,)
of_input = flow.tensor(np_input, dtype=flow.float32, requires_grad=True)
of_out = of_input.selu()
np_out, np_grad = np_selu(np_input)
test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-5, 1e-5))
of_out = of_out.sum()
of_out.backward()
test_case.assertTrue(np.allclose(of_input.grad.numpy(), np_grad, 1e-5, 1e-5))
@unittest.skip("still have error in ci")
def test_tensor_softsign(test_case):
def np_softsign(x):
y = x / (1 + np.abs(x))
y_grad = 1 / np.square(1 + np.abs(x))
return [y, y_grad]
np_input = np.random.randn(2, 4, 5, 6,)
of_input = flow.tensor(np_input, dtype=flow.float32, requires_grad=True)
of_out = of_input.softsign()
np_out, np_grad = np_softsign(np_input)
test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-5, 1e-5))
of_out = of_out.sum()
of_out.backward()
test_case.assertTrue(np.allclose(of_input.grad.numpy(), np_grad, 1e-5, 1e-5))
@autotest(auto_backward=False, check_graph=True)
def test_eq_tensor_with_random_data(test_case):
device = random_device()
shape = random_tensor().oneflow.shape
x = random_tensor(len(shape), *shape, requires_grad=False).to(device)
y = random_tensor(len(shape), *shape, requires_grad=False).to(device)
return x.eq(y)
@autotest(auto_backward=False, check_graph=True)
def test_eq_tensor_with_same_random_data(test_case):
device = random_device()
shape = random_tensor().oneflow.shape
x = random_tensor(len(shape), *shape, requires_grad=False).to(device)
return x.eq(x)
@autotest(check_graph=True)
def test_erf_tensor_with_random_data(test_case):
device = random_device()
x = random_tensor().to(device)
return x.erf()
@autotest(check_graph=True)
def test_erfc_tensor_with_random_data(test_case):
device = random_device()
x = random_tensor().to(device)
return x.erfc()
@autotest(
check_graph=True, auto_backward=False
) # Todo: After add gradient func, you should set `auto_backward` as True
def test_erfinv_tensor_with_random_data(test_case):
device = random_device()
x = random_tensor(low=-1, high=1).to(device).requires_grad_(False)
return x.erfinv()
@autotest(
n=10, check_graph=True, auto_backward=False
) # Todo: After add gradient func, you should set `auto_backward` as True
def test_erfinv_inplace_tensor_with_random_data(test_case):
device = random_device()
x = random_tensor(low=-1, high=1).to(device).requires_grad_(False)
y = x + 1
y.erfinv_()
return y
@autotest(check_graph=True)
def test_exp_tensor_with_random_data(test_case):
device = random_device()
x = random_tensor().to(device)
return x.exp()
@autotest(check_graph=True)
def test_round_tensor_with_random_data(test_case):
device = random_device()
x = random_tensor().to(device)
return x.round()
@autotest(check_graph=True)
def test_tensor_diag_one_dim(test_case):
device = random_device()
x = random_tensor(ndim=1, dim0=random()).to(device)
return x.diag()
@autotest(check_graph=True)
def test_flow_tensor_expand_with_random_data(test_case):
random_expand_size = random(1, 6).to(int).value()
x = random_tensor(ndim=5, dim0=1, dim1=1, dim2=1, dim3=1, dim4=1)
ndim = 5
expand_size = random_expand_size
dim_size = [1,] * ndim
random_index = random(0, ndim).to(int).value()
dim_size[random_index] = expand_size
return x.expand(*dim_size)
@autotest(check_graph=True)
def test_flow_tensor_expand_with_random_data(test_case):
random_expand_size = random(1, 6).to(int).value()
x = random_tensor(ndim=5, dim0=1, dim1=1, dim2=1, dim3=1, dim4=1)
ndim = 5
expand_size = random_expand_size
dim_size = [1,] * ndim
random_index = random(0, ndim).to(int).value()
dim_size[random_index] = expand_size
y = torch.ones(dim_size)
return x.expand_as(y)
@autotest(check_graph=True)
def test_tensor_diag_other_dim(test_case):
device = random_device()
x = random_tensor(ndim=2, dim0=random(), dim1=random()).to(device)
return x.diag()
    @autotest(auto_backward=False, check_graph=True)
    def test_floordiv_elementwise_tensor_with_random_data(test_case):
        # Element-wise floor_divide of two same-shaped (4, 8) tensors;
        # backward is disabled since floor division is not differentiable.
        device = random_device()
        input = random_tensor(ndim=2, dim0=4, dim1=8).to(device)
        other = random_tensor(ndim=2, dim0=4, dim1=8).to(device)
        y = input.floor_divide(other)
        return y
    @autotest(auto_backward=False, check_graph=True)
    def test_scalar_floordiv_tensor_with_random_data(test_case):
        # floor_divide by a random integer scalar (tensor / scalar overload).
        device = random_device()
        input = random_tensor(ndim=2, dim0=4, dim1=8).to(device)
        other = random().to(int)
        y = input.floor_divide(other)
        return y
    @flow.unittest.skip_unless_1n4d()
    def test_construct_consistent_tensor_by_numpy(test_case):
        # Build a global (consistent) tensor from a local numpy array on a
        # 4-GPU placement, split along dim 0: each rank keeps a (1, 4) shard.
        x = np.ones((4, 4), dtype=np.int32)
        placement = flow.placement("cuda", [0, 1, 2, 3])
        y = flow.tensor(
            x,
            dtype=flow.float32,
            placement=placement,
            sbp=[flow.sbp.split(0)],
            requires_grad=False,
        )
        # Explicit dtype overrides the numpy int32.
        test_case.assertTrue(y.dtype == flow.float32)
        test_case.assertTrue(
            np.allclose(y.to_local().numpy(), np.ones((1, 4), dtype=np.float32))
        )
        test_case.assertEqual(y.placement, placement)
        # Without an explicit dtype the tensor keeps numpy's int32.
        y_default_dtype = flow.tensor(
            x, placement=placement, sbp=[flow.sbp.split(0)], requires_grad=False,
        )
        test_case.assertTrue(y_default_dtype.dtype == flow.int32)
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
class TestTensorNumpy(flow.unittest.TestCase):
    """Checks that `.numpy()` gathers a global tensor's full data across ranks.

    NOTE(review): the skip message reads "only test cpu cases" while the
    condition skips the class when ONEFLOW_TEST_CPU_ONLY is set — the message
    looks inverted relative to the condition; confirm the intent.
    """
    @flow.unittest.skip_unless_1n2d()
    def test_1d_sbp_tensor_numpy_1n2d(test_case):
        # Each rank holds [1,2,3,4] + rank: rank0 = [1..4], rank1 = [2..5].
        ori_x = flow.tensor([1, 2, 3, 4]) + flow.env.get_rank()
        placement = flow.env.all_device_placement("cpu")
        # split(0): numpy() concatenates the per-rank shards along dim 0.
        x = ori_x.to_global(placement=placement, sbp=flow.sbp.split(0))
        test_case.assertTrue(np.allclose(x.numpy(), [1, 2, 3, 4, 2, 3, 4, 5]))
        # broadcast: the gathered result matches rank 0's local data.
        x = ori_x.to_global(placement=placement, sbp=flow.sbp.broadcast)
        test_case.assertTrue(np.allclose(x.numpy(), [1, 2, 3, 4]))
        # partial_sum: element-wise sum over both ranks.
        x = ori_x.to_global(placement=placement, sbp=flow.sbp.partial_sum)
        test_case.assertTrue(np.allclose(x.numpy(), [3, 5, 7, 9]))
        # Repeat the same three conversions on a CUDA placement.
        placement = flow.env.all_device_placement("cuda")
        x = ori_x.to_global(placement=placement, sbp=flow.sbp.split(0))
        test_case.assertTrue(np.allclose(x.numpy(), [1, 2, 3, 4, 2, 3, 4, 5]))
        x = ori_x.to_global(placement=placement, sbp=flow.sbp.broadcast)
        test_case.assertTrue(np.allclose(x.numpy(), [1, 2, 3, 4]))
        x = ori_x.to_global(placement=placement, sbp=flow.sbp.partial_sum)
        test_case.assertTrue(np.allclose(x.numpy(), [3, 5, 7, 9]))
    @flow.unittest.skip_unless_1n2d()
    def test_2d_sbp_tensor_numpy_1n2d(test_case):
        # 2-d sbp on a (2, 1) device hierarchy; rank r holds ones + r.
        ori_x = flow.tensor(np.ones((2, 2))) + flow.env.get_rank()
        placement = flow.placement("cuda", [[0], [1]])
        x = ori_x.to_global(
            placement=placement, sbp=[flow.sbp.split(0), flow.sbp.split(1)]
        )
        test_case.assertTrue(np.allclose(x.numpy(), [[1, 1], [1, 1], [2, 2], [2, 2]]))
        x = ori_x.to_global(
            placement=placement, sbp=[flow.sbp.broadcast, flow.sbp.split(0)]
        )
        test_case.assertTrue(np.allclose(x.numpy(), [[1, 1], [1, 1]]))
        x = ori_x.to_global(
            placement=placement, sbp=[flow.sbp.partial_sum, flow.sbp.broadcast]
        )
        test_case.assertTrue(np.allclose(x.numpy(), [[3, 3], [3, 3]]))
    @flow.unittest.skip_unless_1n4d()
    def test_2d_sbp_tensor_numpy_1n4d(test_case):
        # (2, 2) device hierarchy over 4 GPUs; rank r holds ones + r.
        ori_x = flow.tensor(np.ones((2, 2))) + flow.env.get_rank()
        placement = flow.placement("cuda", [[0, 1], [2, 3]])
        x = ori_x.to_global(
            placement=placement, sbp=[flow.sbp.split(0), flow.sbp.split(1)]
        )
        test_case.assertTrue(
            np.allclose(
                x.numpy(), [[1, 1, 2, 2], [1, 1, 2, 2], [3, 3, 4, 4], [3, 3, 4, 4]]
            )
        )
        x = ori_x.to_global(
            placement=placement, sbp=[flow.sbp.split(0), flow.sbp.partial_sum]
        )
        test_case.assertTrue(np.allclose(x.numpy(), [[3, 3], [3, 3], [7, 7], [7, 7]]))
        # TODO: (s0, b) has bug
        # x = ori_x.to_global(placement=placement, sbp=[flow.sbp.split(0), flow.sbp.broadcast])
    @flow.unittest.skip_unless_1n1d()
    @autotest(n=5, check_graph=True)
    def test_tensor_bmm(test_case):
        # Batched matmul: (t, 3, k) @ (t, k, 5) -> (t, 3, 5).
        t = random(1, 5)
        k = random(1, 5)
        input1 = random_tensor(ndim=3, dim0=t, dim1=3, dim2=k)
        input2 = random_tensor(ndim=3, dim0=t, dim1=k, dim2=5)
        of_out = input1.bmm(input2)
        return of_out
    @flow.unittest.skip_unless_1n1d()
    @autotest(n=5, check_graph=True)
    def test_tensor_split(test_case):
        # Split into size-2 chunks along a random dim, then re-concatenate.
        k0 = random(2, 6)
        k1 = random(2, 6)
        k2 = random(2, 6)
        rand_dim = random(0, 3).to(int)
        device = random_device()
        x = random_tensor(ndim=3, dim0=k0, dim1=k1, dim2=k2).to(device)
        res = x.split(2, dim=rand_dim)
        return torch.cat(res, rand_dim)
    @flow.unittest.skip_unless_1n1d()
    @autotest(n=5, check_graph=True)
    def test_tensor_split_sizes(test_case):
        # Split with explicit section sizes (1+2+3+1 == 7 == dim -2's size).
        k0 = random(2, 6)
        k1 = 7
        k2 = random(2, 6)
        device = random_device()
        x = random_tensor(ndim=3, dim0=k0, dim1=k1, dim2=k2).to(device)
        res = x.split([1, 2, 3, 1], dim=-2)
        return torch.cat(res, dim=1)
    @flow.unittest.skip_unless_1n1d()
    @autotest(n=5, check_graph=True)
    def test_tensor_unbind(test_case):
        # Unbind along a random dim of a 4-d tensor, returning a tuple of views.
        device = random_device()
        x = random_tensor(ndim=4).to(device)
        y = x.unbind(random(0, 4).to(int))
        return y
    @flow.unittest.skip_unless_1n1d()
    @autotest(n=5, check_graph=True)
    def test_tensor_swapaxes(test_case):
        device = random_device()
        x = random_tensor(ndim=3).to(device)
        y = x.swapaxes(random(0, 2).to(int), random(0, 2).to(int))
        return y
    @flow.unittest.skip_unless_1n1d()
    @autotest(n=5, check_graph=True)
    def test_tensor_swapdimst(test_case):
        # swapdims is an alias-style API of swapaxes; exercised separately.
        device = random_device()
        x = random_tensor(ndim=3).to(device)
        y = x.swapdims(random(0, 3).to(int), random(0, 3).to(int))
        return y
if __name__ == "__main__":
    # Discover and run every TestCase defined in this module.
    unittest.main()
| [
"oneflow.unittest.skip_unless_1n4d",
"oneflow.tensor",
"oneflow.env.get_rank",
"oneflow.env.all_device_placement",
"oneflow.unittest.skip_unless_1n2d",
"oneflow.rsqrt",
"oneflow.unittest.env.eager_execution_enabled",
"oneflow.placement",
"oneflow.sbp.split",
"oneflow.Tensor",
"oneflow.unittest.s... | [((864, 896), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (894, 896), True, 'import oneflow as flow\n'), ((7967, 7999), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (7997, 7999), True, 'import oneflow as flow\n'), ((23502, 23541), 'unittest.skip', 'unittest.skip', (['"""still have error in ci"""'], {}), "('still have error in ci')\n", (23515, 23541), False, 'import unittest\n'), ((28243, 28275), 'oneflow.unittest.skip_unless_1n4d', 'flow.unittest.skip_unless_1n4d', ([], {}), '()\n', (28273, 28275), True, 'import oneflow as flow\n'), ((804, 838), 'os.getenv', 'os.getenv', (['"""ONEFLOW_TEST_CPU_ONLY"""'], {}), "('ONEFLOW_TEST_CPU_ONLY')\n", (813, 838), False, 'import os\n'), ((29181, 29213), 'oneflow.unittest.skip_unless_1n2d', 'flow.unittest.skip_unless_1n2d', ([], {}), '()\n', (29211, 29213), True, 'import oneflow as flow\n'), ((30320, 30352), 'oneflow.unittest.skip_unless_1n2d', 'flow.unittest.skip_unless_1n2d', ([], {}), '()\n', (30350, 30352), True, 'import oneflow as flow\n'), ((31112, 31144), 'oneflow.unittest.skip_unless_1n4d', 'flow.unittest.skip_unless_1n4d', ([], {}), '()\n', (31142, 31144), True, 'import oneflow as flow\n'), ((31943, 31975), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (31973, 31975), True, 'import oneflow as flow\n'), ((32289, 32321), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (32319, 32321), True, 'import oneflow as flow\n'), ((32705, 32737), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (32735, 32737), True, 'import oneflow as flow\n'), ((33078, 33110), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (33108, 33110), True, 'import oneflow as flow\n'), ((33331, 33363), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), 
'()\n', (33361, 33363), True, 'import oneflow as flow\n'), ((33610, 33642), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (33640, 33642), True, 'import oneflow as flow\n'), ((29070, 29104), 'os.getenv', 'os.getenv', (['"""ONEFLOW_TEST_CPU_ONLY"""'], {}), "('ONEFLOW_TEST_CPU_ONLY')\n", (29079, 29104), False, 'import os\n'), ((33917, 33932), 'unittest.main', 'unittest.main', ([], {}), '()\n', (33930, 33932), False, 'import unittest\n'), ((2443, 2498), 'numpy.array', 'np.array', (['[[1.0, 0.3139], [0.3898, 1.0], [0.0478, 1.0]]'], {}), '([[1.0, 0.3139], [0.3898, 1.0], [0.0478, 1.0]])\n', (2451, 2498), True, 'import numpy as np\n'), ((2619, 2662), 'numpy.random.randint', 'np.random.randint', (['(1)', '(10)'], {'size': '(2, 3, 4, 5)'}), '(1, 10, size=(2, 3, 4, 5))\n', (2636, 2662), True, 'import numpy as np\n'), ((2678, 2721), 'numpy.random.randint', 'np.random.randint', (['(1)', '(10)'], {'size': '(2, 3, 4, 5)'}), '(1, 10, size=(2, 3, 4, 5))\n', (2695, 2721), True, 'import numpy as np\n'), ((2738, 2775), 'oneflow.tensor', 'flow.tensor', (['arr1'], {'dtype': 'flow.float32'}), '(arr1, dtype=flow.float32)\n', (2749, 2775), True, 'import oneflow as flow\n'), ((2792, 2829), 'oneflow.tensor', 'flow.tensor', (['arr2'], {'dtype': 'flow.float32'}), '(arr2, dtype=flow.float32)\n', (2803, 2829), True, 'import oneflow as flow\n'), ((2880, 2900), 'numpy.equal', 'np.equal', (['arr1', 'arr2'], {}), '(arr1, arr2)\n', (2888, 2900), True, 'import numpy as np\n'), ((3565, 3604), 'oneflow.tensor', 'flow.tensor', (['np_arr'], {'dtype': 'flow.float32'}), '(np_arr, dtype=flow.float32)\n', (3576, 3604), True, 'import oneflow as flow\n'), ((4077, 4104), 'numpy.random.randn', 'np.random.randn', (['(2)', '(3)', '(4)', '(5)'], {}), '(2, 3, 4, 5)\n', (4092, 4104), True, 'import numpy as np\n'), ((4121, 4157), 'oneflow.tensor', 'flow.tensor', (['arr'], {'dtype': 'flow.float32'}), '(arr, dtype=flow.float32)\n', (4132, 4157), True, 'import oneflow as 
flow\n'), ((4175, 4186), 'numpy.cos', 'np.cos', (['arr'], {}), '(arr)\n', (4181, 4186), True, 'import numpy as np\n'), ((4359, 4386), 'numpy.random.randn', 'np.random.randn', (['(9)', '(8)', '(7)', '(6)'], {}), '(9, 8, 7, 6)\n', (4374, 4386), True, 'import numpy as np\n'), ((4403, 4422), 'oneflow.Tensor', 'flow.Tensor', (['np_arr'], {}), '(np_arr)\n', (4414, 4422), True, 'import oneflow as flow\n'), ((4505, 4527), 'numpy.std', 'np.std', (['np_arr'], {'axis': '(1)'}), '(np_arr, axis=1)\n', (4511, 4527), True, 'import numpy as np\n'), ((4675, 4701), 'numpy.random.rand', 'np.random.rand', (['(1)', '(6)', '(3)', '(8)'], {}), '(1, 6, 3, 8)\n', (4689, 4701), True, 'import numpy as np\n'), ((4719, 4737), 'numpy.sqrt', 'np.sqrt', (['input_arr'], {}), '(input_arr)\n', (4726, 4737), True, 'import numpy as np\n'), ((4750, 4772), 'oneflow.Tensor', 'flow.Tensor', (['input_arr'], {}), '(input_arr)\n', (4761, 4772), True, 'import oneflow as flow\n'), ((4982, 5008), 'numpy.random.rand', 'np.random.rand', (['(3)', '(2)', '(5)', '(7)'], {}), '(3, 2, 5, 7)\n', (4996, 5008), True, 'import numpy as np\n'), ((5058, 5077), 'oneflow.Tensor', 'flow.Tensor', (['np_arr'], {}), '(np_arr)\n', (5069, 5077), True, 'import oneflow as flow\n'), ((5095, 5108), 'oneflow.rsqrt', 'flow.rsqrt', (['x'], {}), '(x)\n', (5105, 5108), True, 'import oneflow as flow\n'), ((5293, 5320), 'numpy.random.randn', 'np.random.randn', (['(2)', '(7)', '(7)', '(3)'], {}), '(2, 7, 7, 3)\n', (5308, 5320), True, 'import numpy as np\n'), ((5338, 5355), 'numpy.square', 'np.square', (['np_arr'], {}), '(np_arr)\n', (5347, 5355), True, 'import numpy as np\n'), ((5368, 5387), 'oneflow.Tensor', 'flow.Tensor', (['np_arr'], {}), '(np_arr)\n', (5379, 5387), True, 'import oneflow as flow\n'), ((17088, 17114), 'numpy.random.uniform', 'np.random.uniform', (['(-10)', '(10)'], {}), '(-10, 10)\n', (17105, 17114), True, 'import numpy as np\n'), ((17731, 17757), 'numpy.random.uniform', 'np.random.uniform', (['(-10)', '(10)'], {}), '(-10, 
10)\n', (17748, 17757), True, 'import numpy as np\n'), ((18422, 18449), 'numpy.random.randn', 'np.random.randn', (['(2)', '(4)', '(5)', '(6)'], {}), '(2, 4, 5, 6)\n', (18437, 18449), True, 'import numpy as np\n'), ((18469, 18530), 'oneflow.tensor', 'flow.tensor', (['np_input'], {'dtype': 'flow.float32', 'requires_grad': '(True)'}), '(np_input, dtype=flow.float32, requires_grad=True)\n', (18480, 18530), True, 'import oneflow as flow\n'), ((19678, 19705), 'numpy.random.randn', 'np.random.randn', (['(2)', '(4)', '(5)', '(6)'], {}), '(2, 4, 5, 6)\n', (19693, 19705), True, 'import numpy as np\n'), ((19725, 19786), 'oneflow.tensor', 'flow.tensor', (['np_input'], {'dtype': 'flow.float32', 'requires_grad': '(True)'}), '(np_input, dtype=flow.float32, requires_grad=True)\n', (19736, 19786), True, 'import oneflow as flow\n'), ((20317, 20344), 'numpy.random.randn', 'np.random.randn', (['(1)', '(5)', '(7)', '(3)'], {}), '(1, 5, 7, 3)\n', (20332, 20344), True, 'import numpy as np\n'), ((20364, 20425), 'oneflow.tensor', 'flow.tensor', (['np_input'], {'dtype': 'flow.float32', 'requires_grad': '(True)'}), '(np_input, dtype=flow.float32, requires_grad=True)\n', (20375, 20425), True, 'import oneflow as flow\n'), ((20510, 20528), 'numpy.random.randn', 'np.random.randn', (['(1)'], {}), '(1)\n', (20525, 20528), True, 'import numpy as np\n'), ((20550, 20568), 'numpy.random.randn', 'np.random.randn', (['(1)'], {}), '(1)\n', (20565, 20568), True, 'import numpy as np\n'), ((21673, 21700), 'numpy.random.randn', 'np.random.randn', (['(2)', '(4)', '(5)', '(6)'], {}), '(2, 4, 5, 6)\n', (21688, 21700), True, 'import numpy as np\n'), ((21721, 21782), 'oneflow.tensor', 'flow.tensor', (['np_input'], {'dtype': 'flow.float32', 'requires_grad': '(True)'}), '(np_input, dtype=flow.float32, requires_grad=True)\n', (21732, 21782), True, 'import oneflow as flow\n'), ((22309, 22336), 'numpy.random.randn', 'np.random.randn', (['(2)', '(4)', '(5)', '(6)'], {}), '(2, 4, 5, 6)\n', (22324, 22336), True, 'import 
numpy as np\n'), ((22357, 22418), 'oneflow.tensor', 'flow.tensor', (['np_input'], {'dtype': 'flow.float32', 'requires_grad': '(True)'}), '(np_input, dtype=flow.float32, requires_grad=True)\n', (22368, 22418), True, 'import oneflow as flow\n'), ((23087, 23114), 'numpy.random.randn', 'np.random.randn', (['(2)', '(4)', '(5)', '(6)'], {}), '(2, 4, 5, 6)\n', (23102, 23114), True, 'import numpy as np\n'), ((23135, 23196), 'oneflow.tensor', 'flow.tensor', (['np_input'], {'dtype': 'flow.float32', 'requires_grad': '(True)'}), '(np_input, dtype=flow.float32, requires_grad=True)\n', (23146, 23196), True, 'import oneflow as flow\n'), ((23748, 23775), 'numpy.random.randn', 'np.random.randn', (['(2)', '(4)', '(5)', '(6)'], {}), '(2, 4, 5, 6)\n', (23763, 23775), True, 'import numpy as np\n'), ((23796, 23857), 'oneflow.tensor', 'flow.tensor', (['np_input'], {'dtype': 'flow.float32', 'requires_grad': '(True)'}), '(np_input, dtype=flow.float32, requires_grad=True)\n', (23807, 23857), True, 'import oneflow as flow\n'), ((28350, 28381), 'numpy.ones', 'np.ones', (['(4, 4)'], {'dtype': 'np.int32'}), '((4, 4), dtype=np.int32)\n', (28357, 28381), True, 'import numpy as np\n'), ((28402, 28438), 'oneflow.placement', 'flow.placement', (['"""cuda"""', '[0, 1, 2, 3]'], {}), "('cuda', [0, 1, 2, 3])\n", (28416, 28438), True, 'import oneflow as flow\n'), ((29348, 29384), 'oneflow.env.all_device_placement', 'flow.env.all_device_placement', (['"""cpu"""'], {}), "('cpu')\n", (29377, 29384), True, 'import oneflow as flow\n'), ((29841, 29878), 'oneflow.env.all_device_placement', 'flow.env.all_device_placement', (['"""cuda"""'], {}), "('cuda')\n", (29870, 29878), True, 'import oneflow as flow\n'), ((30490, 30524), 'oneflow.placement', 'flow.placement', (['"""cuda"""', '[[0], [1]]'], {}), "('cuda', [[0], [1]])\n", (30504, 30524), True, 'import oneflow as flow\n'), ((31282, 31322), 'oneflow.placement', 'flow.placement', (['"""cuda"""', '[[0, 1], [2, 3]]'], {}), "('cuda', [[0, 1], [2, 3]])\n", (31296, 
31322), True, 'import oneflow as flow\n'), ((2124, 2190), 'numpy.array', 'np.array', (['[[-0.462, 0.3139], [0.3898, -0.7197], [0.0478, -0.1657]]'], {}), '([[-0.462, 0.3139], [0.3898, -0.7197], [0.0478, -0.1657]])\n', (2132, 2190), True, 'import numpy as np\n'), ((2258, 2279), 'numpy.ones', 'np.ones', ([], {'shape': '(3, 2)'}), '(shape=(3, 2))\n', (2265, 2279), True, 'import numpy as np\n'), ((2333, 2367), 'numpy.array', 'np.array', (['[[0, 1], [1, 0], [1, 0]]'], {}), '([[0, 1], [1, 0], [1, 0]])\n', (2341, 2367), True, 'import numpy as np\n'), ((3060, 3083), 'numpy.random.randn', 'np.random.randn', (['*shape'], {}), '(*shape)\n', (3075, 3083), True, 'import numpy as np\n'), ((3840, 3867), 'numpy.random.randn', 'np.random.randn', (['(2)', '(3)', '(4)', '(5)'], {}), '(2, 3, 4, 5)\n', (3855, 3867), True, 'import numpy as np\n'), ((5030, 5045), 'numpy.sqrt', 'np.sqrt', (['np_arr'], {}), '(np_arr)\n', (5037, 5045), True, 'import numpy as np\n'), ((13562, 13627), 'numpy.array', 'np.array', (['[[-4.0, -3.0, -2.0], [-1.0, 0.0, 1.0], [2.0, 3.0, 4.0]]'], {}), '([[-4.0, -3.0, -2.0], [-1.0, 0.0, 1.0], [2.0, 3.0, 4.0]])\n', (13570, 13627), True, 'import numpy as np\n'), ((17007, 17043), 'numpy.random.uniform', 'np.random.uniform', (['(-100)', '(100)', '(5, 5)'], {}), '(-100, 100, (5, 5))\n', (17024, 17043), True, 'import numpy as np\n'), ((16852, 16895), 'oneflow.unittest.env.eager_execution_enabled', 'flow.unittest.env.eager_execution_enabled', ([], {}), '()\n', (16893, 16895), True, 'import oneflow as flow\n'), ((17650, 17686), 'numpy.random.uniform', 'np.random.uniform', (['(-100)', '(100)', '(5, 5)'], {}), '(-100, 100, (5, 5))\n', (17667, 17686), True, 'import numpy as np\n'), ((17496, 17539), 'oneflow.unittest.env.eager_execution_enabled', 'flow.unittest.env.eager_execution_enabled', ([], {}), '()\n', (17537, 17539), True, 'import oneflow as flow\n'), ((18922, 18942), 'numpy.triu', 'np.triu', (['x', 'diagonal'], {}), '(x, diagonal)\n', (18929, 18942), True, 'import numpy as 
np\n'), ((19125, 19149), 'numpy.random.randn', 'np.random.randn', (['(2)', '(4)', '(6)'], {}), '(2, 4, 6)\n', (19140, 19149), True, 'import numpy as np\n'), ((19173, 19234), 'oneflow.tensor', 'flow.tensor', (['np_input'], {'dtype': 'flow.float32', 'requires_grad': '(True)'}), '(np_input, dtype=flow.float32, requires_grad=True)\n', (19184, 19234), True, 'import oneflow as flow\n'), ((20600, 20634), 'numpy.full', 'np.full', (['np_input.shape', 'rand_init'], {}), '(np_input.shape, rand_init)\n', (20607, 20634), True, 'import numpy as np\n'), ((20709, 20752), 'oneflow.tensor', 'flow.tensor', (['rand_scale'], {'dtype': 'flow.float32'}), '(rand_scale, dtype=flow.float32)\n', (20720, 20752), True, 'import oneflow as flow\n'), ((29280, 29305), 'oneflow.tensor', 'flow.tensor', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (29291, 29305), True, 'import oneflow as flow\n'), ((29308, 29327), 'oneflow.env.get_rank', 'flow.env.get_rank', ([], {}), '()\n', (29325, 29327), True, 'import oneflow as flow\n'), ((30450, 30469), 'oneflow.env.get_rank', 'flow.env.get_rank', ([], {}), '()\n', (30467, 30469), True, 'import oneflow as flow\n'), ((31242, 31261), 'oneflow.env.get_rank', 'flow.env.get_rank', ([], {}), '()\n', (31259, 31261), True, 'import oneflow as flow\n'), ((3506, 3529), 'numpy.random.randn', 'np.random.randn', (['*shape'], {}), '(*shape)\n', (3521, 3529), True, 'import numpy as np\n'), ((17418, 17433), 'numpy.ones', 'np.ones', (['(5, 5)'], {}), '((5, 5))\n', (17425, 17433), True, 'import numpy as np\n'), ((18057, 18072), 'numpy.ones', 'np.ones', (['(5, 5)'], {}), '((5, 5))\n', (18064, 18072), True, 'import numpy as np\n'), ((18182, 18191), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (18188, 18191), True, 'import numpy as np\n'), ((18972, 18987), 'numpy.ones_like', 'np.ones_like', (['x'], {}), '(x)\n', (18984, 18987), True, 'import numpy as np\n'), ((19953, 19971), 'numpy.random.randn', 'np.random.randn', (['(1)'], {}), '(1)\n', (19968, 19971), True, 'import numpy as 
np\n'), ((20948, 20995), 'numpy.full', 'np.full', (['np_input.shape', '(rand_init + rand_scale)'], {}), '(np_input.shape, rand_init + rand_scale)\n', (20955, 20995), True, 'import numpy as np\n'), ((21433, 21442), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (21439, 21442), True, 'import numpy as np\n'), ((28754, 28787), 'numpy.ones', 'np.ones', (['(1, 4)'], {'dtype': 'np.float32'}), '((1, 4), dtype=np.float32)\n', (28761, 28787), True, 'import numpy as np\n'), ((29438, 29455), 'oneflow.sbp.split', 'flow.sbp.split', (['(0)'], {}), '(0)\n', (29452, 29455), True, 'import oneflow as flow\n'), ((29932, 29949), 'oneflow.sbp.split', 'flow.sbp.split', (['(0)'], {}), '(0)\n', (29946, 29949), True, 'import oneflow as flow\n'), ((30431, 30446), 'numpy.ones', 'np.ones', (['(2, 2)'], {}), '((2, 2))\n', (30438, 30446), True, 'import numpy as np\n'), ((31223, 31238), 'numpy.ones', 'np.ones', (['(2, 2)'], {}), '((2, 2))\n', (31230, 31238), True, 'import numpy as np\n'), ((21232, 21279), 'numpy.full', 'np.full', (['np_input.shape', '(rand_init + rand_scale)'], {}), '(np_input.shape, rand_init + rand_scale)\n', (21239, 21279), True, 'import numpy as np\n'), ((22172, 22182), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (22178, 22182), True, 'import numpy as np\n'), ((23017, 23026), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (23023, 23026), True, 'import numpy as np\n'), ((23636, 23645), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (23642, 23645), True, 'import numpy as np\n'), ((28561, 28578), 'oneflow.sbp.split', 'flow.sbp.split', (['(0)'], {}), '(0)\n', (28575, 28578), True, 'import oneflow as flow\n'), ((28934, 28951), 'oneflow.sbp.split', 'flow.sbp.split', (['(0)'], {}), '(0)\n', (28948, 28951), True, 'import oneflow as flow\n'), ((30592, 30609), 'oneflow.sbp.split', 'flow.sbp.split', (['(0)'], {}), '(0)\n', (30606, 30609), True, 'import oneflow as flow\n'), ((30611, 30628), 'oneflow.sbp.split', 'flow.sbp.split', (['(1)'], {}), '(1)\n', (30625, 30628), True, 'import 
oneflow as flow\n'), ((30815, 30832), 'oneflow.sbp.split', 'flow.sbp.split', (['(0)'], {}), '(0)\n', (30829, 30832), True, 'import oneflow as flow\n'), ((31391, 31408), 'oneflow.sbp.split', 'flow.sbp.split', (['(0)'], {}), '(0)\n', (31405, 31408), True, 'import oneflow as flow\n'), ((31410, 31427), 'oneflow.sbp.split', 'flow.sbp.split', (['(1)'], {}), '(1)\n', (31424, 31427), True, 'import oneflow as flow\n'), ((31670, 31687), 'oneflow.sbp.split', 'flow.sbp.split', (['(0)'], {}), '(0)\n', (31684, 31687), True, 'import oneflow as flow\n'), ((22934, 22943), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (22940, 22943), True, 'import numpy as np\n'), ((23686, 23695), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (23692, 23695), True, 'import numpy as np\n')] |
from models.pix2pix_model import Pix2PixModel
import oneflow as flow
import oneflow.typing as tp
import numpy as np
from options import BaseOptions
from pre_process import preprocess_input
import cv2
from data.base_method.what_name import Dataset_Help
import util.util as util
# Parse CLI options and force test/inference mode.
opt = BaseOptions().parse()
opt.phase = 'test'
device_type = 'gpu' if opt.gpu_nums > 0 else 'cpu'
# device_type = 'cpu'
if device_type == 'gpu':
    flow.config.gpu_device_num(opt.gpu_nums)
flow.env.init()
# Lazy-mode job configuration shared by all @flow.global_function jobs below.
func_config = flow.FunctionConfig()
func_config.default_data_type(flow.float)
# func_config.default_logical_view(flow.scope.consistent_view())
# func_config.default_placement_scope(flow.scope.placement("gpu", "0:2"))
batch = opt.batch_size
label_class_num = opt.label_nc
# Instance maps add one extra channel to the one-hot semantic input.
if not opt.no_instance:
    label_class_num+=1
image_channel = opt.input_nc
height, width = opt.my_size_h, opt.my_size_w
# Model and dataset wrappers (project-local classes).
pix2pix = Pix2PixModel(opt)
dataset = Dataset_Help(opt)
@flow.global_function('predict', func_config)
def InferenceG(
    input_semantics_32: tp.Numpy.Placeholder((opt.batch_size, 37, height//32, width//32), dtype=flow.float),
    input_semantics_16: tp.Numpy.Placeholder((opt.batch_size, 37, height//16, width//16), dtype=flow.float),
    input_semantics_8: tp.Numpy.Placeholder((opt.batch_size, 37, height//8, width//8), dtype=flow.float),
    input_semantics_4: tp.Numpy.Placeholder((opt.batch_size, 37, height//4, width//4), dtype=flow.float),
    input_semantics_2: tp.Numpy.Placeholder((opt.batch_size, 37, height//2, width//2), dtype=flow.float),
    input_semantics_1: tp.Numpy.Placeholder((opt.batch_size, 37, height, width), dtype=flow.float),
):
    """Lazy-mode predict job: run the generator on a 6-level semantic pyramid.

    Each placeholder is the one-hot semantic map downscaled by a factor of
    32/16/8/4/2/1 respectively.  NOTE(review): the channel count 37 is
    hard-coded here while `label_class_num` is computed above — confirm the
    two always agree.  The KLD loss returned by generate_fake is discarded
    at inference time; only the fake image batch is returned.
    """
    # with flow.scope.placement('gpu', '0:2'):
    fake_image, kld_loss = pix2pix.generate_fake(input_semantics_32, input_semantics_16, input_semantics_8, input_semantics_4, input_semantics_2, input_semantics_1, None, opt, trainable=False)
    return fake_image
def out_scale_semantic(input_semantics):
    """Build a 6-level pyramid of a one-hot semantic map (scales 1/32 .. 1/1).

    Args:
        input_semantics: numpy array of shape (batch, channels, h, w).

    Returns:
        Tuple of six numpy arrays (is_32, is_16, is_8, is_4, is_2, is_1),
        where level s has shape (batch, channels, h // s, w // s).  Each
        channel plane is resized independently with cv2.resize (note that
        cv2 takes (width, height) ordering for the target size).
    """
    bs, c, h, w = input_semantics.shape
    scales = (32, 16, 8, 4, 2, 1)
    pyramids = [np.zeros((bs, c, h // s, w // s)) for s in scales]
    for b in range(bs):  # iterate over batch
        for c_ in range(c):  # iterate over channels
            # Hoisted loop-invariant: the (h, w, 1) single-channel plane.
            plane = input_semantics[b, c_, :, :].reshape((h, w, 1))
            for out, s in zip(pyramids, scales):
                out[b][c_] = cv2.resize(plane, (w // s, h // s))
    return tuple(pyramids)
# Restore generator/discriminator weights when a checkpoint path is given.
if opt.pre_G_D!='':
    flow.load_variables(flow.checkpoint.get(opt.pre_G_D))
    print('Load checkpoint G and D success')
# Main inference loop: assemble a batch, build the semantic pyramid, run the
# generator, and write a (label | fake) comparison image per batch.
for i in range(dataset.lenOfIter_perBatch()):
    data_dict = dataset[i]
    # image = data_dict['real_image']
    label = data_dict['label']
    instance = data_dict['instance']
    # Gather the remaining batch_size - 1 samples and stack along batch dim.
    if opt.batch_size != 1:
        for b in range(1, opt.batch_size):
            data_dict = dataset[i+b]
            # image_ = data_dict['real_image']
            label_ = data_dict['label']
            instance_ = data_dict['instance']
            # image = np.concatenate((image, image_), axis=0)
            label = np.concatenate((label, label_), axis=0)
            instance = np.concatenate((instance, instance_), axis=0)
        # NOTE(review): reassigning `i` does NOT advance the `range` iterator,
        # so successive outer iterations re-read overlapping samples when
        # batch_size > 1 — looks like a bug; confirm the intended stride.
        i = i+b
    # data = {'label': label, 'image': image, 'instance': instance}
    data = {'label': label, 'instance': instance}
    input_semantics, real_image = preprocess_input(data, opt)
    is_32, is_16, is_8, is_4, is_2, is_1 = out_scale_semantic(input_semantics)
    fake_image = InferenceG(is_32, is_16, is_8, is_4, is_2, is_1).get()
    fake = util.tensor2im(fake_image.numpy()[0])
    # `label` is rebound here: now the rendered color label map, not the raw one.
    label = util.onehot2label(is_1[0], opt.label_nc)
    out = np.concatenate((label, fake), axis=0)
    cv2.imwrite('inference'+str(i)+'_.jpg', out)
| [
"oneflow.env.init",
"oneflow.global_function",
"oneflow.typing.Numpy.Placeholder",
"oneflow.checkpoint.get",
"oneflow.config.gpu_device_num",
"oneflow.FunctionConfig"
] | [((469, 484), 'oneflow.env.init', 'flow.env.init', ([], {}), '()\n', (482, 484), True, 'import oneflow as flow\n'), ((499, 520), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (518, 520), True, 'import oneflow as flow\n'), ((889, 906), 'models.pix2pix_model.Pix2PixModel', 'Pix2PixModel', (['opt'], {}), '(opt)\n', (901, 906), False, 'from models.pix2pix_model import Pix2PixModel\n'), ((917, 934), 'data.base_method.what_name.Dataset_Help', 'Dataset_Help', (['opt'], {}), '(opt)\n', (929, 934), False, 'from data.base_method.what_name import Dataset_Help\n'), ((937, 981), 'oneflow.global_function', 'flow.global_function', (['"""predict"""', 'func_config'], {}), "('predict', func_config)\n", (957, 981), True, 'import oneflow as flow\n'), ((427, 467), 'oneflow.config.gpu_device_num', 'flow.config.gpu_device_num', (['opt.gpu_nums'], {}), '(opt.gpu_nums)\n', (453, 467), True, 'import oneflow as flow\n'), ((1993, 2028), 'numpy.zeros', 'np.zeros', (['(bs, c, h // 32, w // 32)'], {}), '((bs, c, h // 32, w // 32))\n', (2001, 2028), True, 'import numpy as np\n'), ((2041, 2076), 'numpy.zeros', 'np.zeros', (['(bs, c, h // 16, w // 16)'], {}), '((bs, c, h // 16, w // 16))\n', (2049, 2076), True, 'import numpy as np\n'), ((2088, 2121), 'numpy.zeros', 'np.zeros', (['(bs, c, h // 8, w // 8)'], {}), '((bs, c, h // 8, w // 8))\n', (2096, 2121), True, 'import numpy as np\n'), ((2133, 2166), 'numpy.zeros', 'np.zeros', (['(bs, c, h // 4, w // 4)'], {}), '((bs, c, h // 4, w // 4))\n', (2141, 2166), True, 'import numpy as np\n'), ((2178, 2211), 'numpy.zeros', 'np.zeros', (['(bs, c, h // 2, w // 2)'], {}), '((bs, c, h // 2, w // 2))\n', (2186, 2211), True, 'import numpy as np\n'), ((2223, 2256), 'numpy.zeros', 'np.zeros', (['(bs, c, h // 1, w // 1)'], {}), '((bs, c, h // 1, w // 1))\n', (2231, 2256), True, 'import numpy as np\n'), ((3916, 3943), 'pre_process.preprocess_input', 'preprocess_input', (['data', 'opt'], {}), '(data, opt)\n', (3932, 3943), False, 'from 
pre_process import preprocess_input\n'), ((4158, 4198), 'util.util.onehot2label', 'util.onehot2label', (['is_1[0]', 'opt.label_nc'], {}), '(is_1[0], opt.label_nc)\n', (4175, 4198), True, 'import util.util as util\n'), ((4209, 4246), 'numpy.concatenate', 'np.concatenate', (['(label, fake)'], {'axis': '(0)'}), '((label, fake), axis=0)\n', (4223, 4246), True, 'import numpy as np\n'), ((284, 297), 'options.BaseOptions', 'BaseOptions', ([], {}), '()\n', (295, 297), False, 'from options import BaseOptions\n'), ((1022, 1114), 'oneflow.typing.Numpy.Placeholder', 'tp.Numpy.Placeholder', (['(opt.batch_size, 37, height // 32, width // 32)'], {'dtype': 'flow.float'}), '((opt.batch_size, 37, height // 32, width // 32), dtype\n =flow.float)\n', (1042, 1114), True, 'import oneflow.typing as tp\n'), ((1131, 1223), 'oneflow.typing.Numpy.Placeholder', 'tp.Numpy.Placeholder', (['(opt.batch_size, 37, height // 16, width // 16)'], {'dtype': 'flow.float'}), '((opt.batch_size, 37, height // 16, width // 16), dtype\n =flow.float)\n', (1151, 1223), True, 'import oneflow.typing as tp\n'), ((1239, 1329), 'oneflow.typing.Numpy.Placeholder', 'tp.Numpy.Placeholder', (['(opt.batch_size, 37, height // 8, width // 8)'], {'dtype': 'flow.float'}), '((opt.batch_size, 37, height // 8, width // 8), dtype=\n flow.float)\n', (1259, 1329), True, 'import oneflow.typing as tp\n'), ((1345, 1435), 'oneflow.typing.Numpy.Placeholder', 'tp.Numpy.Placeholder', (['(opt.batch_size, 37, height // 4, width // 4)'], {'dtype': 'flow.float'}), '((opt.batch_size, 37, height // 4, width // 4), dtype=\n flow.float)\n', (1365, 1435), True, 'import oneflow.typing as tp\n'), ((1451, 1541), 'oneflow.typing.Numpy.Placeholder', 'tp.Numpy.Placeholder', (['(opt.batch_size, 37, height // 2, width // 2)'], {'dtype': 'flow.float'}), '((opt.batch_size, 37, height // 2, width // 2), dtype=\n flow.float)\n', (1471, 1541), True, 'import oneflow.typing as tp\n'), ((1557, 1632), 'oneflow.typing.Numpy.Placeholder', 'tp.Numpy.Placeholder', 
(['(opt.batch_size, 37, height, width)'], {'dtype': 'flow.float'}), '((opt.batch_size, 37, height, width), dtype=flow.float)\n', (1577, 1632), True, 'import oneflow.typing as tp\n'), ((3057, 3089), 'oneflow.checkpoint.get', 'flow.checkpoint.get', (['opt.pre_G_D'], {}), '(opt.pre_G_D)\n', (3076, 3089), True, 'import oneflow as flow\n'), ((3639, 3678), 'numpy.concatenate', 'np.concatenate', (['(label, label_)'], {'axis': '(0)'}), '((label, label_), axis=0)\n', (3653, 3678), True, 'import numpy as np\n'), ((3702, 3747), 'numpy.concatenate', 'np.concatenate', (['(instance, instance_)'], {'axis': '(0)'}), '((instance, instance_), axis=0)\n', (3716, 3747), True, 'import numpy as np\n')] |
r"""
reference to PyTorch
MultiheadAttention in torch.nn.activation
multi_head_attention_forward in torch.nn.functional
"""
from typing import Optional, Tuple
import oneflow as flow
from oneflow import Tensor
from oneflow.nn import Module, Parameter, Linear
from oneflow.nn.init import xavier_uniform_, constant_, xavier_normal_
from .utils import (
_in_projection_packed,
_scaled_dot_product_attention,
linear,
_in_projection,
pad,
)
class MultiheadAttention(Module):
    def force_mirrored_forward(self, *args):
        # Intentionally empty stub; its purpose is not evident from this file.
        # NOTE(review): confirm whether a mirrored-mode forward is still needed.
        pass
    __constants__ = ["batch_first"]
    # Optional learned key/value biases, created only when add_bias_kv=True.
    bias_k: Optional[Tensor]
    bias_v: Optional[Tensor]
def __init__(
self,
embed_dim,
num_heads,
dropout=0.0,
bias=True,
add_bias_kv=False,
add_zero_attn=False,
kdim=None,
vdim=None,
batch_first=False,
device=None,
dtype=None,
) -> None:
# factory_kwargs = {'device': device, 'dtype': dtype}
super(MultiheadAttention, self).__init__()
self.embed_dim = embed_dim
self.kdim = kdim if kdim is not None else embed_dim
self.vdim = vdim if vdim is not None else embed_dim
self._qkv_same_embed_dim = self.kdim == embed_dim and self.vdim == embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.batch_first = batch_first
self.head_dim = embed_dim // num_heads
assert (
self.head_dim * num_heads == self.embed_dim
), "embed_dim must be divisible by num_heads"
if self._qkv_same_embed_dim is False:
self.q_proj_weight = Parameter(flow.zeros((embed_dim, embed_dim)))
self.k_proj_weight = Parameter(flow.zeros((embed_dim, self.kdim)))
self.v_proj_weight = Parameter(flow.zeros((embed_dim, self.vdim)))
self.register_parameter("in_proj_weight", None)
else:
self.in_proj_weight = Parameter(flow.zeros((3 * embed_dim, embed_dim)))
self.register_parameter("q_proj_weight", None)
self.register_parameter("k_proj_weight", None)
self.register_parameter("v_proj_weight", None)
if bias:
self.in_proj_bias = Parameter(flow.zeros(3 * embed_dim))
else:
self.register_parameter("in_proj_bias", None)
self.out_proj = Linear(embed_dim, embed_dim, bias=bias)
if add_bias_kv:
self.bias_k = Parameter(flow.zeros((1, 1, embed_dim)))
self.bias_v = Parameter(flow.zeros((1, 1, embed_dim)))
else:
self.bias_k = self.bias_v = None
self.add_zero_attn = add_zero_attn
self._reset_parameters()
def _reset_parameters(self):
if self._qkv_same_embed_dim:
xavier_uniform_(self.in_proj_weight)
else:
xavier_uniform_(self.q_proj_weight)
xavier_uniform_(self.k_proj_weight)
xavier_uniform_(self.v_proj_weight)
if self.in_proj_bias is not None:
constant_(self.in_proj_bias, 0.0)
constant_(self.out_proj.bias, 0.0)
if self.bias_k is not None:
xavier_normal_(self.bias_k)
if self.bias_v is not None:
xavier_normal_(self.bias_v)
def forward(
self,
query: Tensor,
key: Tensor,
value: Tensor,
key_padding_mask: Optional[Tensor] = None,
need_weights: bool = True,
attn_mask: Optional[Tensor] = None,
) -> Tuple[Tensor, Optional[Tensor]]:
if self.batch_first:
query, key, value = [x.transpose(1, 0) for x in (query, key, value)]
if not self._qkv_same_embed_dim:
attn_output, attn_output_weights = multi_head_attention_forward(
query,
key,
value,
self.embed_dim,
self.num_heads,
self.in_proj_weight,
self.in_proj_bias,
self.bias_k,
self.bias_v,
self.add_zero_attn,
self.dropout,
self.out_proj.weight,
self.out_proj.bias,
training=self.training,
key_padding_mask=key_padding_mask,
need_weights=need_weights,
attn_mask=attn_mask,
use_separate_proj_weight=True,
q_proj_weight=self.q_proj_weight,
k_proj_weight=self.k_proj_weight,
v_proj_weight=self.v_proj_weight,
)
else:
attn_output, attn_output_weights = multi_head_attention_forward(
query,
key,
value,
self.embed_dim,
self.num_heads,
self.in_proj_weight,
self.in_proj_bias,
self.bias_k,
self.bias_v,
self.add_zero_attn,
self.dropout,
self.out_proj.weight,
self.out_proj.bias,
training=self.training,
key_padding_mask=key_padding_mask,
need_weights=need_weights,
attn_mask=attn_mask,
)
if self.batch_first:
return attn_output.transpose(1, 0), attn_output_weights
else:
return attn_output, attn_output_weights
def multi_head_attention_forward(
    query: Tensor,
    key: Tensor,
    value: Tensor,
    embed_dim_to_check: int,
    num_heads: int,
    in_proj_weight: Tensor,
    in_proj_bias: Optional[Tensor],
    bias_k: Optional[Tensor],
    bias_v: Optional[Tensor],
    add_zero_attn: bool,
    dropout_p: float,
    out_proj_weight: Tensor,
    out_proj_bias: Optional[Tensor],
    training: bool = True,
    key_padding_mask: Optional[Tensor] = None,
    need_weights: bool = True,
    attn_mask: Optional[Tensor] = None,
    use_separate_proj_weight: bool = False,
    q_proj_weight: Optional[Tensor] = None,
    k_proj_weight: Optional[Tensor] = None,
    v_proj_weight: Optional[Tensor] = None,
    static_k: Optional[Tensor] = None,
    static_v: Optional[Tensor] = None,
) -> Tuple[Tensor, Optional[Tensor]]:
    """Functional multi-head attention, ported from
    ``torch.nn.functional.multi_head_attention_forward``.

    ``query`` is (tgt_len, bsz, embed_dim); ``key``/``value`` are
    (src_len, bsz, *). Projects the inputs (packed or separate weights),
    optionally appends bias/zero rows to k and v, merges ``attn_mask``
    with ``key_padding_mask``, and runs scaled dot-product attention.

    Returns:
        attn_output: (tgt_len, bsz, embed_dim) tensor after the output
            projection.
        attn_output_weights: (bsz, tgt_len, src_len) weights averaged over
            heads when ``need_weights`` is True, otherwise ``None``.
    """
    # set up shape vars
    tgt_len, bsz, embed_dim = query.shape
    src_len, _, _ = key.shape
    assert (
        embed_dim == embed_dim_to_check
    ), f"was expecting embedding dimension of {embed_dim_to_check}, but got {embed_dim}"
    if isinstance(embed_dim, Tensor):
        # embed_dim can be a tensor when JIT tracing
        head_dim = embed_dim.div(num_heads)
    else:
        head_dim = embed_dim // num_heads
    assert (
        head_dim * num_heads == embed_dim
    ), f"embed_dim {embed_dim} not divisible by num_heads {num_heads}"
    if use_separate_proj_weight:
        # allow MHA to have different embedding dimensions when separate projection weights are used
        assert (
            key.shape[:2] == value.shape[:2]
        ), f"key's sequence and batch dims {key.shape[:2]} do not match value's {value.shape[:2]}"
    else:
        assert (
            key.shape == value.shape
        ), f"key shape {key.shape} does not match value shape {value.shape}"
    #
    # compute in-projection
    #
    if not use_separate_proj_weight:
        q, k, v = _in_projection_packed(query, key, value, in_proj_weight, in_proj_bias)
    else:
        assert (
            q_proj_weight is not None
        ), "use_separate_proj_weight is True but q_proj_weight is None"
        assert (
            k_proj_weight is not None
        ), "use_separate_proj_weight is True but k_proj_weight is None"
        assert (
            v_proj_weight is not None
        ), "use_separate_proj_weight is True but v_proj_weight is None"
        if in_proj_bias is None:
            b_q = b_k = b_v = None
        else:
            # Packed bias is split into the three per-projection biases.
            b_q, b_k, b_v = in_proj_bias.chunk(3, dim=0)
        q, k, v = _in_projection(
            query,
            key,
            value,
            q_proj_weight,
            k_proj_weight,
            v_proj_weight,
            b_q,
            b_k,
            b_v,
        )
    # prep attention mask
    if attn_mask is not None:
        # TODO
        # assert attn_mask.dtype.is_floating_point or attn_mask.dtype == flow.uint8, \
        # f"Only float, byte, and uint8 type are supported for attn_mask, not {attn_mask.dtype}"
        # ensure attn_mask's dim is 3
        if attn_mask.dim() == 2:
            correct_2d_size = (tgt_len, src_len)
            if attn_mask.shape != correct_2d_size:
                raise RuntimeError(
                    f"The shape of the 2D attn_mask is {attn_mask.shape}, but should be {correct_2d_size}."
                )
            attn_mask = attn_mask.unsqueeze(0)
        elif attn_mask.dim() == 3:
            correct_3d_size = (bsz * num_heads, tgt_len, src_len)
            if attn_mask.shape != correct_3d_size:
                raise RuntimeError(
                    f"The shape of the 3D attn_mask is {attn_mask.shape}, but should be {correct_3d_size}."
                )
        else:
            raise RuntimeError(
                f"attn_mask's dimension {attn_mask.dim()} is not supported"
            )
    # add bias along batch dimension (currently second)
    if bias_k is not None and bias_v is not None:
        assert static_k is None, "bias cannot be added to static key."
        assert static_v is None, "bias cannot be added to static value."
        k = flow.cat([k, bias_k.repeat((1, bsz, 1))])
        v = flow.cat([v, bias_v.repeat((1, bsz, 1))])
        if attn_mask is not None:
            # Masks grow one column to cover the appended bias position.
            attn_mask = pad(attn_mask, (0, 1, 0, 0))
        if key_padding_mask is not None:
            key_padding_mask = pad(key_padding_mask, (0, 1, 0, 0))
    else:
        assert bias_k is None
        assert bias_v is None
    #
    # reshape q, k, v for multihead attention and make em batch first
    #
    # replace torch.contiguous with reshape
    q = q.reshape(tgt_len, bsz * num_heads, head_dim).transpose(0, 1)
    if static_k is None:
        k = k.reshape(-1, bsz * num_heads, head_dim).transpose(0, 1)
    else:
        assert (
            static_k.size(0) == bsz * num_heads
        ), f"expecting static_k.size(0) of {bsz * num_heads}, but got {static_k.size(0)}"
        assert (
            static_k.size(2) == head_dim
        ), f"expecting static_k.size(2) of {head_dim}, but got {static_k.size(2)}"
        k = static_k
    if static_v is None:
        v = v.reshape(-1, bsz * num_heads, head_dim).transpose(0, 1)
    else:
        assert (
            static_v.size(0) == bsz * num_heads
        ), f"expecting static_v.size(0) of {bsz * num_heads}, but got {static_v.size(0)}"
        assert (
            static_v.size(2) == head_dim
        ), f"expecting static_v.size(2) of {head_dim}, but got {static_v.size(2)}"
        v = static_v
    # add zero attention along batch dimension (now first)
    if add_zero_attn:
        zero_attn_shape = (bsz * num_heads, 1, head_dim)
        k = flow.cat(
            [k, flow.zeros(zero_attn_shape, dtype=k.dtype, device=k.device)], dim=1
        )
        v = flow.cat(
            [v, flow.zeros(zero_attn_shape, dtype=v.dtype, device=v.device)], dim=1
        )
        if attn_mask is not None:
            attn_mask = pad(attn_mask, (0, 1, 0, 0))
        if key_padding_mask is not None:
            key_padding_mask = pad(key_padding_mask, (0, 1, 0, 0))
    # update source sequence length after adjustments
    src_len = k.size(1)
    # merge key padding and attention masks
    if key_padding_mask is not None:
        assert key_padding_mask.shape == (
            bsz,
            src_len,
        ), f"expecting key_padding_mask shape of {(bsz, src_len)}, but got {key_padding_mask.shape}"
        # Broadcast the per-sequence padding mask to one row per head/query.
        key_padding_mask = (
            key_padding_mask.reshape(bsz, 1, 1, src_len)
            .expand(-1, num_heads, tgt_len, -1)
            .reshape(bsz * num_heads, tgt_len, src_len)
        )
        if attn_mask is not None:
            attn_mask = attn_mask.expand(bsz * num_heads, -1, -1)
        if attn_mask is None:
            attn_mask = key_padding_mask
        elif attn_mask.dtype == flow.int32:
            attn_mask = flow.logical_or(attn_mask, key_padding_mask)
        else:
            attn_mask = attn_mask.masked_fill(key_padding_mask, float("-inf"))
    # convert mask to float
    if attn_mask is not None and attn_mask.dtype == flow.int32:
        # Boolean-style (int32) mask becomes additive -inf mask.
        new_attn_mask = flow.zeros_like(attn_mask).to(flow.float)
        new_attn_mask = new_attn_mask.masked_fill(attn_mask, float("-inf"))
        attn_mask = new_attn_mask
    # adjust dropout probability
    if not training:
        dropout_p = 0.0
    #
    # (deep breath) calculate attention and out projection
    #
    attn_output, attn_output_weights = _scaled_dot_product_attention(
        q, k, v, attn_mask, dropout_p
    )
    attn_output = attn_output.transpose(0, 1).reshape(tgt_len, bsz, embed_dim)
    attn_output = linear(attn_output, out_proj_weight, out_proj_bias)
    if need_weights:
        # average attention weights over heads
        attn_output_weights = attn_output_weights.reshape(
            bsz, num_heads, tgt_len, src_len
        )
        return attn_output, attn_output_weights.sum(dim=1) / num_heads
    else:
        return attn_output, None
| [
"oneflow.nn.init.xavier_normal_",
"oneflow.nn.Linear",
"oneflow.logical_or",
"oneflow.zeros",
"oneflow.nn.init.constant_",
"oneflow.zeros_like",
"oneflow.nn.init.xavier_uniform_"
] | [((2387, 2426), 'oneflow.nn.Linear', 'Linear', (['embed_dim', 'embed_dim'], {'bias': 'bias'}), '(embed_dim, embed_dim, bias=bias)\n', (2393, 2426), False, 'from oneflow.nn import Module, Parameter, Linear\n'), ((2806, 2842), 'oneflow.nn.init.xavier_uniform_', 'xavier_uniform_', (['self.in_proj_weight'], {}), '(self.in_proj_weight)\n', (2821, 2842), False, 'from oneflow.nn.init import xavier_uniform_, constant_, xavier_normal_\n'), ((2869, 2904), 'oneflow.nn.init.xavier_uniform_', 'xavier_uniform_', (['self.q_proj_weight'], {}), '(self.q_proj_weight)\n', (2884, 2904), False, 'from oneflow.nn.init import xavier_uniform_, constant_, xavier_normal_\n'), ((2917, 2952), 'oneflow.nn.init.xavier_uniform_', 'xavier_uniform_', (['self.k_proj_weight'], {}), '(self.k_proj_weight)\n', (2932, 2952), False, 'from oneflow.nn.init import xavier_uniform_, constant_, xavier_normal_\n'), ((2965, 3000), 'oneflow.nn.init.xavier_uniform_', 'xavier_uniform_', (['self.v_proj_weight'], {}), '(self.v_proj_weight)\n', (2980, 3000), False, 'from oneflow.nn.init import xavier_uniform_, constant_, xavier_normal_\n'), ((3055, 3088), 'oneflow.nn.init.constant_', 'constant_', (['self.in_proj_bias', '(0.0)'], {}), '(self.in_proj_bias, 0.0)\n', (3064, 3088), False, 'from oneflow.nn.init import xavier_uniform_, constant_, xavier_normal_\n'), ((3101, 3135), 'oneflow.nn.init.constant_', 'constant_', (['self.out_proj.bias', '(0.0)'], {}), '(self.out_proj.bias, 0.0)\n', (3110, 3135), False, 'from oneflow.nn.init import xavier_uniform_, constant_, xavier_normal_\n'), ((3184, 3211), 'oneflow.nn.init.xavier_normal_', 'xavier_normal_', (['self.bias_k'], {}), '(self.bias_k)\n', (3198, 3211), False, 'from oneflow.nn.init import xavier_uniform_, constant_, xavier_normal_\n'), ((3260, 3287), 'oneflow.nn.init.xavier_normal_', 'xavier_normal_', (['self.bias_v'], {}), '(self.bias_v)\n', (3274, 3287), False, 'from oneflow.nn.init import xavier_uniform_, constant_, xavier_normal_\n'), ((1675, 1709), 
'oneflow.zeros', 'flow.zeros', (['(embed_dim, embed_dim)'], {}), '((embed_dim, embed_dim))\n', (1685, 1709), True, 'import oneflow as flow\n'), ((1754, 1788), 'oneflow.zeros', 'flow.zeros', (['(embed_dim, self.kdim)'], {}), '((embed_dim, self.kdim))\n', (1764, 1788), True, 'import oneflow as flow\n'), ((1833, 1867), 'oneflow.zeros', 'flow.zeros', (['(embed_dim, self.vdim)'], {}), '((embed_dim, self.vdim))\n', (1843, 1867), True, 'import oneflow as flow\n'), ((1987, 2025), 'oneflow.zeros', 'flow.zeros', (['(3 * embed_dim, embed_dim)'], {}), '((3 * embed_dim, embed_dim))\n', (1997, 2025), True, 'import oneflow as flow\n'), ((2264, 2289), 'oneflow.zeros', 'flow.zeros', (['(3 * embed_dim)'], {}), '(3 * embed_dim)\n', (2274, 2289), True, 'import oneflow as flow\n'), ((2488, 2517), 'oneflow.zeros', 'flow.zeros', (['(1, 1, embed_dim)'], {}), '((1, 1, embed_dim))\n', (2498, 2517), True, 'import oneflow as flow\n'), ((2555, 2584), 'oneflow.zeros', 'flow.zeros', (['(1, 1, embed_dim)'], {}), '((1, 1, embed_dim))\n', (2565, 2584), True, 'import oneflow as flow\n'), ((11069, 11128), 'oneflow.zeros', 'flow.zeros', (['zero_attn_shape'], {'dtype': 'k.dtype', 'device': 'k.device'}), '(zero_attn_shape, dtype=k.dtype, device=k.device)\n', (11079, 11128), True, 'import oneflow as flow\n'), ((11185, 11244), 'oneflow.zeros', 'flow.zeros', (['zero_attn_shape'], {'dtype': 'v.dtype', 'device': 'v.device'}), '(zero_attn_shape, dtype=v.dtype, device=v.device)\n', (11195, 11244), True, 'import oneflow as flow\n'), ((12240, 12284), 'oneflow.logical_or', 'flow.logical_or', (['attn_mask', 'key_padding_mask'], {}), '(attn_mask, key_padding_mask)\n', (12255, 12284), True, 'import oneflow as flow\n'), ((12495, 12521), 'oneflow.zeros_like', 'flow.zeros_like', (['attn_mask'], {}), '(attn_mask)\n', (12510, 12521), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import oneflow as of
@of.global_function
def variable_scope_test_job_1(a=of.FixedTensorDef((1, 3, 6, 6))):
    """Build conv -> fc -> bias_add -> fc2 under nested name scopes and
    print every op's scoped name.

    Fix: the decorator previously referenced ``flow``, but this module
    imports oneflow as ``of``, so ``flow`` was an undefined name and the
    module failed to import; use ``of.global_function`` instead.
    """
    with of.scope.namespace("job1_scope1"):
        convw = of.get_variable(
            "conv_weight",
            shape=(5, 3, 3, 3),
            dtype=a.dtype,
            initializer=of.random_uniform_initializer(),
            trainable=True,
        )
        conv = of.nn.conv2d(a, convw, 1, "SAME", None, "NCHW", name="conv")
        with of.scope.namespace("job1_scope2"):
            fcw = of.get_variable(
                "fc_weight",
                shape=(180, 10),
                dtype=a.dtype,
                initializer=of.random_uniform_initializer(),
                trainable=True,
            )
            # Flatten the conv feature map before the fully-connected layer.
            fc = of.matmul(of.reshape(conv, (conv.shape[0], -1)), fcw, name="fc")
            fcb = of.get_variable(
                "fc_bias",
                shape=(10,),
                dtype=a.dtype,
                initializer=of.constant_initializer(1.0),
                trainable=True,
            )
            fc_bias = of.nn.bias_add(fc, fcb)
        # Back in job1_scope1: second fully-connected layer.
        fcw2 = of.get_variable(
            "fc2_weight",
            shape=(10, 20),
            dtype=a.dtype,
            initializer=of.random_uniform_initializer(),
            trainable=True,
        )
        fc2 = of.matmul(fc_bias, fcw2, name="fc2")
    print("conv_weight op name: ", convw.op_name)
    print("conv op name: ", conv.op_name)
    print("fc_weight op name: ", fcw.op_name)
    print("fc_bias op name: ", fcb.op_name)
    print("fc op name: ", fc.op_name)
    print("fc2_weight op name: ", fcw2.op_name)
    print("fc2 op name: ", fc2.op_name)
    return fc2
@of.global_function
def variable_scope_test_job_2(a=of.FixedTensorDef((2, 5))):
    """Gather columns of ``a`` with a non-trainable index variable inside a
    name scope, printing the scoped op names.

    Fix: the decorator previously referenced ``flow``, but this module
    imports oneflow as ``of``, so ``flow`` was an undefined name and the
    module failed to import; use ``of.global_function`` instead.
    """
    with of.scope.namespace("job2_scope1"):
        indices = of.get_variable(
            "gather_inds",
            shape=(2,),
            dtype=of.int32,
            initializer=of.constant_initializer(1),
            trainable=False,
        )
        output = of.gather(a, indices, axis=1)
    print("indices op name: ", indices.op_name)
    print("gather op name: ", output.op_name)
    return output
# Random inputs matching the two jobs' FixedTensorDef shapes.
a1 = np.random.rand(1, 3, 6, 6).astype(np.float32)
a2 = np.arange(10, dtype=np.float32).reshape(2, 5)
# NOTE(review): job 1 is invoked via `.run(a1)` while job 2 is called
# directly; both then `.get()` the result — confirm both call styles are
# valid for this oneflow version (the inconsistency looks accidental).
ret1 = variable_scope_test_job_1.run(a1).get()
ret2 = variable_scope_test_job_2(a2).get()
print("Job1 result: ")
print(ret1)
print("shape: ", ret1.shape)
print("\n")
print("Job2 result: ")
print(ret2)
print("shape: ", ret2.shape)
| [
"oneflow.scope.namespace",
"oneflow.nn.conv2d",
"oneflow.gather",
"oneflow.constant_initializer",
"oneflow.random_uniform_initializer",
"oneflow.FixedTensorDef",
"oneflow.nn.bias_add",
"oneflow.reshape",
"oneflow.matmul"
] | [((686, 717), 'oneflow.FixedTensorDef', 'of.FixedTensorDef', (['(1, 3, 6, 6)'], {}), '((1, 3, 6, 6))\n', (703, 717), True, 'import oneflow as of\n'), ((2333, 2358), 'oneflow.FixedTensorDef', 'of.FixedTensorDef', (['(2, 5)'], {}), '((2, 5))\n', (2350, 2358), True, 'import oneflow as of\n'), ((729, 762), 'oneflow.scope.namespace', 'of.scope.namespace', (['"""job1_scope1"""'], {}), "('job1_scope1')\n", (747, 762), True, 'import oneflow as of\n'), ((993, 1053), 'oneflow.nn.conv2d', 'of.nn.conv2d', (['a', 'convw', '(1)', '"""SAME"""', 'None', '"""NCHW"""'], {'name': '"""conv"""'}), "(a, convw, 1, 'SAME', None, 'NCHW', name='conv')\n", (1005, 1053), True, 'import oneflow as of\n'), ((1915, 1951), 'oneflow.matmul', 'of.matmul', (['fc_bias', 'fcw2'], {'name': '"""fc2"""'}), "(fc_bias, fcw2, name='fc2')\n", (1924, 1951), True, 'import oneflow as of\n'), ((2370, 2403), 'oneflow.scope.namespace', 'of.scope.namespace', (['"""job2_scope1"""'], {}), "('job2_scope1')\n", (2388, 2403), True, 'import oneflow as of\n'), ((2627, 2656), 'oneflow.gather', 'of.gather', (['a', 'indices'], {'axis': '(1)'}), '(a, indices, axis=1)\n', (2636, 2656), True, 'import oneflow as of\n'), ((2777, 2803), 'numpy.random.rand', 'np.random.rand', (['(1)', '(3)', '(6)', '(6)'], {}), '(1, 3, 6, 6)\n', (2791, 2803), True, 'import numpy as np\n'), ((2828, 2859), 'numpy.arange', 'np.arange', (['(10)'], {'dtype': 'np.float32'}), '(10, dtype=np.float32)\n', (2837, 2859), True, 'import numpy as np\n'), ((1068, 1101), 'oneflow.scope.namespace', 'of.scope.namespace', (['"""job1_scope2"""'], {}), "('job1_scope2')\n", (1086, 1101), True, 'import oneflow as of\n'), ((1668, 1691), 'oneflow.nn.bias_add', 'of.nn.bias_add', (['fc', 'fcb'], {}), '(fc, fcb)\n', (1682, 1691), True, 'import oneflow as of\n'), ((907, 938), 'oneflow.random_uniform_initializer', 'of.random_uniform_initializer', ([], {}), '()\n', (936, 938), True, 'import oneflow as of\n'), ((1365, 1402), 'oneflow.reshape', 'of.reshape', (['conv', 
'(conv.shape[0], -1)'], {}), '(conv, (conv.shape[0], -1))\n', (1375, 1402), True, 'import oneflow as of\n'), ((1830, 1861), 'oneflow.random_uniform_initializer', 'of.random_uniform_initializer', ([], {}), '()\n', (1859, 1861), True, 'import oneflow as of\n'), ((2543, 2569), 'oneflow.constant_initializer', 'of.constant_initializer', (['(1)'], {}), '(1)\n', (2566, 2569), True, 'import oneflow as of\n'), ((1259, 1290), 'oneflow.random_uniform_initializer', 'of.random_uniform_initializer', ([], {}), '()\n', (1288, 1290), True, 'import oneflow as of\n'), ((1570, 1598), 'oneflow.constant_initializer', 'of.constant_initializer', (['(1.0)'], {}), '(1.0)\n', (1593, 1598), True, 'import oneflow as of\n')] |
import argparse
import oneflow as flow
from classifier_flow import ClueAFQMCCPT
from tokenizer.tokenization_bert import BertTokenizer
def inference_afqmc(args):
    """Classify one AFQMC sentence pair with a fine-tuned CPT model.

    Tokenizes ``args.text1``/``args.text2``, loads the checkpoint from
    ``args.model_load_dir``, runs the classifier on ``args.device``, and
    prints the softmax scores together with the argmax prediction.
    """
    tokenizer = BertTokenizer.from_pretrained("hfl/chinese-roberta-wwm-ext")
    model = ClueAFQMCCPT(args.pretrain_dir, args.n_classes, args.is_train).to(
        args.device
    )
    encoded = tokenizer(args.text1, args.text2)

    def _as_batch(values):
        # (1, seq_len) int32 tensor on the target device.
        return flow.tensor(values, dtype=flow.int32).reshape(1, -1).to(args.device)

    input_ids = _as_batch(encoded["input_ids"])
    attention_mask = _as_batch(encoded["attention_mask"])
    model.load_state_dict(flow.load(args.model_load_dir))
    model.eval()
    logits = model(input_ids, attention_mask)
    probs = flow.softmax(logits)
    prediction = flow.argmax(probs)
    print("Softmax output:", probs.numpy())
    print("Predict:", prediction.numpy())
if __name__ == "__main__":
    # CLI entry point; only the AFQMC task is currently wired up.
    cli = argparse.ArgumentParser()
    cli.add_argument(
        "--pretrain_dir",
        type=str,
        default="/remote-home/share/shxing/cpt_pretrain_oneflow/cpt-base",
    )
    cli.add_argument("--model_load_dir", type=str, default="cpt_pretrain_afqmc")
    cli.add_argument("--text1", type=str, default="双十一花呗提额在哪")
    cli.add_argument("--text2", type=str, default="里可以提花呗额度")
    cli.add_argument("--task", type=str, default="afqmc")
    cli.add_argument("--cuda", action="store_true")
    args = cli.parse_args()
    args.is_train = False
    args.device = "cuda" if args.cuda else "cpu"
    # Guard clause: any task other than afqmc is unsupported.
    if args.task != "afqmc":
        raise NotImplementedError
    args.n_classes = 2
    inference_afqmc(args)
| [
"oneflow.argmax",
"oneflow.tensor",
"oneflow.softmax",
"oneflow.load"
] | [((182, 242), 'tokenizer.tokenization_bert.BertTokenizer.from_pretrained', 'BertTokenizer.from_pretrained', (['"""hfl/chinese-roberta-wwm-ext"""'], {}), "('hfl/chinese-roberta-wwm-ext')\n", (211, 242), False, 'from tokenizer.tokenization_bert import BertTokenizer\n'), ((806, 826), 'oneflow.softmax', 'flow.softmax', (['output'], {}), '(output)\n', (818, 826), True, 'import oneflow as flow\n'), ((839, 858), 'oneflow.argmax', 'flow.argmax', (['output'], {}), '(output)\n', (850, 858), True, 'import oneflow as flow\n'), ((983, 1008), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1006, 1008), False, 'import argparse\n'), ((698, 728), 'oneflow.load', 'flow.load', (['args.model_load_dir'], {}), '(args.model_load_dir)\n', (707, 728), True, 'import oneflow as flow\n'), ((255, 317), 'classifier_flow.ClueAFQMCCPT', 'ClueAFQMCCPT', (['args.pretrain_dir', 'args.n_classes', 'args.is_train'], {}), '(args.pretrain_dir, args.n_classes, args.is_train)\n', (267, 317), False, 'from classifier_flow import ClueAFQMCCPT\n'), ((485, 525), 'oneflow.tensor', 'flow.tensor', (['input_ids'], {'dtype': 'flow.int32'}), '(input_ids, dtype=flow.int32)\n', (496, 525), True, 'import oneflow as flow\n'), ((588, 633), 'oneflow.tensor', 'flow.tensor', (['attention_mask'], {'dtype': 'flow.int32'}), '(attention_mask, dtype=flow.int32)\n', (599, 633), True, 'import oneflow as flow\n')] |
# !/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Copyright 2020 Tianshu AI Platform. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=============================================================
"""
import logging
import json
import os
import struct
import cv2
import sched
import numpy as np
import oneflow.core.record.record_pb2 as of_record
import time
from abc import ABC
from program.abstract.algorithm import Algorithm
# Module-level scheduler and delayed-task id; neither is referenced in the
# visible code (executor only declares `global delayId`) — presumably kept
# for delayed-task support elsewhere. TODO confirm.
schedule = sched.scheduler(time.time, time.sleep)
delayId = ""
# Root of the shared NFS mount that task dataset paths are joined onto.
basePath = '/nfs/'
# Subdirectory (under the dataset path) where OFRecord parts are written.
descPath = 'ofrecord/train'
class ImageCoder(object):
    """Utility wrapper around OpenCV for decoding, resizing and re-encoding images."""

    def __init__(self, size=None):
        # Target (height, width); None disables resizing.
        self.size = size

    def _resize(self, image_data):
        """Return the image resized to ``self.size``, or unchanged if no resize is needed."""
        needs_resize = self.size is not None and image_data.shape[:2] != self.size
        if not needs_resize:
            return image_data
        return cv2.resize(image_data, self.size)

    def image_to_jpeg(self, image_data):
        """Decode raw bytes, optionally resize, and return (jpeg_bytes, height, width)."""
        decoded = cv2.imdecode(np.frombuffer(image_data, np.uint8), 1)
        decoded = self._resize(decoded)
        jpeg_bytes = cv2.imencode(".jpg", decoded)[1].tobytes()
        return jpeg_bytes, decoded.shape[0], decoded.shape[1]
class Ofrecord(Algorithm, ABC):
    """Convert an annotated image dataset into OneFlow OFRecord part files."""
    def __init__(self):
        pass
    def execute(task):
        # NOTE(review): defined without `self`/@staticmethod and invoked
        # through the class (works in Python 3); @staticmethod would be clearer.
        return Ofrecord.start_ofrecord(task)
    def start_ofrecord(jsonStr):
        """Build a label map from the task JSON, write OFRecord parts, and
        return ``(finish_data, result)`` where finish_data echoes the task id."""
        label_map = {}
        index = 0
        for item in jsonStr["datasetLabels"].keys():
            # NOTE(review): `index >= 0` is always true, so this only skips
            # the '@type' key — confirm whether `index > 0` was intended.
            if index >= 0 and item != '@type':
                label_map[item] = jsonStr["datasetLabels"][item]
            index += 1
        Ofrecord.executor(os.path.join(basePath, jsonStr["datasetPath"]),
                          os.path.join(basePath, jsonStr["datasetPath"], descPath),
                          label_map,
                          jsonStr["files"],
                          jsonStr["partNum"])
        result = True
        finish_data = {"reTaskId": jsonStr["reTaskId"]}
        return finish_data, result
    def _process_image(filename, coder):
        """Process a single image file.
        Args:
            filename: string, path to an image file e.g., '/path/to/example.JPG'.
            coder: instance of ImageCoder to provide image coding utils.
        Returns:
            image_buffer: string, JPEG encoding of RGB image.
            height: integer, image height in pixels.
            width: integer, image width in pixels.
        """
        # Read the image file.
        with open(filename, 'rb') as f:
            image_data = f.read()
        image_data, height, width = coder.image_to_jpeg(image_data)
        return image_data, height, width
    def _bytes_feature(value):
        """Wrapper for inserting bytes features into Example proto."""
        return of_record.Feature(bytes_list=of_record.BytesList(value=[value]))
    def dense_to_one_hot(labels_dense, num_classes):
        """Convert class labels from scalars to one-hot vectors."""
        num_labels = labels_dense.shape[0]
        # Flat-index trick: row i gets a 1 at column labels_dense[i].
        index_offset = np.arange(num_labels) * num_classes
        labels_one_hot = np.zeros((num_labels, num_classes))
        labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
        return labels_one_hot
    def extract_img_label(names, path):
        """Load images and their annotation labels.
        Args:
            names: image file names to load.
            path: dataset root containing 'origin/' and 'annotation/' subdirs.
        Returns:
            num_imgs: number of images kept (those with a non-empty annotation).
            data: np array of encoded JPEG byte strings, one per kept image.
            labels: 1D np array of category ids.
        """
        train_img = os.path.join(path, 'origin/')
        train_label = os.path.join(path, 'annotation/')
        num_imgs = len(names)
        data = []
        labels = []
        print('^^^^^^^^^^ start img_set for sycle')
        for i in names:
            name = os.path.splitext(i)[0]
            print(name)
            # Every image is resized/re-encoded to 224x224 JPEG.
            coder = ImageCoder((224, 224))
            image_buffer, height, width = Ofrecord._process_image(
                os.path.join(train_img, i), coder)
            data += [image_buffer]
            if os.path.exists(os.path.join(train_label, name)):
                with open(os.path.join(train_label, name), "r", encoding='utf-8') as jsonFile:
                    la = json.load(jsonFile)
                if la:
                    labels += [la[0]['category_id']]
                else:
                    # Empty annotation: drop the image we just appended.
                    data.pop()
                    num_imgs -= 1
            else:
                # NOTE(review): image without an annotation file is kept in
                # `data` but gets no label, desynchronizing data/labels —
                # confirm whether it should be dropped like the empty case.
                print('File is not found')
        print('^^^^^^^^^ img_set for end')
        data = np.array(data)
        labels = np.array(labels)
        print(data.shape, labels.shape)
        return num_imgs, data, labels
    def executor(src_path, desc, label_map, files, part_id):
        """Execute ofrecord task method."""
        # NOTE(review): `delayId` is declared global but never used here.
        global delayId
        logging.info(part_id)
        num_imgs, images, labels = Ofrecord.extract_img_label(files, src_path)
        # Remap original label ids (sorted numerically) to dense 0..n-1 ids,
        # keyed by the human-readable label name.
        keys = sorted(list(map(int, label_map.keys())))
        label_map_new = {}
        for i in range(len(keys)):
            label_map_new[label_map[str(keys[i])]] = i
        if not num_imgs:
            return False, 0, 0
        try:
            os.makedirs(desc)
        except Exception as e:
            # Best-effort mkdir; directory may already exist.
            print('{} exists.'.format(desc))
        filename = 'part-{}'.format(part_id)
        filename = os.path.join(desc, filename)
        # NOTE(review): `f` is not closed if a write raises; consider `with open(...)`.
        f = open(filename, 'wb')
        print(filename)
        for i in range(num_imgs):
            img = images[i]
            label = label_map_new[str(labels[i])]
            sample = of_record.OFRecord(feature={
                'class/label': of_record.Feature(int32_list=of_record.Int32List(value=[label])),
                'encoded': Ofrecord._bytes_feature(img)
            })
            # OFRecord framing: 8-byte little-endian size prefix, then the message.
            size = sample.ByteSize()
            f.write(struct.pack("q", size))
            f.write(sample.SerializeToString())
        if f:
            f.close()
| [
"oneflow.core.record.record_pb2.Int32List",
"oneflow.core.record.record_pb2.BytesList"
] | [((939, 977), 'sched.scheduler', 'sched.scheduler', (['time.time', 'time.sleep'], {}), '(time.time, time.sleep)\n', (954, 977), False, 'import sched\n'), ((3577, 3612), 'numpy.zeros', 'np.zeros', (['(num_labels, num_classes)'], {}), '((num_labels, num_classes))\n', (3585, 3612), True, 'import numpy as np\n'), ((4082, 4111), 'os.path.join', 'os.path.join', (['path', '"""origin/"""'], {}), "(path, 'origin/')\n", (4094, 4111), False, 'import os\n'), ((4134, 4167), 'os.path.join', 'os.path.join', (['path', '"""annotation/"""'], {}), "(path, 'annotation/')\n", (4146, 4167), False, 'import os\n'), ((5083, 5097), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (5091, 5097), True, 'import numpy as np\n'), ((5115, 5131), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (5123, 5131), True, 'import numpy as np\n'), ((5347, 5368), 'logging.info', 'logging.info', (['part_id'], {}), '(part_id)\n', (5359, 5368), False, 'import logging\n'), ((5860, 5888), 'os.path.join', 'os.path.join', (['desc', 'filename'], {}), '(desc, filename)\n', (5872, 5888), False, 'import os\n'), ((1316, 1349), 'cv2.resize', 'cv2.resize', (['image_data', 'self.size'], {}), '(image_data, self.size)\n', (1326, 1349), False, 'import cv2\n'), ((1452, 1487), 'numpy.frombuffer', 'np.frombuffer', (['image_data', 'np.uint8'], {}), '(image_data, np.uint8)\n', (1465, 1487), True, 'import numpy as np\n'), ((2080, 2126), 'os.path.join', 'os.path.join', (['basePath', "jsonStr['datasetPath']"], {}), "(basePath, jsonStr['datasetPath'])\n", (2092, 2126), False, 'import os\n'), ((2153, 2209), 'os.path.join', 'os.path.join', (['basePath', "jsonStr['datasetPath']", 'descPath'], {}), "(basePath, jsonStr['datasetPath'], descPath)\n", (2165, 2209), False, 'import os\n'), ((3516, 3537), 'numpy.arange', 'np.arange', (['num_labels'], {}), '(num_labels)\n', (3525, 3537), True, 'import numpy as np\n'), ((5702, 5719), 'os.makedirs', 'os.makedirs', (['desc'], {}), '(desc)\n', (5713, 5719), False, 'import os\n'), 
((3292, 3326), 'oneflow.core.record.record_pb2.BytesList', 'of_record.BytesList', ([], {'value': '[value]'}), '(value=[value])\n', (3311, 3326), True, 'import oneflow.core.record.record_pb2 as of_record\n'), ((4331, 4350), 'os.path.splitext', 'os.path.splitext', (['i'], {}), '(i)\n', (4347, 4350), False, 'import os\n'), ((4504, 4530), 'os.path.join', 'os.path.join', (['train_img', 'i'], {}), '(train_img, i)\n', (4516, 4530), False, 'import os\n'), ((4606, 4637), 'os.path.join', 'os.path.join', (['train_label', 'name'], {}), '(train_label, name)\n', (4618, 4637), False, 'import os\n'), ((6333, 6355), 'struct.pack', 'struct.pack', (['"""q"""', 'size'], {}), "('q', size)\n", (6344, 6355), False, 'import struct\n'), ((4761, 4780), 'json.load', 'json.load', (['jsonFile'], {}), '(jsonFile)\n', (4770, 4780), False, 'import json\n'), ((1553, 1585), 'cv2.imencode', 'cv2.imencode', (['""".jpg"""', 'image_data'], {}), "('.jpg', image_data)\n", (1565, 1585), False, 'import cv2\n'), ((4667, 4698), 'os.path.join', 'os.path.join', (['train_label', 'name'], {}), '(train_label, name)\n', (4679, 4698), False, 'import os\n'), ((6168, 6202), 'oneflow.core.record.record_pb2.Int32List', 'of_record.Int32List', ([], {'value': '[label]'}), '(value=[label])\n', (6187, 6202), True, 'import oneflow.core.record.record_pb2 as of_record\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import os
from collections import OrderedDict
import numpy as np
import oneflow as flow
import oneflow.typing as tp
from oneflow.python.test.ops.test_util import GenArgList
def compare_with_np(device_type, x_shape, y_shape, axis):
    """Check flow.concat against np.concatenate on the given device.

    Builds a one-shot "predict" job that concatenates two zero-initialized
    variables (offset by the fed inputs) along ``axis``, feeds random data,
    and asserts the OneFlow output matches the NumPy reference within 1e-4.

    Args:
        device_type: device tag used for the placement scope (e.g. "cambricon").
        x_shape: shape of the first input.
        y_shape: shape of the second input.
        axis: concatenation axis passed to both flow.concat and np.concatenate.
    """
    def _np_concat(x, y):
        # NumPy reference result for the same inputs and axis.
        return np.concatenate((x, y), axis)
    flow.clear_default_session()
    func_config = flow.FunctionConfig()
    func_config.default_data_type(flow.float32)
    func_config.default_placement_scope(flow.scope.placement(device_type, "0:0"))
    flow.config.enable_legacy_model_io(True)
    @flow.global_function(type="predict", function_config=func_config)
    def ConcatJob(
        x: tp.Numpy.Placeholder(shape=x_shape, dtype=flow.float32),
        y: tp.Numpy.Placeholder(shape=y_shape, dtype=flow.float32),
    ) -> tp.Numpy:
        # Route the fed data through non-trainable zero variables so concat
        # consumes variable outputs rather than raw placeholders.
        x_var = flow.get_variable(
            "x",
            shape=x_shape,
            dtype=flow.float32,
            initializer=flow.constant_initializer(0),
            trainable=False,
        )
        x_var = x_var + x
        y_var = flow.get_variable(
            "y",
            shape=y_shape,
            dtype=flow.float32,
            initializer=flow.constant_initializer(0),
            trainable=False,
        )
        y_var = y_var + y
        out = flow.concat([x_var, y_var], axis)
        return out
    # Legacy model IO requires an initialized checkpoint before running jobs.
    check_point = flow.train.CheckPoint()
    check_point.init()
    data_x = np.random.random(size=x_shape)
    data_y = np.random.random(size=y_shape)
    of_out = ConcatJob(data_x, data_y)
    np_out = _np_concat(data_x, data_y)
    assert np.allclose(of_out, np_out, rtol=1e-4, atol=1e-4)
@flow.unittest.skip_unless_1n1d()
class TestTranspose(flow.unittest.TestCase):
    # NOTE(review): despite the class/method names, this case exercises
    # flow.concat via compare_with_np — confirm whether a rename is wanted.
    def test_transpose(test_case):
        """Sweep every concat axis of a 4-D shape on the cambricon device."""
        arg_dict = OrderedDict(
            [
                ("device_type", ["cambricon"]),
                ("x_shape", [(10, 20, 30, 40)]),
                ("y_shape", [(10, 20, 30, 40)]),
                ("axis", [0, 1, 2, 3]),
            ]
        )
        for arg in GenArgList(arg_dict):
            compare_with_np(*arg)
# Allow running this test file directly; unittest discovers the cases above.
if __name__ == "__main__":
    unittest.main()
| [
"oneflow.unittest.skip_unless_1n1d",
"oneflow.clear_default_session",
"oneflow.global_function",
"oneflow.train.CheckPoint",
"oneflow.python.test.ops.test_util.GenArgList",
"oneflow.scope.placement",
"oneflow.concat",
"oneflow.typing.Numpy.Placeholder",
"oneflow.constant_initializer",
"oneflow.con... | [((2818, 2850), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (2848, 2850), True, 'import oneflow as flow\n'), ((1502, 1530), 'oneflow.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (1528, 1530), True, 'import oneflow as flow\n'), ((1549, 1570), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (1568, 1570), True, 'import oneflow as flow\n'), ((1705, 1745), 'oneflow.config.enable_legacy_model_io', 'flow.config.enable_legacy_model_io', (['(True)'], {}), '(True)\n', (1739, 1745), True, 'import oneflow as flow\n'), ((1752, 1817), 'oneflow.global_function', 'flow.global_function', ([], {'type': '"""predict"""', 'function_config': 'func_config'}), "(type='predict', function_config=func_config)\n", (1772, 1817), True, 'import oneflow as flow\n'), ((2540, 2563), 'oneflow.train.CheckPoint', 'flow.train.CheckPoint', ([], {}), '()\n', (2561, 2563), True, 'import oneflow as flow\n'), ((2600, 2630), 'numpy.random.random', 'np.random.random', ([], {'size': 'x_shape'}), '(size=x_shape)\n', (2616, 2630), True, 'import numpy as np\n'), ((2644, 2674), 'numpy.random.random', 'np.random.random', ([], {'size': 'y_shape'}), '(size=y_shape)\n', (2660, 2674), True, 'import numpy as np\n'), ((2765, 2818), 'numpy.allclose', 'np.allclose', (['of_out', 'np_out'], {'rtol': '(0.0001)', 'atol': '(0.0001)'}), '(of_out, np_out, rtol=0.0001, atol=0.0001)\n', (2776, 2818), True, 'import numpy as np\n'), ((3258, 3273), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3271, 3273), False, 'import unittest\n'), ((1468, 1496), 'numpy.concatenate', 'np.concatenate', (['(x, y)', 'axis'], {}), '((x, y), axis)\n', (1482, 1496), True, 'import numpy as np\n'), ((1659, 1699), 'oneflow.scope.placement', 'flow.scope.placement', (['device_type', '"""0:0"""'], {}), "(device_type, '0:0')\n", (1679, 1699), True, 'import oneflow as flow\n'), ((2468, 2501), 'oneflow.concat', 'flow.concat', (['[x_var, y_var]', 
'axis'], {}), '([x_var, y_var], axis)\n', (2479, 2501), True, 'import oneflow as flow\n'), ((2950, 2963), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2961, 2963), False, 'from collections import OrderedDict\n'), ((3169, 3189), 'oneflow.python.test.ops.test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (3179, 3189), False, 'from oneflow.python.test.ops.test_util import GenArgList\n'), ((1848, 1903), 'oneflow.typing.Numpy.Placeholder', 'tp.Numpy.Placeholder', ([], {'shape': 'x_shape', 'dtype': 'flow.float32'}), '(shape=x_shape, dtype=flow.float32)\n', (1868, 1903), True, 'import oneflow.typing as tp\n'), ((1916, 1971), 'oneflow.typing.Numpy.Placeholder', 'tp.Numpy.Placeholder', ([], {'shape': 'y_shape', 'dtype': 'flow.float32'}), '(shape=y_shape, dtype=flow.float32)\n', (1936, 1971), True, 'import oneflow.typing as tp\n'), ((2127, 2155), 'oneflow.constant_initializer', 'flow.constant_initializer', (['(0)'], {}), '(0)\n', (2152, 2155), True, 'import oneflow as flow\n'), ((2358, 2386), 'oneflow.constant_initializer', 'flow.constant_initializer', (['(0)'], {}), '(0)\n', (2383, 2386), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow.python.framework.interpret_util as interpret_util
import oneflow.python.framework.remote_blob as remote_blob_util
import oneflow.python.framework.c_api_util as c_api_util
import oneflow.python.framework.compile_context as compile_context
import oneflow.python.framework.distribute as distribute
import oneflow.python.framework.hob as hob
import oneflow.python.framework.remote_blob as remote_blob_util
import oneflow.python.lib.core.enable_if as enable_if
import oneflow.core.operator.op_conf_pb2 as op_conf_util
import oneflow.core.framework.user_op_attr_pb2 as attr_value_pb
import oneflow.core.register.logical_blob_id_pb2 as logical_blob_id_util
import oneflow.core.common.shape_pb2 as shape_util
import oneflow
from oneflow.python.oneflow_export import oneflow_export
import oneflow.python.framework.hob as hob
import oneflow.python.experimental.name_scope as name_scope
import oneflow.core.eager.eager_symbol_pb2 as eager_symbol_util
import oneflow.python.eager.eager_blob_util as eager_blob_util
import oneflow.python.lib.core.enable_if as enable_if
import random
import oneflow.python.eager.gradient_util as gradient_util
import oneflow as flow
import oneflow_api
import traceback
blob_register = oneflow_api.GetDefaultBlobRegister()
class UserOp(object):
    """Base wrapper around a user-op ``OperatorConf``.

    Subclasses supply the execution strategy (``InferAndTryRun``) and the
    remote-blob construction (``MakeRemoteBlob``); this base manages the
    protobuf conf and exposes the op's output blobs.
    """

    def __init__(self, op_name, op_type_name=None):
        # Fresh protobuf conf; op_type_name may be filled in later via the builder.
        self.op_conf_ = op_conf_util.OperatorConf()
        self.op_conf_.name = op_name
        if op_type_name is not None:
            self.op_conf_.user_conf.op_type_name = op_type_name
        # Inherit the device tag from the currently active scope.
        device_tag = oneflow.current_scope().device_parallel_desc_symbol.device_tag
        self.op_conf_.device_tag = device_tag
        # Output arg names registered through UserOpConfBuilder.Output().
        self.output_arg_key_list_ = []
    @property
    def op_conf(self):
        """The underlying ``OperatorConf`` protobuf message."""
        return self.op_conf_
    def InferAndTryRun(self):
        # Strategy hook: subclasses register/execute the op.
        raise NotImplementedError
    def MakeRemoteBlob(self, lbi):
        # Strategy hook: subclasses wrap a logical blob id into a remote blob.
        raise NotImplementedError
    def RemoteBlobList(self):
        """Return all output blobs as a tuple, in Output() registration order.

        Raises:
            ValueError: if an output declared in the conf was never registered
                via the builder's Output() call.
        """
        remote_blob_list = []
        for k in self.op_conf_.user_conf.output:
            if k not in self.output_arg_key_list_:
                raise ValueError(
                    "output_arg_name {} of {} op is not set in python op builder".format(
                        k, self.op_conf_.name
                    )
                )
        for output_arg_name in self.output_arg_key_list_:
            assert output_arg_name in self.op_conf_.user_conf.output
            for i in range(len(self.op_conf_.user_conf.output[output_arg_name].s)):
                lbi = logical_blob_id_util.LogicalBlobId()
                lbi.op_name = self.op_conf_.name
                lbi.blob_name = "{}_{}".format(output_arg_name, i)
                remote_blob_obj = self.MakeRemoteBlob(lbi)
                remote_blob_list.append(remote_blob_obj)
                if flow.eager_execution_enabled():
                    # Eager mode: record the blob object for the backward pass.
                    gradient_util.GetDefaultBackwardBlobRegister().TrySetObject4BlobName(
                        remote_blob_obj.logical_blob_name, remote_blob_obj.blob_object
                    )
        return tuple(remote_blob_list)
    def RemoteBlobDict(self):
        """Return output blobs grouped by output arg name: {name: [blobs]}.

        Raises:
            ValueError: if an output declared in the conf was never registered
                via the builder's Output() call.
        """
        remote_blob_dict = {}
        for k in self.op_conf_.user_conf.output:
            if k not in self.output_arg_key_list_:
                raise ValueError(
                    "output_arg_name {} of {} op is not set in python op builder".format(
                        k, self.op_conf_.name
                    )
                )
        for output_arg_name in self.output_arg_key_list_:
            assert output_arg_name in self.op_conf_.user_conf.output
            if output_arg_name not in remote_blob_dict:
                remote_blob_dict[output_arg_name] = []
            for i in range(len(self.op_conf_.user_conf.output[output_arg_name].s)):
                lbi = logical_blob_id_util.LogicalBlobId()
                lbi.op_name = self.op_conf_.name
                lbi.blob_name = "{}_{}".format(output_arg_name, i)
                remote_blob_dict[output_arg_name].append(self.MakeRemoteBlob(lbi))
        return remote_blob_dict
    def SoleOutputBlob(self):
        """Return the op's single output blob; asserts exactly one exists."""
        blobs = self.RemoteBlobList()
        assert len(blobs) == 1
        return blobs[0]
class UserOpModule(object):
    """Mixin that owns a write-once op-kernel object.

    Subclasses implement ``InitOpKernel`` and install the kernel through
    ``set_opkernel_object`` exactly once; it is then exposed read-only via
    the ``opkernel_object`` property.
    """

    @property
    def opkernel_object(self):
        # Read-only view of whatever set_opkernel_object() installed.
        return self.opkernel_object_

    def set_opkernel_object(self, opkernel_object):
        # Write-once: installing a second kernel is a programming error.
        assert not hasattr(self, "opkernel_object_")
        self.opkernel_object_ = opkernel_object

    def InitOpKernel(self):
        raise NotImplementedError
@oneflow_export("user_op_builder")
def api_user_op_builder(op_name):
r"""Build a wrapper of user op.
For instance::
def myargmax(
input: oneflow_api.BlobDesc) -> oneflow_api.BlobDesc:
return (
flow.user_op_builder("myargmax")
.Op("argmax")
.Input("in", [input])
.Output("out")
.Build()
.InferAndTryRun()
.RemoteBlobList()[0]
)
Args:
op_name (str): name of new user op
Returns:
UserOpConfBuilder: `UserOpConfBuilder` object used to build a wrapper of user op.
"""
api = enable_if.unique([lazy_user_op_builder, eager_user_op_builder])
return api(op_name)
@enable_if.condition(hob.in_global_mode & ~hob.eager_execution_enabled)
def lazy_user_op_builder(op_name):
    """Builder used in lazy (graph-building) mode."""
    cur_job = oneflow_api.JobBuildAndInferCtx_GetCurrentJobName()
    # Qualify the op name with the current job's name scope prefix.
    full_name = name_scope.GetJobNameScopePrefix(cur_job) + op_name
    return UserOpConfBuilder(LazyUserOp, full_name, None)
class LazyUserOp(UserOp):
    """UserOp variant for lazy mode: adds itself to the job under construction."""

    def __init__(self, op_name, op_type_name):
        super().__init__(op_name, op_type_name)

    def InferAndTryRun(self):
        # Register this op into the current job graph.
        compile_context.CurJobAddOp(self.op_conf_)
        return self

    def MakeRemoteBlob(self, lbi):
        return remote_blob_util.RemoteBlob(lbi)
@enable_if.condition(hob.in_global_mode & hob.eager_execution_enabled)
def eager_user_op_builder(op_name):
    """Builder used in eager execution mode."""
    cur_job = oneflow_api.JobBuildAndInferCtx_GetCurrentJobName()
    # Qualify the op name with the current job's name scope prefix.
    full_name = name_scope.GetJobNameScopePrefix(cur_job) + op_name
    return UserOpConfBuilder(EagerUserOp, full_name, None)
class EagerUserOp(UserOp):
    """UserOp variant for eager mode: executes immediately via interpret_util."""

    def __init__(self, op_name, op_type_name):
        super().__init__(op_name, op_type_name)

    def InferAndTryRun(self):
        # Run the op right away instead of deferring to graph compilation.
        interpret_util.Forward(self.op_conf_)
        return self

    def MakeRemoteBlob(self, lbi):
        return remote_blob_util.EagerLogicalBlob(lbi)
@oneflow_export("consistent_user_op_builder")
def api_consistent_user_op_builder(op_name):
job_name = oneflow_api.JobBuildAndInferCtx_GetCurrentJobName()
op_name = name_scope.GetJobNameScopePrefix(job_name) + op_name
return UserOpConfBuilder(ConsistentUserOp, op_name, None)
class ConsistentUserOp(UserOp):
    """UserOp variant executed through the consistent-forward path."""

    def __init__(self, op_name, op_type_name):
        super().__init__(op_name, op_type_name)

    def InferAndTryRun(self):
        interpret_util.ConsistentForward(self.op_conf_)
        return self

    def MakeRemoteBlob(self, lbi):
        return remote_blob_util.RemoteBlob(lbi)
class UserOpConfBuilder(object):
    """Fluent builder that assembles a user-op ``OperatorConf``.

    Typical usage chains ``.Op(...)``, ``.Input(...)``, ``.Output(...)``,
    ``.Attr(...)`` and finishes with ``.Build()``, which validates and
    completes the conf and returns the wrapped user-op object.
    """
    def __init__(self, user_op_or_module_class, op_name, op_type_name):
        # The wrapped op (UserOp subclass or module) the builder configures.
        self.user_op_ = user_op_or_module_class(op_name, op_type_name)
    def CheckAndComplete(self):
        """Validate the conf via the C API and replace it with the completed one."""
        # .Op() must have been called before building.
        assert self.user_op_.op_conf_.user_conf.op_type_name != ""
        self.user_op_.op_conf_ = c_api_util.CheckAndCompleteUserOpConf(
            self.user_op_.op_conf_
        )
        return self
    def Build(self):
        r"""Build op when in/output and other attribute set up.
        Returns:
            the wrapped user-op object (not the builder)
        """
        return self.CheckAndComplete().user_op_
    def OpName(self, op_name):
        """Rename the op (prefixed with the current job's name scope) and
        regenerate all output logical blob names to embed the new name."""
        job_name = oneflow_api.JobBuildAndInferCtx_GetCurrentJobName()
        op_name = name_scope.GetJobNameScopePrefix(job_name) + op_name
        self.user_op_.op_conf_.name = op_name
        user_conf = self.user_op_.op_conf_.user_conf
        def GetLbn(output_name, i):
            return "{}/{}_{}".format(op_name, output_name, i)
        # Output lbns contain the op name, so each must be rewritten.
        for output_name, output in user_conf.output.items():
            output.s[:] = [GetLbn(output_name, i) for i in range(len(output.s))]
        return self
    def Op(self, op_type_name):
        r"""set typename of op
        Args:
            op_type_name (string): op type name
        Returns:
            self
        """
        self.user_op_.op_conf_.user_conf.op_type_name = op_type_name
        return self
    def Input(self, input_name, input_blob_list):
        r"""Set input blob of op
        Args:
            input_name (str): input name of blob
            input_blob_list : list of blobs
        Returns:
            self
        """
        assert isinstance(input_blob_list, (tuple, list))
        input_conf = self.user_op_.op_conf_.user_conf.input
        # Reset any previously registered blobs for this input name.
        input_conf[input_name].ClearField("s")
        for input_blob in input_blob_list:
            # assert type(input_blob) is blob_desc.BlobDesc
            input_conf[input_name].s.append(input_blob.unique_name)
        return self
    def InputSize(self, input_name, input_blob_size):
        """Declare an input with `input_blob_size` placeholder lbns (no blobs bound)."""
        input_conf = self.user_op_.op_conf_.user_conf.input
        assert input_blob_size >= 0
        assert input_name not in input_conf
        for i in range(input_blob_size):
            unique_name = "%s/%s_%s" % (self.user_op_.op_conf_.name, input_name, i)
            input_conf[input_name].s.append(unique_name)
        return self
    def Output(self, output_name, num=1):
        r"""Set output blob of op
        Args:
            output_name (str): name of output blob
            num (int, optional): Defaults to 1.
        Returns:
            self
        """
        assert isinstance(num, int) and num >= 1
        out_lbns = []
        for i in range(num):
            lbn = "{}/{}_{}".format(self.user_op_.op_conf_.name, output_name, i)
            out_lbns.append(lbn)
        self.user_op_.op_conf_.user_conf.output[output_name].s[:] = out_lbns
        # Remember registration order; RemoteBlobList() relies on it.
        self.user_op_.output_arg_key_list_.append(output_name)
        return self
    def Attr(self, attr_name, attr_value, attr_type_name=None):
        r"""Set value of op's attribute.
        Args:
            attr_name (str): attribute name of op
            attr_value (Any): attribute value of op
            attr_type_name: deprecated; ignored apart from a warning.
        Raises:
            ValueError: raised when the value does not match the op's
                registered attribute type.
        Returns:
            self
        """
        if attr_type_name != None:
            print(
                """WARNING: Argument 'attr_type_name' of UserOpConfBuilder.Attr has been deprecated. Please remove it.
            For instance:
                -     .Attr("out_num", out_num, "AttrTypeInt64")
                +     .Attr("out_num", out_num)
            """
            )
            print(traceback.format_stack()[-2])
        attribute = attr_value_pb.AttrValue()
        assert isinstance(attr_name, str)
        # Look up the attribute type registered for this op type, then
        # validate the Python value and store it in the matching proto field.
        attr_type = oneflow_api.GetUserOpAttrType(
            self.user_op_.op_conf_.user_conf.op_type_name, attr_name
        )
        if attr_type == attr_value_pb.kAtInt32:
            assert isinstance(attr_value, int)
            attribute.at_int32 = attr_value
        elif attr_type == attr_value_pb.kAtInt64:
            assert isinstance(attr_value, int)
            attribute.at_int64 = attr_value
        elif attr_type == attr_value_pb.kAtBool:
            assert isinstance(attr_value, bool)
            attribute.at_bool = attr_value
        elif attr_type == attr_value_pb.kAtFloat:
            assert isinstance(attr_value, float)
            attribute.at_float = attr_value
        elif attr_type == attr_value_pb.kAtDouble:
            assert isinstance(attr_value, float)
            attribute.at_double = attr_value
        elif attr_type == attr_value_pb.kAtString:
            assert isinstance(attr_value, str)
            attribute.at_string = attr_value
        elif attr_type == attr_value_pb.kAtShape:
            assert isinstance(attr_value, (tuple, list))
            assert all(isinstance(x, int) for x in attr_value)
            attribute.at_shape.dim[:] = list(attr_value)
        elif attr_type == attr_value_pb.kAtDataType:
            # OneFlow dtypes are converted to their proto enum representation.
            assert (
                isinstance(
                    oneflow_api.deprecated.GetProtoDtype4OfDtype(attr_value), int
                )
                and attr_value in oneflow.dtypes()
            )
            attribute.at_data_type = oneflow_api.deprecated.GetProtoDtype4OfDtype(
                attr_value
            )
        elif attr_type == attr_value_pb.kAtListInt32:
            assert isinstance(attr_value, (tuple, list))
            assert all(isinstance(x, int) for x in attr_value)
            attribute.at_list_int32.val[:] = list(attr_value)
        elif attr_type == attr_value_pb.kAtListInt64:
            assert isinstance(attr_value, (tuple, list))
            assert all(isinstance(x, int) for x in attr_value)
            attribute.at_list_int64.val[:] = list(attr_value)
        elif attr_type == attr_value_pb.kAtListFloat:
            assert isinstance(attr_value, (tuple, list))
            assert all(isinstance(x, float) for x in attr_value)
            attribute.at_list_float.val[:] = list(attr_value)
        elif attr_type == attr_value_pb.kAtListDataType:
            assert isinstance(attr_value, (tuple, list))
            assert all(
                isinstance(oneflow_api.deprecated.GetProtoDtype4OfDtype(x), int)
                and x in oneflow.dtypes()
                for x in attr_value
            )
            attribute.at_list_data_type.val[:] = list(
                [oneflow_api.deprecated.GetProtoDtype4OfDtype(x) for x in attr_value]
            )
        elif attr_type == attr_value_pb.kAtListShape:
            assert isinstance(attr_value, (tuple, list))
            assert all(isinstance(x, tuple) or isinstance(x, list) for x in attr_value)
            for i in range(len(attr_value)):
                shape = shape_util.ShapeProto()
                shape.dim[:] = list(attr_value[i])
                attribute.at_list_shape.val.append(shape)
        elif attr_type == attr_value_pb.kAtListString:
            assert isinstance(attr_value, (tuple, list))
            assert all(isinstance(x, str) for x in attr_value)
            attribute.at_list_string.val[:] = list(attr_value)
        else:
            raise ValueError("Invalid op attribute type {}".format(attr_type))
        self.user_op_.op_conf_.user_conf.attr[attr_name].CopyFrom(attribute)
        return self
@oneflow_export("user_op_module_builder")
def api_user_op_module_builder(op_type_name):
api = enable_if.unique(
[lazy_user_op_module_builder, eager_logical_user_op_module_builder]
)
return api(op_type_name)
class UserOpModuleBuilder(UserOpConfBuilder):
    """Builder for user-op modules; the op type is fixed at construction."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Bind the module's conf to the scope that was current at build time.
        self.user_op_module.op_conf.scope_symbol_id = flow.current_scope().symbol_id

    @property
    def user_op_module(self):
        return self.user_op_

    def Op(self, op_type_name):
        # Modules take their op type in the constructor; re-setting it is an error.
        msg = "user op module builder of {} can't call '.Op(op_type_name)' method".format(
            op_type_name
        )
        raise ValueError(msg)
@enable_if.condition(hob.in_global_mode & ~hob.eager_execution_enabled)
def lazy_user_op_module_builder(op_type_name):
    """Module builder used in lazy (graph-building) mode."""
    cur_job = oneflow_api.JobBuildAndInferCtx_GetCurrentJobName()
    scoped_name = name_scope.GetJobNameScopePrefix(cur_job) + op_type_name
    return UserOpModuleBuilder(LazyUserOpModule, scoped_name, op_type_name)
@enable_if.condition(hob.in_global_mode & hob.eager_execution_enabled)
def eager_logical_user_op_module_builder(op_type_name):
    """Module builder used in eager execution mode."""
    cur_job = oneflow_api.JobBuildAndInferCtx_GetCurrentJobName()
    scoped_name = name_scope.GetJobNameScopePrefix(cur_job) + op_type_name
    return UserOpModuleBuilder(EagerLogicalUserOpModule, scoped_name, op_type_name)
class LazyUserOpModule(UserOpModule, UserOp):
    """Module variant for lazy mode; no real kernel object is materialized."""

    def __init__(self, op_name, op_type_name):
        UserOp.__init__(self, op_name, op_type_name)

    def InitOpKernel(self):
        # Lazy mode defers kernel creation to graph compilation.
        self.set_opkernel_object(None)

    def InferAndTryRun(self):
        assert hob.in_global_mode(None)
        compile_context.CurJobAddOp(self.op_conf_)
        return self

    def MakeRemoteBlob(self, lbi):
        return remote_blob_util.RemoteBlob(lbi)
class EagerLogicalUserOpModule(UserOpModule, UserOp):
    """Module variant for eager mode: builds a real kernel object up front."""

    def __init__(self, op_name, op_type_name):
        UserOp.__init__(self, op_name, op_type_name)

    def InitOpKernel(self):
        """Create the op-kernel object via an eager LogicalRun instruction."""

        def BuildInstruction(builder):
            if not isinstance(
                self.op_conf, oneflow_api.oneflow.core.operator.op_conf.OperatorConf
            ):
                # Protobuf conf: convert to the cfg representation first.
                cfg_op_conf = oneflow_api.deprecated.MakeOpConfByString(
                    str(self.op_conf)
                )
            else:
                # BUGFIX: previously cfg_op_conf was left unbound in this case,
                # raising NameError when op_conf was already a cfg OperatorConf.
                cfg_op_conf = self.op_conf
            self.set_opkernel_object(builder.NewOpKernelObject(cfg_op_conf))

        oneflow_api.deprecated.LogicalRun(BuildInstruction)

    def InferAndTryRun(self):
        assert hob.in_global_mode(None)
        interpret_util.OpKernelForward(self.op_conf, self.opkernel_object)
        return self

    def MakeRemoteBlob(self, lbi):
        return remote_blob_util.EagerLogicalBlob(lbi)
@oneflow_export("consistent_user_op_module_builder")
def api_consistent_user_op_module_builder(op_type_name):
api = enable_if.unique(
[
lazy_consistent_user_op_module_builder,
eager_consistent_user_op_module_builder,
]
)
return api(op_type_name)
@enable_if.condition(hob.in_global_mode & ~hob.eager_execution_enabled)
def lazy_consistent_user_op_module_builder(op_type_name):
    """Consistent module builder used in lazy (graph-building) mode."""
    cur_job = oneflow_api.JobBuildAndInferCtx_GetCurrentJobName()
    scoped_name = name_scope.GetJobNameScopePrefix(cur_job) + op_type_name
    return UserOpModuleBuilder(LazyConsistentUserOpModule, scoped_name, op_type_name)
@enable_if.condition(hob.in_global_mode & hob.eager_execution_enabled)
def eager_consistent_user_op_module_builder(op_type_name):
    """Consistent module builder used in eager execution mode."""
    cur_job = oneflow_api.JobBuildAndInferCtx_GetCurrentJobName()
    scoped_name = name_scope.GetJobNameScopePrefix(cur_job) + op_type_name
    return UserOpModuleBuilder(EagerConsistentUserOpModule, scoped_name, op_type_name)
class LazyConsistentUserOpModule(UserOpModule, UserOp):
    """Consistent module variant for lazy mode; no kernel object is built."""

    def __init__(self, op_name, op_type_name):
        UserOp.__init__(self, op_name, op_type_name)

    def InitOpKernel(self):
        # Lazy mode defers kernel creation to graph compilation.
        self.set_opkernel_object(None)

    def InferAndTryRun(self):
        assert hob.in_global_mode(None)
        compile_context.CurJobAddConsistentOp(self.op_conf_)
        return self

    def MakeRemoteBlob(self, lbi):
        return remote_blob_util.RemoteBlob(lbi)
class EagerConsistentUserOpModule(UserOpModule, UserOp):
    """Consistent module variant for eager mode with a real kernel object."""

    def __init__(self, op_name, op_type_name):
        UserOp.__init__(self, op_name, op_type_name)

    def InitOpKernel(self):
        """Create the op-kernel object via an eager LogicalRun instruction."""

        def BuildInstruction(builder):
            if not isinstance(
                self.op_conf, oneflow_api.oneflow.core.operator.op_conf.OperatorConf
            ):
                # Protobuf conf: convert to the cfg representation first.
                cfg_op_conf = oneflow_api.deprecated.MakeOpConfByString(
                    str(self.op_conf)
                )
            else:
                # BUGFIX: previously cfg_op_conf was left unbound in this case,
                # raising NameError when op_conf was already a cfg OperatorConf.
                cfg_op_conf = self.op_conf
            self.set_opkernel_object(builder.NewOpKernelObject(cfg_op_conf))

        oneflow_api.deprecated.LogicalRun(BuildInstruction)

    def InferAndTryRun(self):
        assert hob.in_global_mode(None)
        interpret_util.OpKernelConsistentForward(self.op_conf, self.opkernel_object)
        return self

    def MakeRemoteBlob(self, lbi):
        return remote_blob_util.EagerLogicalBlob(lbi)
| [
"oneflow.core.operator.op_conf_pb2.OperatorConf",
"oneflow.python.framework.remote_blob.EagerLogicalBlob",
"oneflow.python.experimental.name_scope.GetJobNameScopePrefix",
"oneflow.python.framework.c_api_util.CheckAndCompleteUserOpConf",
"oneflow.python.framework.remote_blob.RemoteBlob",
"oneflow.python.on... | [((1811, 1847), 'oneflow_api.GetDefaultBlobRegister', 'oneflow_api.GetDefaultBlobRegister', ([], {}), '()\n', (1845, 1847), False, 'import oneflow_api\n'), ((5090, 5123), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""user_op_builder"""'], {}), "('user_op_builder')\n", (5104, 5123), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((5829, 5899), 'oneflow.python.lib.core.enable_if.condition', 'enable_if.condition', (['(hob.in_global_mode & ~hob.eager_execution_enabled)'], {}), '(hob.in_global_mode & ~hob.eager_execution_enabled)\n', (5848, 5899), True, 'import oneflow.python.lib.core.enable_if as enable_if\n'), ((6442, 6511), 'oneflow.python.lib.core.enable_if.condition', 'enable_if.condition', (['(hob.in_global_mode & hob.eager_execution_enabled)'], {}), '(hob.in_global_mode & hob.eager_execution_enabled)\n', (6461, 6511), True, 'import oneflow.python.lib.core.enable_if as enable_if\n'), ((7058, 7102), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""consistent_user_op_builder"""'], {}), "('consistent_user_op_builder')\n", (7072, 7102), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((15175, 15215), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""user_op_module_builder"""'], {}), "('user_op_module_builder')\n", (15189, 15215), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((15911, 15981), 'oneflow.python.lib.core.enable_if.condition', 'enable_if.condition', (['(hob.in_global_mode & ~hob.eager_execution_enabled)'], {}), '(hob.in_global_mode & ~hob.eager_execution_enabled)\n', (15930, 15981), True, 'import oneflow.python.lib.core.enable_if as enable_if\n'), ((16243, 16312), 'oneflow.python.lib.core.enable_if.condition', 'enable_if.condition', (['(hob.in_global_mode & hob.eager_execution_enabled)'], {}), '(hob.in_global_mode & hob.eager_execution_enabled)\n', (16262, 16312), True, 'import 
oneflow.python.lib.core.enable_if as enable_if\n'), ((17911, 17962), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""consistent_user_op_module_builder"""'], {}), "('consistent_user_op_module_builder')\n", (17925, 17962), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((18211, 18281), 'oneflow.python.lib.core.enable_if.condition', 'enable_if.condition', (['(hob.in_global_mode & ~hob.eager_execution_enabled)'], {}), '(hob.in_global_mode & ~hob.eager_execution_enabled)\n', (18230, 18281), True, 'import oneflow.python.lib.core.enable_if as enable_if\n'), ((18564, 18633), 'oneflow.python.lib.core.enable_if.condition', 'enable_if.condition', (['(hob.in_global_mode & hob.eager_execution_enabled)'], {}), '(hob.in_global_mode & hob.eager_execution_enabled)\n', (18583, 18633), True, 'import oneflow.python.lib.core.enable_if as enable_if\n'), ((5738, 5801), 'oneflow.python.lib.core.enable_if.unique', 'enable_if.unique', (['[lazy_user_op_builder, eager_user_op_builder]'], {}), '([lazy_user_op_builder, eager_user_op_builder])\n', (5754, 5801), True, 'import oneflow.python.lib.core.enable_if as enable_if\n'), ((5950, 6001), 'oneflow_api.JobBuildAndInferCtx_GetCurrentJobName', 'oneflow_api.JobBuildAndInferCtx_GetCurrentJobName', ([], {}), '()\n', (5999, 6001), False, 'import oneflow_api\n'), ((6563, 6614), 'oneflow_api.JobBuildAndInferCtx_GetCurrentJobName', 'oneflow_api.JobBuildAndInferCtx_GetCurrentJobName', ([], {}), '()\n', (6612, 6614), False, 'import oneflow_api\n'), ((7163, 7214), 'oneflow_api.JobBuildAndInferCtx_GetCurrentJobName', 'oneflow_api.JobBuildAndInferCtx_GetCurrentJobName', ([], {}), '()\n', (7212, 7214), False, 'import oneflow_api\n'), ((15272, 15361), 'oneflow.python.lib.core.enable_if.unique', 'enable_if.unique', (['[lazy_user_op_module_builder, eager_logical_user_op_module_builder]'], {}), '([lazy_user_op_module_builder,\n eager_logical_user_op_module_builder])\n', (15288, 15361), True, 'import 
oneflow.python.lib.core.enable_if as enable_if\n'), ((16044, 16095), 'oneflow_api.JobBuildAndInferCtx_GetCurrentJobName', 'oneflow_api.JobBuildAndInferCtx_GetCurrentJobName', ([], {}), '()\n', (16093, 16095), False, 'import oneflow_api\n'), ((16384, 16435), 'oneflow_api.JobBuildAndInferCtx_GetCurrentJobName', 'oneflow_api.JobBuildAndInferCtx_GetCurrentJobName', ([], {}), '()\n', (16433, 16435), False, 'import oneflow_api\n'), ((18030, 18133), 'oneflow.python.lib.core.enable_if.unique', 'enable_if.unique', (['[lazy_consistent_user_op_module_builder,\n eager_consistent_user_op_module_builder]'], {}), '([lazy_consistent_user_op_module_builder,\n eager_consistent_user_op_module_builder])\n', (18046, 18133), True, 'import oneflow.python.lib.core.enable_if as enable_if\n'), ((18355, 18406), 'oneflow_api.JobBuildAndInferCtx_GetCurrentJobName', 'oneflow_api.JobBuildAndInferCtx_GetCurrentJobName', ([], {}), '()\n', (18404, 18406), False, 'import oneflow_api\n'), ((18708, 18759), 'oneflow_api.JobBuildAndInferCtx_GetCurrentJobName', 'oneflow_api.JobBuildAndInferCtx_GetCurrentJobName', ([], {}), '()\n', (18757, 18759), False, 'import oneflow_api\n'), ((1948, 1975), 'oneflow.core.operator.op_conf_pb2.OperatorConf', 'op_conf_util.OperatorConf', ([], {}), '()\n', (1973, 1975), True, 'import oneflow.core.operator.op_conf_pb2 as op_conf_util\n'), ((6016, 6058), 'oneflow.python.experimental.name_scope.GetJobNameScopePrefix', 'name_scope.GetJobNameScopePrefix', (['job_name'], {}), '(job_name)\n', (6048, 6058), True, 'import oneflow.python.experimental.name_scope as name_scope\n'), ((6292, 6334), 'oneflow.python.framework.compile_context.CurJobAddOp', 'compile_context.CurJobAddOp', (['self.op_conf_'], {}), '(self.op_conf_)\n', (6319, 6334), True, 'import oneflow.python.framework.compile_context as compile_context\n'), ((6406, 6438), 'oneflow.python.framework.remote_blob.RemoteBlob', 'remote_blob_util.RemoteBlob', (['lbi'], {}), '(lbi)\n', (6433, 6438), True, 'import 
oneflow.python.framework.remote_blob as remote_blob_util\n'), ((6629, 6671), 'oneflow.python.experimental.name_scope.GetJobNameScopePrefix', 'name_scope.GetJobNameScopePrefix', (['job_name'], {}), '(job_name)\n', (6661, 6671), True, 'import oneflow.python.experimental.name_scope as name_scope\n'), ((6907, 6944), 'oneflow.python.framework.interpret_util.Forward', 'interpret_util.Forward', (['self.op_conf_'], {}), '(self.op_conf_)\n', (6929, 6944), True, 'import oneflow.python.framework.interpret_util as interpret_util\n'), ((7016, 7054), 'oneflow.python.framework.remote_blob.EagerLogicalBlob', 'remote_blob_util.EagerLogicalBlob', (['lbi'], {}), '(lbi)\n', (7049, 7054), True, 'import oneflow.python.framework.remote_blob as remote_blob_util\n'), ((7229, 7271), 'oneflow.python.experimental.name_scope.GetJobNameScopePrefix', 'name_scope.GetJobNameScopePrefix', (['job_name'], {}), '(job_name)\n', (7261, 7271), True, 'import oneflow.python.experimental.name_scope as name_scope\n'), ((7517, 7564), 'oneflow.python.framework.interpret_util.ConsistentForward', 'interpret_util.ConsistentForward', (['self.op_conf_'], {}), '(self.op_conf_)\n', (7549, 7564), True, 'import oneflow.python.framework.interpret_util as interpret_util\n'), ((7636, 7668), 'oneflow.python.framework.remote_blob.RemoteBlob', 'remote_blob_util.RemoteBlob', (['lbi'], {}), '(lbi)\n', (7663, 7668), True, 'import oneflow.python.framework.remote_blob as remote_blob_util\n'), ((7980, 8041), 'oneflow.python.framework.c_api_util.CheckAndCompleteUserOpConf', 'c_api_util.CheckAndCompleteUserOpConf', (['self.user_op_.op_conf_'], {}), '(self.user_op_.op_conf_)\n', (8017, 8041), True, 'import oneflow.python.framework.c_api_util as c_api_util\n'), ((8317, 8368), 'oneflow_api.JobBuildAndInferCtx_GetCurrentJobName', 'oneflow_api.JobBuildAndInferCtx_GetCurrentJobName', ([], {}), '()\n', (8366, 8368), False, 'import oneflow_api\n'), ((11499, 11524), 'oneflow.core.framework.user_op_attr_pb2.AttrValue', 
'attr_value_pb.AttrValue', ([], {}), '()\n', (11522, 11524), True, 'import oneflow.core.framework.user_op_attr_pb2 as attr_value_pb\n'), ((11587, 11678), 'oneflow_api.GetUserOpAttrType', 'oneflow_api.GetUserOpAttrType', (['self.user_op_.op_conf_.user_conf.op_type_name', 'attr_name'], {}), '(self.user_op_.op_conf_.user_conf.op_type_name,\n attr_name)\n', (11616, 11678), False, 'import oneflow_api\n'), ((16110, 16152), 'oneflow.python.experimental.name_scope.GetJobNameScopePrefix', 'name_scope.GetJobNameScopePrefix', (['job_name'], {}), '(job_name)\n', (16142, 16152), True, 'import oneflow.python.experimental.name_scope as name_scope\n'), ((16450, 16492), 'oneflow.python.experimental.name_scope.GetJobNameScopePrefix', 'name_scope.GetJobNameScopePrefix', (['job_name'], {}), '(job_name)\n', (16482, 16492), True, 'import oneflow.python.experimental.name_scope as name_scope\n'), ((16850, 16874), 'oneflow.python.framework.hob.in_global_mode', 'hob.in_global_mode', (['None'], {}), '(None)\n', (16868, 16874), True, 'import oneflow.python.framework.hob as hob\n'), ((16883, 16925), 'oneflow.python.framework.compile_context.CurJobAddOp', 'compile_context.CurJobAddOp', (['self.op_conf_'], {}), '(self.op_conf_)\n', (16910, 16925), True, 'import oneflow.python.framework.compile_context as compile_context\n'), ((16997, 17029), 'oneflow.python.framework.remote_blob.RemoteBlob', 'remote_blob_util.RemoteBlob', (['lbi'], {}), '(lbi)\n', (17024, 17029), True, 'import oneflow.python.framework.remote_blob as remote_blob_util\n'), ((17600, 17651), 'oneflow_api.deprecated.LogicalRun', 'oneflow_api.deprecated.LogicalRun', (['BuildInstruction'], {}), '(BuildInstruction)\n', (17633, 17651), False, 'import oneflow_api\n'), ((17698, 17722), 'oneflow.python.framework.hob.in_global_mode', 'hob.in_global_mode', (['None'], {}), '(None)\n', (17716, 17722), True, 'import oneflow.python.framework.hob as hob\n'), ((17731, 17797), 'oneflow.python.framework.interpret_util.OpKernelForward', 
'interpret_util.OpKernelForward', (['self.op_conf', 'self.opkernel_object'], {}), '(self.op_conf, self.opkernel_object)\n', (17761, 17797), True, 'import oneflow.python.framework.interpret_util as interpret_util\n'), ((17869, 17907), 'oneflow.python.framework.remote_blob.EagerLogicalBlob', 'remote_blob_util.EagerLogicalBlob', (['lbi'], {}), '(lbi)\n', (17902, 17907), True, 'import oneflow.python.framework.remote_blob as remote_blob_util\n'), ((18421, 18463), 'oneflow.python.experimental.name_scope.GetJobNameScopePrefix', 'name_scope.GetJobNameScopePrefix', (['job_name'], {}), '(job_name)\n', (18453, 18463), True, 'import oneflow.python.experimental.name_scope as name_scope\n'), ((18774, 18816), 'oneflow.python.experimental.name_scope.GetJobNameScopePrefix', 'name_scope.GetJobNameScopePrefix', (['job_name'], {}), '(job_name)\n', (18806, 18816), True, 'import oneflow.python.experimental.name_scope as name_scope\n'), ((19187, 19211), 'oneflow.python.framework.hob.in_global_mode', 'hob.in_global_mode', (['None'], {}), '(None)\n', (19205, 19211), True, 'import oneflow.python.framework.hob as hob\n'), ((19220, 19272), 'oneflow.python.framework.compile_context.CurJobAddConsistentOp', 'compile_context.CurJobAddConsistentOp', (['self.op_conf_'], {}), '(self.op_conf_)\n', (19257, 19272), True, 'import oneflow.python.framework.compile_context as compile_context\n'), ((19344, 19376), 'oneflow.python.framework.remote_blob.RemoteBlob', 'remote_blob_util.RemoteBlob', (['lbi'], {}), '(lbi)\n', (19371, 19376), True, 'import oneflow.python.framework.remote_blob as remote_blob_util\n'), ((19950, 20001), 'oneflow_api.deprecated.LogicalRun', 'oneflow_api.deprecated.LogicalRun', (['BuildInstruction'], {}), '(BuildInstruction)\n', (19983, 20001), False, 'import oneflow_api\n'), ((20048, 20072), 'oneflow.python.framework.hob.in_global_mode', 'hob.in_global_mode', (['None'], {}), '(None)\n', (20066, 20072), True, 'import oneflow.python.framework.hob as hob\n'), ((20081, 20157), 
'oneflow.python.framework.interpret_util.OpKernelConsistentForward', 'interpret_util.OpKernelConsistentForward', (['self.op_conf', 'self.opkernel_object'], {}), '(self.op_conf, self.opkernel_object)\n', (20121, 20157), True, 'import oneflow.python.framework.interpret_util as interpret_util\n'), ((20229, 20267), 'oneflow.python.framework.remote_blob.EagerLogicalBlob', 'remote_blob_util.EagerLogicalBlob', (['lbi'], {}), '(lbi)\n', (20262, 20267), True, 'import oneflow.python.framework.remote_blob as remote_blob_util\n'), ((8387, 8429), 'oneflow.python.experimental.name_scope.GetJobNameScopePrefix', 'name_scope.GetJobNameScopePrefix', (['job_name'], {}), '(job_name)\n', (8419, 8429), True, 'import oneflow.python.experimental.name_scope as name_scope\n'), ((15602, 15622), 'oneflow.current_scope', 'flow.current_scope', ([], {}), '()\n', (15620, 15622), True, 'import oneflow as flow\n'), ((2135, 2158), 'oneflow.current_scope', 'oneflow.current_scope', ([], {}), '()\n', (2156, 2158), False, 'import oneflow\n'), ((3090, 3126), 'oneflow.core.register.logical_blob_id_pb2.LogicalBlobId', 'logical_blob_id_util.LogicalBlobId', ([], {}), '()\n', (3124, 3126), True, 'import oneflow.core.register.logical_blob_id_pb2 as logical_blob_id_util\n'), ((3378, 3408), 'oneflow.eager_execution_enabled', 'flow.eager_execution_enabled', ([], {}), '()\n', (3406, 3408), True, 'import oneflow as flow\n'), ((4365, 4401), 'oneflow.core.register.logical_blob_id_pb2.LogicalBlobId', 'logical_blob_id_util.LogicalBlobId', ([], {}), '()\n', (4399, 4401), True, 'import oneflow.core.register.logical_blob_id_pb2 as logical_blob_id_util\n'), ((11448, 11472), 'traceback.format_stack', 'traceback.format_stack', ([], {}), '()\n', (11470, 11472), False, 'import traceback\n'), ((3430, 3476), 'oneflow.python.eager.gradient_util.GetDefaultBackwardBlobRegister', 'gradient_util.GetDefaultBackwardBlobRegister', ([], {}), '()\n', (3474, 3476), True, 'import oneflow.python.eager.gradient_util as gradient_util\n'), 
((13079, 13135), 'oneflow_api.deprecated.GetProtoDtype4OfDtype', 'oneflow_api.deprecated.GetProtoDtype4OfDtype', (['attr_value'], {}), '(attr_value)\n', (13123, 13135), False, 'import oneflow_api\n'), ((12897, 12953), 'oneflow_api.deprecated.GetProtoDtype4OfDtype', 'oneflow_api.deprecated.GetProtoDtype4OfDtype', (['attr_value'], {}), '(attr_value)\n', (12941, 12953), False, 'import oneflow_api\n'), ((13011, 13027), 'oneflow.dtypes', 'oneflow.dtypes', ([], {}), '()\n', (13025, 13027), False, 'import oneflow\n'), ((14259, 14306), 'oneflow_api.deprecated.GetProtoDtype4OfDtype', 'oneflow_api.deprecated.GetProtoDtype4OfDtype', (['x'], {}), '(x)\n', (14303, 14306), False, 'import oneflow_api\n'), ((14610, 14633), 'oneflow.core.common.shape_pb2.ShapeProto', 'shape_util.ShapeProto', ([], {}), '()\n', (14631, 14633), True, 'import oneflow.core.common.shape_pb2 as shape_util\n'), ((14041, 14088), 'oneflow_api.deprecated.GetProtoDtype4OfDtype', 'oneflow_api.deprecated.GetProtoDtype4OfDtype', (['x'], {}), '(x)\n', (14085, 14088), False, 'import oneflow_api\n'), ((14120, 14136), 'oneflow.dtypes', 'oneflow.dtypes', ([], {}), '()\n', (14134, 14136), False, 'import oneflow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import functools
import math
import numpy as np
import oneflow as flow
import oneflow.core.operator.op_conf_pb2 as op_conf_util
import oneflow.core.job.initializer_conf_pb2 as initializer_conf_util
from oneflow.python.oneflow_export import oneflow_export
from typing import Optional, Sequence, Union
@oneflow_export("empty_initializer")
def empty_initializer(
    dtype: flow.dtype = flow.float,
) -> initializer_conf_util.InitializerConf:
    """Return an InitializerConf that performs no initialization at all.

    Args:
        dtype: kept for interface uniformity with the other initializers;
            it does not affect the produced configuration.

    Returns:
        initializer_conf_util.InitializerConf: conf with ``empty_conf`` set.
    """
    conf = initializer_conf_util.InitializerConf()
    conf.empty_conf.CopyFrom(initializer_conf_util.EmptyInitializerConf())
    return conf
@oneflow_export("constant_initializer")
def constant_initializer(
    value: float = 0, dtype: flow.dtype = flow.float
) -> initializer_conf_util.InitializerConf:
    r"""Initializer that fills a blob with a single constant value.

    Args:
        value (float, optional): scalar assigned to every element. Defaults to 0.
        dtype (flow.dtype, optional): element type; float kinds use
            ``constant_conf`` and integer kinds use ``constant_int_conf``.
            Defaults to flow.float.

    Raises:
        NotImplementedError: for dtypes other than
            float/double/int8/int32/int64.

    Returns:
        initializer_conf_util.InitializerConf: the initializer configuration.

    Example::

        init = flow.constant_initializer(2.5)
        blob = flow.get_variable("w", shape=(3,), initializer=init)
        # blob -> [2.5 2.5 2.5]
    """
    conf = initializer_conf_util.InitializerConf()
    floating_kinds = (flow.float, flow.double)
    integer_kinds = (flow.int8, flow.int32, flow.int64)
    if dtype in floating_kinds:
        conf.constant_conf.value = float(value)
    elif dtype in integer_kinds:
        conf.constant_int_conf.value = int(value)
    else:
        raise NotImplementedError("Do not support such data type")
    return conf
@oneflow_export("zeros_initializer")
def zeros_initializer(
    dtype: flow.dtype = flow.float,
) -> initializer_conf_util.InitializerConf:
    r"""Initializer that fills blobs with zeros.

    Thin wrapper over :func:`constant_initializer` with value 0.

    Args:
        dtype (flow.dtype, optional): element type. Defaults to flow.float.

    Returns:
        initializer_conf_util.InitializerConf: the initializer configuration.

    Example::

        init = flow.zeros_initializer()
        blob = flow.get_variable("w", shape=(3,), initializer=init)
        # blob -> [0. 0. 0.]
    """
    return constant_initializer(0.0, dtype)
@oneflow_export("ones_initializer")
def ones_initializer(
    dtype: flow.dtype = flow.float,
) -> initializer_conf_util.InitializerConf:
    r"""Initializer that fills blobs with ones.

    Thin wrapper over :func:`constant_initializer` with value 1.

    Args:
        dtype (flow.dtype, optional): element type. Defaults to flow.float.

    Returns:
        initializer_conf_util.InitializerConf: the initializer configuration.

    Example::

        init = flow.ones_initializer()
        blob = flow.get_variable("w", shape=(3,), initializer=init)
        # blob -> [1. 1. 1.]
    """
    return constant_initializer(1.0, dtype)
@oneflow_export("random_uniform_initializer")
def random_uniform_initializer(
    minval: float = 0, maxval: float = 1, dtype: flow.dtype = flow.float
) -> initializer_conf_util.InitializerConf:
    r"""Initializer drawing values from a uniform distribution.

    Args:
        minval (float, optional): lower bound of the sampling range.
            Defaults to 0.
        maxval (float, optional): upper bound of the sampling range.
            Defaults to 1.
        dtype (flow.dtype, optional): element type; float kinds use
            ``random_uniform_conf`` and integer kinds use
            ``random_uniform_int_conf``. Defaults to flow.float.

    Raises:
        NotImplementedError: for dtypes other than
            float/double/int8/int32/int64.

    Returns:
        initializer_conf_util.InitializerConf: the initializer configuration.

    Example::

        init = flow.random_uniform_initializer(minval=0, maxval=0.5)
        blob = flow.get_variable("w", shape=(3,), initializer=init)
    """
    assert minval <= maxval
    conf = initializer_conf_util.InitializerConf()
    if dtype in (flow.float, flow.double):
        conf.random_uniform_conf.min = float(minval)
        conf.random_uniform_conf.max = float(maxval)
    elif dtype in (flow.int8, flow.int32, flow.int64):
        conf.random_uniform_int_conf.min = int(minval)
        conf.random_uniform_int_conf.max = int(maxval)
    else:
        raise NotImplementedError("Do not support such data type")
    return conf
@oneflow_export("random_normal_initializer")
def random_normal_initializer(
    mean: float = 0.0,
    stddev: float = 1.0,
    seed: Optional[int] = None,
    dtype: Optional[flow.dtype] = None,
) -> initializer_conf_util.InitializerConf:
    r"""Initializer that draws values from a normal distribution.

    Args:
        mean (float, optional): mean of the distribution. Defaults to 0.0.
        stddev (float, optional): standard deviation of the distribution.
            Defaults to 1.0.
        seed (Optional[int], optional): not supported yet; must be None.
        dtype (Optional[flow.dtype], optional): not supported yet; must be None.

    Returns:
        initializer_conf_util.InitializerConf: the initializer configuration.

    Example::

        init = flow.random_normal_initializer(mean=1, stddev=1)
        blob = flow.get_variable("w", shape=(3,), initializer=init)
    """
    # ``seed`` and ``dtype`` exist for API compatibility only; they are not
    # implemented, so reject any explicit value up front.
    assert seed is None
    assert dtype is None
    # BUGFIX: removed the old ``if seed is not None: assert name is not None``
    # block -- it was unreachable after ``assert seed is None`` and referenced
    # an undefined name ``name``, which would have raised NameError if reached.
    initializer = initializer_conf_util.InitializerConf()
    initializer.random_normal_conf.mean = float(mean)
    initializer.random_normal_conf.std = float(stddev)
    return initializer
@oneflow_export("truncated_normal_initializer")
def truncated_normal_initializer(
    mean: float = 0.0, stddev: float = 1.0
) -> initializer_conf_util.InitializerConf:
    r"""Initializer that draws values from a truncated normal distribution.

    Args:
        mean (float, optional): mean of the distribution. Defaults to 0.0.
        stddev (float, optional): standard deviation of the distribution.
            Defaults to 1.0.

    Returns:
        initializer_conf_util.InitializerConf: the initializer configuration.

    Example::

        init = flow.truncated_normal_initializer(mean=1, stddev=1)
        blob = flow.get_variable("w", shape=(3,), initializer=init)
    """
    conf = initializer_conf_util.InitializerConf()
    conf.truncated_normal_conf.mean = float(mean)
    conf.truncated_normal_conf.std = float(stddev)
    return conf
@oneflow_export("glorot_uniform_initializer", "xavier_uniform_initializer")
def glorot_uniform_initializer(
    data_format: str = "",
) -> initializer_conf_util.InitializerConf:
    r"""Xavier/Glorot uniform initializer.

    Also exported as ``oneflow.xavier_uniform_initializer``. Samples from

    .. math::

        W\sim U(-\sqrt{\frac{{6}}{{n_j+n_{j+1}}}},\sqrt{\frac{{6}}{{n_j+n_{j+1}}}})

    where :math:`U` is the uniform distribution and :math:`n_j` is the number
    of parameters of the j-th layer.

    Args:
        data_format (str, optional): the data format. Defaults to "".

    Returns:
        initializer_conf_util.InitializerConf: the initializer configuration.
    """
    # Delegates to variance scaling with fan averaging and uniform sampling.
    return variance_scaling_initializer(
        1.0, "fan_avg", "random_uniform", data_format
    )
@oneflow_export("glorot_normal_initializer", "xavier_normal_initializer")
def glorot_normal_initializer(
    data_format: str = "",
) -> initializer_conf_util.InitializerConf:
    r"""Xavier/Glorot normal initializer.

    Also exported as ``oneflow.xavier_normal_initializer``. Samples from

    .. math::

        W\sim N(0, \sqrt{\frac{{2}}{{n_j+n_{j+1}}}})

    where :math:`N` is the normal distribution and :math:`n_j` is the number
    of parameters of the j-th layer.

    Args:
        data_format (str, optional): the data format. Defaults to "".

    Returns:
        initializer_conf_util.InitializerConf: the initializer configuration.
    """
    # Delegates to variance scaling with fan averaging and normal sampling.
    return variance_scaling_initializer(
        1.0, "fan_avg", "random_normal", data_format
    )
@oneflow_export("variance_scaling_initializer")
def variance_scaling_initializer(
    scale: float = 1.0,
    mode: str = "fan_in",
    distribution: str = "truncated_normal",
    data_format: str = "",
) -> initializer_conf_util.InitializerConf:
    r"""Initializer whose spread adapts to the weight blob's fan size.

    For ``distribution="truncated_normal"`` the values follow

    .. math::

        W\sim N(0, \sqrt{\frac{{scale}}{{n}}})

    where ``n`` is the number of input units (``mode="fan_in"``), output units
    (``mode="fan_out"``), or their average (``mode="fan_avg"``).

    Args:
        scale (float, optional): positive scaling factor. Defaults to 1.0.
        mode (str, optional): one of "fan_in", "fan_out", "fan_avg".
            Defaults to "fan_in".
        distribution (str, optional): random distribution to use.
            Defaults to "truncated_normal".
        data_format (str, optional): a string, one of "N...C" or "NC...".
            Defaults to "".

    Returns:
        initializer_conf_util.InitializerConf: the initializer configuration.
    """
    conf = initializer_conf_util.InitializerConf()
    vs_conf = conf.variance_scaling_conf
    vs_conf.scale = float(scale)
    # The helpers translate the string arguments into proto enum values.
    vs_conf.variance_norm = _get_variance_norm(mode)
    vs_conf.distribution = _get_random_distribution(distribution)
    vs_conf.data_format = _get_data_format(data_format)
    return conf
@oneflow_export("kaiming_initializer")
def kaiming_initializer(
    shape: Sequence[int],
    distribution: str = "random_normal",
    mode: str = "fan_in",
    nonlinearity: str = "leaky_relu",
    negative_slope: float = 0.0,
    data_format: str = "NCHW",
) -> initializer_conf_util.InitializerConf:
    r"""Initialize weights as described in `Delving deep into rectifiers:
    Surpassing human-level performance on ImageNet classification`
    - He et al. (2015), using a normal or uniform distribution.

    When distribution is "random_normal":

    .. math::

        W \sim N(0, \sqrt{\frac{{2}}{{n}}})

    When distribution is "random_uniform":

    .. math::

        W \sim U(-\sqrt{\frac{{6}}{{n}}}, \sqrt{\frac{{6}}{{n}}})

    If mode is "fan_in", "n" is the number of input units of the weight Blob;
    "fan_out" uses the number of output units; "fan_avg" uses their average.

    Args:
        shape (Sequence[int]): Blob shape (tuple or list, rank >= 2).
        distribution (str, optional): 'random_normal' or 'random_uniform'.
            Defaults to "random_normal".
        mode (str, optional): 'fan_in', 'fan_out' or 'fan_avg'.
            Defaults to "fan_in".
        nonlinearity (str, optional): None, 'tanh', 'sigmoid', 'relu' or
            'leaky_relu'. Defaults to "leaky_relu".
        negative_slope (float, optional): The negative slope of leaky_relu.
            Defaults to 0.0.
        data_format (str, optional): 'NCHW' or 'NHWC'. Defaults to "NCHW".

    Raises:
        NotImplementedError: Only normal and uniform distributions are supported.

    Returns:
        initializer_conf_util.InitializerConf: the resulting
            flow.random_normal_initializer or flow.random_uniform_initializer.
    """
    # Accept any concrete sequence: the signature promises Sequence[int],
    # so a list is as valid as a tuple (previously only tuple was allowed).
    assert isinstance(shape, (tuple, list))
    # Kaiming initialization only deals with FC, Conv and Deconv weights.
    assert len(shape) >= 2
    elem_cnt = functools.reduce(lambda a, b: a * b, shape, 1)
    assert elem_cnt > 0
    assert distribution in ["random_normal", "random_uniform"]
    assert mode in ["fan_in", "fan_out", "fan_avg"]
    assert nonlinearity in [None, "tanh", "sigmoid", "relu", "leaky_relu"]
    assert data_format in ["NCHW", "NHWC"]
    fan = _CalcFan(shape, mode, _get_data_format(data_format))
    gain = _CalcGain(nonlinearity, negative_slope)
    std = gain / math.sqrt(fan)
    if distribution == "random_normal":
        return flow.random_normal_initializer(0.0, std)
    elif distribution == "random_uniform":
        bound = math.sqrt(3.0) * std
        return flow.random_uniform_initializer(-bound, bound)
    else:
        raise NotImplementedError("Only support normal and uniform distribution")
def _get_variance_norm(mode):
    """Translate a fan-mode string into its initializer_conf enum value.

    Raises ValueError for anything but "fan_in", "fan_out", "fan_avg".
    """
    mapping = {
        "fan_in": initializer_conf_util.kFanIn,
        "fan_out": initializer_conf_util.kFanOut,
        "fan_avg": initializer_conf_util.kAverage,
    }
    key = mode.lower()
    if key not in mapping:
        raise ValueError("Invalid variance_norm")
    return mapping[key]
def _get_random_distribution(distribution):
    """Translate a distribution name into its initializer_conf enum value.

    Raises ValueError for anything but "truncated_normal", "random_normal",
    "random_uniform".
    """
    mapping = {
        "truncated_normal": initializer_conf_util.kTruncatedNormal,
        "random_normal": initializer_conf_util.kRandomNormal,
        "random_uniform": initializer_conf_util.kRandomUniform,
    }
    key = distribution.lower()
    if key not in mapping:
        raise ValueError("Invalid random_distribution")
    return mapping[key]
def _get_data_format(data_format):
assert isinstance(data_format, str), "data_format must be a string"
if data_format.startswith("NC"):
return "channels_first"
elif data_format.startswith("N") and data_format.endswith("C"):
return "channels_last"
else:
assert data_format == "", ValueError(
'data_format must be "N...C" or "NC..." or ""'
)
return ""
def _CalcFan(shape, mode, data_format):
if len(shape) == 2: # Linear
fan_in = shape[1]
fan_out = shape[0]
else: # Conv and Deconv
fan_in = 1.0
for dim in shape[1:]:
fan_in *= dim
fan_out = shape[0]
if data_format == "channels_first":
for dim in shape[2:]:
fan_out *= dim
elif data_format == "channels_last":
for dim in shape[1:-1]:
fan_out *= dim
else:
raise NotImplementedError(
"Only support 'channels_first' and 'channels_last' data format"
)
if mode == "fan_avg":
return (float(fan_in) + float(fan_out)) / 2
elif mode == "fan_in":
return float(fan_in)
elif mode == "fan_out":
return float(fan_out)
else:
raise NotImplementedError("Only support 'fan_in', 'fan_out' and 'fan_avg' mode")
def _CalcGain(nonlinearity, negative_slope):
if nonlinearity is None or nonlinearity == "sigmoid":
return 1.0
elif nonlinearity == "tanh":
return 5.0 / 3
elif nonlinearity == "relu":
return math.sqrt(2.0)
elif nonlinearity == "leaky_relu":
return math.sqrt(2.0 / (1 + negative_slope ** 2))
else:
raise NotImplementedError(
"Only support None, 'tanh', 'sigmoid', 'relu' and 'leaky_relu' nonlinearity"
)
_init_map = {}


def register_initializer(flow_initializer):
    """Decorator factory: register *func* as the numpy implementation for
    the ``flow_initializer`` oneof field name of an InitializerConf."""

    def deco(func):
        _init_map[flow_initializer] = func
        return func

    return deco


def GetInitializer(initializer_conf, random_seed, var_blob_shape):
    """Dispatch to the registered implementation for whichever oneof field
    of *initializer_conf* is set, returning its generator."""
    matched = next(
        (name for name in _init_map if initializer_conf.HasField(name)), None
    )
    assert matched is not None, initializer_conf
    impl = _init_map[matched]
    return impl(getattr(initializer_conf, matched), random_seed, var_blob_shape)
@register_initializer("constant_conf")
@register_initializer("constant_int_conf")
def ConstantInitializerImpl(
    initializer_conf: Union[
        initializer_conf_util.ConstantInitializerConf,
        initializer_conf_util.ConstantIntInitializerConf,
    ],
    random_seed: int,
    var_blob_shape: Sequence[int],
):
    """Return a generator producing *length* copies of the configured value."""

    def generate(length):
        return np.full((length,), initializer_conf.value)

    return generate
@register_initializer("random_normal_conf")
def RandomNormalInitializerImpl(
    initializer_conf: initializer_conf_util.RandomNormalInitializerConf,
    random_seed: int,
    var_blob_shape: Sequence[int],
):
    """Return a generator sampling N(mean, std) with a seeded numpy RNG."""
    rng = np.random.default_rng(random_seed)

    def generate(length):
        return rng.normal(
            loc=initializer_conf.mean, scale=initializer_conf.std, size=length
        )

    return generate
@register_initializer("random_uniform_conf")
def RandomUniformInitializerImpl(
    initializer_conf: initializer_conf_util.RandomUniformInitializerConf,
    random_seed: int,
    var_blob_shape: Sequence[int],
):
    """Return a generator sampling Uniform[min, max] with a seeded numpy RNG.

    Fix: the annotation previously named RandomUniformIntInitializerConf,
    a copy-paste from the int variant below; this impl is registered for the
    float "random_uniform_conf" field.

    np.nextafter nudges the upper bound past ``max`` so that rng.uniform's
    half-open interval can actually yield ``max`` itself.
    """
    rng = np.random.default_rng(random_seed)
    return lambda length: rng.uniform(
        low=initializer_conf.min,
        high=np.nextafter(initializer_conf.max, float("inf")),
        size=length,
    )
@register_initializer("random_uniform_int_conf")
def RandomUniformIntInitializerImpl(
    initializer_conf: initializer_conf_util.RandomUniformIntInitializerConf,
    random_seed: int,
    var_blob_shape: Sequence[int],
):
    """Return a generator sampling integers in [min, max) with a seeded RNG."""
    rng = np.random.default_rng(random_seed)

    def generate(length):
        return rng.integers(
            low=initializer_conf.min, high=initializer_conf.max, size=length
        )

    return generate
def RngTruncatedNormal(mean, std, length, rng):
    """Draw *length* samples from N(mean, std) truncated to mean +/- 2*std.

    Uses rejection sampling: each round draws ~20% more values than still
    needed, keeps those inside the truncation band, and repeats until the
    output buffer is full.
    """
    bound = 2 * std
    out = np.empty(length)
    filled = 0
    oversample = 1.2  # draw a little extra per round to amortize rejections
    while filled < length:
        needed = length - filled
        draws = rng.normal(mean, std, size=int(needed * oversample))
        kept = draws[np.abs(draws - mean) < bound][:needed]
        out[filled : filled + len(kept)] = kept
        filled += len(kept)
    return out
@register_initializer("truncated_normal_conf")
def TruncatedNormalInitializerImpl(
    initializer_conf: initializer_conf_util.TruncatedNormalInitializerConf,
    random_seed: int,
    var_blob_shape: Sequence[int],
):
    """Return a generator sampling the configured truncated normal."""
    rng = np.random.default_rng(random_seed)

    def generate(length):
        return RngTruncatedNormal(
            initializer_conf.mean, initializer_conf.std, length, rng,
        )

    return generate
def GenInitialFan(initializer_conf, var_blob_shape: Sequence[int]):
    """Compute the fan value (fan_in / fan_out / average) selected by a
    variance-scaling conf for a blob of shape *var_blob_shape*.

    Fixes:
    - ``np.int`` was removed in NumPy 1.24; use builtin ``int``.
    - The layout string produced by ``_get_data_format`` is
      "channels_first"; the old literal "channel_first" never matched,
      so fan_out was always computed with the channels-last rule.
    - ``raise NotImplemented()`` raised a TypeError (NotImplemented is not
      callable); NotImplementedError is the intended exception.
    """
    variance_norm = initializer_conf.variance_norm
    data_format = initializer_conf.data_format
    fan_in = int(np.prod(var_blob_shape[1:]))
    fan_out = var_blob_shape[0]
    if data_format == "channels_first":
        fan_out *= int(np.prod(var_blob_shape[2:]))
    else:
        fan_out *= int(np.prod(var_blob_shape[1:-1]))
    if variance_norm == initializer_conf_util.kAverage:
        fan = (fan_in + fan_out) / 2
    elif variance_norm == initializer_conf_util.kFanIn:
        fan = fan_in
    elif variance_norm == initializer_conf_util.kFanOut:
        fan = fan_out
    else:
        raise NotImplementedError()
    return fan
@register_initializer("variance_scaling_conf")
def VarianceScalingInitializerImpl(
    initializer_conf: initializer_conf_util.VarianceScalingInitializerConf,
    random_seed: int,
    var_blob_shape: Sequence[int],
):
    """Return a numpy generator implementing variance-scaling initialization.

    The configured scale is divided by the fan (per the conf's
    variance_norm), then samples are drawn from the configured distribution
    with matching variance.

    Fix: the fall-through branch used ``raise NotImplemented()``, which
    raises a TypeError because NotImplemented is not callable;
    NotImplementedError is the intended exception.
    """
    scale = initializer_conf.scale / GenInitialFan(initializer_conf, var_blob_shape)
    distribution = initializer_conf.distribution
    rng = np.random.default_rng(random_seed)
    if distribution == initializer_conf_util.kTruncatedNormal:
        # 0.8796... rescales std so the +/-2-sigma truncated distribution
        # keeps the requested variance.
        stddev = math.sqrt(scale) / 0.87962566103423978
        return lambda length: RngTruncatedNormal(0, stddev, length, rng)
    elif distribution == initializer_conf_util.kRandomNormal:
        stddev = math.sqrt(scale)
        return lambda length: rng.normal(0, stddev, size=length,)
    elif distribution == initializer_conf_util.kRandomUniform:
        limit = math.sqrt(3.0 * scale)
        return lambda length: rng.uniform(low=-limit, high=limit, size=length)
    else:
        raise NotImplementedError()
@register_initializer("empty_conf")
def EmptyInitializerImpl(
    initializer_conf: initializer_conf_util.EmptyInitializerConf,
    random_seed: int,
    var_blob_shape: Sequence[int],
):
    # "empty" initialization intentionally generates no data: returning None
    # signals the caller to leave the variable's memory uninitialized.
    return None
| [
"oneflow.core.job.initializer_conf_pb2.InitializerConf",
"oneflow.core.job.initializer_conf_pb2.EmptyInitializerConf",
"oneflow.python.oneflow_export.oneflow_export",
"oneflow.random_uniform_initializer",
"oneflow.random_normal_initializer"
] | [((935, 970), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""empty_initializer"""'], {}), "('empty_initializer')\n", (949, 970), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((1268, 1306), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""constant_initializer"""'], {}), "('constant_initializer')\n", (1282, 1306), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((3728, 3763), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""zeros_initializer"""'], {}), "('zeros_initializer')\n", (3742, 3763), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((5562, 5596), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""ones_initializer"""'], {}), "('ones_initializer')\n", (5576, 5596), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((7397, 7441), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""random_uniform_initializer"""'], {}), "('random_uniform_initializer')\n", (7411, 7441), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((10305, 10348), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""random_normal_initializer"""'], {}), "('random_normal_initializer')\n", (10319, 10348), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((12924, 12970), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""truncated_normal_initializer"""'], {}), "('truncated_normal_initializer')\n", (12938, 12970), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((15152, 15226), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""glorot_uniform_initializer"""', '"""xavier_uniform_initializer"""'], {}), "('glorot_uniform_initializer', 'xavier_uniform_initializer')\n", (15166, 15226), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((17558, 17630), 
'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""glorot_normal_initializer"""', '"""xavier_normal_initializer"""'], {}), "('glorot_normal_initializer', 'xavier_normal_initializer')\n", (17572, 17630), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((19921, 19967), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""variance_scaling_initializer"""'], {}), "('variance_scaling_initializer')\n", (19935, 19967), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((23385, 23422), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""kaiming_initializer"""'], {}), "('kaiming_initializer')\n", (23399, 23422), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((1092, 1131), 'oneflow.core.job.initializer_conf_pb2.InitializerConf', 'initializer_conf_util.InitializerConf', ([], {}), '()\n', (1129, 1131), True, 'import oneflow.core.job.initializer_conf_pb2 as initializer_conf_util\n'), ((1149, 1193), 'oneflow.core.job.initializer_conf_pb2.EmptyInitializerConf', 'initializer_conf_util.EmptyInitializerConf', ([], {}), '()\n', (1191, 1193), True, 'import oneflow.core.job.initializer_conf_pb2 as initializer_conf_util\n'), ((3321, 3360), 'oneflow.core.job.initializer_conf_pb2.InitializerConf', 'initializer_conf_util.InitializerConf', ([], {}), '()\n', (3358, 3360), True, 'import oneflow.core.job.initializer_conf_pb2 as initializer_conf_util\n'), ((9744, 9783), 'oneflow.core.job.initializer_conf_pb2.InitializerConf', 'initializer_conf_util.InitializerConf', ([], {}), '()\n', (9781, 9783), True, 'import oneflow.core.job.initializer_conf_pb2 as initializer_conf_util\n'), ((12726, 12765), 'oneflow.core.job.initializer_conf_pb2.InitializerConf', 'initializer_conf_util.InitializerConf', ([], {}), '()\n', (12763, 12765), True, 'import oneflow.core.job.initializer_conf_pb2 as initializer_conf_util\n'), ((14949, 14988), 
'oneflow.core.job.initializer_conf_pb2.InitializerConf', 'initializer_conf_util.InitializerConf', ([], {}), '()\n', (14986, 14988), True, 'import oneflow.core.job.initializer_conf_pb2 as initializer_conf_util\n'), ((22902, 22941), 'oneflow.core.job.initializer_conf_pb2.InitializerConf', 'initializer_conf_util.InitializerConf', ([], {}), '()\n', (22939, 22941), True, 'import oneflow.core.job.initializer_conf_pb2 as initializer_conf_util\n'), ((27028, 27074), 'functools.reduce', 'functools.reduce', (['(lambda a, b: a * b)', 'shape', '(1)'], {}), '(lambda a, b: a * b, shape, 1)\n', (27044, 27074), False, 'import functools\n'), ((31474, 31508), 'numpy.random.default_rng', 'np.random.default_rng', (['random_seed'], {}), '(random_seed)\n', (31495, 31508), True, 'import numpy as np\n'), ((31856, 31890), 'numpy.random.default_rng', 'np.random.default_rng', (['random_seed'], {}), '(random_seed)\n', (31877, 31890), True, 'import numpy as np\n'), ((32289, 32323), 'numpy.random.default_rng', 'np.random.default_rng', (['random_seed'], {}), '(random_seed)\n', (32310, 32323), True, 'import numpy as np\n'), ((32534, 32550), 'numpy.empty', 'np.empty', (['length'], {}), '(length)\n', (32542, 32550), True, 'import numpy as np\n'), ((33144, 33178), 'numpy.random.default_rng', 'np.random.default_rng', (['random_seed'], {}), '(random_seed)\n', (33165, 33178), True, 'import numpy as np\n'), ((34420, 34454), 'numpy.random.default_rng', 'np.random.default_rng', (['random_seed'], {}), '(random_seed)\n', (34441, 34454), True, 'import numpy as np\n'), ((27464, 27478), 'math.sqrt', 'math.sqrt', (['fan'], {}), '(fan)\n', (27473, 27478), False, 'import math\n'), ((27534, 27574), 'oneflow.random_normal_initializer', 'flow.random_normal_initializer', (['(0.0)', 'std'], {}), '(0.0, std)\n', (27564, 27574), True, 'import oneflow as flow\n'), ((31209, 31251), 'numpy.full', 'np.full', (['(length,)', 'initializer_conf.value'], {}), '((length,), initializer_conf.value)\n', (31216, 31251), True, 'import 
numpy as np\n'), ((27670, 27716), 'oneflow.random_uniform_initializer', 'flow.random_uniform_initializer', (['(-bound)', 'bound'], {}), '(-bound, bound)\n', (27701, 27716), True, 'import oneflow as flow\n'), ((34535, 34551), 'math.sqrt', 'math.sqrt', (['scale'], {}), '(scale)\n', (34544, 34551), False, 'import math\n'), ((34726, 34742), 'math.sqrt', 'math.sqrt', (['scale'], {}), '(scale)\n', (34735, 34742), False, 'import math\n'), ((27634, 27648), 'math.sqrt', 'math.sqrt', (['(3.0)'], {}), '(3.0)\n', (27643, 27648), False, 'import math\n'), ((30133, 30147), 'math.sqrt', 'math.sqrt', (['(2.0)'], {}), '(2.0)\n', (30142, 30147), False, 'import math\n'), ((34888, 34910), 'math.sqrt', 'math.sqrt', (['(3.0 * scale)'], {}), '(3.0 * scale)\n', (34897, 34910), False, 'import math\n'), ((30202, 30244), 'math.sqrt', 'math.sqrt', (['(2.0 / (1 + negative_slope ** 2))'], {}), '(2.0 / (1 + negative_slope ** 2))\n', (30211, 30244), False, 'import math\n'), ((32745, 32764), 'numpy.abs', 'np.abs', (['(norm - mean)'], {}), '(norm - mean)\n', (32751, 32764), True, 'import numpy as np\n'), ((33478, 33505), 'numpy.prod', 'np.prod', (['var_blob_shape[1:]'], {}), '(var_blob_shape[1:])\n', (33485, 33505), True, 'import numpy as np\n'), ((33618, 33645), 'numpy.prod', 'np.prod', (['var_blob_shape[2:]'], {}), '(var_blob_shape[2:])\n', (33625, 33645), True, 'import numpy as np\n'), ((33697, 33726), 'numpy.prod', 'np.prod', (['var_blob_shape[1:-1]'], {}), '(var_blob_shape[1:-1])\n', (33704, 33726), True, 'import numpy as np\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import os
from collections import OrderedDict
import numpy as np
import oneflow as flow
import tensorflow as tf
from test_util import Args, CompareOpWithTensorFlow, GenArgDict
@flow.unittest.skip_unless_1n4d()
class TestPad(flow.unittest.TestCase):
    def test_pad(test_case):
        # Compare oneflow's pad against tensorflow's pad on 4D inputs over
        # the cartesian product of devices, shapes and padding settings,
        # including a huge constant_value to exercise overflow handling.
        op_args = [
            Args(
                [([1, 2], [0, 0], [1, 2], [1, 1])],
                tf.constant([([1, 2], [0, 0], [1, 2], [1, 1])]),
            ),
            Args(
                [([0, 0], [30, 0], [0, 1], [1, 0]), 99999999999999999999999999999999],
                [
                    tf.constant(([0, 0], [30, 0], [0, 1], [1, 0])),
                    "constant",
                    99999999999999999999999999999999,
                ],
            ),
            Args(
                [([10, 0], [0, 0], [10, 20], [0, 0])],
                tf.constant([([10, 0], [0, 0], [10, 20], [0, 0])]),
            ),
        ]
        arg_dict = OrderedDict(
            device_type=["cpu", "gpu"],
            flow_op=[flow.pad],
            tf_op=[tf.pad],
            input_shape=[(2, 2, 1, 3), (1, 1, 2, 3)],
            op_args=op_args,
        )
        for kwargs in GenArgDict(arg_dict):
            CompareOpWithTensorFlow(**kwargs)

    def test_pad_5d(test_case):
        # Same comparison for 5D inputs.
        op_args = [
            Args(
                [([1, 2], [3, 4], [5, 6], [7, 8], [9, 10])],
                tf.constant([([1, 2], [3, 4], [5, 6], [7, 8], [9, 10])]),
            ),
            Args(
                [([1, 1], [2, 2], [3, 3], [4, 4], [5, 5])],
                tf.constant([([1, 1], [2, 2], [3, 3], [4, 4], [5, 5])]),
            ),
            Args(
                [([0, 0], [0, 0], [10, 20], [0, 0], [3, 2])],
                tf.constant([([0, 0], [0, 0], [10, 20], [0, 0], [3, 2])]),
            ),
        ]
        arg_dict = OrderedDict(
            device_type=["cpu", "gpu"],
            flow_op=[flow.pad],
            tf_op=[tf.pad],
            input_shape=[(2, 2, 1, 3, 1), (1, 1, 2, 3, 1)],
            op_args=op_args,
        )
        for kwargs in GenArgDict(arg_dict):
            CompareOpWithTensorFlow(**kwargs)
if __name__ == "__main__":
    # Allow running this test module directly with `python <file>`.
    unittest.main()
| [
"oneflow.unittest.skip_unless_1n4d"
] | [((786, 818), 'oneflow.unittest.skip_unless_1n4d', 'flow.unittest.skip_unless_1n4d', ([], {}), '()\n', (816, 818), True, 'import oneflow as flow\n'), ((2778, 2793), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2791, 2793), False, 'import unittest\n'), ((906, 919), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (917, 919), False, 'from collections import OrderedDict\n'), ((1788, 1808), 'test_util.GenArgDict', 'GenArgDict', (['arg_dict'], {}), '(arg_dict)\n', (1798, 1808), False, 'from test_util import Args, CompareOpWithTensorFlow, GenArgDict\n'), ((1905, 1918), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1916, 1918), False, 'from collections import OrderedDict\n'), ((2680, 2700), 'test_util.GenArgDict', 'GenArgDict', (['arg_dict'], {}), '(arg_dict)\n', (2690, 2700), False, 'from test_util import Args, CompareOpWithTensorFlow, GenArgDict\n'), ((1822, 1852), 'test_util.CompareOpWithTensorFlow', 'CompareOpWithTensorFlow', ([], {}), '(**arg)\n', (1845, 1852), False, 'from test_util import Args, CompareOpWithTensorFlow, GenArgDict\n'), ((2714, 2744), 'test_util.CompareOpWithTensorFlow', 'CompareOpWithTensorFlow', ([], {}), '(**arg)\n', (2737, 2744), False, 'from test_util import Args, CompareOpWithTensorFlow, GenArgDict\n'), ((1228, 1275), 'tensorflow.constant', 'tf.constant', (['[([1, 2], [0, 0], [1, 2], [1, 1])]'], {}), '([([1, 2], [0, 0], [1, 2], [1, 1])])\n', (1239, 1275), True, 'import tensorflow as tf\n'), ((1692, 1742), 'tensorflow.constant', 'tf.constant', (['[([10, 0], [0, 0], [10, 20], [0, 0])]'], {}), '([([10, 0], [0, 0], [10, 20], [0, 0])])\n', (1703, 1742), True, 'import tensorflow as tf\n'), ((2242, 2298), 'tensorflow.constant', 'tf.constant', (['[([1, 2], [3, 4], [5, 6], [7, 8], [9, 10])]'], {}), '([([1, 2], [3, 4], [5, 6], [7, 8], [9, 10])])\n', (2253, 2298), True, 'import tensorflow as tf\n'), ((2409, 2464), 'tensorflow.constant', 'tf.constant', (['[([1, 1], [2, 2], [3, 3], [4, 4], [5, 5])]'], {}), '([([1, 1], 
[2, 2], [3, 3], [4, 4], [5, 5])])\n', (2420, 2464), True, 'import tensorflow as tf\n'), ((2577, 2634), 'tensorflow.constant', 'tf.constant', (['[([0, 0], [0, 0], [10, 20], [0, 0], [3, 2])]'], {}), '([([0, 0], [0, 0], [10, 20], [0, 0], [3, 2])])\n', (2588, 2634), True, 'import tensorflow as tf\n'), ((1435, 1481), 'tensorflow.constant', 'tf.constant', (['([0, 0], [30, 0], [0, 1], [1, 0])'], {}), '(([0, 0], [30, 0], [0, 1], [1, 0]))\n', (1446, 1481), True, 'import tensorflow as tf\n')] |
import argparse
import math
import oneflow as flow
_GLOBAL_ARGS = None  # cached argparse.Namespace, populated on first get_args() call
def get_args():
    """Return the parsed CLI arguments, invoking parse_args() only once."""
    global _GLOBAL_ARGS
    if _GLOBAL_ARGS is None:
        _GLOBAL_ARGS = parse_args()
    return _GLOBAL_ARGS
def str2bool(v):
    """argparse `type=` helper: map a yes/no style string to a bool.

    Raises argparse.ArgumentTypeError for unrecognized spellings.
    """
    normalized = v.lower()
    if normalized in {"yes", "true", "t", "y", "1"}:
        return True
    if normalized in {"no", "false", "f", "n", "0"}:
        return False
    raise argparse.ArgumentTypeError("Unsupported value encountered.")
def parse_args(ignore_unknown_args=False):
    """Build the ResNet50 CLI parser, parse and validate the arguments,
    derive global batch sizes and per-epoch batch counts, and print the
    result on rank 0.

    Args:
        ignore_unknown_args: when True, unrecognized flags are ignored
            (``parse_known_args``) instead of raising an error.

    Returns:
        argparse.Namespace: the validated, post-processed arguments.

    Raises:
        ValueError: for unsupported flag combinations (multi-node,
            ddp+graph, fp16 in eager mode, non-local metrics with ddp,
            grad scaling with ddp).
    """
    parser = argparse.ArgumentParser(
        description="OneFlow ResNet50 Arguments", allow_abbrev=False
    )
    # checkpoint save/load
    parser.add_argument(
        "--save",
        type=str,
        default=None,
        dest="save_path",
        help="root dir of saving checkpoint",
    )
    parser.add_argument(
        "--save-init",
        action="store_true",
        dest="save_init",
        help="save right on init model finished",
    )
    parser.add_argument(
        "--load",
        type=str,
        default=None,
        dest="load_path",
        help="root dir of loading checkpoint",
    )
    # dataset options
    parser.add_argument(
        "--ofrecord-path",
        type=str,
        default="./ofrecord",
        dest="ofrecord_path",
        help="dataset path",
    )
    parser.add_argument(
        "--ofrecord-part-num",
        type=int,
        default=1,
        dest="ofrecord_part_num",
        help="ofrecord data part number",
    )
    parser.add_argument(
        "--use-gpu-decode",
        action="store_true",
        dest="use_gpu_decode",
        help="Use gpu decode.",
    )
    parser.add_argument(
        "--synthetic-data",
        action="store_true",
        dest="synthetic_data",
        help="Use synthetic data",
    )
    # fuse bn relu or bn add relu
    parser.add_argument(
        "--fuse-bn-relu",
        action="store_true",
        dest="fuse_bn_relu",
        help="Whether to use use fuse batch_normalization and relu.",
    )
    parser.add_argument(
        "--fuse-bn-add-relu",
        action="store_true",
        dest="fuse_bn_add_relu",
        help="Whether to use use fuse batch_normalization, add and relu.",
    )
    # training hyper-parameters
    parser.add_argument(
        "--train-batch-size",
        type=int,
        default=32,
        dest="train_batch_size",
        help="train batch size",
    )
    parser.add_argument(
        "--val-batch-size",
        type=int,
        default=32,
        dest="val_batch_size",
        help="val batch size",
    )
    parser.add_argument(
        "--train-global-batch-size",
        type=int,
        default=None,
        dest="train_global_batch_size",
        help="train batch size",
    )
    parser.add_argument(
        "--val-global-batch-size",
        type=int,
        default=None,
        dest="val_global_batch_size",
        help="val batch size",
    )
    parser.add_argument(
        "--num-devices-per-node",
        type=int,
        default=1,
        dest="num_devices_per_node",
        help="",
    )
    parser.add_argument(
        "--num-nodes",
        type=int,
        default=1,
        dest="num_nodes",
        help="node/machine number for training",
    )
    parser.add_argument("--lr", type=float, default=0.256, dest="learning_rate")
    parser.add_argument("--wd", type=float, default=1.0 / 32768, dest="weight_decay")
    parser.add_argument("--momentum", type=float, default=0.875, help="momentum")
    parser.add_argument(
        "--lr-decay-type",
        type=str,
        default="cosine",
        choices=["none", "cosine", "step"],
        dest="lr_decay_type",
        help="cosine, step",
    )
    parser.add_argument(
        "--grad-clipping",
        type=float,
        default=0.0,
        dest="grad_clipping",
        help="gradient clipping",
    )
    parser.add_argument(
        "--warmup-epochs",
        type=int,
        default=5,
        dest="warmup_epochs",
        help="the epochs to warmp-up lr to scaled large-batch value",
    )
    parser.add_argument("--legacy-init", action="store_true", dest="legacy_init")
    parser.add_argument(
        "--use-fp16", action="store_true", help="Run model in fp16 mode."
    )
    parser.add_argument(
        "--num-epochs", type=int, default=90, dest="num_epochs", help="number of epochs"
    )
    parser.add_argument(
        "--nccl-fusion-threshold-mb",
        type=int,
        default=16,
        dest="nccl_fusion_threshold_mb",
        help="NCCL fusion threshold megabytes, set to 0 to compatible with previous version of OneFlow.",
    )
    parser.add_argument(
        "--nccl-fusion-max-ops",
        type=int,
        default=24,
        dest="nccl_fusion_max_ops",
        help="Maximum number of ops of NCCL fusion, set to 0 to compatible with previous version of OneFlow.",
    )
    parser.add_argument(
        "--zero-init-residual",
        type=str2bool,
        default=True,
        nargs="?",
        const=True,
        dest="zero_init_residual",
    )
    parser.add_argument(
        "--scale-grad",
        action="store_true",
        dest="scale_grad",
        help="scale init grad with world_size",
    )
    # for data process
    parser.add_argument(
        "--num-classes",
        type=int,
        default=1000,
        dest="num_classes",
        help="num of pic classes",
    )
    parser.add_argument(
        "--channel-last", action="store_true", dest="channel_last",
    )
    parser.add_argument(
        "--samples-per-epoch",
        type=int,
        default=1281167,
        dest="samples_per_epoch",
        help="train pic number",
    )
    parser.add_argument(
        "--val-samples-per-epoch",
        type=int,
        default=50000,
        dest="val_samples_per_epoch",
        help="validation pic number",
    )
    parser.add_argument(
        "--label-smoothing",
        type=float,
        default=0.1,
        dest="label_smoothing",
        help="label smoothing factor",
    )
    parser.add_argument(
        "--batches-per-epoch", type=int, default=None, dest="batches_per_epoch",
    )
    parser.add_argument(
        "--val-batches-per-epoch", type=int, default=None, dest="val_batches_per_epoch",
    )
    parser.add_argument(
        "--total-batches", type=int, default=-1, dest="total_batches",
    )
    parser.add_argument("--skip-eval", action="store_true", dest="skip_eval")
    # log and loss print
    parser.add_argument(
        "--print-interval",
        type=int,
        default=100,
        dest="print_interval",
        help="print loss every n iteration",
    )
    parser.add_argument(
        "--print-timestamp", action="store_true", dest="print_timestamp",
    )
    parser.add_argument(
        "--metric-local",
        type=str2bool,
        default=True,
        nargs="?",
        const=True,
        dest="metric_local",
    )
    parser.add_argument(
        "--metric-train-acc",
        type=str2bool,
        default=True,
        nargs="?",
        const=True,
        dest="metric_train_acc",
    )
    parser.add_argument(
        "--gpu-stat-file",
        type=str,
        default=None,
        dest="gpu_stat_file",
        help="stat gpu utilization and memory usage when print",
    )
    parser.add_argument("--graph", action="store_true", help="Run model in graph mode.")
    parser.add_argument("--ddp", action="store_true", help="Run model in ddp mode.")
    if ignore_unknown_args:
        args, _ = parser.parse_known_args()
    else:
        args = parser.parse_args()
    # reject flag combinations that the training script cannot run
    if args.num_nodes > 1:
        raise ValueError("NOT support num_nodes > 1")
    if args.ddp and args.graph:
        raise ValueError("graph and ddp can't be set at the same time")
    if args.use_fp16 and not args.graph:
        raise ValueError("NOT support fp16 in eager mode")
    if args.ddp and not args.metric_local:
        raise ValueError("metric_local must be set to True when with ddp")
    if args.ddp and args.scale_grad:
        raise ValueError("scale_grad is unavailable with ddp")
    # derive global batch sizes from per-device sizes (or validate the
    # explicitly-given global sizes divide evenly)
    world_size = flow.env.get_world_size()
    if args.train_global_batch_size is None:
        args.train_global_batch_size = args.train_batch_size * world_size
    else:
        assert args.train_global_batch_size % args.train_batch_size == 0
    if args.val_global_batch_size is None:
        args.val_global_batch_size = args.val_batch_size * world_size
    else:
        assert args.val_global_batch_size % args.val_batch_size == 0
    if args.batches_per_epoch is None:
        # NOTE(review): '//' already floors, so math.ceil here is a no-op and
        # the final partial batch is dropped. If a trailing short batch is
        # intended, this should be math.ceil(a / b) — confirm intent.
        args.batches_per_epoch = math.ceil(
            args.samples_per_epoch // args.train_global_batch_size
        )
    if args.val_batches_per_epoch is None:
        args.val_batches_per_epoch = int(
            args.val_samples_per_epoch / args.val_global_batch_size
        )
    # only rank 0 prints to avoid duplicated logs in multi-process runs
    if flow.env.get_rank() == 0:
        _print_args(args)
    return args
def _print_args(args):
print("------------------------ arguments ------------------------", flush=True)
str_list = []
for arg in vars(args):
dots = "." * (48 - len(arg))
str_list.append(" {} {} {}".format(arg, dots, getattr(args, arg)))
for arg in sorted(str_list, key=lambda x: x.lower()):
print(arg, flush=True)
print("-------------------- end of arguments ---------------------", flush=True)
if __name__ == "__main__":
    # Parse (and, on rank 0, print) the arguments when invoked as a script.
    get_args()
| [
"oneflow.env.get_world_size",
"oneflow.env.get_rank"
] | [((513, 602), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""OneFlow ResNet50 Arguments"""', 'allow_abbrev': '(False)'}), "(description='OneFlow ResNet50 Arguments',\n allow_abbrev=False)\n", (536, 602), False, 'import argparse\n'), ((8042, 8067), 'oneflow.env.get_world_size', 'flow.env.get_world_size', ([], {}), '()\n', (8065, 8067), True, 'import oneflow as flow\n'), ((8536, 8601), 'math.ceil', 'math.ceil', (['(args.samples_per_epoch // args.train_global_batch_size)'], {}), '(args.samples_per_epoch // args.train_global_batch_size)\n', (8545, 8601), False, 'import math\n'), ((8796, 8815), 'oneflow.env.get_rank', 'flow.env.get_rank', ([], {}), '()\n', (8813, 8815), True, 'import oneflow as flow\n'), ((394, 454), 'argparse.ArgumentTypeError', 'argparse.ArgumentTypeError', (['"""Unsupported value encountered."""'], {}), "('Unsupported value encountered.')\n", (420, 454), False, 'import argparse\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import numpy as np
import oneflow.experimental as flow
@unittest.skipIf(
    not flow.unittest.env.eager_execution_enabled(),
    ".numpy() doesn't work in lazy mode",
)
class TestSqueeze(flow.unittest.TestCase):
    """Checks flow.squeeze / Tensor.squeeze shapes on a (1, 1, 1, 3) input."""

    def test_squeeze(test_case):
        # Functional form with a list of dims: (1, 1, 1, 3) -> (1, 3).
        x = flow.Tensor(np.array([[[[1, 1, 1]]]]).astype(np.int32))
        squeezed_shape = flow.squeeze(x, dim=[1, 2]).numpy().shape
        test_case.assertTrue(np.array_equal(squeezed_shape, (1, 3)))

    def test_tensor_squeeze(test_case):
        # Tensor method mirrors the functional form.
        x = flow.Tensor(np.array([[[[1, 1, 1]]]]).astype(np.int32))
        squeezed_shape = x.squeeze(dim=[1, 2]).numpy().shape
        test_case.assertTrue(np.array_equal(squeezed_shape, (1, 3)))

    def test_squeeze_int(test_case):
        # An int dim squeezes only that axis: (1, 1, 1, 3) -> (1, 1, 3).
        x = flow.Tensor(np.array([[[[1, 1, 1]]]]).astype(np.int32))
        squeezed_shape = flow.squeeze(x, 1).numpy().shape
        test_case.assertTrue(np.array_equal(squeezed_shape, (1, 1, 3)))
# Run the squeeze tests with the standard unittest runner.
if __name__ == "__main__":
    unittest.main()
| [
"oneflow.experimental.squeeze",
"oneflow.experimental.unittest.env.eager_execution_enabled"
] | [((1639, 1654), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1652, 1654), False, 'import unittest\n'), ((690, 733), 'oneflow.experimental.unittest.env.eager_execution_enabled', 'flow.unittest.env.eager_execution_enabled', ([], {}), '()\n', (731, 733), True, 'import oneflow.experimental as flow\n'), ((1047, 1081), 'numpy.array_equal', 'np.array_equal', (['of_shape', 'np_shape'], {}), '(of_shape, np_shape)\n', (1061, 1081), True, 'import numpy as np\n'), ((1310, 1344), 'numpy.array_equal', 'np.array_equal', (['of_shape', 'np_shape'], {}), '(of_shape, np_shape)\n', (1324, 1344), True, 'import numpy as np\n'), ((1570, 1604), 'numpy.array_equal', 'np.array_equal', (['of_shape', 'np_shape'], {}), '(of_shape, np_shape)\n', (1584, 1604), True, 'import numpy as np\n'), ((883, 908), 'numpy.array', 'np.array', (['[[[[1, 1, 1]]]]'], {}), '([[[[1, 1, 1]]]])\n', (891, 908), True, 'import numpy as np\n'), ((946, 977), 'oneflow.experimental.squeeze', 'flow.squeeze', (['input'], {'dim': '[1, 2]'}), '(input, dim=[1, 2])\n', (958, 977), True, 'import oneflow.experimental as flow\n'), ((1152, 1177), 'numpy.array', 'np.array', (['[[[[1, 1, 1]]]]'], {}), '([[[[1, 1, 1]]]])\n', (1160, 1177), True, 'import numpy as np\n'), ((1412, 1437), 'numpy.array', 'np.array', (['[[[[1, 1, 1]]]]'], {}), '([[[[1, 1, 1]]]])\n', (1420, 1437), True, 'import numpy as np\n'), ((1475, 1497), 'oneflow.experimental.squeeze', 'flow.squeeze', (['input', '(1)'], {}), '(input, 1)\n', (1487, 1497), True, 'import oneflow.experimental as flow\n')] |
# -*- coding:utf-8 -*-
import sys
import argparse
from data_loader import Market1501, RandomIdentitySampler, ImageDataset
import oneflow as flow
from bisect import bisect_right
import os
import os.path as osp
import numpy as np
from utils.loggers import Logger
from utils.distance import compute_distance_matrix
from loss import TripletLoss, CrossEntropyLossLS
from model import ResReid
from lr_scheduler import WarmupMultiStepLR
def _parse_args():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("--gpu_devices", type=str, default="0")
parser.add_argument("--batch_size", type=int, default=64, required=False)
parser.add_argument("--eval_batch_size", type=int, default=64, required=False)
parser.add_argument("--num_classes", type=int, default=751, required=False)
parser.add_argument("--lr", type=float, default=3.5e-04, required=False)
parser.add_argument("--max_epoch", type=int, default=120, required=False)
parser.add_argument("--step-size", type=list, default=[40, 70], required=False)
parser.add_argument("--weight_t", type=float, default=0.5, required=False)
parser.add_argument("--margin", type=float, default=0.3, required=False)
parser.add_argument("--weight_decay", type=float, default=5e-4, required=False)
parser.add_argument("--adam_beta1", type=float, default=0.9, required=False)
parser.add_argument("--adam_beta2", type=float, default=0.999, required=False)
parser.add_argument(
"--gamma",
type=float,
default=0.1,
required=False,
help="learning rate decay multiplier",
)
parser.add_argument(
"--warmup", action="store_true", default=True, help="warm up lr scheduler"
)
parser.add_argument("--warmup_factor", type=float, default=0.1, required=False)
parser.add_argument("--warmup_iters", type=int, default=10, required=False)
parser.add_argument("--epsilon", type=float, default=0.1, required=False)
parser.add_argument(
"--data_dir",
type=str,
default="./dataset",
required=False,
help="dataset directory",
)
parser.add_argument("--image_height", type=int, default=256, required=False)
parser.add_argument("--image_width", type=int, default=128, required=False)
parser.add_argument(
"--evaluate", action="store_true", default=False, help="train or eval"
)
parser.add_argument("--eval_freq", type=int, default=20, required=False)
parser.add_argument(
"--dist_metric",
type=str,
choices=["euclidean", "cosine"],
default="euclidean",
help="euclidean or cosine",
)
parser.add_argument("--rerank", type=bool, default=False)
parser.add_argument(
"--load_weights",
type=str,
default="./resnet50_pretrained_model",
help="model load directory",
)
parser.add_argument(
"--log_dir",
type=str,
default="./output",
required=False,
help="log info save directory",
)
parser.add_argument(
"--flow_weight",
type=str,
default="./output/flow_weight",
required=False,
help="log info save directory",
)
parser.add_argument("--num_instances", type=int, default=4)
parser.add_argument(
"opts",
default=None,
nargs=argparse.REMAINDER,
help="Modify config options using the command-line",
)
return parser.parse_args()
def main(args):
    """Build the re-id model and either evaluate it or train it.

    Side effects: redirects sys.stdout to a file logger under ``args.log_dir``
    and sets CUDA_VISIBLE_DEVICES from ``args.gpu_devices``.
    """
    # log setting: everything printed below also lands in the log file.
    log_name = "log_test.log" if args.evaluate else "log_train.log"
    sys.stdout = Logger(osp.join(args.log_dir, log_name))
    # cuda setting
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_devices
    print("Currently using GPU {}".format(os.environ["CUDA_VISIBLE_DEVICES"]))
    print("Building re-id model ")
    model = ResReid(args.num_classes)
    if args.load_weights:
        # strict=False: the pretrained backbone lacks the classifier head.
        pretrain_models = flow.load(args.load_weights)
        model.load_state_dict(pretrain_models, strict=False)
    model = model.to("cuda")
    print("=> init dataset")
    dataset = Market1501(root=args.data_dir)
    if args.evaluate:
        evaluate(model, dataset)
    else:
        optimizer = flow.optim.Adam(
            model.parameters(),
            lr=args.lr,
            weight_decay=args.weight_decay,
            betas=(args.adam_beta1, args.adam_beta2),
        )
        # lr scheduler
        if args.warmup:
            scheduler = WarmupMultiStepLR(
                optimizer,
                milestones=args.step_size,
                gamma=args.gamma,
                warmup_factor=args.warmup_factor,
                warmup_iters=args.warmup_iters,
            )
        else:
            # BUG FIX: the multiplicative decay factor is
            # gamma ** (#milestones passed), not lr ** (...). LambdaLR
            # multiplies the base lr by this factor, so using args.lr here
            # collapsed the learning rate after the first milestone.
            scheduler = flow.optim.lr_scheduler.LambdaLR(
                optimizer,
                lr_lambda=lambda epoch: args.gamma
                ** bisect_right(args.step_size, epoch),
            )
        train(model, dataset, args.num_classes, optimizer, scheduler)
def train(model, dataset, num_classes, optimizer, scheduler):
    """Train ``model`` on ``dataset.train`` with triplet + softmax losses.

    Reads batch size, epochs, loss weights, image size and checkpoint paths
    from the module-global ``args``. Saves a checkpoint whenever the periodic
    evaluation improves on the best (rank1 + mAP) / 2 seen so far, and a
    final checkpoint at the end.
    """
    batch_size = args.batch_size  # NOTE: module-global ``args``
    best_rank = 0.0
    print("=> Start training")
    # losses: triplet on the embedding, label-smoothed CE on the logits
    criterion_t = TripletLoss(margin=args.margin).to("cuda")
    criterion_x = CrossEntropyLossLS(num_classes=num_classes, epsilon=args.epsilon).to(
        "cuda"
    )
    weight_t = args.weight_t
    weight_x = 1.0 - args.weight_t
    _, train_id, _ = map(list, zip(*dataset.train))
    train_dataset = ImageDataset(
        dataset.train, flag="train", process_size=(args.image_height, args.image_width)
    )
    # *****training*******#
    for epoch in range(0, args.max_epoch):
        model.train()
        # re-sample every epoch so identities are shuffled
        indices = list(RandomIdentitySampler(train_id, batch_size, args.num_instances))
        for i in range(len(indices) // batch_size):
            try:
                # train_batch[0,1,2] are [imgs, pid, cam_id]
                imgs, pids, _ = train_dataset.__getbatch__(
                    indices[i * batch_size : (i + 1) * batch_size]
                )
            except Exception:
                # Fall back to the trailing batch. (Was a bare ``except:``,
                # which also swallowed KeyboardInterrupt/SystemExit.)
                imgs, pids, _ = train_dataset.__getbatch__(indices[-batch_size:])
            imgs = flow.Tensor(np.array(imgs)).to("cuda")
            pids = flow.Tensor(np.array(pids), dtype=flow.int32).to("cuda")
            outputs, features = model(imgs)
            loss_t = compute_loss(criterion_t, features, pids)
            loss_x = compute_loss(criterion_x, outputs, pids)
            loss = weight_t * loss_t + weight_x * loss_x
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()
        scheduler.step()
        print(
            "epoch:",
            epoch + 1,
            "loss_t:",
            loss_t.numpy()[0],
            "loss_x:",
            loss_x.numpy()[0],
            "loss:",
            loss.numpy()[0],
            "lr:",
            optimizer.param_groups[0]["lr"],
        )
        # *****testing********#
        if (epoch + 1) % args.eval_freq == 0 and (epoch + 1) != args.max_epoch:
            rank1, mAP = evaluate(model, dataset)
            score = (rank1 + mAP) / 2.0
            # BUG FIX: ``best_rank`` was never updated, so every evaluation
            # after the first improvement counted as a new best and a
            # checkpoint was written on every subsequent epoch.
            if score > best_rank:
                best_rank = score
                flow.save(model.state_dict(), args.flow_weight + "_" + str(epoch))
    print("=> End training")
    print("=> Final test")
    rank1, _ = evaluate(model, dataset)
    flow.save(model.state_dict(), args.flow_weight)
def compute_loss(criterion, outputs, targets):
    """Apply *criterion*; average over elements when *outputs* is a
    tuple/list (deep supervision), otherwise apply it directly."""
    if not isinstance(outputs, (tuple, list)):
        return criterion(outputs, targets)
    return DeepSupervision(criterion, outputs, targets)
def DeepSupervision(criterion, xs, y):
    """Average *criterion* over every prediction in *xs*.

    Args:
        criterion: loss function taking (prediction, target)
        xs: tuple/list of predictions
        y: ground truth shared by all predictions

    Returns:
        mean of ``criterion(x, y)`` over ``xs``.
    """
    total = 0.0
    for prediction in xs:
        total = total + criterion(prediction, y)
    return total / len(xs)
def evaluate(model, dataset):
    """Extract features for the query and gallery sets, compute the
    query-to-gallery distance matrix (optionally re-ranked) and print the
    CMC curve and mAP.

    Reads eval batch size, image size, distance metric, re-ranking switch
    and log dir from the module-global ``args``.

    Returns:
        (rank1, mAP) tuple of floats.
    """
    query_dataset = ImageDataset(
        dataset.query, flag="test", process_size=(args.image_height, args.image_width)
    )
    gallery_dataset = ImageDataset(
        dataset.gallery, flag="test", process_size=(args.image_height, args.image_width)
    )
    eval_batch = args.eval_batch_size
    model.eval()
    dist_metric = args.dist_metric  # distance metric, ['euclidean', 'cosine']
    rerank = args.rerank  # use person re-ranking
    # NOTE(review): save_dir is assigned but never used in this function.
    save_dir = args.log_dir
    print("Extracting features from query set ...")
    # query features, query person IDs and query camera IDs
    qf, q_pids, q_camids = [], [], []
    q_ind = list(range(len(query_dataset)))
    # NOTE(review): integer-floor batching drops the last partial batch of
    # query (and below, gallery) images — confirm this is acceptable.
    for i in range((len(query_dataset) // eval_batch)):
        imgs, pids, camids = query_dataset.__getbatch__(
            q_ind[i * eval_batch : (i + 1) * eval_batch]
        )
        imgs = flow.Tensor(np.array(imgs)).to("cuda")
        with flow.no_grad():  # inference only; no autograd graph needed
            features = model(imgs)
        qf.append(features.numpy())
        q_pids.extend(pids)
        q_camids.extend(camids)
    qf = np.concatenate(qf, 0)
    q_pids = np.asarray(q_pids)
    q_camids = np.asarray(q_camids)
    print("Done, obtained {}-by-{} matrix".format(qf.shape[0], qf.shape[1]))
    print("Extracting features from gallery set ...")
    # gallery features, gallery person IDs and gallery camera IDs
    gf, g_pids, g_camids = [], [], []
    g_ind = list(range(len(gallery_dataset)))
    for i in range((len(gallery_dataset) // eval_batch)):
        imgs, pids, camids = gallery_dataset.__getbatch__(
            g_ind[i * eval_batch : (i + 1) * eval_batch]
        )
        imgs = flow.Tensor(np.array(imgs)).to("cuda")
        with flow.no_grad():
            features = model(imgs)
        gf.append(features.numpy())
        g_pids.extend(pids)
        g_camids.extend(camids)
    gf = np.concatenate(gf, 0)
    g_pids = np.asarray(g_pids)
    g_camids = np.asarray(g_camids)
    print("Done, obtained {}-by-{} matrix".format(gf.shape[0], gf.shape[1]))
    print("Computing distance matrix with metric={} ...".format(dist_metric))
    distmat = compute_distance_matrix(qf, gf, dist_metric)
    if rerank:
        # Re-ranking also needs query-query and gallery-gallery distances.
        print("Applying person re-ranking ...")
        distmat_qq = compute_distance_matrix(qf, qf, dist_metric)
        distmat_gg = compute_distance_matrix(gf, gf, dist_metric)
        distmat = re_ranking(distmat, distmat_qq, distmat_gg)
    print("Computing CMC and mAP ...")
    cmc, mAP = _eval(distmat, q_pids, g_pids, q_camids, g_camids)
    print("=".ljust(30, "=") + " Result " + "=".ljust(30, "="))
    print("mAP: {:.1%}".format(mAP))
    print("CMC curve")
    for r in [1, 5, 10]:
        print("Rank-{:<3}: {:.1%}".format(r, cmc[r - 1]))
    print("=".ljust(66, "="))
    return cmc[0], mAP
def _eval(distmat, q_pids, g_pids, q_camids, g_camids, max_rank=50):
"""Evaluation with market1501 metric
Key: for each query identity, its gallery images from the same camera view are discarded.
"""
num_q, num_g = distmat.shape
if num_g < max_rank:
max_rank = num_g
print("Note: number of gallery samples is quite small, got {}".format(num_g))
indices = np.argsort(distmat, axis=1)
matches = (g_pids[indices] == q_pids[:, np.newaxis]).astype(np.int32)
# compute cmc curve for each query
all_cmc = []
all_AP = []
num_valid_q = 0.0 # number of valid query
for q_idx in range(num_q):
# get query pid and camid
q_pid = q_pids[q_idx]
q_camid = q_camids[q_idx]
# remove gallery samples that have the same pid and camid with query
order = indices[q_idx]
remove = (g_pids[order] == q_pid) & (g_camids[order] == q_camid)
keep = np.invert(remove)
# compute cmc curve
# binary vector, positions with value 1 are correct matches
raw_cmc = matches[q_idx][keep]
if not np.any(raw_cmc):
# this condition is true when query identity does not appear in gallery
continue
cmc = raw_cmc.cumsum()
cmc[cmc > 1] = 1
all_cmc.append(cmc[:max_rank])
num_valid_q += 1.0
# compute average precision
# reference: https://en.wikipedia.org/wiki/Evaluation_measures_(information_retrieval)#Average_precision
num_rel = raw_cmc.sum()
tmp_cmc = raw_cmc.cumsum()
tmp_cmc = [x / (i + 1.0) for i, x in enumerate(tmp_cmc)]
tmp_cmc = np.asarray(tmp_cmc) * raw_cmc
AP = tmp_cmc.sum() / num_rel
all_AP.append(AP)
assert num_valid_q > 0, "Error: all query identities do not appear in gallery"
all_cmc = np.asarray(all_cmc).astype(np.float32)
all_cmc = all_cmc.sum(0) / num_valid_q
mAP = np.mean(all_AP)
return all_cmc, mAP
def re_ranking(q_g_dist, q_q_dist, g_g_dist, k1=20, k2=6, lambda_value=0.3):
    """Re-rank a query-gallery distance matrix with k-reciprocal encoding.

    Builds the full (query+gallery) x (query+gallery) distance matrix,
    encodes every sample by a Gaussian-weighted k-reciprocal neighbour set
    (``V``), computes a Jaccard-style distance between those encodings, and
    blends it with the original distance:
    ``final = jaccard * (1 - lambda_value) + original * lambda_value``.
    Returns only the query-to-gallery block of the blended matrix.

    NOTE(review): this appears to be the k-reciprocal re-ranking algorithm
    of Zhong et al. (CVPR'17) — confirm against the reference implementation.
    """
    # The following naming, e.g. gallery_num, is different from outer scope.
    # Don't care about it.
    original_dist = np.concatenate(
        [
            np.concatenate([q_q_dist, q_g_dist], axis=1),
            np.concatenate([q_g_dist.T, g_g_dist], axis=1),
        ],
        axis=0,
    )
    # Square the distances, then normalise each column by its maximum.
    original_dist = np.power(original_dist, 2).astype(np.float32)
    original_dist = np.transpose(1.0 * original_dist / np.max(original_dist, axis=0))
    V = np.zeros_like(original_dist).astype(np.float32)
    initial_rank = np.argsort(original_dist).astype(np.int32)
    query_num = q_g_dist.shape[0]
    # NOTE(review): despite the name, gallery_num/all_num is the TOTAL
    # number of samples (queries + gallery).
    gallery_num = q_g_dist.shape[0] + q_g_dist.shape[1]
    all_num = gallery_num
    for i in range(all_num):
        # k-reciprocal neighbors: j is kept only if i is also among j's
        # k1 nearest neighbours.
        forward_k_neigh_index = initial_rank[i, : k1 + 1]
        backward_k_neigh_index = initial_rank[forward_k_neigh_index, : k1 + 1]
        fi = np.where(backward_k_neigh_index == i)[0]
        k_reciprocal_index = forward_k_neigh_index[fi]
        k_reciprocal_expansion_index = k_reciprocal_index
        # Expand the set with each candidate's own (k1/2)-reciprocal
        # neighbours when the overlap is large enough (> 2/3).
        for j in range(len(k_reciprocal_index)):
            candidate = k_reciprocal_index[j]
            candidate_forward_k_neigh_index = initial_rank[
                candidate, : int(np.around(k1 / 2.0)) + 1
            ]
            candidate_backward_k_neigh_index = initial_rank[
                candidate_forward_k_neigh_index, : int(np.around(k1 / 2.0)) + 1
            ]
            fi_candidate = np.where(candidate_backward_k_neigh_index == candidate)[0]
            candidate_k_reciprocal_index = candidate_forward_k_neigh_index[fi_candidate]
            if len(
                np.intersect1d(candidate_k_reciprocal_index, k_reciprocal_index)
            ) > 2.0 / 3 * len(candidate_k_reciprocal_index):
                k_reciprocal_expansion_index = np.append(
                    k_reciprocal_expansion_index, candidate_k_reciprocal_index
                )
        k_reciprocal_expansion_index = np.unique(k_reciprocal_expansion_index)
        # Gaussian kernel of the distance, normalised to sum to 1 per row.
        weight = np.exp(-original_dist[i, k_reciprocal_expansion_index])
        V[i, k_reciprocal_expansion_index] = 1.0 * weight / np.sum(weight)
    # Only the query rows of the original distance are needed from here on.
    original_dist = original_dist[
        :query_num,
    ]
    if k2 != 1:
        # Query expansion: average each encoding with its k2 nearest ones.
        V_qe = np.zeros_like(V, dtype=np.float32)
        for i in range(all_num):
            V_qe[i, :] = np.mean(V[initial_rank[i, :k2], :], axis=0)
        V = V_qe
        del V_qe
    del initial_rank
    # Inverted index: for each sample, the rows whose encoding touches it.
    invIndex = []
    for i in range(gallery_num):
        invIndex.append(np.where(V[:, i] != 0)[0])
    jaccard_dist = np.zeros_like(original_dist, dtype=np.float32)
    # get jaccard_dist
    for i in range(query_num):
        temp_min = np.zeros(shape=[1, gallery_num], dtype=np.float32)
        indNonZero = np.where(V[i, :] != 0)[0]  # q_i's k-reciprocal index
        indImages = [invIndex[ind] for ind in indNonZero]  # rows sharing it
        for j in range(len(indNonZero)):
            temp_min[0, indImages[j]] = temp_min[0, indImages[j]] + np.minimum(
                V[i, indNonZero[j]], V[indImages[j], indNonZero[j]]  # V_pigj, V_gigj
            )
        jaccard_dist[i] = 1 - temp_min / (2.0 - temp_min)
    final_dist = jaccard_dist * (1 - lambda_value) + original_dist * lambda_value
    del original_dist
    del V
    del jaccard_dist
    final_dist = final_dist[:query_num, query_num:]
    return final_dist
# Script entry point. Note that ``args`` is intentionally module-global:
# train() and evaluate() read it directly instead of taking it as a parameter.
if __name__ == "__main__":
    args = _parse_args()
    main(args)
| [
"oneflow.load",
"oneflow.no_grad"
] | [((465, 544), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), '(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n', (488, 544), False, 'import argparse\n'), ((3899, 3924), 'model.ResReid', 'ResReid', (['args.num_classes'], {}), '(args.num_classes)\n', (3906, 3924), False, 'from model import ResReid\n'), ((4142, 4172), 'data_loader.Market1501', 'Market1501', ([], {'root': 'args.data_dir'}), '(root=args.data_dir)\n', (4152, 4172), False, 'from data_loader import Market1501, RandomIdentitySampler, ImageDataset\n'), ((5506, 5603), 'data_loader.ImageDataset', 'ImageDataset', (['dataset.train'], {'flag': '"""train"""', 'process_size': '(args.image_height, args.image_width)'}), "(dataset.train, flag='train', process_size=(args.image_height,\n args.image_width))\n", (5518, 5603), False, 'from data_loader import Market1501, RandomIdentitySampler, ImageDataset\n'), ((8139, 8235), 'data_loader.ImageDataset', 'ImageDataset', (['dataset.query'], {'flag': '"""test"""', 'process_size': '(args.image_height, args.image_width)'}), "(dataset.query, flag='test', process_size=(args.image_height,\n args.image_width))\n", (8151, 8235), False, 'from data_loader import Market1501, RandomIdentitySampler, ImageDataset\n'), ((8268, 8366), 'data_loader.ImageDataset', 'ImageDataset', (['dataset.gallery'], {'flag': '"""test"""', 'process_size': '(args.image_height, args.image_width)'}), "(dataset.gallery, flag='test', process_size=(args.image_height,\n args.image_width))\n", (8280, 8366), False, 'from data_loader import Market1501, RandomIdentitySampler, ImageDataset\n'), ((9190, 9211), 'numpy.concatenate', 'np.concatenate', (['qf', '(0)'], {}), '(qf, 0)\n', (9204, 9211), True, 'import numpy as np\n'), ((9225, 9243), 'numpy.asarray', 'np.asarray', (['q_pids'], {}), '(q_pids)\n', (9235, 9243), True, 'import numpy as np\n'), ((9259, 9279), 'numpy.asarray', 'np.asarray', (['q_camids'], {}), '(q_camids)\n', (9269, 
9279), True, 'import numpy as np\n'), ((9972, 9993), 'numpy.concatenate', 'np.concatenate', (['gf', '(0)'], {}), '(gf, 0)\n', (9986, 9993), True, 'import numpy as np\n'), ((10007, 10025), 'numpy.asarray', 'np.asarray', (['g_pids'], {}), '(g_pids)\n', (10017, 10025), True, 'import numpy as np\n'), ((10041, 10061), 'numpy.asarray', 'np.asarray', (['g_camids'], {}), '(g_camids)\n', (10051, 10061), True, 'import numpy as np\n'), ((10232, 10276), 'utils.distance.compute_distance_matrix', 'compute_distance_matrix', (['qf', 'gf', 'dist_metric'], {}), '(qf, gf, dist_metric)\n', (10255, 10276), False, 'from utils.distance import compute_distance_matrix\n'), ((11302, 11329), 'numpy.argsort', 'np.argsort', (['distmat'], {'axis': '(1)'}), '(distmat, axis=1)\n', (11312, 11329), True, 'import numpy as np\n'), ((12850, 12865), 'numpy.mean', 'np.mean', (['all_AP'], {}), '(all_AP)\n', (12857, 12865), True, 'import numpy as np\n'), ((15534, 15580), 'numpy.zeros_like', 'np.zeros_like', (['original_dist'], {'dtype': 'np.float32'}), '(original_dist, dtype=np.float32)\n', (15547, 15580), True, 'import numpy as np\n'), ((3660, 3692), 'os.path.join', 'osp.join', (['args.log_dir', 'log_name'], {}), '(args.log_dir, log_name)\n', (3668, 3692), True, 'import os.path as osp\n'), ((3978, 4006), 'oneflow.load', 'flow.load', (['args.load_weights'], {}), '(args.load_weights)\n', (3987, 4006), True, 'import oneflow as flow\n'), ((10362, 10406), 'utils.distance.compute_distance_matrix', 'compute_distance_matrix', (['qf', 'qf', 'dist_metric'], {}), '(qf, qf, dist_metric)\n', (10385, 10406), False, 'from utils.distance import compute_distance_matrix\n'), ((10428, 10472), 'utils.distance.compute_distance_matrix', 'compute_distance_matrix', (['gf', 'gf', 'dist_metric'], {}), '(gf, gf, dist_metric)\n', (10451, 10472), False, 'from utils.distance import compute_distance_matrix\n'), ((11851, 11868), 'numpy.invert', 'np.invert', (['remove'], {}), '(remove)\n', (11860, 11868), True, 'import numpy as np\n'), 
((14940, 14979), 'numpy.unique', 'np.unique', (['k_reciprocal_expansion_index'], {}), '(k_reciprocal_expansion_index)\n', (14949, 14979), True, 'import numpy as np\n'), ((14997, 15052), 'numpy.exp', 'np.exp', (['(-original_dist[i, k_reciprocal_expansion_index])'], {}), '(-original_dist[i, k_reciprocal_expansion_index])\n', (15003, 15052), True, 'import numpy as np\n'), ((15220, 15254), 'numpy.zeros_like', 'np.zeros_like', (['V'], {'dtype': 'np.float32'}), '(V, dtype=np.float32)\n', (15233, 15254), True, 'import numpy as np\n'), ((15655, 15705), 'numpy.zeros', 'np.zeros', ([], {'shape': '[1, gallery_num]', 'dtype': 'np.float32'}), '(shape=[1, gallery_num], dtype=np.float32)\n', (15663, 15705), True, 'import numpy as np\n'), ((4512, 4655), 'lr_scheduler.WarmupMultiStepLR', 'WarmupMultiStepLR', (['optimizer'], {'milestones': 'args.step_size', 'gamma': 'args.gamma', 'warmup_factor': 'args.warmup_factor', 'warmup_iters': 'args.warmup_iters'}), '(optimizer, milestones=args.step_size, gamma=args.gamma,\n warmup_factor=args.warmup_factor, warmup_iters=args.warmup_iters)\n', (4529, 4655), False, 'from lr_scheduler import WarmupMultiStepLR\n'), ((5217, 5248), 'loss.TripletLoss', 'TripletLoss', ([], {'margin': 'args.margin'}), '(margin=args.margin)\n', (5228, 5248), False, 'from loss import TripletLoss, CrossEntropyLossLS\n'), ((5278, 5343), 'loss.CrossEntropyLossLS', 'CrossEntropyLossLS', ([], {'num_classes': 'num_classes', 'epsilon': 'args.epsilon'}), '(num_classes=num_classes, epsilon=args.epsilon)\n', (5296, 5343), False, 'from loss import TripletLoss, CrossEntropyLossLS\n'), ((9032, 9046), 'oneflow.no_grad', 'flow.no_grad', ([], {}), '()\n', (9044, 9046), True, 'import oneflow as flow\n'), ((9815, 9829), 'oneflow.no_grad', 'flow.no_grad', ([], {}), '()\n', (9827, 9829), True, 'import oneflow as flow\n'), ((12020, 12035), 'numpy.any', 'np.any', (['raw_cmc'], {}), '(raw_cmc)\n', (12026, 12035), True, 'import numpy as np\n'), ((12566, 12585), 'numpy.asarray', 'np.asarray', 
(['tmp_cmc'], {}), '(tmp_cmc)\n', (12576, 12585), True, 'import numpy as np\n'), ((12758, 12777), 'numpy.asarray', 'np.asarray', (['all_cmc'], {}), '(all_cmc)\n', (12768, 12777), True, 'import numpy as np\n'), ((13133, 13177), 'numpy.concatenate', 'np.concatenate', (['[q_q_dist, q_g_dist]'], {'axis': '(1)'}), '([q_q_dist, q_g_dist], axis=1)\n', (13147, 13177), True, 'import numpy as np\n'), ((13191, 13237), 'numpy.concatenate', 'np.concatenate', (['[q_g_dist.T, g_g_dist]'], {'axis': '(1)'}), '([q_g_dist.T, g_g_dist], axis=1)\n', (13205, 13237), True, 'import numpy as np\n'), ((13292, 13318), 'numpy.power', 'np.power', (['original_dist', '(2)'], {}), '(original_dist, 2)\n', (13300, 13318), True, 'import numpy as np\n'), ((13393, 13422), 'numpy.max', 'np.max', (['original_dist'], {'axis': '(0)'}), '(original_dist, axis=0)\n', (13399, 13422), True, 'import numpy as np\n'), ((13432, 13460), 'numpy.zeros_like', 'np.zeros_like', (['original_dist'], {}), '(original_dist)\n', (13445, 13460), True, 'import numpy as np\n'), ((13499, 13524), 'numpy.argsort', 'np.argsort', (['original_dist'], {}), '(original_dist)\n', (13509, 13524), True, 'import numpy as np\n'), ((13872, 13909), 'numpy.where', 'np.where', (['(backward_k_neigh_index == i)'], {}), '(backward_k_neigh_index == i)\n', (13880, 13909), True, 'import numpy as np\n'), ((15113, 15127), 'numpy.sum', 'np.sum', (['weight'], {}), '(weight)\n', (15119, 15127), True, 'import numpy as np\n'), ((15313, 15356), 'numpy.mean', 'np.mean', (['V[initial_rank[i, :k2], :]'], {'axis': '(0)'}), '(V[initial_rank[i, :k2], :], axis=0)\n', (15320, 15356), True, 'import numpy as np\n'), ((15727, 15749), 'numpy.where', 'np.where', (['(V[i, :] != 0)'], {}), '(V[i, :] != 0)\n', (15735, 15749), True, 'import numpy as np\n'), ((5776, 5839), 'data_loader.RandomIdentitySampler', 'RandomIdentitySampler', (['train_id', 'batch_size', 'args.num_instances'], {}), '(train_id, batch_size, args.num_instances)\n', (5797, 5839), False, 'from data_loader 
import Market1501, RandomIdentitySampler, ImageDataset\n'), ((14435, 14490), 'numpy.where', 'np.where', (['(candidate_backward_k_neigh_index == candidate)'], {}), '(candidate_backward_k_neigh_index == candidate)\n', (14443, 14490), True, 'import numpy as np\n'), ((14792, 14861), 'numpy.append', 'np.append', (['k_reciprocal_expansion_index', 'candidate_k_reciprocal_index'], {}), '(k_reciprocal_expansion_index, candidate_k_reciprocal_index)\n', (14801, 14861), True, 'import numpy as np\n'), ((15487, 15509), 'numpy.where', 'np.where', (['(V[:, i] != 0)'], {}), '(V[:, i] != 0)\n', (15495, 15509), True, 'import numpy as np\n'), ((15951, 16014), 'numpy.minimum', 'np.minimum', (['V[i, indNonZero[j]]', 'V[indImages[j], indNonZero[j]]'], {}), '(V[i, indNonZero[j]], V[indImages[j], indNonZero[j]])\n', (15961, 16014), True, 'import numpy as np\n'), ((8992, 9006), 'numpy.array', 'np.array', (['imgs'], {}), '(imgs)\n', (9000, 9006), True, 'import numpy as np\n'), ((9774, 9788), 'numpy.array', 'np.array', (['imgs'], {}), '(imgs)\n', (9782, 9788), True, 'import numpy as np\n'), ((14619, 14683), 'numpy.intersect1d', 'np.intersect1d', (['candidate_k_reciprocal_index', 'k_reciprocal_index'], {}), '(candidate_k_reciprocal_index, k_reciprocal_index)\n', (14633, 14683), True, 'import numpy as np\n'), ((6261, 6275), 'numpy.array', 'np.array', (['imgs'], {}), '(imgs)\n', (6269, 6275), True, 'import numpy as np\n'), ((6319, 6333), 'numpy.array', 'np.array', (['pids'], {}), '(pids)\n', (6327, 6333), True, 'import numpy as np\n'), ((4897, 4932), 'bisect.bisect_right', 'bisect_right', (['args.step_size', 'epoch'], {}), '(args.step_size, epoch)\n', (4909, 4932), False, 'from bisect import bisect_right\n'), ((14214, 14233), 'numpy.around', 'np.around', (['(k1 / 2.0)'], {}), '(k1 / 2.0)\n', (14223, 14233), True, 'import numpy as np\n'), ((14369, 14388), 'numpy.around', 'np.around', (['(k1 / 2.0)'], {}), '(k1 / 2.0)\n', (14378, 14388), True, 'import numpy as np\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
from collections import OrderedDict
import numpy as np
import oneflow as flow
import tensorflow as tf
import test_global_storage
from test_util import GenArgList
# Enable memory growth on every physical GPU so TensorFlow does not grab
# all device memory up front (it must share the GPU with OneFlow here).
gpus = tf.config.experimental.list_physical_devices("GPU")
for gpu in gpus:
    tf.config.experimental.set_memory_growth(gpu, True)
def compare_with_tensorflow(
    device_type,
    x_shape,
    filters,
    kernel_size,
    groups,
    of_padding="SAME",
    tf_padding="SAME",
    stride=1,
    data_format="NCDHW",
    dilation=1,
):
    """Run a trainable 1-D convolution in OneFlow and assert that its output
    and the gradients w.r.t. input and weight match TensorFlow's conv1d.

    The OneFlow job stores its tensors/gradients into ``test_global_storage``
    via watch callbacks; TensorFlow then replays the same convolution (always
    in NWC layout) under a GradientTape for comparison.
    """
    assert device_type in ["gpu", "cpu"]
    flow.clear_default_session()
    func_config = flow.FunctionConfig()
    func_config.default_data_type(flow.float)
    func_config.default_logical_view(flow.scope.consistent_view())
    func_config.cudnn_conv_heuristic_search_algo(False)
    # TF computes in NWC, so NCW data/weights must be transposed before
    # comparison; for NWC the data transpose is the identity.
    if data_format == "NCW":
        xy_data_transpose = (0, 2, 1)
        weight_data_transpose = (2, 1, 0)
    else:
        xy_data_transpose = (0, 1, 2)
        weight_data_transpose = (1, 2, 0)

    # Lazy-graph training job: forward conv1d + one SGD step, with watch
    # callbacks capturing values and gradients into test_global_storage.
    @flow.global_function(type="train", function_config=func_config)
    def ConvJob():
        with flow.scope.placement(device_type, "0:0"):
            x = flow.get_variable(
                "x",
                shape=x_shape,
                dtype=flow.float,
                initializer=flow.random_uniform_initializer(minval=0, maxval=100),
                trainable=True,
            )
            # Channel axis differs per layout, hence two weight shapes.
            if data_format == "NCW":
                weight_shape = (filters, x.shape[1] // groups, kernel_size)
            else:
                weight_shape = (filters, kernel_size, x.shape[2] // groups)
            weight = flow.get_variable(
                "conv-weight",
                shape=weight_shape,
                dtype=flow.float,
                initializer=flow.random_uniform_initializer(minval=0, maxval=100),
            )
            loss = flow.nn.conv1d(
                x,
                weight,
                strides=[stride],
                padding=of_padding,
                data_format=data_format,
                dilations=dilation,
                groups=groups,
            )
            flow.optimizer.SGD(
                flow.optimizer.PiecewiseConstantScheduler([], [1e-4]), momentum=0
            ).minimize(loss)
            flow.watch(x, test_global_storage.Setter("x"))
            flow.watch_diff(x, test_global_storage.Setter("x_diff"))
            flow.watch(weight, test_global_storage.Setter("weight"))
            flow.watch_diff(weight, test_global_storage.Setter("weight_diff"))
            flow.watch(loss, test_global_storage.Setter("loss"))
            flow.watch_diff(loss, test_global_storage.Setter("loss_diff"))
            return loss

    # OneFlow
    check_point = flow.train.CheckPoint()
    check_point.init()
    of_out = ConvJob().get()
    # TensorFlow: replay the same conv on the tensors OneFlow produced.
    with tf.GradientTape(persistent=True) as tape:
        x = tf.Variable(test_global_storage.Get("x").transpose(xy_data_transpose))
        assert groups > 0
        assert x_shape[1] % groups == 0
        assert filters % groups == 0
        weight = tf.Variable(
            test_global_storage.Get("weight").transpose(weight_data_transpose)
        )
        tf_out = tf.nn.conv1d(
            x,
            weight,
            stride=[1, stride, 1],
            padding=tf_padding,
            data_format="NWC",
            dilations=[1, dilation, 1],
        )
    # Backprop the upstream gradient OneFlow saw, then compare everything.
    loss_diff = test_global_storage.Get("loss_diff").transpose(xy_data_transpose)
    tf_x_diff = tape.gradient(tf_out, x, loss_diff)
    tf_weight_diff = tape.gradient(tf_out, weight, loss_diff)
    assert np.allclose(
        of_out.numpy().transpose(xy_data_transpose),
        tf_out.numpy(),
        rtol=1e-5,
        atol=1e-5,
    )
    assert np.allclose(
        test_global_storage.Get("x_diff").transpose(xy_data_transpose),
        tf_x_diff.numpy(),
        rtol=1e-4,
        atol=1e-4,
    )
    assert np.allclose(
        test_global_storage.Get("weight_diff").transpose(weight_data_transpose),
        tf_weight_diff.numpy(),
        rtol=1e-5,
        atol=1e-5,
    )
def test_padding_valid(test_case):
    """Sweep conv1d VALID-padding configurations against TensorFlow."""
    arg_dict = OrderedDict(
        [
            ("device_type", ["cpu", "gpu"]),
            ("x_shape", [(10, 32, 10)]),
            ("filters", [64]),
            ("kernel_size", [3, 2]),
            ("groups", [1]),
            ("of_padding", ["VALID"]),
            ("tf_padding", ["VALID"]),
            ("stride", [2]),
            ("data_format", ["NCW", "NWC"]),
            ("dilation", [2]),
        ]
    )
    for combo in GenArgList(arg_dict):
        compare_with_tensorflow(*combo)
def test_padding_same(test_case):
    """Sweep conv1d SAME_UPPER-padding configurations against TensorFlow."""
    arg_dict = OrderedDict(
        [
            ("device_type", ["gpu", "cpu"]),
            ("x_shape", [(10, 32, 11)]),
            ("filters", [64]),
            ("kernel_size", [2]),
            ("groups", [1]),
            ("of_padding", ["SAME_UPPER"]),
            ("tf_padding", ["SAME"]),
            ("stride", [2]),
            ("data_format", ["NCW"]),
            ("dilation", [1]),
        ]
    )
    for combo in GenArgList(arg_dict):
        compare_with_tensorflow(*combo)
| [
"oneflow.clear_default_session",
"oneflow.global_function",
"oneflow.train.CheckPoint",
"oneflow.scope.placement",
"oneflow.nn.conv1d",
"oneflow.optimizer.PiecewiseConstantScheduler",
"oneflow.random_uniform_initializer",
"oneflow.FunctionConfig",
"oneflow.scope.consistent_view"
] | [((771, 822), 'tensorflow.config.experimental.list_physical_devices', 'tf.config.experimental.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (815, 822), True, 'import tensorflow as tf\n'), ((844, 895), 'tensorflow.config.experimental.set_memory_growth', 'tf.config.experimental.set_memory_growth', (['gpu', '(True)'], {}), '(gpu, True)\n', (884, 895), True, 'import tensorflow as tf\n'), ((1148, 1176), 'oneflow.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (1174, 1176), True, 'import oneflow as flow\n'), ((1195, 1216), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (1214, 1216), True, 'import oneflow as flow\n'), ((1592, 1655), 'oneflow.global_function', 'flow.global_function', ([], {'type': '"""train"""', 'function_config': 'func_config'}), "(type='train', function_config=func_config)\n", (1612, 1655), True, 'import oneflow as flow\n'), ((3313, 3336), 'oneflow.train.CheckPoint', 'flow.train.CheckPoint', ([], {}), '()\n', (3334, 3336), True, 'import oneflow as flow\n'), ((4719, 4732), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (4730, 4732), False, 'from collections import OrderedDict\n'), ((5114, 5134), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (5124, 5134), False, 'from test_util import GenArgList\n'), ((5225, 5238), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (5236, 5238), False, 'from collections import OrderedDict\n'), ((5614, 5634), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (5624, 5634), False, 'from test_util import GenArgList\n'), ((1301, 1329), 'oneflow.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (1327, 1329), True, 'import oneflow as flow\n'), ((3415, 3447), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {'persistent': '(True)'}), '(persistent=True)\n', (3430, 3447), True, 'import tensorflow as tf\n'), ((3780, 3897), 'tensorflow.nn.conv1d', 'tf.nn.conv1d', (['x', 
'weight'], {'stride': '[1, stride, 1]', 'padding': 'tf_padding', 'data_format': '"""NWC"""', 'dilations': '[1, dilation, 1]'}), "(x, weight, stride=[1, stride, 1], padding=tf_padding,\n data_format='NWC', dilations=[1, dilation, 1])\n", (3792, 3897), True, 'import tensorflow as tf\n'), ((1688, 1728), 'oneflow.scope.placement', 'flow.scope.placement', (['device_type', '"""0:0"""'], {}), "(device_type, '0:0')\n", (1708, 1728), True, 'import oneflow as flow\n'), ((2444, 2572), 'oneflow.nn.conv1d', 'flow.nn.conv1d', (['x', 'weight'], {'strides': '[stride]', 'padding': 'of_padding', 'data_format': 'data_format', 'dilations': 'dilation', 'groups': 'groups'}), '(x, weight, strides=[stride], padding=of_padding, data_format\n =data_format, dilations=dilation, groups=groups)\n', (2458, 2572), True, 'import oneflow as flow\n'), ((3994, 4030), 'test_global_storage.Get', 'test_global_storage.Get', (['"""loss_diff"""'], {}), "('loss_diff')\n", (4017, 4030), False, 'import test_global_storage\n'), ((2865, 2896), 'test_global_storage.Setter', 'test_global_storage.Setter', (['"""x"""'], {}), "('x')\n", (2891, 2896), False, 'import test_global_storage\n'), ((2929, 2965), 'test_global_storage.Setter', 'test_global_storage.Setter', (['"""x_diff"""'], {}), "('x_diff')\n", (2955, 2965), False, 'import test_global_storage\n'), ((2998, 3034), 'test_global_storage.Setter', 'test_global_storage.Setter', (['"""weight"""'], {}), "('weight')\n", (3024, 3034), False, 'import test_global_storage\n'), ((3072, 3113), 'test_global_storage.Setter', 'test_global_storage.Setter', (['"""weight_diff"""'], {}), "('weight_diff')\n", (3098, 3113), False, 'import test_global_storage\n'), ((3144, 3178), 'test_global_storage.Setter', 'test_global_storage.Setter', (['"""loss"""'], {}), "('loss')\n", (3170, 3178), False, 'import test_global_storage\n'), ((3214, 3253), 'test_global_storage.Setter', 'test_global_storage.Setter', (['"""loss_diff"""'], {}), "('loss_diff')\n", (3240, 3253), False, 'import 
test_global_storage\n'), ((4351, 4384), 'test_global_storage.Get', 'test_global_storage.Get', (['"""x_diff"""'], {}), "('x_diff')\n", (4374, 4384), False, 'import test_global_storage\n'), ((4518, 4556), 'test_global_storage.Get', 'test_global_storage.Get', (['"""weight_diff"""'], {}), "('weight_diff')\n", (4541, 4556), False, 'import test_global_storage\n'), ((1879, 1932), 'oneflow.random_uniform_initializer', 'flow.random_uniform_initializer', ([], {'minval': '(0)', 'maxval': '(100)'}), '(minval=0, maxval=100)\n', (1910, 1932), True, 'import oneflow as flow\n'), ((2356, 2409), 'oneflow.random_uniform_initializer', 'flow.random_uniform_initializer', ([], {'minval': '(0)', 'maxval': '(100)'}), '(minval=0, maxval=100)\n', (2387, 2409), True, 'import oneflow as flow\n'), ((3481, 3509), 'test_global_storage.Get', 'test_global_storage.Get', (['"""x"""'], {}), "('x')\n", (3504, 3509), False, 'import test_global_storage\n'), ((3685, 3718), 'test_global_storage.Get', 'test_global_storage.Get', (['"""weight"""'], {}), "('weight')\n", (3708, 3718), False, 'import test_global_storage\n'), ((2743, 2798), 'oneflow.optimizer.PiecewiseConstantScheduler', 'flow.optimizer.PiecewiseConstantScheduler', (['[]', '[0.0001]'], {}), '([], [0.0001])\n', (2784, 2798), True, 'import oneflow as flow\n')] |
import json
import os
import shutil
import sys
import oneflow as flow
import numpy as np
import torch
import transformers
from base_weight_utils import colored_string
from roberta_weight_utils import (
Roberta_trans,
RobertaForMaskedLM_trans,
RobertaForSequenceClassification_trans
)
sys.path.append("../")
from models.roberta import (
Roberta,
RobertaForMaskedLM,
RobertaForSequenceClassification
)
# model saves in `./save_dir/model_dir/weights`
# parameters saved in `./save_dir/model_dir/parameters.json`
class BaseTransform:
    """Base helper that copies HuggingFace (PyTorch) Roberta weights into OneFlow.

    The converted model is saved under ``<save_dir>/<model_dir>/weights`` and
    its hyper-parameters under ``<save_dir>/<model_dir>/parameters.json``.
    Subclasses supply the concrete model pair and implement :meth:`test`.
    """

    def __init__(self, model_flow, model_torch, pretrained_model, save_dir, model_dir, trans_func, cuda=False):
        """Build both models and remember where the converted weights go.

        Args:
            model_flow: oneflow model class (e.g. ``Roberta``).
            model_torch: transformers model class (e.g. ``transformers.RobertaModel``).
            pretrained_model: name or path understood by ``from_pretrained``.
            save_dir: root output directory.
            model_dir: sub-directory for this model, joined as ``save_dir/model_dir``.
            trans_func: function that copies torch weights into the flow model.
            cuda: place both models on GPU when True.
        """
        self.save_dir = os.path.join(save_dir, model_dir)
        self.weights_dir = os.path.join(self.save_dir, "weights")
        self.param_path = os.path.join(self.save_dir, "parameters.json")
        self.pretrained_model = pretrained_model
        self.config = transformers.RobertaConfig.from_pretrained(pretrained_model)
        self.device = "cuda" if cuda else "cpu"
        self.build_params()
        self.build_model(model_flow, model_torch)
        self.trans_func = trans_func

    def build_params(self):
        # Mirror the transformers config into the kwargs expected by the
        # oneflow model constructors; subclasses may extend this dict.
        self.kwargs = {
            "vocab_size": self.config.vocab_size,
            "type_vocab_size": self.config.type_vocab_size,
            "max_position_embeddings": self.config.max_position_embeddings,
            "hidden_size": self.config.hidden_size,
            "intermediate_size": self.config.intermediate_size,
            "chunk_size_feed_forward": 0,
            "num_layers": self.config.num_hidden_layers,
            "nheads": self.config.num_attention_heads,
            "activation": self.config.hidden_act,
            "pad_token_id": self.config.pad_token_id,
            "layer_norm_eps": self.config.layer_norm_eps,
            "attn_dropout": self.config.attention_probs_dropout_prob,
            "hidden_dropout": self.config.hidden_dropout_prob,
            "position_embedding_type": self.config.position_embedding_type,
            "is_decoder": self.config.is_decoder,
            "add_cross_attention": self.config.add_cross_attention,
        }

    def build_model(self, model_flow, model_torch):
        """Instantiate the pretrained torch model and a fresh oneflow model."""
        colored_string("Generating model with transformers, pretrained = {}...".format(self.pretrained_model), color="green", end="")
        self.model_torch = model_torch.from_pretrained(self.pretrained_model).to(self.device)
        colored_string("Done.", color="green")
        colored_string("Generating model with oneflow...", color="green", end="")
        self.model_flow = model_flow(**self.kwargs).to(self.device)
        colored_string("Done.", color="green")

    def run(self, test_args, test_only=False):
        """Transform and save the weights (unless ``test_only``), then compare outputs."""
        if not test_only:
            self.transform()
            self.save()
        self.test(**test_args)

    def transform(self):
        """Copy the torch weights into the oneflow model via ``trans_func``."""
        colored_string("Transforming weights...", color="green")
        self.model_flow = self.trans_func(self.model_flow, self.model_torch)
        colored_string("Done.", color="green")

    def test(self, bs, seq_len):
        raise NotImplementedError

    def L1Loss_numpy(self, flow_tensor, torch_tensor):
        """Return the mean absolute (L1) error between a oneflow and a torch tensor."""
        # BUGFIX: the old check compared a torch.device against the
        # ``torch.cuda`` module (always False), so CUDA tensors were never
        # moved to the host and ``.numpy()`` raised on GPU runs.
        if torch_tensor.device.type == "cuda":
            torch_tensor = torch_tensor.cpu()
        # BUGFIX: take the absolute difference; a signed mean lets positive
        # and negative deviations cancel and report a spuriously small error.
        return np.mean(np.abs(flow_tensor.numpy() - torch_tensor.detach().numpy()))

    def get_random_tensor(self, low, high, sz, if_int=False):
        """Draw random integers once and return them as a (oneflow, torch) pair.

        Both tensors wrap the same numpy array, so the two frameworks see
        identical inputs. With ``if_int`` the pair is cast to int64/long.
        """
        arr = np.random.randint(low=low, size=sz, high=high)
        flow_tensor = flow.tensor(arr, device=self.device)
        torch_tensor = torch.tensor(arr, device=self.device)
        return (flow_tensor.to(flow.int64), torch_tensor.to(torch.long)) if if_int \
            else (flow_tensor, torch_tensor)

    def save_param(self):
        """Dump the constructor kwargs to ``parameters.json``."""
        with open(self.param_path, mode="w") as fp:
            json.dump(self.kwargs, fp)

    def _write_model(self):
        # Shared tail of save(): create the directory, dump weights + params.
        os.makedirs(self.weights_dir)
        flow.save(self.model_flow.state_dict(), self.weights_dir)
        self.save_param()
        colored_string("Model saved.", color="green")

    def save(self):
        """Save the converted model, asking before overwriting an existing directory."""
        if os.path.exists(self.save_dir):
            colored_string(
                "Model save directory '{}' already exists. Do you still want to save? (y/n)".format(self.save_dir), color="blue")
            ans = input()
            while ans.lower() != "y" and ans.lower() != "n":
                ans = input("Please input y/n:")
            if ans.lower() != "y":
                return  # user declined: keep the existing directory untouched
            shutil.rmtree(self.save_dir)
            assert not os.path.exists(self.save_dir)
        self._write_model()
class RobertaTransform(BaseTransform):
    """Converts the weights of the bare ``Roberta`` encoder and verifies the outputs."""

    def __init__(self, pretrained_model, save_dir, model_dir, cuda=False):
        colored_string("Transform weights of Roberta", color="green")
        super().__init__(Roberta, transformers.RobertaModel, pretrained_model,
                         save_dir, model_dir, Roberta_trans, cuda)

    def test(self, bs, seq_len):
        # Rebuild the oneflow model from the saved hyper-parameters and weights.
        with open(self.param_path, mode="r") as f:
            kwargs = json.load(f)
        self.model_flow = Roberta(**kwargs)
        self.model_flow.load_state_dict(flow.load(self.weights_dir))
        colored_string("Testing outputs...", color="green")
        self.model_torch.eval()
        self.model_flow.eval()

        # Random inputs: every helper call yields a (oneflow, torch) pair
        # backed by the same numpy draw. The call order below is kept exactly
        # as before so the RNG stream (and hence the inputs) is unchanged.
        ids_of, ids_pt = self.get_random_tensor(0, 2000, (bs, seq_len), if_int=True)
        mask_of, mask_pt = self.get_random_tensor(0, 2, sz=(bs, seq_len))
        type_of, type_pt = self.get_random_tensor(0, self.config.type_vocab_size, (bs, seq_len), if_int=True)
        pos_of, pos_pt = self.get_random_tensor(
            1, self.config.max_position_embeddings - 1, (bs, seq_len), if_int=True)
        head_of, head_pt = self.get_random_tensor(
            0, 2, (self.config.num_hidden_layers, self.config.num_attention_heads))
        inputs_embeds = None
        output_attentions = True
        output_hidden_states = True
        enc_hid_of, enc_hid_pt = self.get_random_tensor(
            0, 5, (bs, seq_len, self.config.hidden_size))
        enc_mask_of, enc_mask_pt = self.get_random_tensor(0, 2, (bs, seq_len))
        use_cache = False
        past_key_values = None

        # Forward pass through both frameworks with identical arguments.
        colored_string("Running model with oneflow...", color="green", end="")
        seq_flow, pool_flow, *_ = self.model_flow(
            ids_of, mask_of, type_of, pos_of, head_of,
            inputs_embeds, enc_hid_of, enc_mask_of, past_key_values,
            use_cache, output_attentions, output_hidden_states)
        colored_string("Done.", color="green")
        colored_string("Running model with transformers...", color="green", end="")
        output = self.model_torch(
            ids_pt, mask_pt, type_pt, pos_pt, head_pt,
            inputs_embeds, enc_hid_pt, enc_mask_pt, past_key_values,
            use_cache, output_attentions, output_hidden_states, return_dict=True)
        seq_trans = output.last_hidden_state
        pool_trans = output.pooler_output
        colored_string("Done.", color="green")

        # Report the mean discrepancies between the two implementations.
        colored_string("Calculating errors...", color="green")
        seq_error = self.L1Loss_numpy(seq_flow, seq_trans)
        pool_error = self.L1Loss_numpy(pool_flow, pool_trans)
        colored_string("Sequence output error:{}".format(
            seq_error.item()), color="green")
        colored_string("Pooled output error:{}".format(
            pool_error.item()), color="green")
        colored_string("Done.", color="green")
class RobertaForMaskedLMTransform(BaseTransform):
    """Converts the weights of ``RobertaForMaskedLM`` and verifies loss/logits."""
    def __init__(self, pretrained_model, save_dir, model_dir, cuda=False):
        colored_string("Transform weights of RobertaForMaskedLM", color="green")
        super().__init__(RobertaForMaskedLM, transformers.RobertaForMaskedLM, pretrained_model,
                         save_dir, model_dir, RobertaForMaskedLM_trans, cuda)
    def test(self, bs, seq_len):
        """Reload the saved oneflow model and compare its loss/logits against torch.

        Args:
            bs: batch size of the random test batch.
            seq_len: sequence length of the random test batch.
        """
        # Rebuild the oneflow model from the saved hyper-parameters + weights.
        with open(self.param_path, mode="r") as f:
            kwargs = json.load(f)
        self.model_flow = RobertaForMaskedLM(**kwargs)
        self.model_flow.load_state_dict(flow.load(self.weights_dir))
        colored_string("Testing outputs...", color="green")
        self.model_flow.eval()
        self.model_torch.eval()
        # Set inputs: each helper call returns a (oneflow, torch) tensor pair
        # built from one shared numpy draw, indexed as [0]/[1] below.
        input_ids = self.get_random_tensor(0, 2000, (bs, seq_len), if_int=True)
        attention_mask = self.get_random_tensor(0, 2, sz=(bs, seq_len))
        token_type_ids = self.get_random_tensor(0, self.config.type_vocab_size, (bs, seq_len), if_int=True)
        position_ids = self.get_random_tensor(
            1, self.config.max_position_embeddings - 1, (bs, seq_len), if_int=True)
        head_mask = self.get_random_tensor(
            0, 2, (self.config.num_hidden_layers, self.config.num_attention_heads))
        labels = self.get_random_tensor(0, self.config.vocab_size, (bs, seq_len))
        inputs_embeds = None
        output_attentions = True
        output_hidden_states = True
        encoder_hidden_states = self.get_random_tensor(
            0, 5, (bs, seq_len, self.config.hidden_size))
        encoder_attention_mask =self.get_random_tensor(0, 2, (bs, seq_len))
        # Run forward through both frameworks with matching positional args.
        colored_string("Running model with oneflow...", color="green", end="")
        out_flow = self.model_flow(input_ids[0], attention_mask[0], token_type_ids[0], position_ids[0], head_mask[0],
                                   inputs_embeds, encoder_hidden_states[0], encoder_attention_mask[0], labels[0], output_attentions, output_hidden_states)
        loss_flow, scores_flow, pkv_flow, hidden_flow, attn_flow, cross_attn_flow = out_flow
        colored_string("Done.", color="green")
        colored_string("Running model with transformers...", color="green", end="")
        output = self.model_torch(input_ids[1], attention_mask[1], token_type_ids[1], position_ids[1], head_mask[1],
                                  inputs_embeds, encoder_hidden_states[1], encoder_attention_mask[1], labels[1], output_attentions, output_hidden_states, return_dict=True)
        loss_torch = output.loss
        scores_torch = output.logits
        colored_string("Done.", color="green")
        # Calculate errors: mean discrepancy of the MLM loss and the logits.
        colored_string("Calculating errors...", color="green")
        loss_error = self.L1Loss_numpy(loss_flow, loss_torch)
        scores_error = self.L1Loss_numpy(scores_flow, scores_torch)
        colored_string("Loss error:{}".format(
            loss_error.item()), color="green")
        colored_string("Logits error:{}".format(
            scores_error.item()), color="green")
        colored_string("Done.", color="green")
class RobertaForSequenceClassificationTransform(BaseTransform):
    """Converts the weights of ``RobertaForSequenceClassification`` and verifies outputs."""
    def __init__(self, pretrained_model, save_dir, model_dir, cuda=False):
        colored_string("Transform weights of RobertaForSequenceClassification", color="green")
        super().__init__(RobertaForSequenceClassification, transformers.RobertaForSequenceClassification, pretrained_model,
                         save_dir, model_dir, RobertaForSequenceClassification_trans, cuda)
    def build_params(self):
        # The classification head needs two extra config values on top of the
        # shared encoder kwargs built by the base class.
        super().build_params()
        self.kwargs["num_labels"] = self.config.num_labels
        self.kwargs["problem_type"] = self.config.problem_type
    def test(self, bs, seq_len):
        """Reload the saved oneflow model and compare its loss/logits against torch.

        Args:
            bs: batch size of the random test batch.
            seq_len: sequence length of the random test batch.
        """
        # Rebuild the oneflow model from the saved hyper-parameters + weights.
        with open(self.param_path, mode="r") as f:
            kwargs = json.load(f)
        self.model_flow = RobertaForSequenceClassification(**kwargs)
        self.model_flow.load_state_dict(flow.load(self.weights_dir))
        colored_string("Testing outputs...", color="green")
        self.model_torch.eval()
        self.model_flow.eval()
        # Set inputs: each helper call returns a (oneflow, torch) tensor pair
        # built from one shared numpy draw, indexed as [0]/[1] below.
        input_ids = self.get_random_tensor(0, 2000, (bs, seq_len), if_int=True)
        attention_mask = self.get_random_tensor(0, 2, sz=(bs, seq_len))
        token_type_ids = self.get_random_tensor(0, self.config.type_vocab_size, (bs, seq_len), if_int=True)
        position_ids = self.get_random_tensor(
            1, self.config.max_position_embeddings - 1, (bs, seq_len), if_int=True)
        head_mask = self.get_random_tensor(
            0, 2, (self.config.num_hidden_layers, self.config.num_attention_heads))
        labels = self.get_random_tensor(0, self.config.num_labels, (bs, ))
        inputs_embeds = None
        output_attentions = True
        output_hidden_states = True
        # Run forward through both frameworks with matching positional args.
        colored_string("Running model with oneflow...", color="green", end="")
        out_flow = self.model_flow(input_ids[0], attention_mask[0], token_type_ids[0], position_ids[0], head_mask[0],
                                   inputs_embeds, labels[0], output_attentions, output_hidden_states)
        loss_flow, scores_flow, pkv_flow, hidden_flow, attn_flow, cross_attn_flow = out_flow
        colored_string("Done.", color="green")
        colored_string("Running model with transformers...", color="green", end="")
        output = self.model_torch(input_ids[1], attention_mask[1], token_type_ids[1], position_ids[1], head_mask[1],
                                  inputs_embeds, labels[1], output_attentions, output_hidden_states, return_dict=True)
        loss_torch = output.loss
        scores_torch = output.logits
        colored_string("Done.", color="green")
        # Calculate errors: mean discrepancy of the loss and the logits.
        colored_string("Calculating errors...", color="green")
        loss_error = self.L1Loss_numpy(loss_flow, loss_torch)
        scores_error = self.L1Loss_numpy(scores_flow, scores_torch)
        colored_string("Loss error:{}".format(
            loss_error.item()), color="green")
        colored_string("Logits error:{}".format(
            scores_error.item()), color="green")
colored_string("Done.", color="green") | [
"oneflow.tensor",
"oneflow.load"
] | [((298, 320), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (313, 320), False, 'import sys\n'), ((695, 728), 'os.path.join', 'os.path.join', (['save_dir', 'model_dir'], {}), '(save_dir, model_dir)\n', (707, 728), False, 'import os\n'), ((756, 794), 'os.path.join', 'os.path.join', (['self.save_dir', '"""weights"""'], {}), "(self.save_dir, 'weights')\n", (768, 794), False, 'import os\n'), ((821, 867), 'os.path.join', 'os.path.join', (['self.save_dir', '"""parameters.json"""'], {}), "(self.save_dir, 'parameters.json')\n", (833, 867), False, 'import os\n'), ((940, 1000), 'transformers.RobertaConfig.from_pretrained', 'transformers.RobertaConfig.from_pretrained', (['pretrained_model'], {}), '(pretrained_model)\n', (982, 1000), False, 'import transformers\n'), ((2465, 2503), 'base_weight_utils.colored_string', 'colored_string', (['"""Done."""'], {'color': '"""green"""'}), "('Done.', color='green')\n", (2479, 2503), False, 'from base_weight_utils import colored_string\n'), ((2512, 2585), 'base_weight_utils.colored_string', 'colored_string', (['"""Generating model with oneflow..."""'], {'color': '"""green"""', 'end': '""""""'}), "('Generating model with oneflow...', color='green', end='')\n", (2526, 2585), False, 'from base_weight_utils import colored_string\n'), ((2664, 2702), 'base_weight_utils.colored_string', 'colored_string', (['"""Done."""'], {'color': '"""green"""'}), "('Done.', color='green')\n", (2678, 2702), False, 'from base_weight_utils import colored_string\n'), ((2897, 2953), 'base_weight_utils.colored_string', 'colored_string', (['"""Transforming weights..."""'], {'color': '"""green"""'}), "('Transforming weights...', color='green')\n", (2911, 2953), False, 'from base_weight_utils import colored_string\n'), ((3039, 3077), 'base_weight_utils.colored_string', 'colored_string', (['"""Done."""'], {'color': '"""green"""'}), "('Done.', color='green')\n", (3053, 3077), False, 'from base_weight_utils import colored_string\n'), ((3459, 
3505), 'numpy.random.randint', 'np.random.randint', ([], {'low': 'low', 'size': 'sz', 'high': 'high'}), '(low=low, size=sz, high=high)\n', (3476, 3505), True, 'import numpy as np\n'), ((3528, 3564), 'oneflow.tensor', 'flow.tensor', (['arr'], {'device': 'self.device'}), '(arr, device=self.device)\n', (3539, 3564), True, 'import oneflow as flow\n'), ((3588, 3625), 'torch.tensor', 'torch.tensor', (['arr'], {'device': 'self.device'}), '(arr, device=self.device)\n', (3600, 3625), False, 'import torch\n'), ((4945, 5006), 'base_weight_utils.colored_string', 'colored_string', (['"""Transform weights of Roberta"""'], {'color': '"""green"""'}), "('Transform weights of Roberta', color='green')\n", (4959, 5006), False, 'from base_weight_utils import colored_string\n'), ((5298, 5315), 'models.roberta.Roberta', 'Roberta', ([], {}), '(**kwargs)\n', (5305, 5315), False, 'from models.roberta import Roberta, RobertaForMaskedLM, RobertaForSequenceClassification\n'), ((5394, 5445), 'base_weight_utils.colored_string', 'colored_string', (['"""Testing outputs..."""'], {'color': '"""green"""'}), "('Testing outputs...', color='green')\n", (5408, 5445), False, 'from base_weight_utils import colored_string\n'), ((6427, 6497), 'base_weight_utils.colored_string', 'colored_string', (['"""Running model with oneflow..."""'], {'color': '"""green"""', 'end': '""""""'}), "('Running model with oneflow...', color='green', end='')\n", (6441, 6497), False, 'from base_weight_utils import colored_string\n'), ((6909, 6947), 'base_weight_utils.colored_string', 'colored_string', (['"""Done."""'], {'color': '"""green"""'}), "('Done.', color='green')\n", (6923, 6947), False, 'from base_weight_utils import colored_string\n'), ((6957, 7032), 'base_weight_utils.colored_string', 'colored_string', (['"""Running model with transformers..."""'], {'color': '"""green"""', 'end': '""""""'}), "('Running model with transformers...', color='green', end='')\n", (6971, 7032), False, 'from base_weight_utils import 
colored_string\n'), ((7643, 7681), 'base_weight_utils.colored_string', 'colored_string', (['"""Done."""'], {'color': '"""green"""'}), "('Done.', color='green')\n", (7657, 7681), False, 'from base_weight_utils import colored_string\n'), ((7718, 7772), 'base_weight_utils.colored_string', 'colored_string', (['"""Calculating errors..."""'], {'color': '"""green"""'}), "('Calculating errors...', color='green')\n", (7732, 7772), False, 'from base_weight_utils import colored_string\n'), ((8109, 8147), 'base_weight_utils.colored_string', 'colored_string', (['"""Done."""'], {'color': '"""green"""'}), "('Done.', color='green')\n", (8123, 8147), False, 'from base_weight_utils import colored_string\n'), ((8283, 8355), 'base_weight_utils.colored_string', 'colored_string', (['"""Transform weights of RobertaForMaskedLM"""'], {'color': '"""green"""'}), "('Transform weights of RobertaForMaskedLM', color='green')\n", (8297, 8355), False, 'from base_weight_utils import colored_string\n'), ((8675, 8703), 'models.roberta.RobertaForMaskedLM', 'RobertaForMaskedLM', ([], {}), '(**kwargs)\n', (8693, 8703), False, 'from models.roberta import Roberta, RobertaForMaskedLM, RobertaForSequenceClassification\n'), ((8782, 8833), 'base_weight_utils.colored_string', 'colored_string', (['"""Testing outputs..."""'], {'color': '"""green"""'}), "('Testing outputs...', color='green')\n", (8796, 8833), False, 'from base_weight_utils import colored_string\n'), ((9840, 9910), 'base_weight_utils.colored_string', 'colored_string', (['"""Running model with oneflow..."""'], {'color': '"""green"""', 'end': '""""""'}), "('Running model with oneflow...', color='green', end='')\n", (9854, 9910), False, 'from base_weight_utils import colored_string\n'), ((10279, 10317), 'base_weight_utils.colored_string', 'colored_string', (['"""Done."""'], {'color': '"""green"""'}), "('Done.', color='green')\n", (10293, 10317), False, 'from base_weight_utils import colored_string\n'), ((10327, 10402), 
'base_weight_utils.colored_string', 'colored_string', (['"""Running model with transformers..."""'], {'color': '"""green"""', 'end': '""""""'}), "('Running model with transformers...', color='green', end='')\n", (10341, 10402), False, 'from base_weight_utils import colored_string\n'), ((10765, 10803), 'base_weight_utils.colored_string', 'colored_string', (['"""Done."""'], {'color': '"""green"""'}), "('Done.', color='green')\n", (10779, 10803), False, 'from base_weight_utils import colored_string\n'), ((10840, 10894), 'base_weight_utils.colored_string', 'colored_string', (['"""Calculating errors..."""'], {'color': '"""green"""'}), "('Calculating errors...', color='green')\n", (10854, 10894), False, 'from base_weight_utils import colored_string\n'), ((11225, 11263), 'base_weight_utils.colored_string', 'colored_string', (['"""Done."""'], {'color': '"""green"""'}), "('Done.', color='green')\n", (11239, 11263), False, 'from base_weight_utils import colored_string\n'), ((11413, 11503), 'base_weight_utils.colored_string', 'colored_string', (['"""Transform weights of RobertaForSequenceClassification"""'], {'color': '"""green"""'}), "('Transform weights of RobertaForSequenceClassification',\n color='green')\n", (11427, 11503), False, 'from base_weight_utils import colored_string\n'), ((12043, 12085), 'models.roberta.RobertaForSequenceClassification', 'RobertaForSequenceClassification', ([], {}), '(**kwargs)\n', (12075, 12085), False, 'from models.roberta import Roberta, RobertaForMaskedLM, RobertaForSequenceClassification\n'), ((12164, 12215), 'base_weight_utils.colored_string', 'colored_string', (['"""Testing outputs..."""'], {'color': '"""green"""'}), "('Testing outputs...', color='green')\n", (12178, 12215), False, 'from base_weight_utils import colored_string\n'), ((13025, 13095), 'base_weight_utils.colored_string', 'colored_string', (['"""Running model with oneflow..."""'], {'color': '"""green"""', 'end': '""""""'}), "('Running model with oneflow...', color='green', 
end='')\n", (13039, 13095), False, 'from base_weight_utils import colored_string\n'), ((13411, 13449), 'base_weight_utils.colored_string', 'colored_string', (['"""Done."""'], {'color': '"""green"""'}), "('Done.', color='green')\n", (13425, 13449), False, 'from base_weight_utils import colored_string\n'), ((13458, 13533), 'base_weight_utils.colored_string', 'colored_string', (['"""Running model with transformers..."""'], {'color': '"""green"""', 'end': '""""""'}), "('Running model with transformers...', color='green', end='')\n", (13472, 13533), False, 'from base_weight_utils import colored_string\n'), ((13843, 13881), 'base_weight_utils.colored_string', 'colored_string', (['"""Done."""'], {'color': '"""green"""'}), "('Done.', color='green')\n", (13857, 13881), False, 'from base_weight_utils import colored_string\n'), ((13918, 13972), 'base_weight_utils.colored_string', 'colored_string', (['"""Calculating errors..."""'], {'color': '"""green"""'}), "('Calculating errors...', color='green')\n", (13932, 13972), False, 'from base_weight_utils import colored_string\n'), ((14303, 14341), 'base_weight_utils.colored_string', 'colored_string', (['"""Done."""'], {'color': '"""green"""'}), "('Done.', color='green')\n", (14317, 14341), False, 'from base_weight_utils import colored_string\n'), ((3857, 3883), 'json.dump', 'json.dump', (['self.kwargs', 'fp'], {}), '(self.kwargs, fp)\n', (3866, 3883), False, 'import json\n'), ((3929, 3958), 'os.path.exists', 'os.path.exists', (['self.save_dir'], {}), '(self.save_dir)\n', (3943, 3958), False, 'import os\n'), ((3972, 4001), 'os.makedirs', 'os.makedirs', (['self.weights_dir'], {}), '(self.weights_dir)\n', (3983, 4001), False, 'import os\n'), ((4114, 4159), 'base_weight_utils.colored_string', 'colored_string', (['"""Model saved."""'], {'color': '"""green"""'}), "('Model saved.', color='green')\n", (4128, 4159), False, 'from base_weight_utils import colored_string\n'), ((5259, 5271), 'json.load', 'json.load', (['f'], {}), '(f)\n', 
(5268, 5271), False, 'import json\n'), ((5356, 5383), 'oneflow.load', 'flow.load', (['self.weights_dir'], {}), '(self.weights_dir)\n', (5365, 5383), True, 'import oneflow as flow\n'), ((8636, 8648), 'json.load', 'json.load', (['f'], {}), '(f)\n', (8645, 8648), False, 'import json\n'), ((8744, 8771), 'oneflow.load', 'flow.load', (['self.weights_dir'], {}), '(self.weights_dir)\n', (8753, 8771), True, 'import oneflow as flow\n'), ((12004, 12016), 'json.load', 'json.load', (['f'], {}), '(f)\n', (12013, 12016), False, 'import json\n'), ((12126, 12153), 'oneflow.load', 'flow.load', (['self.weights_dir'], {}), '(self.weights_dir)\n', (12135, 12153), True, 'import oneflow as flow\n'), ((4519, 4547), 'shutil.rmtree', 'shutil.rmtree', (['self.save_dir'], {}), '(self.save_dir)\n', (4532, 4547), False, 'import shutil\n'), ((4621, 4650), 'os.makedirs', 'os.makedirs', (['self.weights_dir'], {}), '(self.weights_dir)\n', (4632, 4650), False, 'import os\n'), ((4775, 4820), 'base_weight_utils.colored_string', 'colored_string', (['"""Model saved."""'], {'color': '"""green"""'}), "('Model saved.', color='green')\n", (4789, 4820), False, 'from base_weight_utils import colored_string\n'), ((4575, 4604), 'os.path.exists', 'os.path.exists', (['self.save_dir'], {}), '(self.save_dir)\n', (4589, 4604), False, 'import os\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow
from oneflow.framework.docstr.utils import add_docstr
add_docstr(
oneflow.index_select,
"""
input.index_select(dim, index) -> Tensor
The interface is consistent with PyTorch.
The documentation is referenced from: https://pytorch.org/docs/1.11/generated/torch.index_select.html#torch.index_select
Select values along an axis specified by `dim`.
:attr:`index` must be an Int32 Tensor with 1-D.
:attr:`dim` must be in the range of input Dimensions.
value of :attr:`index` must be in the range of the dim-th of input.
Note that ``input`` and ``index`` do not broadcast against each other.
Args:
input (Tensor): the source tensor
dim (int): the axis along which to index
index (Tensor): the 1-D tensor containing the indices to index
For example:
.. code-block:: python
>>> import oneflow as flow
>>> input = flow.tensor([[1,2,3],[4,5,6]], dtype=flow.int32)
>>> input
tensor([[1, 2, 3],
[4, 5, 6]], dtype=oneflow.int32)
>>> index = flow.tensor([0,1], dtype=flow.int32)
>>> output = flow.index_select(input, 1, index)
>>> output
tensor([[1, 2],
[4, 5]], dtype=oneflow.int32)
>>> output = input.index_select(1, index)
>>> output
tensor([[1, 2],
[4, 5]], dtype=oneflow.int32)
..
Feature Stage of Operator [index_select].
- Maintainer List [@QiangX-man, @hjchen2, @strint]
- Current Stage [ ]
- Alpha Stage Check List [ ]
- API(Compatible with PyTorch 1.11, anything incompatible must be noted in API Doc.)[Yes]
- Doc(API Doc must be provided and showed normally on the web page.)[Yes]
- Functionality and its' Test [ ]
- Functionality is highly compatiable with PyTorch 1.11. [Yes]
- eager local [Yes] [@QiangX-man, @hjchen2]
- forward [Yes]
- backward [Yes]
- gpu [Yes]
- cpu [Yes]
- graph local [ ] [@BBuf, @strint, @hjchen2]
- forward [Yes]
- backward [ ]
- gpu [Yes]
- cpu [Yes]
- Exception Handling
- Exception Message and Hint must be provided [ ]
- Beta Stage Check List [ ]
- API(High compatibility with PyTorch 1.11, shouldn't have anything incompatible for a naive reason.)[ ]
- Doc(Same standard as Alpha Stage)[ ]
- Functionality and its' Test [ ]
- eager global [ ]
- forward [ ]
- backward [ ]
- gpu [ ]
- cpu [ ]
- graph gloal [ ]
- forward [ ]
- backward [ ]
- gpu [ ]
- cpu [ ]
- Performance and Scalability(Must be evaluated.)[ ]
- CUDA kernel [ ]
- CPU kernel [ ]
- N nodes M devices [ ]
- Exception Handling [ ]
- Exception Message and Hint must be provided [ ]
- Try you best to do Exception Recovery [ ]
- Stable Stage Check List [ ]
- API(Same standard as Beta Stage)[ ]
- Doc(Same standard as Beta Stage)[ ]
- Functionality and its' Test [ ]
- fp16 and AMP [ ]
- NHWC [ ]
- Performance and Scalability(Must be evaluated.)[ ]
- Exception Handling [ ]
""",
)
| [
"oneflow.framework.docstr.utils.add_docstr"
] | [((660, 4095), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.index_select', '"""\n input.index_select(dim, index) -> Tensor\n\n The interface is consistent with PyTorch. \n The documentation is referenced from: https://pytorch.org/docs/1.11/generated/torch.index_select.html#torch.index_select\n\n Select values along an axis specified by `dim`.\n\n :attr:`index` must be an Int32 Tensor with 1-D.\n :attr:`dim` must be in the range of input Dimensions.\n value of :attr:`index` must be in the range of the dim-th of input.\n Note that ``input`` and ``index`` do not broadcast against each other. \n \n Args:\n input (Tensor): the source tensor\n dim (int): the axis along which to index\n index (Tensor): the 1-D tensor containing the indices to index\n \n For example:\n\n .. code-block:: python\n \n >>> import oneflow as flow\n >>> input = flow.tensor([[1,2,3],[4,5,6]], dtype=flow.int32)\n >>> input \n tensor([[1, 2, 3],\n [4, 5, 6]], dtype=oneflow.int32)\n >>> index = flow.tensor([0,1], dtype=flow.int32)\n >>> output = flow.index_select(input, 1, index)\n >>> output\n tensor([[1, 2],\n [4, 5]], dtype=oneflow.int32)\n >>> output = input.index_select(1, index)\n >>> output\n tensor([[1, 2],\n [4, 5]], dtype=oneflow.int32)\n \n ..\n Feature Stage of Operator [index_select].\n - Maintainer List [@QiangX-man, @hjchen2, @strint]\n - Current Stage [ ]\n - Alpha Stage Check List [ ]\n - API(Compatible with PyTorch 1.11, anything incompatible must be noted in API Doc.)[Yes]\n - Doc(API Doc must be provided and showed normally on the web page.)[Yes]\n - Functionality and its\' Test [ ]\n - Functionality is highly compatiable with PyTorch 1.11. 
[Yes]\n - eager local [Yes] [@QiangX-man, @hjchen2]\n - forward [Yes]\n - backward [Yes]\n - gpu [Yes]\n - cpu [Yes]\n - graph local [ ] [@BBuf, @strint, @hjchen2]\n - forward [Yes]\n - backward [ ]\n - gpu [Yes]\n - cpu [Yes]\n - Exception Handling\n - Exception Message and Hint must be provided [ ]\n - Beta Stage Check List [ ]\n - API(High compatibility with PyTorch 1.11, shouldn\'t have anything incompatible for a naive reason.)[ ]\n - Doc(Same standard as Alpha Stage)[ ]\n - Functionality and its\' Test [ ]\n - eager global [ ]\n - forward [ ]\n - backward [ ]\n - gpu [ ]\n - cpu [ ]\n - graph gloal [ ]\n - forward [ ]\n - backward [ ]\n - gpu [ ]\n - cpu [ ]\n - Performance and Scalability(Must be evaluated.)[ ]\n - CUDA kernel [ ]\n - CPU kernel [ ]\n - N nodes M devices [ ]\n - Exception Handling [ ]\n - Exception Message and Hint must be provided [ ]\n - Try you best to do Exception Recovery [ ]\n - Stable Stage Check List [ ]\n - API(Same standard as Beta Stage)[ ]\n - Doc(Same standard as Beta Stage)[ ]\n - Functionality and its\' Test [ ]\n - fp16 and AMP [ ]\n - NHWC [ ]\n - Performance and Scalability(Must be evaluated.)[ ]\n - Exception Handling [ ]\n """'], {}), '(oneflow.index_select,\n """\n input.index_select(dim, index) -> Tensor\n\n The interface is consistent with PyTorch. \n The documentation is referenced from: https://pytorch.org/docs/1.11/generated/torch.index_select.html#torch.index_select\n\n Select values along an axis specified by `dim`.\n\n :attr:`index` must be an Int32 Tensor with 1-D.\n :attr:`dim` must be in the range of input Dimensions.\n value of :attr:`index` must be in the range of the dim-th of input.\n Note that ``input`` and ``index`` do not broadcast against each other. \n \n Args:\n input (Tensor): the source tensor\n dim (int): the axis along which to index\n index (Tensor): the 1-D tensor containing the indices to index\n \n For example:\n\n .. 
code-block:: python\n \n >>> import oneflow as flow\n >>> input = flow.tensor([[1,2,3],[4,5,6]], dtype=flow.int32)\n >>> input \n tensor([[1, 2, 3],\n [4, 5, 6]], dtype=oneflow.int32)\n >>> index = flow.tensor([0,1], dtype=flow.int32)\n >>> output = flow.index_select(input, 1, index)\n >>> output\n tensor([[1, 2],\n [4, 5]], dtype=oneflow.int32)\n >>> output = input.index_select(1, index)\n >>> output\n tensor([[1, 2],\n [4, 5]], dtype=oneflow.int32)\n \n ..\n Feature Stage of Operator [index_select].\n - Maintainer List [@QiangX-man, @hjchen2, @strint]\n - Current Stage [ ]\n - Alpha Stage Check List [ ]\n - API(Compatible with PyTorch 1.11, anything incompatible must be noted in API Doc.)[Yes]\n - Doc(API Doc must be provided and showed normally on the web page.)[Yes]\n - Functionality and its\' Test [ ]\n - Functionality is highly compatiable with PyTorch 1.11. [Yes]\n - eager local [Yes] [@QiangX-man, @hjchen2]\n - forward [Yes]\n - backward [Yes]\n - gpu [Yes]\n - cpu [Yes]\n - graph local [ ] [@BBuf, @strint, @hjchen2]\n - forward [Yes]\n - backward [ ]\n - gpu [Yes]\n - cpu [Yes]\n - Exception Handling\n - Exception Message and Hint must be provided [ ]\n - Beta Stage Check List [ ]\n - API(High compatibility with PyTorch 1.11, shouldn\'t have anything incompatible for a naive reason.)[ ]\n - Doc(Same standard as Alpha Stage)[ ]\n - Functionality and its\' Test [ ]\n - eager global [ ]\n - forward [ ]\n - backward [ ]\n - gpu [ ]\n - cpu [ ]\n - graph gloal [ ]\n - forward [ ]\n - backward [ ]\n - gpu [ ]\n - cpu [ ]\n - Performance and Scalability(Must be evaluated.)[ ]\n - CUDA kernel [ ]\n - CPU kernel [ ]\n - N nodes M devices [ ]\n - Exception Handling [ ]\n - Exception Message and Hint must be provided [ ]\n - Try you best to do Exception Recovery [ ]\n - Stable Stage Check List [ ]\n - API(Same standard as Beta Stage)[ ]\n - Doc(Same standard as Beta Stage)[ ]\n - Functionality and its\' Test [ ]\n - fp16 and AMP [ ]\n - NHWC [ ]\n - Performance and 
Scalability(Must be evaluated.)[ ]\n - Exception Handling [ ]\n """\n )\n', (670, 4095), False, 'from oneflow.framework.docstr.utils import add_docstr\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import random
import unittest
from collections import OrderedDict
import numpy as np
from test_util import (
GenArgDict,
GenArgList,
type_name_to_flow_type,
type_name_to_np_type,
)
import oneflow.compatible.single_client.unittest
from oneflow.compatible import single_client as flow
from oneflow.compatible.single_client import typing as oft
def _compare_with_np(test_case, x_shape, dtype):
    """Check that flow.Size reports the same extents as a numpy array built
    with the same shape, dimension by dimension."""
    np_dtype = type_name_to_np_type[dtype]
    arr = np.random.randn(*x_shape).astype(np_dtype)
    size = flow.Size(x_shape)
    for of_extent, np_extent in zip(size, arr.shape):
        test_case.assertEqual(of_extent, np_extent)
@flow.unittest.skip_unless_1n1d()
class TestSize(flow.unittest.TestCase):
    """Unit tests for the flow.Size sequence type: construction, iteration,
    equality, counting, index search and slicing."""

    def test_size(test_case):
        # flow.Size accepts a tuple, a list, or another Size.
        for src in [(4, 3, 10, 5), [4, 3, 10, 5]]:
            size = flow.Size(src)
            test_case.assertTrue(size[0] == 4)
            test_case.assertTrue(size[2] == 10)
            test_case.assertTrue(len(size) == 4)
        size = flow.Size(size)
        test_case.assertTrue(size[0] == 4)
        test_case.assertTrue(size[2] == 10)
        test_case.assertTrue(len(size) == 4)
        # Negative indices count from the end.
        test_case.assertTrue(size[-1] == 5)
        test_case.assertTrue(size[-4] == 4)

    def test_unpack(test_case):
        # A Size supports tuple-style unpacking.
        (one, two, three, four) = flow.Size((1, 2, 3, 4))
        for got, want in zip((one, two, three, four), (1, 2, 3, 4)):
            test_case.assertEqual(got, want)

    def test_offical(test_case):
        # Cross-check against numpy for a range of ranks and dtypes.
        arg_dict = OrderedDict()
        arg_dict["x_shape"] = [
            (10,),
            (20, 10),
            (20, 10, 10),
            (20, 10, 10, 3),
            (20, 10, 10, 3, 3),
        ]
        arg_dict["dtype"] = ["float32", "int32", "double"]
        for kwargs in GenArgDict(arg_dict):
            _compare_with_np(test_case, **kwargs)

    def test_equal(test_case):
        size = flow.Size((2, 3))
        # Size compares equal to tuples and other Sizes with matching
        # extents, but not to lists or unrelated types.
        comparisons = [
            ((2, 3), True),
            ((3, 2), False),
            (flow.Size((2, 3)), True),
            (flow.Size((3, 2)), False),
            ([2, 3], False),
            (dict(), False),
        ]
        for other, expected in comparisons:
            test_case.assertEqual(size == other, expected)

    def test_numel(test_case):
        # numel() is the product of all extents.
        test_case.assertEqual(flow.Size((1, 2, 3, 4)).numel(), 24)

    def test_count(test_case):
        size = flow.Size((2, 2, 3, 4))
        for value, occurrences in [(1, 0), (2, 2), (3, 1), (4, 1)]:
            test_case.assertEqual(size.count(value), occurrences)

    def test_index(test_case):
        size = flow.Size((2, 3, 2, 4, 4))
        found = [
            ((2,), {}, 0),
            ((2,), {"start": 0}, 0),
            ((2,), {"start": 0, "end": 20}, 0),
            ((2,), {"start": 1, "end": 20}, 2),
            ((4,), {}, 3),
            ((4,), {"start": 4}, 4),
        ]
        for args, kwargs, expected in found:
            test_case.assertEqual(size.index(*args, **kwargs), expected)
        # Absent values, or values outside the search window, must raise.
        missing = [
            ((4,), {"start": 0, "end": 3}),
            ((5,), {}),
            ((2,), {"start": 3}),
        ]
        for args, kwargs in missing:
            with test_case.assertRaises(ValueError):
                size.index(*args, **kwargs)

    def test_slicing(test_case):
        size = flow.Size([2, 3, 4, 5])
        windows = [
            (slice(1, 3), flow.Size((3, 4))),
            (slice(1, None), flow.Size((3, 4, 5))),
            (slice(None, 2), (2, 3)),
            (slice(-3, None), flow.Size((3, 4, 5))),
            (slice(-3, -1), flow.Size((3, 4))),
        ]
        for window, expected in windows:
            test_case.assertTrue(size[window] == expected)
if __name__ == "__main__":
    # Run this test module directly under the standard unittest runner.
    unittest.main()
| [
"oneflow.compatible.single_client.unittest.skip_unless_1n1d",
"oneflow.compatible.single_client.Size"
] | [((1203, 1235), 'oneflow.compatible.single_client.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (1233, 1235), True, 'from oneflow.compatible import single_client as flow\n'), ((1092, 1110), 'oneflow.compatible.single_client.Size', 'flow.Size', (['x_shape'], {}), '(x_shape)\n', (1101, 1110), True, 'from oneflow.compatible import single_client as flow\n'), ((4386, 4401), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4399, 4401), False, 'import unittest\n'), ((1321, 1345), 'oneflow.compatible.single_client.Size', 'flow.Size', (['(4, 3, 10, 5)'], {}), '((4, 3, 10, 5))\n', (1330, 1345), True, 'from oneflow.compatible import single_client as flow\n'), ((1493, 1517), 'oneflow.compatible.single_client.Size', 'flow.Size', (['[4, 3, 10, 5]'], {}), '([4, 3, 10, 5])\n', (1502, 1517), True, 'from oneflow.compatible import single_client as flow\n'), ((1665, 1680), 'oneflow.compatible.single_client.Size', 'flow.Size', (['size'], {}), '(size)\n', (1674, 1680), True, 'from oneflow.compatible import single_client as flow\n'), ((1968, 1991), 'oneflow.compatible.single_client.Size', 'flow.Size', (['(1, 2, 3, 4)'], {}), '((1, 2, 3, 4))\n', (1977, 1991), True, 'from oneflow.compatible import single_client as flow\n'), ((2200, 2213), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2211, 2213), False, 'from collections import OrderedDict\n'), ((2462, 2482), 'test_util.GenArgDict', 'GenArgDict', (['arg_dict'], {}), '(arg_dict)\n', (2472, 2482), False, 'from test_util import GenArgDict, GenArgList, type_name_to_flow_type, type_name_to_np_type\n'), ((2578, 2595), 'oneflow.compatible.single_client.Size', 'flow.Size', (['(2, 3)'], {}), '((2, 3))\n', (2587, 2595), True, 'from oneflow.compatible import single_client as flow\n'), ((2981, 3004), 'oneflow.compatible.single_client.Size', 'flow.Size', (['(1, 2, 3, 4)'], {}), '((1, 2, 3, 4))\n', (2990, 3004), True, 'from oneflow.compatible import single_client as flow\n'), ((3100, 3123), 
'oneflow.compatible.single_client.Size', 'flow.Size', (['(2, 2, 3, 4)'], {}), '((2, 2, 3, 4))\n', (3109, 3123), True, 'from oneflow.compatible import single_client as flow\n'), ((3363, 3389), 'oneflow.compatible.single_client.Size', 'flow.Size', (['(2, 3, 2, 4, 4)'], {}), '((2, 3, 2, 4, 4))\n', (3372, 3389), True, 'from oneflow.compatible import single_client as flow\n'), ((4029, 4052), 'oneflow.compatible.single_client.Size', 'flow.Size', (['[2, 3, 4, 5]'], {}), '([2, 3, 4, 5])\n', (4038, 4052), True, 'from oneflow.compatible import single_client as flow\n'), ((1020, 1045), 'numpy.random.randn', 'np.random.randn', (['*x_shape'], {}), '(*x_shape)\n', (1035, 1045), True, 'import numpy as np\n'), ((2739, 2756), 'oneflow.compatible.single_client.Size', 'flow.Size', (['(2, 3)'], {}), '((2, 3))\n', (2748, 2756), True, 'from oneflow.compatible import single_client as flow\n'), ((2802, 2819), 'oneflow.compatible.single_client.Size', 'flow.Size', (['(3, 2)'], {}), '((3, 2))\n', (2811, 2819), True, 'from oneflow.compatible import single_client as flow\n'), ((4095, 4112), 'oneflow.compatible.single_client.Size', 'flow.Size', (['(3, 4)'], {}), '((3, 4))\n', (4104, 4112), True, 'from oneflow.compatible import single_client as flow\n'), ((4155, 4175), 'oneflow.compatible.single_client.Size', 'flow.Size', (['(3, 4, 5)'], {}), '((3, 4, 5))\n', (4164, 4175), True, 'from oneflow.compatible import single_client as flow\n'), ((4268, 4288), 'oneflow.compatible.single_client.Size', 'flow.Size', (['(3, 4, 5)'], {}), '((3, 4, 5))\n', (4277, 4288), True, 'from oneflow.compatible import single_client as flow\n'), ((4334, 4351), 'oneflow.compatible.single_client.Size', 'flow.Size', (['(3, 4)'], {}), '((3, 4))\n', (4343, 4351), True, 'from oneflow.compatible import single_client as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import os
from collections import OrderedDict
import numpy as np
import oneflow as flow
import oneflow._oneflow_internal
import tensorflow as tf
from test_util import GenArgList
import oneflow.typing as oft
# Make TensorFlow allocate GPU memory on demand instead of reserving all
# device memory up front — presumably so it can share the GPUs with the
# OneFlow jobs run in the same process.
gpus = tf.config.experimental.list_physical_devices("GPU")
for gpu in gpus:
    tf.config.experimental.set_memory_growth(gpu, True)
def compare_with_tensorflow(
    device_type, data_type, input_shape, axis, keepdims, rtol=1e-5, atol=1e-5
):
    """Run reduce_sum on the same random input in OneFlow and TensorFlow
    and assert the two results agree within the given tolerances.

    Args:
        device_type: "cpu" or "gpu" — placement for the OneFlow job.
        data_type: "float16" computes in half precision (result cast back
            to float32); anything else computes in the default float type.
        input_shape: shape of the random input tensor.
        axis: axis (or list of axes, or None) to reduce over.
        keepdims: whether reduced axes are kept with extent 1.
        rtol, atol: tolerances passed to np.allclose.
    """
    assert device_type in ["gpu", "cpu"]
    flow.clear_default_session()
    func_config = flow.FunctionConfig()
    func_config.default_data_type(flow.float)
    @flow.global_function(function_config=func_config)
    def ReduceSumJob(x: oft.Numpy.Placeholder(input_shape)):
        with flow.scope.placement(device_type, "0:0"):
            if data_type == "float16":
                # Compute the reduction in fp16 but return fp32 so the
                # output can be compared against the reference directly.
                y = flow.cast(
                    flow.math.reduce_sum(
                        flow.cast(x, dtype=flow.float16), axis=axis, keepdims=keepdims
                    ),
                    dtype=flow.float32,
                )
            else:
                y = flow.math.reduce_sum(x, axis=axis, keepdims=keepdims)
        return y
    # Round-trip through fp16 so the input only contains fp16-representable
    # values; both frameworks then reduce exactly the same numbers.
    x = np.random.rand(*input_shape).astype(np.float16).astype(np.float32)
    # OneFlow result
    of_out = ReduceSumJob(x).get()
    # TensorFlow reference; mirror the fp16 round-trip on the output side.
    tf_out = tf.math.reduce_sum(x, axis=axis, keepdims=keepdims)
    if data_type == "float16":
        tf_out = tf.cast(tf_out, dtype=tf.float16)
        tf_out = tf.cast(tf_out, dtype=tf.float32)
    # On failure, surface both arrays in the assertion message.
    assert np.allclose(of_out.numpy(), tf_out.numpy(), rtol=rtol, atol=atol), (
        of_out.numpy(),
        tf_out.numpy(),
    )
@flow.unittest.skip_unless_1n2d()
class TestReduceSum(flow.unittest.TestCase):
    """Compare flow.math.reduce_sum against tf.math.reduce_sum over a grid
    of shapes, axes, dtypes and keepdims settings.

    The four comparison tests previously duplicated the same OrderedDict
    construction; the shared grid now comes from ``_arg_dict``.
    """

    @staticmethod
    def _arg_dict(input_shape, axis_values):
        """Build the argument grid shared by all comparison tests.

        Args:
            input_shape: single input shape to test.
            axis_values: list of axis arguments to sweep over.
        """
        arg_dict = OrderedDict()
        arg_dict["device_type"] = ["gpu"]
        arg_dict["data_type"] = ["float32", "float16"]
        arg_dict["input_shape"] = [input_shape]
        arg_dict["axis"] = axis_values
        arg_dict["keepdims"] = [True, False]
        return arg_dict

    def test_reduce_sum(test_case):
        for arg in GenArgList(test_case._arg_dict((2, 4, 8), [None, [1], [0, 2]])):
            compare_with_tensorflow(*arg)

    def test_col_reduce(test_case):
        # Column reduction accumulates 32 values; fp16 is noisy here, so
        # the absolute tolerance is loosened to 1e-1.
        for arg in GenArgList(test_case._arg_dict((32, 2), [[0]])):
            compare_with_tensorflow(*arg, atol=1e-1)

    def test_row_reduce(test_case):
        for arg in GenArgList(test_case._arg_dict((2, 64), [[1]])):
            compare_with_tensorflow(*arg)

    def test_scalar(test_case):
        for arg in GenArgList(test_case._arg_dict((64, 2), [[0, 1]])):
            compare_with_tensorflow(*arg)

    def test_split_axis_reduced(test_case):
        """Reducing over all axes on a 2-GPU placement must leave the
        output with no valid split axis."""
        flow.config.gpu_device_num(2)
        func_config = flow.FunctionConfig()
        func_config.default_logical_view(flow.scope.consistent_view())

        @flow.global_function(function_config=func_config)
        def Foo(x: oft.Numpy.Placeholder((10,))):
            y = flow.math.reduce_sum(x)
            test_case.assertTrue(y.split_axis == flow.INVALID_SPLIT_AXIS)

        Foo(np.ndarray((10,), dtype=np.float32))
if __name__ == "__main__":
    # Run this test module directly under the standard unittest runner.
    unittest.main()
| [
"oneflow.scope.placement",
"oneflow.cast",
"oneflow.unittest.skip_unless_1n2d",
"oneflow.scope.consistent_view",
"oneflow.clear_default_session",
"oneflow.global_function",
"oneflow.typing.Numpy.Placeholder",
"oneflow.math.reduce_sum",
"oneflow.config.gpu_device_num",
"oneflow.FunctionConfig"
] | [((822, 873), 'tensorflow.config.experimental.list_physical_devices', 'tf.config.experimental.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (866, 873), True, 'import tensorflow as tf\n'), ((2442, 2474), 'oneflow.unittest.skip_unless_1n2d', 'flow.unittest.skip_unless_1n2d', ([], {}), '()\n', (2472, 2474), True, 'import oneflow as flow\n'), ((895, 946), 'tensorflow.config.experimental.set_memory_growth', 'tf.config.experimental.set_memory_growth', (['gpu', '(True)'], {}), '(gpu, True)\n', (935, 946), True, 'import tensorflow as tf\n'), ((1104, 1132), 'oneflow.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (1130, 1132), True, 'import oneflow as flow\n'), ((1151, 1172), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (1170, 1172), True, 'import oneflow as flow\n'), ((1225, 1274), 'oneflow.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (1245, 1274), True, 'import oneflow as flow\n'), ((1939, 1990), 'tensorflow.math.reduce_sum', 'tf.math.reduce_sum', (['x'], {'axis': 'axis', 'keepdims': 'keepdims'}), '(x, axis=axis, keepdims=keepdims)\n', (1957, 1990), True, 'import tensorflow as tf\n'), ((4538, 4553), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4551, 4553), False, 'import unittest\n'), ((2039, 2072), 'tensorflow.cast', 'tf.cast', (['tf_out'], {'dtype': 'tf.float16'}), '(tf_out, dtype=tf.float16)\n', (2046, 2072), True, 'import tensorflow as tf\n'), ((2090, 2123), 'tensorflow.cast', 'tf.cast', (['tf_out'], {'dtype': 'tf.float32'}), '(tf_out, dtype=tf.float32)\n', (2097, 2123), True, 'import tensorflow as tf\n'), ((2575, 2588), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2586, 2588), False, 'from collections import OrderedDict\n'), ((2843, 2863), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (2853, 2863), False, 'from test_util import GenArgList\n'), ((2963, 2976), 
'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2974, 2976), False, 'from collections import OrderedDict\n'), ((3215, 3235), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (3225, 3235), False, 'from test_util import GenArgList\n'), ((3346, 3359), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3357, 3359), False, 'from collections import OrderedDict\n'), ((3598, 3618), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (3608, 3618), False, 'from test_util import GenArgList\n'), ((3714, 3727), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3725, 3727), False, 'from collections import OrderedDict\n'), ((3969, 3989), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (3979, 3989), False, 'from test_util import GenArgList\n'), ((4086, 4115), 'oneflow.config.gpu_device_num', 'flow.config.gpu_device_num', (['(2)'], {}), '(2)\n', (4112, 4115), True, 'import oneflow as flow\n'), ((4138, 4159), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (4157, 4159), True, 'import oneflow as flow\n'), ((4241, 4290), 'oneflow.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (4261, 4290), True, 'import oneflow as flow\n'), ((1299, 1333), 'oneflow.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['input_shape'], {}), '(input_shape)\n', (1320, 1333), True, 'import oneflow.typing as oft\n'), ((1349, 1389), 'oneflow.scope.placement', 'flow.scope.placement', (['device_type', '"""0:0"""'], {}), "(device_type, '0:0')\n", (1369, 1389), True, 'import oneflow as flow\n'), ((4201, 4229), 'oneflow.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (4227, 4229), True, 'import oneflow as flow\n'), ((4357, 4380), 'oneflow.math.reduce_sum', 'flow.math.reduce_sum', (['x'], {}), '(x)\n', (4377, 4380), True, 'import oneflow as flow\n'), ((4468, 4503), 'numpy.ndarray', 'np.ndarray', 
(['(10,)'], {'dtype': 'np.float32'}), '((10,), dtype=np.float32)\n', (4478, 4503), True, 'import numpy as np\n'), ((1709, 1762), 'oneflow.math.reduce_sum', 'flow.math.reduce_sum', (['x'], {'axis': 'axis', 'keepdims': 'keepdims'}), '(x, axis=axis, keepdims=keepdims)\n', (1729, 1762), True, 'import oneflow as flow\n'), ((4310, 4338), 'oneflow.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['(10,)'], {}), '((10,))\n', (4331, 4338), True, 'import oneflow.typing as oft\n'), ((1793, 1821), 'numpy.random.rand', 'np.random.rand', (['*input_shape'], {}), '(*input_shape)\n', (1807, 1821), True, 'import numpy as np\n'), ((1527, 1559), 'oneflow.cast', 'flow.cast', (['x'], {'dtype': 'flow.float16'}), '(x, dtype=flow.float16)\n', (1536, 1559), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import numpy as np
import oneflow.experimental as flow
from test_util import GenArgList, type_name_to_flow_type
def _test_sort(test_case, data_shape, axis, descending, data_type, device):
    """Compare flow.sort against a numpy reference along one axis."""
    tensor = flow.Tensor(
        np.random.randn(*data_shape),
        dtype=type_name_to_flow_type[data_type],
        device=flow.device(device),
    )
    of_values, of_indices = flow.sort(tensor, dim=axis, descending=descending)
    # Emulate descending order by sorting the negated array ascending.
    ref = tensor.numpy()
    if descending:
        ref = -ref
    expected_indices = np.argsort(ref, axis=axis)
    sorted_ref = np.sort(ref, axis=axis)
    expected_values = -sorted_ref if descending else sorted_ref
    test_case.assertTrue(
        np.array_equal(of_values.numpy().flatten(), expected_values.flatten())
    )
    test_case.assertTrue(
        np.array_equal(of_indices.numpy().flatten(), expected_indices.flatten())
    )
def _test_tensor_sort(test_case, data_shape, axis, descending, data_type, device):
    """Compare the Tensor.sort method against a numpy reference along one axis."""
    tensor = flow.Tensor(
        np.random.randn(*data_shape),
        dtype=type_name_to_flow_type[data_type],
        device=flow.device(device),
    )
    of_values, of_indices = tensor.sort(dim=axis, descending=descending)
    # Emulate descending order by sorting the negated array ascending.
    ref = -tensor.numpy() if descending else tensor.numpy()
    expected_indices = np.argsort(ref, axis=axis)
    sorted_ref = np.sort(ref, axis=axis)
    expected_values = -sorted_ref if descending else sorted_ref
    test_case.assertTrue(
        np.array_equal(of_values.numpy().flatten(), expected_values.flatten())
    )
    test_case.assertTrue(
        np.array_equal(of_indices.numpy().flatten(), expected_indices.flatten())
    )
@unittest.skipIf(
    not flow.unittest.env.eager_execution_enabled(),
    ".numpy() doesn't work in lazy mode",
)
class TestSort(flow.unittest.TestCase):
    """Exercise flow.sort and Tensor.sort over a grid of shapes, axes,
    sort orders, dtypes and devices."""

    def test_sort(test_case):
        grid = OrderedDict()
        grid["test_fun"] = [_test_sort, _test_tensor_sort]
        grid["data_shape"] = [(2, 6, 5, 4), (3, 4, 8)]
        grid["axis"] = [-1, 0, 2]
        grid["descending"] = [True, False]
        grid["data_type"] = ["double", "float32", "int32"]
        grid["device"] = ["cpu", "cuda"]
        for case in GenArgList(grid):
            run, *params = case
            run(test_case, *params)
if __name__ == "__main__":
    # Run this test module directly under the standard unittest runner.
    unittest.main()
| [
"oneflow.experimental.sort",
"oneflow.experimental.unittest.env.eager_execution_enabled",
"oneflow.experimental.device"
] | [((1018, 1067), 'oneflow.experimental.sort', 'flow.sort', (['input'], {'dim': 'axis', 'descending': 'descending'}), '(input, dim=axis, descending=descending)\n', (1027, 1067), True, 'import oneflow.experimental as flow\n'), ((1148, 1179), 'numpy.argsort', 'np.argsort', (['np_input'], {'axis': 'axis'}), '(np_input, axis=axis)\n', (1158, 1179), True, 'import numpy as np\n'), ((1193, 1221), 'numpy.sort', 'np.sort', (['np_input'], {'axis': 'axis'}), '(np_input, axis=axis)\n', (1200, 1221), True, 'import numpy as np\n'), ((1877, 1908), 'numpy.argsort', 'np.argsort', (['np_input'], {'axis': 'axis'}), '(np_input, axis=axis)\n', (1887, 1908), True, 'import numpy as np\n'), ((1922, 1950), 'numpy.sort', 'np.sort', (['np_input'], {'axis': 'axis'}), '(np_input, axis=axis)\n', (1929, 1950), True, 'import numpy as np\n'), ((2862, 2877), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2875, 2877), False, 'import unittest\n'), ((867, 895), 'numpy.random.randn', 'np.random.randn', (['*data_shape'], {}), '(*data_shape)\n', (882, 895), True, 'import numpy as np\n'), ((1602, 1630), 'numpy.random.randn', 'np.random.randn', (['*data_shape'], {}), '(*data_shape)\n', (1617, 1630), True, 'import numpy as np\n'), ((2419, 2432), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2430, 2432), False, 'from collections import OrderedDict\n'), ((2767, 2787), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (2777, 2787), False, 'from test_util import GenArgList, type_name_to_flow_type\n'), ((2241, 2284), 'oneflow.experimental.unittest.env.eager_execution_enabled', 'flow.unittest.env.eager_execution_enabled', ([], {}), '()\n', (2282, 2284), True, 'import oneflow.experimental as flow\n'), ((961, 980), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (972, 980), True, 'import oneflow.experimental as flow\n'), ((1696, 1715), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (1707, 1715), True, 'import 
oneflow.experimental as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import random
import unittest
from collections import OrderedDict
from typing import Dict
import numpy as np
from test_util import GenArgList
import oneflow.compatible.single_client.unittest
from oneflow.compatible import single_client as flow
from oneflow.compatible.single_client import typing as tp
def _compare_elu_with_np(
    input_shape, alpha, device_type, value_type, machine_ids, device_counts
):
    """Run one SGD training step through flow.nn.elu and check both the
    forward output and the input gradient against a numpy reference.

    Args:
        input_shape: shape of the random test input.
        alpha: ELU ``alpha`` coefficient.
        device_type: "cpu" or "gpu".
        value_type: pair ``(numpy_dtype, flow_dtype)`` — see ``_gen_arg_dict``.
        machine_ids: placement string such as "0:0" or "0:0-1".
        device_counts: number of devices to configure.
    """
    # For fp16, quantize the input to fp16-representable values first and
    # then hold it as value_type[0] (np.float32) for the reference math.
    if value_type[1] == flow.float16:
        input_1 = np.random.uniform(-1, 1, size=input_shape).astype(np.float16)
        input_1 = np.array(input_1, dtype=value_type[0])
    else:
        input_1 = np.random.uniform(-1, 1, size=input_shape).astype(value_type[0])
    assert device_type in ["cpu", "gpu"]
    flow.clear_default_session()
    if device_type == "cpu":
        flow.config.cpu_device_num(device_counts)
    else:
        flow.config.gpu_device_num(device_counts)
    func_config = flow.FunctionConfig()
    func_config.default_placement_scope(flow.scope.placement(device_type, machine_ids))
    # The fp16 job keeps a float32 default data type; the fp16 casts are
    # done explicitly inside the job function below.
    if value_type[1] == flow.float16:
        func_config.default_data_type(flow.float32)
    else:
        func_config.default_data_type(value_type[1])
    def np_elu(input, alpha):
        # Reference forward: x if x > 0 else alpha * (exp(x) - 1),
        # computed element-wise on the flattened array.
        elem_cnt = input.size
        init_shape = input.shape
        input = input.flatten()
        out = np.zeros_like(input)
        for i in range(elem_cnt):
            if input[i] > 0:
                out[i] = input[i]
            else:
                out[i] = alpha * (np.exp(input[i]) - 1)
        out = np.reshape(out, init_shape)
        return np.array(out).astype(value_type[0])
    np_out_elu = np_elu(input_1, alpha)
    def np_diff(input, alpha):
        # Reference gradient of ELU: 1 if x > 0 else alpha * exp(x).
        input_shape = input.shape
        input = input.flatten()
        elem_cnt = input.size
        diff = np.zeros(shape=(elem_cnt,))
        for i in range(elem_cnt):
            if input[i] > 0:
                diff[i] = 1
            else:
                diff[i] = alpha * np.exp(input[i])
        diff = np.reshape(diff, newshape=input_shape)
        diff = np.array(diff, dtype=value_type[0])
        return diff
    _np_grad = np_diff(input_1, alpha)
    def assert_prediction_grad(blob: tp.Numpy):
        # watch_diff callback: compare the backward blob against the
        # numpy gradient; fp16 uses a looser tolerance.
        if value_type[1] == flow.float16:
            assert np.allclose(blob, _np_grad, atol=0.001)
        else:
            assert np.allclose(blob, _np_grad, atol=1e-05)
    if value_type[1] == flow.float16:
        # fp16 variant: add a zero-initialized variable so a gradient
        # exists, cast to fp16 for the ELU, cast back to fp32 to train on.
        @flow.global_function(type="train", function_config=func_config)
        def oneflow_elu(
            of_input_1: tp.Numpy.Placeholder(shape=input_1.shape, dtype=flow.float32)
        ) -> tp.Numpy:
            with flow.scope.placement(device_type, "0:0"):
                v = flow.get_variable(
                    shape=input_1.shape,
                    dtype=flow.float32,
                    initializer=flow.zeros_initializer(),
                    name="x_var",
                )
                x_var = of_input_1 + v
                x_f16 = flow.cast(x_var, flow.float16)
            of_elu_out_f16 = flow.nn.elu(x_f16, alpha)
            of_elu_out_f32 = flow.cast(of_elu_out_f16, flow.float32)
            with flow.scope.placement(device_type, "0:0"):
                flow.optimizer.SGD(
                    flow.optimizer.PiecewiseConstantScheduler([], [0.001]), momentum=0
                ).minimize(of_elu_out_f32)
            flow.watch_diff(x_var, assert_prediction_grad)
            return of_elu_out_f32
    else:
        # Full-precision variant: same structure without the fp16 casts.
        @flow.global_function(type="train", function_config=func_config)
        def oneflow_elu(
            of_input_1: tp.Numpy.Placeholder(shape=input_1.shape, dtype=value_type[1])
        ) -> tp.Numpy:
            with flow.scope.placement(device_type, "0:0"):
                v = flow.get_variable(
                    shape=input_1.shape,
                    dtype=value_type[1],
                    initializer=flow.zeros_initializer(),
                    name="x_var",
                )
                x_var = of_input_1 + v
            flow.watch_diff(x_var, assert_prediction_grad)
            of_elu_out = flow.nn.elu(x_var, alpha)
            with flow.scope.placement(device_type, "0:0"):
                flow.optimizer.SGD(
                    flow.optimizer.PiecewiseConstantScheduler([], [0.001]), momentum=0
                ).minimize(of_elu_out)
            return of_elu_out
    # Forward check: fp16 again gets the looser tolerance.
    of_out_elu = oneflow_elu(input_1)
    if value_type[1] == flow.float16:
        assert np.allclose(of_out_elu, np_out_elu, atol=0.001)
    else:
        assert np.allclose(of_out_elu, np_out_elu, atol=1e-05)
def _gen_arg_dict(shape, alpha, device_type, value_type, machine_ids, device_counts):
    """Assemble the ordered argument grid consumed by GenArgList.

    Each value is wrapped in a single-element list except "value_type",
    which expands to (numpy_dtype, flow_dtype) pairs.
    """
    # The "float" + CPU combination sweeps only fp32/fp64; every other
    # combination also includes the fp16 pair.
    if value_type == "float" and device_type == "cpu":
        dtype_pairs = [
            (np.float32, flow.float32),
            (np.float64, flow.float64),
        ]
    else:
        dtype_pairs = [
            (np.float32, flow.float16),
            (np.float32, flow.float32),
            (np.float64, flow.float64),
        ]
    return OrderedDict(
        [
            ("input_shape", [shape]),
            ("alpha", [alpha]),
            ("device_type", [device_type]),
            ("value_type", dtype_pairs),
            ("machine_ids", [machine_ids]),
            ("device_counts", [device_counts]),
        ]
    )
@flow.unittest.skip_unless_1n1d()
class Testelu1n1d(flow.unittest.TestCase):
    """Single-node, single-device ELU tests."""

    def test_elu_cpu(test_case):
        cases = _gen_arg_dict(
            shape=(3, 3),
            alpha=1.0,
            device_type="cpu",
            value_type="float",
            machine_ids="0:0",
            device_counts=1,
        )
        for case in GenArgList(cases):
            _compare_elu_with_np(*case)

    @unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
    def test_elu_gpu(test_case):
        cases = _gen_arg_dict(
            shape=(4, 4),
            alpha=2.0,
            device_type="gpu",
            value_type="float",
            machine_ids="0:0",
            device_counts=1,
        )
        for case in GenArgList(cases):
            _compare_elu_with_np(*case)
@flow.unittest.skip_unless_1n2d()
class Testelu1n2d(flow.unittest.TestCase):
    """Single-node, two-device ELU test (GPU only)."""

    @unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
    def test_elu_gpu_1n2d(test_case):
        cases = _gen_arg_dict(
            shape=(4, 8, 4),
            alpha=1.0,
            device_type="gpu",
            value_type="float",
            machine_ids="0:0-1",
            device_counts=2,
        )
        for case in GenArgList(cases):
            _compare_elu_with_np(*case)
if __name__ == "__main__":
    # Run this test module directly under the standard unittest runner.
    unittest.main()
| [
"oneflow.compatible.single_client.zeros_initializer",
"oneflow.compatible.single_client.nn.elu",
"oneflow.compatible.single_client.scope.placement",
"oneflow.compatible.single_client.FunctionConfig",
"oneflow.compatible.single_client.watch_diff",
"oneflow.compatible.single_client.unittest.skip_unless_1n1d... | [((5855, 5887), 'oneflow.compatible.single_client.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (5885, 5887), True, 'from oneflow.compatible import single_client as flow\n'), ((6673, 6705), 'oneflow.compatible.single_client.unittest.skip_unless_1n2d', 'flow.unittest.skip_unless_1n2d', ([], {}), '()\n', (6703, 6705), True, 'from oneflow.compatible import single_client as flow\n'), ((1325, 1353), 'oneflow.compatible.single_client.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (1351, 1353), True, 'from oneflow.compatible import single_client as flow\n'), ((1511, 1532), 'oneflow.compatible.single_client.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (1530, 1532), True, 'from oneflow.compatible import single_client as flow\n'), ((5257, 5270), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (5268, 5270), False, 'from collections import OrderedDict\n'), ((7201, 7216), 'unittest.main', 'unittest.main', ([], {}), '()\n', (7214, 7216), False, 'import unittest\n'), ((1148, 1186), 'numpy.array', 'np.array', (['input_1'], {'dtype': 'value_type[0]'}), '(input_1, dtype=value_type[0])\n', (1156, 1186), True, 'import numpy as np\n'), ((1391, 1432), 'oneflow.compatible.single_client.config.cpu_device_num', 'flow.config.cpu_device_num', (['device_counts'], {}), '(device_counts)\n', (1417, 1432), True, 'from oneflow.compatible import single_client as flow\n'), ((1451, 1492), 'oneflow.compatible.single_client.config.gpu_device_num', 'flow.config.gpu_device_num', (['device_counts'], {}), '(device_counts)\n', (1477, 1492), True, 'from oneflow.compatible import single_client as flow\n'), ((1573, 1619), 'oneflow.compatible.single_client.scope.placement', 'flow.scope.placement', (['device_type', 'machine_ids'], {}), '(device_type, machine_ids)\n', (1593, 1619), True, 'from oneflow.compatible import single_client as flow\n'), ((1914, 1934), 
'numpy.zeros_like', 'np.zeros_like', (['input'], {}), '(input)\n', (1927, 1934), True, 'import numpy as np\n'), ((2120, 2147), 'numpy.reshape', 'np.reshape', (['out', 'init_shape'], {}), '(out, init_shape)\n', (2130, 2147), True, 'import numpy as np\n'), ((2383, 2410), 'numpy.zeros', 'np.zeros', ([], {'shape': '(elem_cnt,)'}), '(shape=(elem_cnt,))\n', (2391, 2410), True, 'import numpy as np\n'), ((2586, 2624), 'numpy.reshape', 'np.reshape', (['diff'], {'newshape': 'input_shape'}), '(diff, newshape=input_shape)\n', (2596, 2624), True, 'import numpy as np\n'), ((2640, 2675), 'numpy.array', 'np.array', (['diff'], {'dtype': 'value_type[0]'}), '(diff, dtype=value_type[0])\n', (2648, 2675), True, 'import numpy as np\n'), ((3008, 3071), 'oneflow.compatible.single_client.global_function', 'flow.global_function', ([], {'type': '"""train"""', 'function_config': 'func_config'}), "(type='train', function_config=func_config)\n", (3028, 3071), True, 'from oneflow.compatible import single_client as flow\n'), ((4052, 4115), 'oneflow.compatible.single_client.global_function', 'flow.global_function', ([], {'type': '"""train"""', 'function_config': 'func_config'}), "(type='train', function_config=func_config)\n", (4072, 4115), True, 'from oneflow.compatible import single_client as flow\n'), ((5033, 5080), 'numpy.allclose', 'np.allclose', (['of_out_elu', 'np_out_elu'], {'atol': '(0.001)'}), '(of_out_elu, np_out_elu, atol=0.001)\n', (5044, 5080), True, 'import numpy as np\n'), ((5106, 5153), 'numpy.allclose', 'np.allclose', (['of_out_elu', 'np_out_elu'], {'atol': '(1e-05)'}), '(of_out_elu, np_out_elu, atol=1e-05)\n', (5117, 5153), True, 'import numpy as np\n'), ((6199, 6219), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (6209, 6219), False, 'from test_util import GenArgList\n'), ((6609, 6629), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (6619, 6629), False, 'from test_util import GenArgList\n'), ((6282, 6316), 'os.getenv', 
'os.getenv', (['"""ONEFLOW_TEST_CPU_ONLY"""'], {}), "('ONEFLOW_TEST_CPU_ONLY')\n", (6291, 6316), False, 'import os\n'), ((7107, 7127), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (7117, 7127), False, 'from test_util import GenArgList\n'), ((6770, 6804), 'os.getenv', 'os.getenv', (['"""ONEFLOW_TEST_CPU_ONLY"""'], {}), "('ONEFLOW_TEST_CPU_ONLY')\n", (6779, 6804), False, 'import os\n'), ((2846, 2885), 'numpy.allclose', 'np.allclose', (['blob', '_np_grad'], {'atol': '(0.001)'}), '(blob, _np_grad, atol=0.001)\n', (2857, 2885), True, 'import numpy as np\n'), ((2919, 2958), 'numpy.allclose', 'np.allclose', (['blob', '_np_grad'], {'atol': '(1e-05)'}), '(blob, _np_grad, atol=1e-05)\n', (2930, 2958), True, 'import numpy as np\n'), ((3618, 3643), 'oneflow.compatible.single_client.nn.elu', 'flow.nn.elu', (['x_f16', 'alpha'], {}), '(x_f16, alpha)\n', (3629, 3643), True, 'from oneflow.compatible import single_client as flow\n'), ((3673, 3712), 'oneflow.compatible.single_client.cast', 'flow.cast', (['of_elu_out_f16', 'flow.float32'], {}), '(of_elu_out_f16, flow.float32)\n', (3682, 3712), True, 'from oneflow.compatible import single_client as flow\n'), ((3950, 3996), 'oneflow.compatible.single_client.watch_diff', 'flow.watch_diff', (['x_var', 'assert_prediction_grad'], {}), '(x_var, assert_prediction_grad)\n', (3965, 3996), True, 'from oneflow.compatible import single_client as flow\n'), ((4592, 4638), 'oneflow.compatible.single_client.watch_diff', 'flow.watch_diff', (['x_var', 'assert_prediction_grad'], {}), '(x_var, assert_prediction_grad)\n', (4607, 4638), True, 'from oneflow.compatible import single_client as flow\n'), ((4664, 4689), 'oneflow.compatible.single_client.nn.elu', 'flow.nn.elu', (['x_var', 'alpha'], {}), '(x_var, alpha)\n', (4675, 4689), True, 'from oneflow.compatible import single_client as flow\n'), ((1068, 1110), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)'], {'size': 'input_shape'}), '(-1, 1, size=input_shape)\n', 
(1085, 1110), True, 'import numpy as np\n'), ((1215, 1257), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)'], {'size': 'input_shape'}), '(-1, 1, size=input_shape)\n', (1232, 1257), True, 'import numpy as np\n'), ((2163, 2176), 'numpy.array', 'np.array', (['out'], {}), '(out)\n', (2171, 2176), True, 'import numpy as np\n'), ((3121, 3182), 'oneflow.compatible.single_client.typing.Numpy.Placeholder', 'tp.Numpy.Placeholder', ([], {'shape': 'input_1.shape', 'dtype': 'flow.float32'}), '(shape=input_1.shape, dtype=flow.float32)\n', (3141, 3182), True, 'from oneflow.compatible.single_client import typing as tp\n'), ((3223, 3263), 'oneflow.compatible.single_client.scope.placement', 'flow.scope.placement', (['device_type', '"""0:0"""'], {}), "(device_type, '0:0')\n", (3243, 3263), True, 'from oneflow.compatible import single_client as flow\n'), ((3558, 3588), 'oneflow.compatible.single_client.cast', 'flow.cast', (['x_var', 'flow.float16'], {}), '(x_var, flow.float16)\n', (3567, 3588), True, 'from oneflow.compatible import single_client as flow\n'), ((3730, 3770), 'oneflow.compatible.single_client.scope.placement', 'flow.scope.placement', (['device_type', '"""0:0"""'], {}), "(device_type, '0:0')\n", (3750, 3770), True, 'from oneflow.compatible import single_client as flow\n'), ((4165, 4227), 'oneflow.compatible.single_client.typing.Numpy.Placeholder', 'tp.Numpy.Placeholder', ([], {'shape': 'input_1.shape', 'dtype': 'value_type[1]'}), '(shape=input_1.shape, dtype=value_type[1])\n', (4185, 4227), True, 'from oneflow.compatible.single_client import typing as tp\n'), ((4268, 4308), 'oneflow.compatible.single_client.scope.placement', 'flow.scope.placement', (['device_type', '"""0:0"""'], {}), "(device_type, '0:0')\n", (4288, 4308), True, 'from oneflow.compatible import single_client as flow\n'), ((4707, 4747), 'oneflow.compatible.single_client.scope.placement', 'flow.scope.placement', (['device_type', '"""0:0"""'], {}), "(device_type, '0:0')\n", (4727, 4747), True, 
'from oneflow.compatible import single_client as flow\n'), ((2554, 2570), 'numpy.exp', 'np.exp', (['input[i]'], {}), '(input[i])\n', (2560, 2570), True, 'import numpy as np\n'), ((2084, 2100), 'numpy.exp', 'np.exp', (['input[i]'], {}), '(input[i])\n', (2090, 2100), True, 'import numpy as np\n'), ((3417, 3441), 'oneflow.compatible.single_client.zeros_initializer', 'flow.zeros_initializer', ([], {}), '()\n', (3439, 3441), True, 'from oneflow.compatible import single_client as flow\n'), ((4463, 4487), 'oneflow.compatible.single_client.zeros_initializer', 'flow.zeros_initializer', ([], {}), '()\n', (4485, 4487), True, 'from oneflow.compatible import single_client as flow\n'), ((3828, 3882), 'oneflow.compatible.single_client.optimizer.PiecewiseConstantScheduler', 'flow.optimizer.PiecewiseConstantScheduler', (['[]', '[0.001]'], {}), '([], [0.001])\n', (3869, 3882), True, 'from oneflow.compatible import single_client as flow\n'), ((4805, 4859), 'oneflow.compatible.single_client.optimizer.PiecewiseConstantScheduler', 'flow.optimizer.PiecewiseConstantScheduler', (['[]', '[0.001]'], {}), '([], [0.001])\n', (4846, 4859), True, 'from oneflow.compatible import single_client as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import numpy as np
from collections import OrderedDict
from oneflow.test_utils.test_util import GenArgList, type_name_to_flow_type
from oneflow.test_utils.automated_test_util import *
import oneflow as flow
def _test_normal(test_case, mean, std, shape, device, dtype):
    """Two unseeded flow.normal draws should differ but share shape and dtype."""
    flow_dtype = type_name_to_flow_type[dtype]
    dev = flow.device(device)
    first = flow.normal(mean, std, *shape, dtype=flow_dtype, device=dev)
    second = flow.normal(mean, std, *shape, dtype=flow_dtype, device=dev)
    # Without a fixed generator the two samples are expected to diverge.
    test_case.assertFalse(np.array_equal(first.numpy(), second.numpy()))
    test_case.assertEqual(shape, first.shape)
    test_case.assertEqual(flow_dtype, first.dtype)
def _test_with_generator(test_case, mean, std, shape, device, dtype):
    """Re-seeding the same generator must reproduce an identical sample."""
    flow_dtype = type_name_to_flow_type[dtype]
    gen = flow.Generator()
    draws = []
    for _ in range(2):
        # Reset the generator so both draws start from the same state.
        gen.manual_seed(0)
        draws.append(
            flow.normal(
                mean,
                std,
                *shape,
                generator=gen,
                dtype=flow_dtype,
                device=flow.device(device),
            )
        )
    test_case.assertTrue(np.array_equal(draws[0].numpy(), draws[1].numpy()))
def _test_backward(test_case, mean, std, shape, device, dtype):
    """sum().backward() through flow.normal should leave an all-ones gradient."""
    sample = flow.normal(
        mean,
        std,
        *shape,
        dtype=type_name_to_flow_type[dtype],
        device=flow.device(device),
        requires_grad=True,
    )
    sample.sum().backward()
    # d(sum(x))/dx == 1 elementwise.
    test_case.assertTrue(np.array_equal(np.ones(shape), sample.grad.numpy()))
@flow.unittest.skip_unless_1n1d()
class TestNormModule(flow.unittest.TestCase):
    def test_norm(test_case):
        """Sweep every (fun, mean, std, shape, device, dtype) combination."""
        arg_dict = OrderedDict(
            [
                ("fun", [_test_normal, _test_with_generator, _test_backward]),
                ("mean", [-1, 0, 1]),
                ("std", [1, 2, 8]),
                ("shape", [(2, 3), (2, 3, 4), (2, 3, 4, 5)]),
                ("device", ["cpu", "cuda"]),
                ("dtype", ["float32", "double"]),
            ]
        )
        for arg in GenArgList(arg_dict):
            arg[0](test_case, *arg[1:])
if __name__ == "__main__":
    # Run the test suite when this file is executed directly.
    unittest.main()
| [
"oneflow.unittest.skip_unless_1n1d",
"oneflow.Generator",
"oneflow.test_utils.test_util.GenArgList",
"oneflow.device"
] | [((2038, 2070), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (2068, 2070), True, 'import oneflow as flow\n'), ((1359, 1375), 'oneflow.Generator', 'flow.Generator', ([], {}), '()\n', (1373, 1375), True, 'import oneflow as flow\n'), ((2605, 2620), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2618, 2620), False, 'import unittest\n'), ((2166, 2179), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2177, 2179), False, 'from collections import OrderedDict\n'), ((2510, 2530), 'oneflow.test_utils.test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (2520, 2530), False, 'from oneflow.test_utils.test_util import GenArgList, type_name_to_flow_type\n'), ((981, 1000), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (992, 1000), True, 'import oneflow as flow\n'), ((1062, 1081), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (1073, 1081), True, 'import oneflow as flow\n'), ((1483, 1502), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (1494, 1502), True, 'import oneflow as flow\n'), ((1616, 1635), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (1627, 1635), True, 'import oneflow as flow\n'), ((1883, 1902), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (1894, 1902), True, 'import oneflow as flow\n'), ((2002, 2016), 'numpy.ones', 'np.ones', (['shape'], {}), '(shape)\n', (2009, 2016), True, 'import numpy as np\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from oneflow.support.blocking import BlockingInfoContext
import oneflow as flow
from oneflow.framework.tensor import register_tensor_op, Tensor
from oneflow.nn.module import Module
class ToConsistent(Module):
    """Module wrapper around ``flow._C.to_consistent``.

    The constructor validates and stores a placement plus one or more sbp
    descriptors; ``forward`` nevertheless casts with the placement/sbp given
    at call time.
    """

    def __init__(self, placement, sbp):
        super().__init__()
        self.placement = placement
        # Normalize a single sbp descriptor into a one-element list.
        sbp_list = [sbp] if isinstance(sbp, flow.sbp.sbp) else sbp
        for elem in sbp_list:
            assert isinstance(
                elem, flow.sbp.sbp
            ), "element %s is not an sbp instance" % (sbp_list)
        self.sbp = sbp_list

    def forward(self, x, sbp, placement):
        # Cast using the call-time placement/sbp.
        return flow._C.to_consistent(x, placement=placement, sbp=sbp)
@register_tensor_op("to_consistent")
def to_consistent_op(input, placement=None, sbp=None, grad_sbp=None):
    """Cast a local tensor to a consistent tensor, or re-cast a consistent
    tensor to a different sbp/placement.

    Args:
        input (Tensor): the input tensor.
        placement (flow.placement, optional): the desired placement of returned consistent tensor. Default: if None, the input tensor must be consistent one and use its own placement.
        sbp (flow.sbp.sbp or tuple of flow.sbp.sbp, optional): the desired sbp descriptor of returned consistent tensor. Default: if None, the input tensor must be consistent one and use its own sbp.

    For example:

    .. code-block:: python

        >>> import oneflow as flow
        >>> import numpy as np
        >>> np_arr = np.array([0.5, 0.6, 0.7]).astype(np.float32)
        >>> input = flow.Tensor(np_arr)
        >>> placement = flow.placement("cpu", {0:range(1)})
        >>> output_tensor = input.to_consistent(placement, [flow.sbp.split(0)])
        >>> output_tensor.is_consistent
        True
    """
    assert isinstance(input, Tensor)

    def _check_sbp(sbp):
        # Normalize: None passes through, a lone sbp becomes a 1-tuple,
        # sequences are validated element-wise; anything else is rejected.
        if sbp is None:
            return sbp
        if isinstance(sbp, flow.sbp.sbp):
            return (sbp,)
        if isinstance(sbp, (tuple, list)):
            if not all(isinstance(sbp_item, flow.sbp.sbp) for sbp_item in sbp):
                raise TypeError(
                    "sbp parameter must be type of oneflow.sbp.sbp or list/tuple of oneflow.sbp.sbp"
                )
            return sbp
        raise TypeError(f"Invalid parameter sbp with type {type(sbp)}")

    sbp = _check_sbp(sbp)
    if input.is_consistent:
        # consistent -> consistent: missing arguments fall back to the
        # tensor's own placement/sbp.
        placement = input.placement if placement is None else placement
        sbp = input.sbp if sbp is None else sbp
        grad_sbp = _check_sbp(grad_sbp)
    else:
        # local -> consistent: both placement and sbp are mandatory.
        if placement is None or sbp is None:
            raise ValueError(
                "Converting a local tensor to consistent tensor must have placement and sbp parameters."
            )
        if not isinstance(placement, flow.placement):
            raise ValueError(f"Invalid parameter placement with type {type(placement)}")
    if grad_sbp is None:
        grad_sbp = tuple()
    with BlockingInfoContext() as ctx:
        return flow._C.to_consistent(input, placement, sbp, grad_sbp)
class ToLocal(Module):
    """Module wrapper around ``flow._C.to_local``: extracts the rank-local
    component of a consistent tensor."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        # x is expected to be a consistent tensor; returns its local tensor.
        return flow._C.to_local(x)
@register_tensor_op("to_local")
def to_local_op(input):
    """Returns the local tensor of a consistent tensor.

    Args:
        input (Tensor): the input tensor; must be consistent (an
            AssertionError is raised otherwise).

    For example:

    .. code-block:: python

        >>> import oneflow as flow
        >>> import numpy as np
        >>> np_arr = np.array([0.5, 0.6, 0.7]).astype(np.float32)
        >>> input = flow.tensor(np_arr, dtype=flow.float32)
        >>> placement = flow.placement("cpu", {0:range(1)})
        >>> consistent_tensor = input.to_consistent(placement, [flow.sbp.split(0)])
        >>> consistent_tensor.to_local()
        tensor([0.5000, 0.6000, 0.7000], dtype=oneflow.float32)
    """
    # Local tensors have no consistent-to-local mapping, so guard first.
    assert input.is_consistent, "input must be a consistent tensor!"
    return flow._C.to_local(input)
| [
"oneflow._C.to_consistent",
"oneflow._C.to_local",
"oneflow.support.blocking.BlockingInfoContext",
"oneflow.framework.tensor.register_tensor_op"
] | [((1258, 1293), 'oneflow.framework.tensor.register_tensor_op', 'register_tensor_op', (['"""to_consistent"""'], {}), "('to_consistent')\n", (1276, 1293), False, 'from oneflow.framework.tensor import register_tensor_op, Tensor\n'), ((3940, 3970), 'oneflow.framework.tensor.register_tensor_op', 'register_tensor_op', (['"""to_local"""'], {}), "('to_local')\n", (3958, 3970), False, 'from oneflow.framework.tensor import register_tensor_op, Tensor\n'), ((4681, 4704), 'oneflow._C.to_local', 'flow._C.to_local', (['input'], {}), '(input)\n', (4697, 4704), True, 'import oneflow as flow\n'), ((1200, 1254), 'oneflow._C.to_consistent', 'flow._C.to_consistent', (['x'], {'placement': 'placement', 'sbp': 'sbp'}), '(x, placement=placement, sbp=sbp)\n', (1221, 1254), True, 'import oneflow as flow\n'), ((3699, 3720), 'oneflow.support.blocking.BlockingInfoContext', 'BlockingInfoContext', ([], {}), '()\n', (3718, 3720), False, 'from oneflow.support.blocking import BlockingInfoContext\n'), ((3744, 3798), 'oneflow._C.to_consistent', 'flow._C.to_consistent', (['input', 'placement', 'sbp', 'grad_sbp'], {}), '(input, placement, sbp, grad_sbp)\n', (3765, 3798), True, 'import oneflow as flow\n'), ((3917, 3936), 'oneflow._C.to_local', 'flow._C.to_local', (['x'], {}), '(x)\n', (3933, 3936), True, 'import oneflow as flow\n')] |
from oneflow.compatible import single_client as flow
from oneflow.compatible.single_client import typing as tp
import numpy as np
import os
# Number of samples per training/evaluation batch.
BATCH_SIZE = 100

# Use the legacy (v1/v2) model-IO paths and run in lazy graph mode.
flow.config.enable_legacy_model_io(True)
flow.config.enable_model_io_v2(True)
flow.enable_eager_execution(False)
# Log the process id, useful when attaching a debugger/profiler.
print(os.getpid())
def mlp_model(images, labels, train=True):
    """Two-layer MLP: flatten -> dense(500, relu) -> dense(10).

    Args:
        images: input batch of shape [batch, 1, 28, 28].
        labels: integer class labels of shape [batch].
        train: when True return the per-sample cross-entropy loss,
            otherwise return the raw logits.
    """
    # Collapse the image dims: [batch, 1, 28, 28] -> [batch, 784].
    flat = flow.flatten(images, start_dim=1)

    # Hidden layer, uniform init in [-1/28, 1/28].
    hidden_init = flow.random_uniform_initializer(-1 / 28.0, 1 / 28.0)
    hidden = flow.layers.dense(
        flat,
        500,
        activation=flow.nn.relu,
        kernel_initializer=hidden_init,
        bias_initializer=hidden_init,
        name="dense1",
    )

    # Output layer, uniform init in [-sqrt(1/500), sqrt(1/500)].
    bound = np.sqrt(1 / 500.0)
    output_init = flow.random_uniform_initializer(-bound, bound)
    logits = flow.layers.dense(
        hidden,
        10,
        kernel_initializer=output_init,
        bias_initializer=output_init,
        name="dense2",
    )

    if not train:
        return logits
    return flow.nn.sparse_softmax_cross_entropy_with_logits(labels, logits)
@flow.global_function(type="train")
def train_job(
    images: tp.Numpy.Placeholder((BATCH_SIZE, 1, 28, 28), dtype=flow.float),
    labels: tp.Numpy.Placeholder((BATCH_SIZE,), dtype=flow.int32),
) -> tp.Numpy:
    """Lazy-mode training job: one forward/backward/update step on a batch.

    Returns the per-sample cross-entropy loss as a numpy array.
    """
    # Build the forward graph on GPU 0 of machine 0.
    with flow.scope.placement("gpu", "0:0"):
        loss = mlp_model(images, labels)
    # Adam with a constant learning rate of 1e-3.
    flow.optimizer.Adam(
        flow.optimizer.PiecewiseConstantScheduler([], [0.001])
    ).minimize(loss)
    return loss
if __name__ == '__main__':
    # MNIST comes pre-batched: each element of train_images/train_labels
    # is already one batch of BATCH_SIZE samples.
    (train_images, train_labels), (test_images, test_labels) = flow.data.load_mnist(
        BATCH_SIZE, BATCH_SIZE
    )
    for epoch in range(20):
        for step, (batch_images, batch_labels) in enumerate(
            zip(train_images, train_labels)
        ):
            loss = train_job(batch_images, batch_labels)
            # Report progress every 20 steps.
            if step % 20 == 0:
                print("Epoch [{}/{}], Loss: {:.4f}".format(epoch + 1, 20, loss.mean()))
    flow.checkpoint.save("./mlp_model")
| [
"oneflow.compatible.single_client.random_uniform_initializer",
"oneflow.compatible.single_client.config.enable_legacy_model_io",
"oneflow.compatible.single_client.checkpoint.save",
"oneflow.compatible.single_client.config.enable_model_io_v2",
"oneflow.compatible.single_client.enable_eager_execution",
"one... | [((158, 198), 'oneflow.compatible.single_client.config.enable_legacy_model_io', 'flow.config.enable_legacy_model_io', (['(True)'], {}), '(True)\n', (192, 198), True, 'from oneflow.compatible import single_client as flow\n'), ((199, 235), 'oneflow.compatible.single_client.config.enable_model_io_v2', 'flow.config.enable_model_io_v2', (['(True)'], {}), '(True)\n', (229, 235), True, 'from oneflow.compatible import single_client as flow\n'), ((236, 270), 'oneflow.compatible.single_client.enable_eager_execution', 'flow.enable_eager_execution', (['(False)'], {}), '(False)\n', (263, 270), True, 'from oneflow.compatible import single_client as flow\n'), ((1310, 1344), 'oneflow.compatible.single_client.global_function', 'flow.global_function', ([], {'type': '"""train"""'}), "(type='train')\n", (1330, 1344), True, 'from oneflow.compatible import single_client as flow\n'), ((277, 288), 'os.getpid', 'os.getpid', ([], {}), '()\n', (286, 288), False, 'import os\n'), ((465, 498), 'oneflow.compatible.single_client.flatten', 'flow.flatten', (['images'], {'start_dim': '(1)'}), '(images, start_dim=1)\n', (477, 498), True, 'from oneflow.compatible import single_client as flow\n'), ((574, 626), 'oneflow.compatible.single_client.random_uniform_initializer', 'flow.random_uniform_initializer', (['(-1 / 28.0)', '(1 / 28.0)'], {}), '(-1 / 28.0, 1 / 28.0)\n', (605, 626), True, 'from oneflow.compatible import single_client as flow\n'), ((640, 780), 'oneflow.compatible.single_client.layers.dense', 'flow.layers.dense', (['reshape', '(500)'], {'activation': 'flow.nn.relu', 'kernel_initializer': 'initializer1', 'bias_initializer': 'initializer1', 'name': '"""dense1"""'}), "(reshape, 500, activation=flow.nn.relu, kernel_initializer\n =initializer1, bias_initializer=initializer1, name='dense1')\n", (657, 780), True, 'from oneflow.compatible import single_client as flow\n'), ((1005, 1117), 'oneflow.compatible.single_client.layers.dense', 'flow.layers.dense', (['hidden', '(10)'], 
{'kernel_initializer': 'initializer2', 'bias_initializer': 'initializer2', 'name': '"""dense2"""'}), "(hidden, 10, kernel_initializer=initializer2,\n bias_initializer=initializer2, name='dense2')\n", (1022, 1117), True, 'from oneflow.compatible import single_client as flow\n'), ((1831, 1875), 'oneflow.compatible.single_client.data.load_mnist', 'flow.data.load_mnist', (['BATCH_SIZE', 'BATCH_SIZE'], {}), '(BATCH_SIZE, BATCH_SIZE)\n', (1851, 1875), True, 'from oneflow.compatible import single_client as flow\n'), ((2163, 2198), 'oneflow.compatible.single_client.checkpoint.save', 'flow.checkpoint.save', (['"""./mlp_model"""'], {}), "('./mlp_model')\n", (2183, 2198), True, 'from oneflow.compatible import single_client as flow\n'), ((967, 985), 'numpy.sqrt', 'np.sqrt', (['(1 / 500.0)'], {}), '(1 / 500.0)\n', (974, 985), True, 'import numpy as np\n'), ((1190, 1254), 'oneflow.compatible.single_client.nn.sparse_softmax_cross_entropy_with_logits', 'flow.nn.sparse_softmax_cross_entropy_with_logits', (['labels', 'logits'], {}), '(labels, logits)\n', (1238, 1254), True, 'from oneflow.compatible import single_client as flow\n'), ((1376, 1439), 'oneflow.compatible.single_client.typing.Numpy.Placeholder', 'tp.Numpy.Placeholder', (['(BATCH_SIZE, 1, 28, 28)'], {'dtype': 'flow.float'}), '((BATCH_SIZE, 1, 28, 28), dtype=flow.float)\n', (1396, 1439), True, 'from oneflow.compatible.single_client import typing as tp\n'), ((1457, 1510), 'oneflow.compatible.single_client.typing.Numpy.Placeholder', 'tp.Numpy.Placeholder', (['(BATCH_SIZE,)'], {'dtype': 'flow.int32'}), '((BATCH_SIZE,), dtype=flow.int32)\n', (1477, 1510), True, 'from oneflow.compatible.single_client import typing as tp\n'), ((1536, 1570), 'oneflow.compatible.single_client.scope.placement', 'flow.scope.placement', (['"""gpu"""', '"""0:0"""'], {}), "('gpu', '0:0')\n", (1556, 1570), True, 'from oneflow.compatible import single_client as flow\n'), ((947, 965), 'numpy.sqrt', 'np.sqrt', (['(1 / 500.0)'], {}), '(1 / 500.0)\n', (954, 
965), True, 'import numpy as np\n'), ((1647, 1701), 'oneflow.compatible.single_client.optimizer.PiecewiseConstantScheduler', 'flow.optimizer.PiecewiseConstantScheduler', (['[]', '[0.001]'], {}), '([], [0.001])\n', (1688, 1701), True, 'from oneflow.compatible import single_client as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from oneflow.test_utils.automated_test_util import *
import oneflow as flow
import oneflow.unittest
@flow.unittest.skip_unless_1n1d()
class TestModuleToHalf(flow.unittest.TestCase):
    @unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
    def test_module_to_half(test_case):
        """A half-precision Linear on CUDA should emit float16 activations."""
        x = flow.randn(10, 10).to(flow.float16).cuda()
        layer = flow.nn.Linear(10, 20).half().cuda()
        out = layer(x)
        test_case.assertEqual(out.dtype, flow.float16)
if __name__ == "__main__":
    # Run the test suite when this file is executed directly.
    unittest.main()
| [
"oneflow.unittest.skip_unless_1n1d",
"oneflow.randn",
"oneflow.nn.Linear"
] | [((709, 741), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (739, 741), True, 'import oneflow as flow\n'), ((1143, 1158), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1156, 1158), False, 'import unittest\n'), ((926, 944), 'oneflow.randn', 'flow.randn', (['(10)', '(10)'], {}), '(10, 10)\n', (936, 944), True, 'import oneflow as flow\n'), ((985, 1007), 'oneflow.nn.Linear', 'flow.nn.Linear', (['(10)', '(20)'], {}), '(10, 20)\n', (999, 1007), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow as flow
from oneflow.framework.tensor import register_tensor_op
from oneflow.nn.module import Module
def nms_op(boxes, scores, iou_threshold: float):
    """Non-maximum suppression.

    Sorts the boxes by descending score, suppresses boxes whose IoU with an
    already-kept box exceeds ``iou_threshold``, and returns the indices of
    the surviving boxes (into the original ``boxes``/``scores`` order).
    """
    order = flow.argsort(scores, dim=0, descending=True)
    sorted_boxes = flow._C.gather(boxes, order, axis=0)
    keep_mask = flow._C.nms(sorted_boxes, iou_threshold)
    # argwhere yields [k, 1]; squeeze to a flat index vector.
    kept = flow.squeeze(flow.argwhere(keep_mask), dim=[1])
    return flow._C.gather(order, kept, axis=0)
| [
"oneflow.argsort",
"oneflow._C.nms",
"oneflow.argwhere",
"oneflow._C.gather"
] | [((774, 818), 'oneflow.argsort', 'flow.argsort', (['scores'], {'dim': '(0)', 'descending': '(True)'}), '(scores, dim=0, descending=True)\n', (786, 818), True, 'import oneflow as flow\n'), ((831, 872), 'oneflow._C.gather', 'flow._C.gather', (['boxes', 'score_inds'], {'axis': '(0)'}), '(boxes, score_inds, axis=0)\n', (845, 872), True, 'import oneflow as flow\n'), ((884, 917), 'oneflow._C.nms', 'flow._C.nms', (['boxes', 'iou_threshold'], {}), '(boxes, iou_threshold)\n', (895, 917), True, 'import oneflow as flow\n'), ((984, 1025), 'oneflow._C.gather', 'flow._C.gather', (['score_inds', 'index'], {'axis': '(0)'}), '(score_inds, index, axis=0)\n', (998, 1025), True, 'import oneflow as flow\n'), ((943, 962), 'oneflow.argwhere', 'flow.argwhere', (['keep'], {}), '(keep)\n', (956, 962), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import numpy as np
import oneflow as flow
import oneflow.unittest
import oneflow.nn as nn
def get_bn_graph():
    """Build an nn.Graph wrapping an eval-mode, globally-broadcast BatchNorm1d(6)."""
    bn = nn.BatchNorm1d(6)
    bn.eval()
    bn.to_global(flow.env.all_device_placement("cpu"), flow.sbp.broadcast)

    class _BnGraph(flow.nn.Graph):
        def __init__(self, module):
            super().__init__()
            self.module = module

        def build(self, x):
            return self.module(x)

    return _BnGraph(bn)
@flow.unittest.skip_unless_1n1d()
class TestFreeTensorNotInJob(flow.unittest.TestCase):
    def test_free_tensor_not_in_job(test_case):
        """Feeding a split global tensor through the graph keeps its full shape."""
        placement = flow.env.all_device_placement("cpu")
        inp = flow.randn(1, 6, 2).to_global(placement=placement, sbp=flow.sbp.split(0))
        out = get_bn_graph()(inp)
        test_case.assertEqual(out.size(), (1, 6, 2))
if __name__ == "__main__":
    # Run the test suite when this file is executed directly.
    unittest.main()
| [
"oneflow.unittest.skip_unless_1n1d",
"oneflow.randn",
"oneflow.env.all_device_placement",
"oneflow.sbp.split",
"oneflow.nn.BatchNorm1d"
] | [((1117, 1149), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (1147, 1149), True, 'import oneflow as flow\n'), ((731, 748), 'oneflow.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(6)'], {}), '(6)\n', (745, 748), True, 'import oneflow.nn as nn\n'), ((1501, 1516), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1514, 1516), False, 'import unittest\n'), ((786, 822), 'oneflow.env.all_device_placement', 'flow.env.all_device_placement', (['"""cpu"""'], {}), "('cpu')\n", (815, 822), True, 'import oneflow as flow\n'), ((1264, 1283), 'oneflow.randn', 'flow.randn', (['(1)', '(6)', '(2)'], {}), '(1, 6, 2)\n', (1274, 1283), True, 'import oneflow as flow\n'), ((1317, 1353), 'oneflow.env.all_device_placement', 'flow.env.all_device_placement', (['"""cpu"""'], {}), "('cpu')\n", (1346, 1353), True, 'import oneflow as flow\n'), ((1359, 1376), 'oneflow.sbp.split', 'flow.sbp.split', (['(0)'], {}), '(0)\n', (1373, 1376), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from typing import Optional, Sequence
from oneflow.python.oneflow_export import oneflow_export
import oneflow as flow
import oneflow.core.operator.op_conf_pb2 as op_conf_util
import oneflow.python.framework.distribute as distribute_util
import oneflow.python.framework.remote_blob as remote_blob_util
@oneflow_export("layers.prelu")
def prelu(
    inputs: remote_blob_util.BlobDef,
    alpha_initializer: Optional[op_conf_util.InitializerConf] = None,
    alpha_regularizer: Optional[op_conf_util.RegularizerConf] = None,
    shared_axes: Optional[Sequence[int]] = None,
    trainable: bool = True,
    name: str = "PRelu",
    model_distribute: distribute_util.Distribute = distribute_util.broadcast(),
) -> remote_blob_util.BlobDef:
    """PReLU layer: y = x for x > 0, y = alpha * x otherwise, with a
    learnable per-channel ``alpha`` variable.

    Args:
        inputs: input blob; alpha gets one entry per non-batch dimension.
        alpha_initializer: initializer for alpha (defaults to zeros).
        alpha_regularizer: optional regularizer for alpha.
        shared_axes: 1-based axes over which alpha is shared (size-1 entries).
        trainable: whether alpha is updated during training.
        name: variable/op namespace.
        model_distribute: distribute strategy for the alpha variable.
    """
    # alpha mirrors the non-batch dims; shared axes collapse to size 1.
    alpha_shape = list(inputs.shape[1:])
    for axis in shared_axes if shared_axes is not None else ():
        assert 1 <= axis < len(inputs.shape)
        alpha_shape[axis - 1] = 1
    alpha_init = (
        flow.constant_initializer(0)
        if alpha_initializer is None
        else alpha_initializer
    )
    with flow.scope.namespace(name):
        alpha = flow.get_variable(
            name="alpha",
            shape=alpha_shape,
            dtype=inputs.dtype,
            initializer=alpha_init,
            regularizer=alpha_regularizer,
            trainable=trainable,
            distribute=model_distribute,
            reuse=False,
        )
        # Build the prelu user op inside the same namespace as the variable.
        builder = flow.user_op_builder(name).Op("prelu")
        builder = builder.Input("x", [inputs]).Input("alpha", [alpha])
        op = builder.Output("y").Build()
    return op.InferAndTryRun().SoleOutputBlob()
| [
"oneflow.python.framework.distribute.broadcast",
"oneflow.scope.namespace",
"oneflow.constant_initializer",
"oneflow.get_variable",
"oneflow.user_op_builder",
"oneflow.python.oneflow_export.oneflow_export"
] | [((934, 964), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""layers.prelu"""'], {}), "('layers.prelu')\n", (948, 964), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((1307, 1334), 'oneflow.python.framework.distribute.broadcast', 'distribute_util.broadcast', ([], {}), '()\n', (1332, 1334), True, 'import oneflow.python.framework.distribute as distribute_util\n'), ((1620, 1648), 'oneflow.constant_initializer', 'flow.constant_initializer', (['(0)'], {}), '(0)\n', (1645, 1648), True, 'import oneflow as flow\n'), ((1659, 1685), 'oneflow.scope.namespace', 'flow.scope.namespace', (['name'], {}), '(name)\n', (1679, 1685), True, 'import oneflow as flow\n'), ((1703, 1907), 'oneflow.get_variable', 'flow.get_variable', ([], {'name': '"""alpha"""', 'shape': 'alpha_shape', 'dtype': 'inputs.dtype', 'initializer': 'alpha_initializer', 'regularizer': 'alpha_regularizer', 'trainable': 'trainable', 'distribute': 'model_distribute', 'reuse': '(False)'}), "(name='alpha', shape=alpha_shape, dtype=inputs.dtype,\n initializer=alpha_initializer, regularizer=alpha_regularizer, trainable\n =trainable, distribute=model_distribute, reuse=False)\n", (1720, 1907), True, 'import oneflow as flow\n'), ((2026, 2052), 'oneflow.user_op_builder', 'flow.user_op_builder', (['name'], {}), '(name)\n', (2046, 2052), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow
from oneflow.framework.docstr.utils import add_docstr
# Attach the user-facing docstring to oneflow.tensordot.
# Fixes documentation typos: "compatiable" -> "compatible",
# "graph gloal" -> "graph global", "concatentation" -> "concatenation",
# "showed" -> "shown".
add_docstr(
    oneflow.tensordot,
    r"""
    tensordot(a, b, dims=Union[int, Tensor, Tuple[List[int], List[int]], List[List[int]]], out=None) -> Tensor
    Compute tensor dot along given dimensions.
    Given two tensors a and b, and dims which represent two lists containing dim indices, `tensordot` traverses the two
    lists and calculate the tensor dot along every dim pair.
    Args:
        a (oneflow.Tensor): The input tensor to compute tensordot
        b (oneflow.Tensor): The input tensor to compute tensordot
        dims (int or list or tuple or oneflow.Tensor):
            The dims to calculate tensordot.
            If it's an integer or oneflow.Tensor with only one element,
            the last dims of tensor `a` and the first dims of tensor `b` will be calculated.
            If it's a list or tuple or oneflow.Tensor with more than one element,
            it must contain two array-like object, which represent the dims of tensor a and tensor b to be calculated.
        out (oneflow.Tensor): The tensor to save result (NOT IMPLEMENTED YET)
    Returns:
        oneflow.Tensor: The result tensor
    For example:
    .. code-block:: python
        >>> import oneflow as flow
        >>> a = flow.randn(3, 4, 5)
        >>> b = flow.randn(4, 5, 6)
        >>> flow.tensordot(a, b, dims=2).shape
        oneflow.Size([3, 6])
        >>> b = flow.randn(5, 6, 7)
        >>> flow.tensordot(a, b, dims=1).shape
        oneflow.Size([3, 4, 6, 7])
        >>> b = flow.randn(3, 4, 7)
        >>> flow.tensordot(a, b, dims=[[0, 1], [0, 1]]).shape
        oneflow.Size([5, 7])
    Note:
        Three common use cases are:
        - dims = 0 : tensor product :math:`a \otimes b`
        - dims = 1 : tensor dot product :math:`a \cdot b`
        - dims = 2 : (default) tensor double contraction :math:`a : b`
        The part of documentation is referenced from https://numpy.org/doc/stable/reference/generated/numpy.tensordot.html.
    Note:
        The operation is equivalent to the series of operations:
        - Permute the dimensions of the tensor A that require tensordot to the end
        - Permute the dimensions of the tensor B that require tensordot to the start
        - Reshape the permuted tensor A into a 2-dimensional tensor, where the size of the 0th dimension is the product of the dimensions that do not require dot product, and the size of the 1st dimension is the product of the dimensions that require dot product
        - Reshape the permuted tensor B into a 2-dimensional tensor, where the size of the 0th dimension is the product of the dimensions that require dot product, and the size of the 1st dimension is the product of the dimensions that do not require dot product
        - Calculate the matrix multiplication of reshaped tensor A and reshaped tensor B
        - Reshape the result of matrix multiplication, the target shape is the concatenation of the dimensions that do not require tensordot of tensor A and B
        This series of operations can be equivalently represented by the following code:
        .. code-block:: python
            >>> import oneflow as flow
            >>> a = flow.randn(2, 4, 3)
            >>> b = flow.randn(3, 4, 2)
            >>> dims = [[0, 2], [2, 0]]
            >>> permuted_a = a.permute(1, 0, 2) # 0, 2 are the dimensions requiring tensordot and are placed in the end in permuting
            >>> permuted_b = b.permute(2, 0, 1) # 2, 0 are the dimensions requiring tensordot and are placed at the beginning in permuting
            >>> reshaped_a = permuted_a.reshape(4, 2 * 3) # 4 is the dimensions of a that do not require tensordot
            >>> reshaped_b = permuted_b.reshape(2 * 3, 4) # 4 is the dimensions of a that do not require tensordot
            >>> matmul_result = flow.matmul(reshaped_a, reshaped_b)
            >>> result = matmul_result.reshape(4, 4) # 4, 4 are the concatenation of dimensions that do not require tensordot of a and b
            >>> flow.all(result == flow.tensordot(a, b, dims))
            tensor(True, dtype=oneflow.bool)
    ..
        Feature Stage of Operator [tensordot].
        - Maintainer List [@marigoold]
        - Current Stage [ ]
        - Alpha Stage Check List [ ]
          - API(Compatible with PyTorch 1.11, anything incompatible must be noted in API Doc.)[Yes]
          - Doc(API Doc must be provided and shown normally on the web page.)[Yes]
          - Functionality and its' Test [ ]
            - Functionality is highly compatible with PyTorch 1.11. [ ] (out parameter is not implemented yet)
            - eager local [Yes]
              - forward [Yes]
              - backward [Yes]
              - gpu [Yes]
              - cpu [Yes]
            - graph local [ ] (when the type of param `dims` is oneflow.Tensor, the tensor.item() will make graph fail)
              - forward [ ]
              - backward [ ]
              - gpu [ ]
              - cpu [ ]
          - Exception Handling
            - Exception Message and Hint must be provided [Yes]
        - Beta Stage Check List [ ]
          - API(High compatibility with PyTorch 1.11, shouldn't have anything incompatible for a naive reason.)[ ]
          - Doc(Same standard as Alpha Stage)[ ]
          - Functionality and its' Test [ ]
            - eager global [ ]
              - forward [ ]
              - backward [ ]
              - gpu [ ]
              - cpu [ ]
            - graph global [ ]
              - forward [ ]
              - backward [ ]
              - gpu [ ]
              - cpu [ ]
          - Performance and Scalability(Must be evaluated.)[ ]
            - CUDA kernel [ ]
            - CPU kernel [ ]
            - N nodes M devices [ ]
          - Exception Handling [ ]
            - Exception Message and Hint must be provided [ ]
            - Try you best to do Exception Recovery [ ]
        - Stable Stage Check List [ ]
          - API(Same standard as Beta Stage)[ ]
          - Doc(Same standard as Beta Stage)[ ]
          - Functionality and its' Test [ ]
            - fp16 and AMP [ ]
            - NHWC [ ]
          - Performance and Scalability(Must be evaluated.)[ ]
          - Exception Handling [ ]
    """,
)
| [
"oneflow.framework.docstr.utils.add_docstr"
] | [((660, 6844), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.tensordot', '"""\n tensordot(a, b, dims=Union[int, Tensor, Tuple[List[int], List[int]], List[List[int]]], out=None) -> Tensor\n \n Compute tensor dot along given dimensions.\n \n Given two tensors a and b, and dims which represent two lists containing dim indices, `tensordot` traverses the two\n lists and calculate the tensor dot along every dim pair.\n\n Args:\n a (oneflow.Tensor): The input tensor to compute tensordot\n b (oneflow.Tensor): The input tensor to compute tensordot\n dims (int or list or tuple or oneflow.Tensor):\n The dims to calculate tensordot.\n If it\'s an integer or oneflow.Tensor with only one element,\n the last dims of tensor `a` and the first dims of tensor `b` will be calculated.\n If it\'s a list or tuple or oneflow.Tensor with more than one element,\n it must contain two array-like object, which represent the dims of tensor a and tensor b to be calculated.\n out (oneflow.Tensor): The tensor to save result (NOT IMPLEMENTED YET)\n \n Returns:\n oneflow.Tensor: The result tensor\n\n For example:\n \n .. 
code-block:: python\n\n >>> import oneflow as flow\n >>> a = flow.randn(3, 4, 5)\n >>> b = flow.randn(4, 5, 6)\n >>> flow.tensordot(a, b, dims=2).shape\n oneflow.Size([3, 6])\n >>> b = flow.randn(5, 6, 7)\n >>> flow.tensordot(a, b, dims=1).shape\n oneflow.Size([3, 4, 6, 7])\n >>> b = flow.randn(3, 4, 7)\n >>> flow.tensordot(a, b, dims=[[0, 1], [0, 1]]).shape\n oneflow.Size([5, 7])\n \n Note:\n\n Three common use cases are:\n\n - dims = 0 : tensor product :math:`a \\\\otimes b`\n\n - dims = 1 : tensor dot product :math:`a \\\\cdot b`\n\n - dims = 2 : (default) tensor double contraction :math:`a : b`\n\n The part of documentation is referenced from https://numpy.org/doc/stable/reference/generated/numpy.tensordot.html.\n\n\n Note:\n The operation is equivalent to the series of operations:\n\n - Permute the dimensions of the tensor A that require tensordot to the end\n\n - Permute the dimensions of the tensor B that require tensordot to the start\n\n - Reshape the permuted tensor A into a 2-dimensional tensor, where the size of the 0th dimension is the product of the dimensions that do not require dot product, and the size of the 1st dimension is the product of the dimensions that require dot product\n\n - Reshape the permuted tensor B into a 2-dimensional tensor, where the size of the 0th dimension is the product of the dimensions that require dot product, and the size of the 1st dimension is the product of the dimensions that do not require dot product\n\n - Calculate the matrix multiplication of reshaped tensor A and reshaped tensor B\n\n - Reshape the result of matrix multiplication, the target shape is the concatenation of the dimensions that do not require tensordot of tensor A and B\n\n This series of operations can be equivalently represented by the following code:\n\n .. 
code-block:: python\n\n >>> import oneflow as flow\n >>> a = flow.randn(2, 4, 3)\n >>> b = flow.randn(3, 4, 2)\n >>> dims = [[0, 2], [2, 0]]\n >>> permuted_a = a.permute(1, 0, 2) # 0, 2 are the dimensions requiring tensordot and are placed in the end in permuting\n >>> permuted_b = b.permute(2, 0, 1) # 2, 0 are the dimensions requiring tensordot and are placed at the beginning in permuting\n >>> reshaped_a = permuted_a.reshape(4, 2 * 3) # 4 is the dimensions of a that do not require tensordot\n >>> reshaped_b = permuted_b.reshape(2 * 3, 4) # 4 is the dimensions of a that do not require tensordot\n >>> matmul_result = flow.matmul(reshaped_a, reshaped_b)\n >>> result = matmul_result.reshape(4, 4) # 4, 4 are the concatentation of dimensions that do not require tensordot of a and b\n >>> flow.all(result == flow.tensordot(a, b, dims))\n tensor(True, dtype=oneflow.bool)\n\n ..\n Feature Stage of Operator [tensordot].\n - Maintainer List [@marigoold]\n - Current Stage [ ]\n - Alpha Stage Check List [ ]\n - API(Compatible with PyTorch 1.11, anything incompatible must be noted in API Doc.)[Yes]\n - Doc(API Doc must be provided and showed normally on the web page.)[Yes]\n - Functionality and its\' Test [ ]\n - Functionality is highly compatiable with PyTorch 1.11. 
[ ] (out parameter is not implemented yet)\n - eager local [Yes]\n - forward [Yes]\n - backward [Yes]\n - gpu [Yes]\n - cpu [Yes]\n - graph local [ ] (when the type of param `dims` is oneflow.Tensor, the tensor.item() will make graph fail)\n - forward [ ]\n - backward [ ]\n - gpu [ ]\n - cpu [ ]\n - Exception Handling\n - Exception Message and Hint must be provided [Yes]\n - Beta Stage Check List [ ]\n - API(High compatibility with PyTorch 1.11, shouldn\'t have anything incompatible for a naive reason.)[ ]\n - Doc(Same standard as Alpha Stage)[ ]\n - Functionality and its\' Test [ ]\n - eager global [ ]\n - forward [ ]\n - backward [ ]\n - gpu [ ]\n - cpu [ ]\n - graph gloal [ ]\n - forward [ ]\n - backward [ ]\n - gpu [ ]\n - cpu [ ]\n - Performance and Scalability(Must be evaluated.)[ ]\n - CUDA kernel [ ]\n - CPU kernel [ ]\n - N nodes M devices [ ]\n - Exception Handling [ ]\n - Exception Message and Hint must be provided [ ]\n - Try you best to do Exception Recovery [ ]\n - Stable Stage Check List [ ]\n - API(Same standard as Beta Stage)[ ]\n - Doc(Same standard as Beta Stage)[ ]\n - Functionality and its\' Test [ ]\n - fp16 and AMP [ ]\n - NHWC [ ]\n - Performance and Scalability(Must be evaluated.)[ ]\n - Exception Handling [ ]\n """'], {}), '(oneflow.tensordot,\n """\n tensordot(a, b, dims=Union[int, Tensor, Tuple[List[int], List[int]], List[List[int]]], out=None) -> Tensor\n \n Compute tensor dot along given dimensions.\n \n Given two tensors a and b, and dims which represent two lists containing dim indices, `tensordot` traverses the two\n lists and calculate the tensor dot along every dim pair.\n\n Args:\n a (oneflow.Tensor): The input tensor to compute tensordot\n b (oneflow.Tensor): The input tensor to compute tensordot\n dims (int or list or tuple or oneflow.Tensor):\n The dims to calculate tensordot.\n If it\'s an integer or oneflow.Tensor with only one element,\n the last dims of tensor `a` and the first dims of tensor `b` will be calculated.\n If 
it\'s a list or tuple or oneflow.Tensor with more than one element,\n it must contain two array-like object, which represent the dims of tensor a and tensor b to be calculated.\n out (oneflow.Tensor): The tensor to save result (NOT IMPLEMENTED YET)\n \n Returns:\n oneflow.Tensor: The result tensor\n\n For example:\n \n .. code-block:: python\n\n >>> import oneflow as flow\n >>> a = flow.randn(3, 4, 5)\n >>> b = flow.randn(4, 5, 6)\n >>> flow.tensordot(a, b, dims=2).shape\n oneflow.Size([3, 6])\n >>> b = flow.randn(5, 6, 7)\n >>> flow.tensordot(a, b, dims=1).shape\n oneflow.Size([3, 4, 6, 7])\n >>> b = flow.randn(3, 4, 7)\n >>> flow.tensordot(a, b, dims=[[0, 1], [0, 1]]).shape\n oneflow.Size([5, 7])\n \n Note:\n\n Three common use cases are:\n\n - dims = 0 : tensor product :math:`a \\\\otimes b`\n\n - dims = 1 : tensor dot product :math:`a \\\\cdot b`\n\n - dims = 2 : (default) tensor double contraction :math:`a : b`\n\n The part of documentation is referenced from https://numpy.org/doc/stable/reference/generated/numpy.tensordot.html.\n\n\n Note:\n The operation is equivalent to the series of operations:\n\n - Permute the dimensions of the tensor A that require tensordot to the end\n\n - Permute the dimensions of the tensor B that require tensordot to the start\n\n - Reshape the permuted tensor A into a 2-dimensional tensor, where the size of the 0th dimension is the product of the dimensions that do not require dot product, and the size of the 1st dimension is the product of the dimensions that require dot product\n\n - Reshape the permuted tensor B into a 2-dimensional tensor, where the size of the 0th dimension is the product of the dimensions that require dot product, and the size of the 1st dimension is the product of the dimensions that do not require dot product\n\n - Calculate the matrix multiplication of reshaped tensor A and reshaped tensor B\n\n - Reshape the result of matrix multiplication, the target shape is the concatenation of the dimensions that do 
not require tensordot of tensor A and B\n\n This series of operations can be equivalently represented by the following code:\n\n .. code-block:: python\n\n >>> import oneflow as flow\n >>> a = flow.randn(2, 4, 3)\n >>> b = flow.randn(3, 4, 2)\n >>> dims = [[0, 2], [2, 0]]\n >>> permuted_a = a.permute(1, 0, 2) # 0, 2 are the dimensions requiring tensordot and are placed in the end in permuting\n >>> permuted_b = b.permute(2, 0, 1) # 2, 0 are the dimensions requiring tensordot and are placed at the beginning in permuting\n >>> reshaped_a = permuted_a.reshape(4, 2 * 3) # 4 is the dimensions of a that do not require tensordot\n >>> reshaped_b = permuted_b.reshape(2 * 3, 4) # 4 is the dimensions of a that do not require tensordot\n >>> matmul_result = flow.matmul(reshaped_a, reshaped_b)\n >>> result = matmul_result.reshape(4, 4) # 4, 4 are the concatentation of dimensions that do not require tensordot of a and b\n >>> flow.all(result == flow.tensordot(a, b, dims))\n tensor(True, dtype=oneflow.bool)\n\n ..\n Feature Stage of Operator [tensordot].\n - Maintainer List [@marigoold]\n - Current Stage [ ]\n - Alpha Stage Check List [ ]\n - API(Compatible with PyTorch 1.11, anything incompatible must be noted in API Doc.)[Yes]\n - Doc(API Doc must be provided and showed normally on the web page.)[Yes]\n - Functionality and its\' Test [ ]\n - Functionality is highly compatiable with PyTorch 1.11. 
[ ] (out parameter is not implemented yet)\n - eager local [Yes]\n - forward [Yes]\n - backward [Yes]\n - gpu [Yes]\n - cpu [Yes]\n - graph local [ ] (when the type of param `dims` is oneflow.Tensor, the tensor.item() will make graph fail)\n - forward [ ]\n - backward [ ]\n - gpu [ ]\n - cpu [ ]\n - Exception Handling\n - Exception Message and Hint must be provided [Yes]\n - Beta Stage Check List [ ]\n - API(High compatibility with PyTorch 1.11, shouldn\'t have anything incompatible for a naive reason.)[ ]\n - Doc(Same standard as Alpha Stage)[ ]\n - Functionality and its\' Test [ ]\n - eager global [ ]\n - forward [ ]\n - backward [ ]\n - gpu [ ]\n - cpu [ ]\n - graph gloal [ ]\n - forward [ ]\n - backward [ ]\n - gpu [ ]\n - cpu [ ]\n - Performance and Scalability(Must be evaluated.)[ ]\n - CUDA kernel [ ]\n - CPU kernel [ ]\n - N nodes M devices [ ]\n - Exception Handling [ ]\n - Exception Message and Hint must be provided [ ]\n - Try you best to do Exception Recovery [ ]\n - Stable Stage Check List [ ]\n - API(Same standard as Beta Stage)[ ]\n - Doc(Same standard as Beta Stage)[ ]\n - Functionality and its\' Test [ ]\n - fp16 and AMP [ ]\n - NHWC [ ]\n - Performance and Scalability(Must be evaluated.)[ ]\n - Exception Handling [ ]\n """\n )\n', (670, 6844), False, 'from oneflow.framework.docstr.utils import add_docstr\n')] |
import math
from numpy.lib.arraysetops import isin
# import torch
# import torch.nn as nn
# import torch.nn.functional as F
import oneflow as of
import oneflow.nn as nn
import oneflow.nn.functional as F
# from libs.components.scores import CosineScore
# class GE2ELoss(nn.Module):
#
# def __init__(self, init_w=10.0, init_b=-5.0, loss_method='softmax'):
# '''
# Implementation of the Generalized End-to-End loss defined in https://arxiv.org/abs/1710.10467 [1]
# Accepts an input of size (N, M, D)
# where N is the number of speakers in the batch,
# M is the number of utterances per speaker,
# and D is the dimensionality of the embedding vector (e.g. d-vector)
# Args:
# - init_w (float): defines the initial value of w in Equation (5) of [1]
# - init_b (float): definies the initial value of b in Equation (5) of [1]
# '''
# super(GE2ELoss, self).__init__()
# self.w = nn.Parameter(torch.tensor(init_w))
# self.b = nn.Parameter(torch.tensor(init_b))
# self.loss_method = loss_method
#
# assert self.loss_method in ['softmax', 'contrast']
#
# if self.loss_method == 'softmax':
# self.embed_loss = self.embed_loss_softmax
# if self.loss_method == 'contrast':
# self.embed_loss = self.embed_loss_contrast
#
# def calc_new_centroids(self, dvecs, centroids, spkr, utt):
# '''
# Calculates the new centroids excluding the reference utterance
# '''
# excl = torch.cat((dvecs[spkr,:utt], dvecs[spkr,utt+1:]))
# excl = torch.mean(excl, 0)
# new_centroids = []
# for i, centroid in enumerate(centroids):
# if i == spkr:
# new_centroids.append(excl)
# else:
# new_centroids.append(centroid)
# return torch.stack(new_centroids)
#
# def calc_cosine_sim(self, dvecs, centroids):
# '''
# Make the cosine similarity matrix with dims (N,M,N)
# '''
# cos_sim_matrix = []
# for spkr_idx, speaker in enumerate(dvecs):
# cs_row = []
# for utt_idx, utterance in enumerate(speaker):
# new_centroids = self.calc_new_centroids(dvecs, centroids, spkr_idx, utt_idx)
# # vector based cosine similarity for speed
# cs_row.append(torch.clamp(torch.mm(utterance.unsqueeze(1).transpose(0,1), new_centroids.transpose(0,1)) / (torch.norm(utterance) * torch.norm(new_centroids, dim=1)), 1e-6))
# cs_row = torch.cat(cs_row, dim=0)
# cos_sim_matrix.append(cs_row)
# return torch.stack(cos_sim_matrix)
#
# def embed_loss_softmax(self, dvecs, cos_sim_matrix):
# '''
# Calculates the loss on each embedding $L(e_{ji})$ by taking softmax
# '''
# N, M, _ = dvecs.shape
# L = []
# for j in range(N):
# L_row = []
# for i in range(M):
# L_row.append(-F.log_softmax(cos_sim_matrix[j,i], 0)[j])
# L_row = torch.stack(L_row)
# L.append(L_row)
# return torch.stack(L)
#
# def embed_loss_contrast(self, dvecs, cos_sim_matrix):
# '''
# Calculates the loss on each embedding $L(e_{ji})$ by contrast loss with closest centroid
# '''
# N, M, _ = dvecs.shape
# L = []
# for j in range(N):
# L_row = []
# for i in range(M):
# centroids_sigmoids = torch.sigmoid(cos_sim_matrix[j,i])
# excl_centroids_sigmoids = torch.cat((centroids_sigmoids[:j], centroids_sigmoids[j+1:]))
# L_row.append(1. - torch.sigmoid(cos_sim_matrix[j,i,j]) + torch.max(excl_centroids_sigmoids))
# L_row = torch.stack(L_row)
# L.append(L_row)
# return torch.stack(L)
#
# def forward(self, dvecs):
# '''
# Calculates the GE2E loss for an input of dimensions (num_speakers, num_utts_per_speaker, dvec_feats)
# '''
# #Calculate centroids
# centroids = torch.mean(dvecs, 1)
#
# #Calculate the cosine similarity matrix
# cos_sim_matrix = self.calc_cosine_sim(dvecs, centroids)
# print(cos_sim_matrix.shape)
# torch.clamp(self.w, 1e-6)
# cos_sim_matrix = cos_sim_matrix * self.w + self.b
# L = self.embed_loss(dvecs, cos_sim_matrix)
# return L.sum()
#
# class GE2E(nn.Module):
# def __init__(self):
# super(GE2E, self).__init__()
# self.w = nn.Parameter(torch.Tensor(1, 1), requires_grad = True)
# self.b = nn.Parameter(torch.Tensor(1, 1), requires_grad = True)
# self.cosine_score = CosineScore()
# self._init()
#
# def _init(self):
# nn.init.kaiming_normal_(self.w)
# nn.init.kaiming_normal_(self.b)
#
# def center(self, embedding):
# num_spks, num_utts, dim = embedding.size()
# x = embedding.repeat(1, num_utts, 1) # 重复n_utts次
# mask = torch.logical_not(torch.eye(num_utts)).repeat(num_spks, 1).view(-1).to(x.device) # mask掉本身
# masked_x = x.view(-1, dim)[mask].contiguous().view(num_spks * num_utts, -1, dim) # 每n_spks个划分为一组,分别对应mask掉的部分
# center = masked_x.mean(dim = 1)
# return center
#
# def get_prob(self, score):
# return self.w * score + self.b, score
#
# def forward(self, embedding):
# '''
# embedding: N, M, D (num_spks, num_utts, dim)
# '''
# center = self.center(embedding) # N * M, D
# num_spks, num_utts, dim = embedding.size()
# score_matrix = self.cosine_score(embedding.view(-1, dim), center)
# mask = torch.eye(num_utts, dtype = torch.bool).repeat(num_spks, num_spks).to(embedding.device) # mask
# score_matrix = score_matrix.view(-1)[mask.view(-1)].view(num_spks * num_utts, -1)
# score_matrix, _ = self.get_prob(score_matrix)
# loss_mask_matrix = torch.eye(mask.size(0), dtype = torch.long).to(embedding.device) # 候选单位阵
# loss_mask = loss_mask_matrix.view(-1)[mask.view(-1)] # .view(-1, 1)
# loss = -F.log_softmax(score_matrix, dim = 1).view(-1)
# loss = loss[loss_mask.bool()]
# return loss.sum()
#
# class GE2EBackend(nn.Module):
# def __init__(self):
# super(GE2EBackend, self).__init__()
# self.w = nn.Parameter(torch.Tensor(1, 1))
# self.b = nn.Parameter(torch.Tensor(1, 1))
# self.prob = nn.Sigmoid()
#
# self.bce = nn.BCELoss(reduction = 'sum')
# self._init()
#
# def _init(self):
# nn.init.kaiming_normal_(self.w)
# nn.init.kaiming_normal_(self.b)
#
# def get_prob(self, score):
# score_matrix = self.w * score + self.b # scaled cosine score
# score_matrix = self.prob(score_matrix) # sigmoid prob
# return score_matrix, score
#
# def forward(self, embedding, score_matrix, ground_truth, hard_num = None):
# '''
# score_matrix: num_spks * num_utts * num_spks
# '''
# num_spks, num_utts, _ = embedding.size()
# score_matrix, _ = self.get_prob(score_matrix)
# loss = -F.log_softmax(score_matrix.view(num_spks * num_utts, -1), dim = 1).view(-1)
# mask = ground_truth.bool()
# loss = loss[mask]
# if isinstance(hard_num, int):
# loss, _ = torch.topk(loss, k = hard_num)
# loss = loss.sum()
# labels = ground_truth.view(-1, 1).float()
# bce_loss = self.bce(score_matrix, labels)
# # loss = bce_loss + 0.1 * loss
# return loss, bce_loss, score_matrix.view(-1, 1)
#
# class NoiseConEstLoss(nn.Module):
# def __init__(self):
# super(NoiseConEstLoss, self).__init__()
#
# def forward(self, scores, labels):
# pass
#
# class SupConLoss(nn.Module):
# """Supervised Contrastive Learning: https://arxiv.org/pdf/2004.11362.pdf.
# It also supports the unsupervised contrastive loss in SimCLR"""
# def __init__(self, temperature=0.07, contrast_mode='all',
# base_temperature=0.07):
# super(SupConLoss, self).__init__()
# self.temperature = temperature
# self.contrast_mode = contrast_mode
# self.base_temperature = base_temperature
#
# def get_prob(self, features, labels):
# pass
#
# def forward(self, features, labels=None, mask=None):
# """Compute loss for model. If both `labels` and `mask` are None,
# it degenerates to SimCLR unsupervised loss:
# https://arxiv.org/pdf/2002.05709.pdf
# Args:
# features: hidden vector of shape [bsz, n_views, ...].
# labels: ground truth of shape [bsz].
# mask: contrastive mask of shape [bsz, bsz], mask_{i,j}=1 if sample j
# has the same class as sample i. Can be asymmetric.
# Returns:
# A loss scalar.
# """
# device = (torch.device('cuda')
# if features.is_cuda
# else torch.device('cpu'))
#
# if len(features.shape) < 3:
# raise ValueError('`features` needs to be [bsz, n_views, ...],'
# 'at least 3 dimensions are required')
# if len(features.shape) > 3:
# features = features.view(features.shape[0], features.shape[1], -1)
#
# batch_size = features.shape[0]
# if labels is not None and mask is not None:
# raise ValueError('Cannot define both `labels` and `mask`')
# elif labels is None and mask is None:
# mask = torch.eye(batch_size, dtype=torch.float32).to(device)
# elif labels is not None:
# labels = labels.contiguous().view(-1, 1)
# if labels.shape[0] != batch_size:
# raise ValueError('Num of labels does not match num of features')
# mask = torch.eq(labels, labels.T).float().to(device)
# else:
# mask = mask.float().to(device)
#
# contrast_count = features.shape[1]
# contrast_feature = torch.cat(torch.unbind(features, dim=1), dim=0)
# if self.contrast_mode == 'one':
# anchor_feature = features[:, 0]
# anchor_count = 1
# elif self.contrast_mode == 'all':
# anchor_feature = contrast_feature
# anchor_count = contrast_count
# else:
# raise ValueError('Unknown mode: {}'.format(self.contrast_mode))
#
# # compute logits
# anchor_dot_contrast = torch.div(
# torch.matmul(anchor_feature, contrast_feature.T),
# self.temperature)
# # for numerical stability
# logits_max, _ = torch.max(anchor_dot_contrast, dim=1, keepdim=True)
# logits = anchor_dot_contrast - logits_max.detach()
#
# # tile mask
# mask = mask.repeat(anchor_count, contrast_count)
# # mask-out self-contrast cases
# logits_mask = torch.scatter(
# torch.ones_like(mask),
# 1,
# torch.arange(batch_size * anchor_count).view(-1, 1).to(device),
# 0
# )
# mask = mask * logits_mask
#
# # compute log_prob
# exp_logits = torch.exp(logits) * logits_mask
# log_prob = logits - torch.log(exp_logits.sum(1, keepdim=True))
#
# # compute mean of log-likelihood over positive
# mean_log_prob_pos = (mask * log_prob).sum(1) / mask.sum(1)
#
# # loss
# loss = - (self.temperature / self.base_temperature) * mean_log_prob_pos
# loss = loss.view(anchor_count, batch_size).mean()
#
# return loss
#
# class BCELoss(nn.Module):
# def __init__(self, input_dim, num_classes = 1, affine = True, reduction = 'mean'):
# super(BCELoss, self).__init__()
# self.input_dim = input_dim
# self.num_classes = num_classes
# self.affine = affine
# if self.affine:
# self.logistic = nn.Linear(input_dim, num_classes)
# self.sigmoid = nn.Sigmoid()
# self.bce_loss = nn.BCELoss(reduction = reduction)
#
# def get_prob(self, inputs):
# if self.affine:
# logits = self.logistic(inputs)
# prob = self.sigmoid(logits)
# # return prob, logits
# return logits, prob
# else:
# prob = self.sigmoid(inputs)
# return inputs, prob
#
# def forward(self, inputs, labels):
# labels = labels.view(-1, 1).float()
# # prob, _ = self.get_prob(inputs)
# _, prob = self.get_prob(inputs)
# loss = self.bce_loss(prob, labels)
# return loss, prob
class CrossEntropy(nn.Module):
def __init__(self, embedding_size, num_classes, reduction = 'mean'):
super(CrossEntropy, self).__init__()
self.embedding_size = embedding_size
self.num_classes = num_classes
self.reduction = reduction
self.fc = nn.Linear(embedding_size, num_classes)
def get_prob(self, inputs):
logits = self.fc(inputs)
scores = F.softmax(logits, dim = 1)
return scores[:, 1], logits
def forward(self, embeddings, labels):
logits = self.fc(embeddings)
loss = F.cross_entropy(logits, labels, reduction = self.reduction)
return loss, logits
class ASoftmax(nn.Module):
    """Angular softmax (A-Softmax / SphereFace style) loss head.

    Currently an unimplemented stub: the constructor accepts but ignores its
    arguments and forward returns None.
    """
    def __init__(self, embedding_size, num_classes, margin):
        # NOTE(review): embedding_size / num_classes / margin are not stored yet.
        super(ASoftmax, self).__init__()
    def forward(self, embeddings, labels):
        # Stub: returns None until the angular-margin logic is implemented.
        pass
class AMSoftmax(nn.Module):
    """Additive-margin softmax (AM-Softmax / CosFace style) loss head.

    Scores are cosine similarities between L2-normalised embeddings and
    per-class weight vectors; the configured margin is subtracted from each
    sample's target-class score before scaling by `s` and applying CE.
    """

    def __init__(self, embedding_size, num_classes, s, margin):
        super(AMSoftmax, self).__init__()
        self.embedding_size = embedding_size
        self.num_classes = num_classes
        self.s = s
        self.margin = margin
        # One weight row per class; rows act as class centres on the unit sphere.
        self.weights = nn.Parameter(of.Tensor(num_classes, embedding_size))
        nn.init.kaiming_normal_(self.weights)
        self.cross_entropy = nn.CrossEntropyLoss()

    def forward(self, embeddings, labels):
        """Return (loss, cosine logits) for a batch of embeddings."""
        # Cosine similarity between unit-normalised embeddings and class weights.
        cos_logits = F.linear(F.l2_normalize(embeddings, dim = 1), F.l2_normalize(self.weights, dim = 1))
        # Build an additive penalty that is `margin` at each sample's target
        # class and zero everywhere else.
        penalty = of.scatter(of.zeros_like(cos_logits), 1, labels.view(-1, 1), self.margin)
        scaled_logits = self.s * (cos_logits - penalty)
        loss = self.cross_entropy(scaled_logits, labels)
        return loss, cos_logits
class LMCL(AMSoftmax):
    """Large-Margin Cosine Loss — an alias of AMSoftmax (CosFace)."""

    def __init__(self, embedding_size, num_classes, s, margin):
        super(LMCL, self).__init__(embedding_size, num_classes, s, margin)

    def forward(self, embeddings, labels):
        # Bug fix: the parent's result was previously computed and discarded,
        # so LMCL.forward returned None instead of (loss, logits).
        return super().forward(embeddings, labels)
class AAMSoftmax(nn.Module):
    """Additive angular margin softmax (AAM-Softmax / ArcFace style) head.

    Currently an unimplemented stub: no parameters are defined and forward
    returns None.
    """
    def __init__(self):
        super(AAMSoftmax, self).__init__()
    def forward(self, embeddings, labels):
        # Stub: no loss computed yet.
        pass
# class OnlineTripletLoss(nn.Module):
# def __init__(self, centers, margin, selector = 'hardest'):
# super(OnlineTripletLoss, self).__init__()
# self.margin = margin
# self.centers = F.normalize(centers)
# self.centers.requires_grad = False
# self.selector = selector
#
# def forward(self, embeddings, labels):
# embeddings = embeddings.cpu()
# cos_matrix = F.linear(embeddings, self.centers)# cos_matrix batch_size * 1211
# rows = torch.arange(embeddings.size(0))
# positive_cos = cos_matrix[rows, labels].view(-1,1) # 32 * 1
# idx = torch.ones((embeddings.size(0), self.centers.size(0)), dtype = rows.dtype) # 32 * 1211
# idx[rows, labels] = 0
# negative_cos_matrix = cos_matrix[idx > 0].view(embeddings.size(0), -1) # 32 * 1210
# loss_values = negative_cos_matrix + self.margin - positive_cos # 求出所有的loss 32 * 1210
# if self.selector == 'hardest': # 挑选出最大的loss
# loss_value, _ = torch.max(loss_values, dim = 1)
# if self.selector == 'hard':
# pass
# if self.selector == 'semihard':
# pass
# losses = F.relu(loss_value.view(-1,1))
# return losses.mean(), (loss_value > 0).sum().item()
class OnlineTripletLoss(nn.Module):
    """
    Online triplet loss over cosine similarity.

    Takes a batch of embeddings plus their labels; the pluggable
    `triplet_selector` mines (anchor, positive, negative) index triplets
    from the batch, and the loss hinges on the similarity gap with the
    configured margin.
    """

    def __init__(self, margin, triplet_selector):
        super(OnlineTripletLoss, self).__init__()
        self.margin = margin
        self.triplet_selector = triplet_selector

    def forward(self, embeddings, target):
        """Return (mean triplet loss, number of mined triplets)."""
        mined = self.triplet_selector.get_triplets(embeddings, target)
        if embeddings.is_cuda:
            mined = mined.cuda()
        anchors = embeddings[mined[:, 0]]
        pos_sim = F.cosine_similarity(anchors, embeddings[mined[:, 1]])
        neg_sim = F.cosine_similarity(anchors, embeddings[mined[:, 2]])
        # Penalise triplets whose negative is not at least `margin` less
        # similar to the anchor than the positive is.
        hinge = F.relu(neg_sim - pos_sim + self.margin)
        return hinge.mean(), len(mined)
class OnlineContrastiveLoss(nn.Module):
    """Online contrastive loss with a pluggable pair selector.

    The forward pass is an unimplemented stub: pair mining and the
    contrastive loss are not written yet, so it returns None.
    """
    def __init__(self, margin, pairs_selector):
        super(OnlineContrastiveLoss, self).__init__()
        self.margin = margin
        self.pairs_selector = pairs_selector
    def forward(self, embeddings, target):
        # Stub: mine pairs via self.pairs_selector, then compute the loss.
        pass
class FocalLoss(nn.Module):
    """Focal loss head — currently an unimplemented stub (forward returns None)."""
    def __init__(self):
        super(FocalLoss, self).__init__()
    def forward(self, embeddings, labels):
        # Stub: focal weighting not implemented yet.
        pass
if __name__ == '__main__':
    # Manual smoke-test entry point; the sample invocations below are disabled.
    pass
    # ocsoftmax = OCSoftmax(64)
    # inputs = torch.randn(4, 64)
    # target = torch.tensor([0,0,1,0], dtype = torch.int64)
    # output = ocsoftmax(inputs, target)
    # print(output)
    # ge2e = GE2E()
    # a = torch.randn(4,5,8, requires_grad=True)
    # loss = ge2e(a)
    # loss.backward()
| [
"oneflow.Tensor",
"oneflow.nn.init.kaiming_normal_",
"oneflow.nn.functional.softmax",
"oneflow.nn.functional.cosine_similarity",
"oneflow.nn.functional.relu",
"oneflow.nn.CrossEntropyLoss",
"oneflow.zeros_like",
"oneflow.nn.functional.l2_normalize",
"oneflow.nn.Linear",
"oneflow.nn.functional.cros... | [((13298, 13336), 'oneflow.nn.Linear', 'nn.Linear', (['embedding_size', 'num_classes'], {}), '(embedding_size, num_classes)\n', (13307, 13336), True, 'import oneflow.nn as nn\n'), ((13420, 13444), 'oneflow.nn.functional.softmax', 'F.softmax', (['logits'], {'dim': '(1)'}), '(logits, dim=1)\n', (13429, 13444), True, 'import oneflow.nn.functional as F\n'), ((13579, 13636), 'oneflow.nn.functional.cross_entropy', 'F.cross_entropy', (['logits', 'labels'], {'reduction': 'self.reduction'}), '(logits, labels, reduction=self.reduction)\n', (13594, 13636), True, 'import oneflow.nn.functional as F\n'), ((14205, 14242), 'oneflow.nn.init.kaiming_normal_', 'nn.init.kaiming_normal_', (['self.weights'], {}), '(self.weights)\n', (14228, 14242), True, 'import oneflow.nn as nn\n'), ((14272, 14293), 'oneflow.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (14291, 14293), True, 'import oneflow.nn as nn\n'), ((14457, 14478), 'oneflow.zeros_like', 'of.zeros_like', (['logits'], {}), '(logits)\n', (14470, 14478), True, 'import oneflow as of\n'), ((17093, 17168), 'oneflow.nn.functional.cosine_similarity', 'F.cosine_similarity', (['embeddings[triplets[:, 0]]', 'embeddings[triplets[:, 1]]'], {}), '(embeddings[triplets[:, 0]], embeddings[triplets[:, 1]])\n', (17112, 17168), True, 'import oneflow.nn.functional as F\n'), ((17196, 17271), 'oneflow.nn.functional.cosine_similarity', 'F.cosine_similarity', (['embeddings[triplets[:, 0]]', 'embeddings[triplets[:, 2]]'], {}), '(embeddings[triplets[:, 0]], embeddings[triplets[:, 2]])\n', (17215, 17271), True, 'import oneflow.nn.functional as F\n'), ((17298, 17335), 'oneflow.nn.functional.relu', 'F.relu', (['(an_cos - ap_cos + self.margin)'], {}), '(an_cos - ap_cos + self.margin)\n', (17304, 17335), True, 'import oneflow.nn.functional as F\n'), ((14157, 14195), 'oneflow.Tensor', 'of.Tensor', (['num_classes', 'embedding_size'], {}), '(num_classes, embedding_size)\n', (14166, 14195), True, 'import oneflow as 
of\n'), ((14364, 14397), 'oneflow.nn.functional.l2_normalize', 'F.l2_normalize', (['embeddings'], {'dim': '(1)'}), '(embeddings, dim=1)\n', (14378, 14397), True, 'import oneflow.nn.functional as F\n'), ((14401, 14436), 'oneflow.nn.functional.l2_normalize', 'F.l2_normalize', (['self.weights'], {'dim': '(1)'}), '(self.weights, dim=1)\n', (14415, 14436), True, 'import oneflow.nn.functional as F\n')] |
import oneflow.experimental as flow
import os
class OFRecordDataLoader(object):
    """Reads GLUE-style OFRecord files and decodes the BERT feature fields.

    One instance owns the record reader plus one raw decoder per feature;
    ``get_batch`` pulls a record batch and decodes every field.
    """

    def __init__(
        self,
        data_dir,
        batch_size,
        data_part_num,
        seq_length,
        part_name_prefix,
        dataset_size,
        shuffle=True,
    ):
        self.train_record_reader = flow.nn.OfrecordReader(
            data_dir,
            batch_size=batch_size,
            data_part_num=data_part_num,
            part_name_prefix=part_name_prefix,
            random_shuffle=shuffle,
            shuffle_after_epoch=shuffle,
        )
        self.dataset_size = dataset_size
        self.batch_size = batch_size
        # One raw decoder per OFRecord feature field.
        self.input_ids_decoder = flow.nn.OfrecordRawDecoder(
            "input_ids", [seq_length], dtype=flow.int32
        )
        self.input_mask_decoder = flow.nn.OfrecordRawDecoder(
            "input_mask", [seq_length], dtype=flow.int32
        )
        self.segment_ids_decoder = flow.nn.OfrecordRawDecoder(
            "segment_ids", [seq_length], dtype=flow.int32
        )
        self.label_ids_decoder = flow.nn.OfrecordRawDecoder(
            "label_ids", [1], dtype=flow.int32
        )
        self.is_real_example_decoder = flow.nn.OfrecordRawDecoder(
            "is_real_example", [1], dtype=flow.int32
        )

    def __len__(self):
        # Whole batches per epoch; a trailing partial batch is dropped.
        return self.dataset_size // self.batch_size

    def get_batch(self):
        """Read one record batch and return the decoded fields as a dict."""
        record = self.train_record_reader()
        return {
            "input_ids": self.input_ids_decoder(record),
            "input_mask": self.input_mask_decoder(record),
            "segment_ids": self.segment_ids_decoder(record),
            "label_ids": self.label_ids_decoder(record),
            "is_real_example": self.is_real_example_decoder(record),
        }
if __name__ == "__main__":
    # Smoke-test the loader against one SST-2 OFRecord shard.
    data_dir = '/remote-home/rpluo/Oneflow-Model-Compression/model_compress/data/glue_ofrecord_test/SST-2/train/'
    batch_size = 16
    data_part_num = 1
    seq_length = 128
    part_name_prefix = 'train.of_record-'
    shuffle = True
    flow.enable_eager_execution()
    flow.InitEagerGlobalSession()
    # Fix: `shuffle` was defined but never forwarded, so the reader always
    # used its default instead of the flag configured above.
    dataloader = OFRecordDataLoader(
        data_dir,
        batch_size,
        data_part_num,
        seq_length,
        part_name_prefix,
        67349,
        shuffle=shuffle,
    )
    result = dataloader.get_batch()
    print(result)
"oneflow.experimental.nn.OfrecordReader",
"oneflow.experimental.enable_eager_execution",
"oneflow.experimental.InitEagerGlobalSession",
"oneflow.experimental.nn.OfrecordRawDecoder"
] | [((2172, 2201), 'oneflow.experimental.enable_eager_execution', 'flow.enable_eager_execution', ([], {}), '()\n', (2199, 2201), True, 'import oneflow.experimental as flow\n'), ((2206, 2235), 'oneflow.experimental.InitEagerGlobalSession', 'flow.InitEagerGlobalSession', ([], {}), '()\n', (2233, 2235), True, 'import oneflow.experimental as flow\n'), ((232, 414), 'oneflow.experimental.nn.OfrecordReader', 'flow.nn.OfrecordReader', (['data_dir'], {'batch_size': 'batch_size', 'data_part_num': 'data_part_num', 'part_name_prefix': 'part_name_prefix', 'random_shuffle': 'shuffle', 'shuffle_after_epoch': 'shuffle'}), '(data_dir, batch_size=batch_size, data_part_num=\n data_part_num, part_name_prefix=part_name_prefix, random_shuffle=\n shuffle, shuffle_after_epoch=shuffle)\n', (254, 414), True, 'import oneflow.experimental as flow\n'), ((778, 849), 'oneflow.experimental.nn.OfrecordRawDecoder', 'flow.nn.OfrecordRawDecoder', (['"""input_ids"""', '[seq_length]'], {'dtype': 'flow.int32'}), "('input_ids', [seq_length], dtype=flow.int32)\n", (804, 849), True, 'import oneflow.experimental as flow\n'), ((884, 956), 'oneflow.experimental.nn.OfrecordRawDecoder', 'flow.nn.OfrecordRawDecoder', (['"""input_mask"""', '[seq_length]'], {'dtype': 'flow.int32'}), "('input_mask', [seq_length], dtype=flow.int32)\n", (910, 956), True, 'import oneflow.experimental as flow\n'), ((992, 1065), 'oneflow.experimental.nn.OfrecordRawDecoder', 'flow.nn.OfrecordRawDecoder', (['"""segment_ids"""', '[seq_length]'], {'dtype': 'flow.int32'}), "('segment_ids', [seq_length], dtype=flow.int32)\n", (1018, 1065), True, 'import oneflow.experimental as flow\n'), ((1099, 1161), 'oneflow.experimental.nn.OfrecordRawDecoder', 'flow.nn.OfrecordRawDecoder', (['"""label_ids"""', '[1]'], {'dtype': 'flow.int32'}), "('label_ids', [1], dtype=flow.int32)\n", (1125, 1161), True, 'import oneflow.experimental as flow\n'), ((1201, 1269), 'oneflow.experimental.nn.OfrecordRawDecoder', 'flow.nn.OfrecordRawDecoder', 
(['"""is_real_example"""', '[1]'], {'dtype': 'flow.int32'}), "('is_real_example', [1], dtype=flow.int32)\n", (1227, 1269), True, 'import oneflow.experimental as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import numpy as np
import cv2
import oneflow.experimental as flow
@flow.unittest.skip_unless_1n1d()
class TestImageDecode(flow.unittest.TestCase):
    def test_image_decode(test_case):
        """Decode JPEG bytes with flow.nn.image.decode and compare with cv2.imread."""
        image_paths = [
            "/dataset/mscoco_2017/val2017/000000000139.jpg",
            "/dataset/mscoco_2017/val2017/000000000632.jpg",
        ]
        raw_payloads = []
        for path in image_paths:
            with open(path, "rb") as fp:
                raw_payloads.append(fp.read())
        # Pad every byte string to the longest one so they fit a single array.
        padded_shape = (len(raw_payloads), max(len(b) for b in raw_payloads))
        image_decoder = flow.nn.image.decode(color_space="BGR")
        padded = np.zeros(padded_shape, dtype=np.int8)
        for row, payload in enumerate(raw_payloads):
            arr = np.frombuffer(payload, dtype=np.byte).reshape(1, -1)
            padded[row, : arr.shape[1]] = arr
        input = flow.Tensor(padded, dtype=flow.int8, device=flow.device("cpu"))
        images_buffer = flow.tensor_to_tensor_buffer(input, instance_dims=1)
        of_decoded_images = image_decoder(images_buffer).numpy()
        # Reference decode via OpenCV.
        cv2_decoded_images = [np.array(cv2.imread(path)) for path in image_paths]
        for of_img, cv_img in zip(of_decoded_images, cv2_decoded_images):
            test_case.assertTrue(len(of_img.shape) == 3)
            test_case.assertTrue(len(cv_img.shape) == 3)
            test_case.assertTrue(np.allclose(of_img, cv_img))
if __name__ == "__main__":
    # Run the image-decode test when this file is executed directly.
    unittest.main()
| [
"oneflow.experimental.unittest.skip_unless_1n1d",
"oneflow.experimental.tensor_to_tensor_buffer",
"oneflow.experimental.nn.image.decode",
"oneflow.experimental.device"
] | [((677, 709), 'oneflow.experimental.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (707, 709), True, 'import oneflow.experimental as flow\n'), ((2390, 2405), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2403, 2405), False, 'import unittest\n'), ((1226, 1265), 'oneflow.experimental.nn.image.decode', 'flow.nn.image.decode', ([], {'color_space': '"""BGR"""'}), "(color_space='BGR')\n", (1246, 1265), True, 'import oneflow.experimental as flow\n'), ((1418, 1455), 'numpy.zeros', 'np.zeros', (['static_shape'], {'dtype': 'np.int8'}), '(static_shape, dtype=np.int8)\n', (1426, 1455), True, 'import numpy as np\n'), ((1715, 1767), 'oneflow.experimental.tensor_to_tensor_buffer', 'flow.tensor_to_tensor_buffer', (['input'], {'instance_dims': '(1)'}), '(input, instance_dims=1)\n', (1743, 1767), True, 'import oneflow.experimental as flow\n'), ((1910, 1927), 'cv2.imread', 'cv2.imread', (['image'], {}), '(image)\n', (1920, 1927), False, 'import cv2\n'), ((1979, 1994), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (1987, 1994), True, 'import numpy as np\n'), ((1662, 1680), 'oneflow.experimental.device', 'flow.device', (['"""cpu"""'], {}), "('cpu')\n", (1673, 1680), True, 'import oneflow.experimental as flow\n'), ((2307, 2355), 'numpy.allclose', 'np.allclose', (['of_decoded_image', 'cv2_decoded_image'], {}), '(of_decoded_image, cv2_decoded_image)\n', (2318, 2355), True, 'import numpy as np\n'), ((1304, 1337), 'numpy.frombuffer', 'np.frombuffer', (['bys'], {'dtype': 'np.byte'}), '(bys, dtype=np.byte)\n', (1317, 1337), True, 'import numpy as np\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
from oneflow.test_utils.test_util import GenArgDict
import numpy as np
import oneflow as flow
import oneflow.unittest
from oneflow.test_utils.automated_test_util import *
def _test_id_shuffle(test_case, has_column_id, num_columns):
    """Round-trip ids through one_embedding_id_shuffle and check the inverse indices.

    The op output order is (num_unique_matrix, inverse_unique_partition_indices,
    cur_rank_num_unique, cur_rank_unique_ids, cur_rank_unique_column_ids,
    cur_rank_inverse_indices); the unpacking below depends on exactly that order.
    """
    batch_size = 512
    ids = np.random.randint(0, 1000, (batch_size, num_columns), dtype=np.int64)
    if has_column_id:
        column_ids = (
            ids % num_columns
        )  # same id must have same column id, so in this case get column_ids from ids
        column_ids_tensor = flow.tensor(
            column_ids.astype(np.int32), requires_grad=False
        ).to("cuda")
    else:
        column_ids_tensor = None
    ids_tensor = flow.tensor(ids, requires_grad=False).to("cuda")

    class TestGraph(flow.nn.Graph):
        def __init__(self):
            super().__init__()

        def build(self, ids, column_ids):
            (
                num_unique_matrix,
                inverse_unique_partition_indices,
                cur_rank_num_unique,
                cur_rank_unique_ids,
                cur_rank_unique_column_ids,
                cur_rank_inverse_indices,
            ) = flow._C.one_embedding_id_shuffle(ids, column_ids, num_columns)
            # Cast everything to int32 so the graph outputs plain index tensors.
            return (
                flow.cast(num_unique_matrix, flow.int32),
                flow.cast(inverse_unique_partition_indices, flow.int32),
                flow.cast(cur_rank_num_unique, flow.int32),
                flow.cast(cur_rank_unique_ids, flow.int32),
                flow.cast(cur_rank_unique_column_ids, flow.int32),
                flow.cast(cur_rank_inverse_indices, flow.int32),
            )

    graph = TestGraph()
    (
        num_unique_matrix,
        inverse_unique_partition_indices,
        cur_rank_num_unique,
        cur_rank_unique_ids,
        cur_rank_unique_column_ids,
        cur_rank_inverse_indices,
    ) = graph(ids_tensor, column_ids_tensor)
    # numpy reference for the unique count.
    np_unique_ids, np_inverse = np.unique(ids, return_inverse=True)
    np_num_unique = np_unique_ids.size
    test_case.assertTrue(np.array_equal(np_num_unique, num_unique_matrix[0]))
    test_case.assertTrue(np.array_equal(np_num_unique, cur_rank_num_unique[0]))
    # Applying both inverse maps must reconstruct the original ids.
    reversed_ids = cur_rank_unique_ids[cur_rank_inverse_indices][
        inverse_unique_partition_indices
    ]
    test_case.assertTrue(np.array_equal(reversed_ids.numpy(), ids))
    if has_column_id:
        reversed_column_ids = cur_rank_unique_column_ids[cur_rank_inverse_indices][
            inverse_unique_partition_indices
        ]
        test_case.assertTrue(np.array_equal(reversed_column_ids.numpy(), column_ids))
    # when has_column_id=False, we can not test column ids because in this case same ids not lead to same column id
def _test_embedding_shuffle(test_case, dtype):
    """Gather + embedding-shuffle through a graph must equal a plain numpy gather."""
    batch_size = 512
    num_columns = 26
    ids = np.random.randint(0, 1000, (batch_size, num_columns), dtype=np.int64)
    # same id must have same column id, so derive the column ids from the ids
    column_ids = ids % num_columns
    np_dtype = np.float16 if dtype == flow.float16 else np.float32
    table = np.random.rand(1000, 128).astype(np_dtype)
    ids_tensor = flow.tensor(ids, requires_grad=False).to("cuda")
    column_ids_tensor = flow.tensor(
        column_ids.astype(np.int32), requires_grad=False
    ).to("cuda")
    table_tensor = flow.tensor(table, requires_grad=False).to("cuda")

    class TestGraph(flow.nn.Graph):
        def __init__(self):
            super().__init__()

        def build(self, ids, column_ids, data):
            (
                num_unique_matrix,
                inverse_unique_partition_indices,
                _,
                cur_rank_unique_ids,
                _,
                cur_rank_inverse_indices,
            ) = flow._C.one_embedding_id_shuffle(ids, column_ids, num_columns)
            unique_embeddings = flow._C.gather(data, cur_rank_unique_ids, axis=0)
            return flow._C.one_embedding_embedding_shuffle(
                unique_embeddings,
                num_unique_matrix,
                cur_rank_inverse_indices,
                inverse_unique_partition_indices,
            )

    embeddings = TestGraph()(ids_tensor, column_ids_tensor, table_tensor)
    test_case.assertTrue(np.array_equal(embeddings.numpy(), table[ids]))
def _test_embedding_gradient_shuffle(test_case):
    """Scatter embedding gradients back to unique ids and compare with a numpy sum.

    For each unique id the op must accumulate (sum) the gradient rows of every
    occurrence of that id; the numpy loop below builds that reference.
    """
    batch_size = 512
    num_columns = 26
    embedding_size = 128
    ids = np.random.randint(0, 1000, (batch_size, num_columns), dtype=np.int64)
    column_ids = (
        ids % num_columns
    )  # same id must have same column id, so in this case get column_ids from ids
    embedding_grad = np.random.rand(batch_size, num_columns, embedding_size).astype(
        np.float32
    )
    ids_tensor = flow.tensor(ids, requires_grad=False).to("cuda")
    column_ids_tensor = flow.tensor(
        column_ids.astype(np.int32), requires_grad=False
    ).to("cuda")
    embedding_grad_tensor = flow.tensor(embedding_grad, requires_grad=False).to("cuda")

    class TestGraph(flow.nn.Graph):
        def __init__(self):
            super().__init__()

        def build(self, ids, column_ids, embedding_grad):
            (
                num_unique_matrix,
                inverse_unique_partition_indices,
                _,
                cur_rank_unique_ids,
                _,
                cur_rank_inverse_indices,
            ) = flow._C.one_embedding_id_shuffle(ids, column_ids, num_columns)
            cur_rank_unique_embedding_grad = flow._C.one_embedding_embedding_gradient_shuffle(
                embedding_grad,
                num_unique_matrix,
                cur_rank_inverse_indices,
                inverse_unique_partition_indices,
            )
            return (
                cur_rank_unique_embedding_grad,
                flow.cast(cur_rank_unique_ids, flow.int32),
                flow.cast(cur_rank_inverse_indices, flow.int32),
                flow.cast(inverse_unique_partition_indices, flow.int32),
            )

    graph = TestGraph()
    (
        cur_rank_unique_embedding_grad,
        cur_rank_unique_ids,
        cur_rank_inverse_indices,
        inverse_unique_partition_indices,
    ) = graph(ids_tensor, column_ids_tensor, embedding_grad_tensor)
    np_unique_ids, np_inverse = np.unique(ids, return_inverse=True)
    np_num_unique = np_unique_ids.size
    # numpy reference: row k is the sum of all gradient rows whose id is the k-th unique id.
    np_cur_rank_unique_embedding_grad = np.zeros(
        cur_rank_unique_embedding_grad.shape
    ).reshape(-1, embedding_size)
    for k in range(np_num_unique):
        np_cur_rank_unique_embedding_grad[k, :] = sum(
            embedding_grad.reshape(-1, embedding_size)[
                np.where(ids.flatten() == np_unique_ids[k])[0]
            ]
        )
    # Applying both inverse maps must reconstruct the original ids.
    reversed_ids = cur_rank_unique_ids[cur_rank_inverse_indices][
        inverse_unique_partition_indices
    ]
    test_case.assertTrue(np.array_equal(reversed_ids.numpy(), ids))
    test_case.assertTrue(
        np.allclose(
            cur_rank_unique_embedding_grad[cur_rank_inverse_indices][
                inverse_unique_partition_indices
            ]
            .numpy()
            .flatten(),
            np_cur_rank_unique_embedding_grad[np_inverse].flatten(),
            atol=1e-4,
            rtol=1e-4,
        )
    )
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
@flow.unittest.skip_unless_1n1d()
class DataShuffleTestCase(flow.unittest.TestCase):
    # NOTE(review): `os` is not imported at the top of this file; presumably it
    # is re-exported by the star import from automated_test_util — confirm,
    # otherwise the skipIf decorator raises NameError at import time.

    def test_id_shuffle(test_case):
        # Sweep every has_column_id x num_columns combination.
        arg_dict = OrderedDict()
        arg_dict["has_column_id"] = [True, False]
        arg_dict["num_columns"] = [1, 26]
        for kwargs in GenArgDict(arg_dict):
            _test_id_shuffle(test_case, **kwargs)

    def test_embedding_shuffle(test_case):
        # Exercise both single- and half-precision embedding tables.
        arg_dict = OrderedDict()
        arg_dict["dtype"] = [flow.float32, flow.float16]
        for kwargs in GenArgDict(arg_dict):
            _test_embedding_shuffle(test_case, **kwargs)

    def test_embedding_gradient_shuffle(test_case):
        # Empty arg dict: GenArgDict yields one empty kwargs, i.e. a single run.
        arg_dict = OrderedDict()
        for kwargs in GenArgDict(arg_dict):
            _test_embedding_gradient_shuffle(test_case, **kwargs)
if __name__ == "__main__":
    # Run the shuffle tests when this file is executed directly.
    unittest.main()
| [
"oneflow.test_utils.test_util.GenArgDict",
"oneflow._C.one_embedding_id_shuffle",
"oneflow.unittest.skip_unless_1n1d",
"oneflow.tensor",
"oneflow.cast",
"oneflow._C.one_embedding_embedding_shuffle",
"oneflow._C.one_embedding_embedding_gradient_shuffle",
"oneflow._C.gather"
] | [((8070, 8102), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (8100, 8102), True, 'import oneflow as flow\n'), ((909, 978), 'numpy.random.randint', 'np.random.randint', (['(0)', '(1000)', '(batch_size, num_columns)'], {'dtype': 'np.int64'}), '(0, 1000, (batch_size, num_columns), dtype=np.int64)\n', (926, 978), True, 'import numpy as np\n'), ((2573, 2608), 'numpy.unique', 'np.unique', (['ids'], {'return_inverse': '(True)'}), '(ids, return_inverse=True)\n', (2582, 2608), True, 'import numpy as np\n'), ((3451, 3520), 'numpy.random.randint', 'np.random.randint', (['(0)', '(1000)', '(batch_size, num_columns)'], {'dtype': 'np.int64'}), '(0, 1000, (batch_size, num_columns), dtype=np.int64)\n', (3468, 3520), True, 'import numpy as np\n'), ((5169, 5238), 'numpy.random.randint', 'np.random.randint', (['(0)', '(1000)', '(batch_size, num_columns)'], {'dtype': 'np.int64'}), '(0, 1000, (batch_size, num_columns), dtype=np.int64)\n', (5186, 5238), True, 'import numpy as np\n'), ((7017, 7052), 'numpy.unique', 'np.unique', (['ids'], {'return_inverse': '(True)'}), '(ids, return_inverse=True)\n', (7026, 7052), True, 'import numpy as np\n'), ((8873, 8888), 'unittest.main', 'unittest.main', ([], {}), '()\n', (8886, 8888), False, 'import unittest\n'), ((2673, 2724), 'numpy.array_equal', 'np.array_equal', (['np_num_unique', 'num_unique_matrix[0]'], {}), '(np_num_unique, num_unique_matrix[0])\n', (2687, 2724), True, 'import numpy as np\n'), ((2751, 2804), 'numpy.array_equal', 'np.array_equal', (['np_num_unique', 'cur_rank_num_unique[0]'], {}), '(np_num_unique, cur_rank_num_unique[0])\n', (2765, 2804), True, 'import numpy as np\n'), ((8209, 8222), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (8220, 8222), False, 'from collections import OrderedDict\n'), ((8337, 8357), 'oneflow.test_utils.test_util.GenArgDict', 'GenArgDict', (['arg_dict'], {}), '(arg_dict)\n', (8347, 8357), False, 'from oneflow.test_utils.test_util import 
GenArgDict\n'), ((8472, 8485), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (8483, 8485), False, 'from collections import OrderedDict\n'), ((8565, 8585), 'oneflow.test_utils.test_util.GenArgDict', 'GenArgDict', (['arg_dict'], {}), '(arg_dict)\n', (8575, 8585), False, 'from oneflow.test_utils.test_util import GenArgDict\n'), ((8716, 8729), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (8727, 8729), False, 'from collections import OrderedDict\n'), ((8752, 8772), 'oneflow.test_utils.test_util.GenArgDict', 'GenArgDict', (['arg_dict'], {}), '(arg_dict)\n', (8762, 8772), False, 'from oneflow.test_utils.test_util import GenArgDict\n'), ((1324, 1361), 'oneflow.tensor', 'flow.tensor', (['ids'], {'requires_grad': '(False)'}), '(ids, requires_grad=False)\n', (1335, 1361), True, 'import oneflow as flow\n'), ((1787, 1849), 'oneflow._C.one_embedding_id_shuffle', 'flow._C.one_embedding_id_shuffle', (['ids', 'column_ids', 'num_columns'], {}), '(ids, column_ids, num_columns)\n', (1819, 1849), True, 'import oneflow as flow\n'), ((3760, 3785), 'numpy.random.rand', 'np.random.rand', (['(1000)', '(128)'], {}), '(1000, 128)\n', (3774, 3785), True, 'import numpy as np\n'), ((3820, 3857), 'oneflow.tensor', 'flow.tensor', (['ids'], {'requires_grad': '(False)'}), '(ids, requires_grad=False)\n', (3831, 3857), True, 'import oneflow as flow\n'), ((3998, 4036), 'oneflow.tensor', 'flow.tensor', (['data'], {'requires_grad': '(False)'}), '(data, requires_grad=False)\n', (4009, 4036), True, 'import oneflow as flow\n'), ((4425, 4487), 'oneflow._C.one_embedding_id_shuffle', 'flow._C.one_embedding_id_shuffle', (['ids', 'column_ids', 'num_columns'], {}), '(ids, column_ids, num_columns)\n', (4457, 4487), True, 'import oneflow as flow\n'), ((4520, 4569), 'oneflow._C.gather', 'flow._C.gather', (['data', 'cur_rank_unique_ids'], {'axis': '(0)'}), '(data, cur_rank_unique_ids, axis=0)\n', (4534, 4569), True, 'import oneflow as flow\n'), ((4595, 4740), 
'oneflow._C.one_embedding_embedding_shuffle', 'flow._C.one_embedding_embedding_shuffle', (['unique_embeddings', 'num_unique_matrix', 'cur_rank_inverse_indices', 'inverse_unique_partition_indices'], {}), '(unique_embeddings,\n num_unique_matrix, cur_rank_inverse_indices,\n inverse_unique_partition_indices)\n', (4634, 4740), True, 'import oneflow as flow\n'), ((5388, 5443), 'numpy.random.rand', 'np.random.rand', (['batch_size', 'num_columns', 'embedding_size'], {}), '(batch_size, num_columns, embedding_size)\n', (5402, 5443), True, 'import numpy as np\n'), ((5494, 5531), 'oneflow.tensor', 'flow.tensor', (['ids'], {'requires_grad': '(False)'}), '(ids, requires_grad=False)\n', (5505, 5531), True, 'import oneflow as flow\n'), ((5682, 5730), 'oneflow.tensor', 'flow.tensor', (['embedding_grad'], {'requires_grad': '(False)'}), '(embedding_grad, requires_grad=False)\n', (5693, 5730), True, 'import oneflow as flow\n'), ((6129, 6191), 'oneflow._C.one_embedding_id_shuffle', 'flow._C.one_embedding_id_shuffle', (['ids', 'column_ids', 'num_columns'], {}), '(ids, column_ids, num_columns)\n', (6161, 6191), True, 'import oneflow as flow\n'), ((6237, 6388), 'oneflow._C.one_embedding_embedding_gradient_shuffle', 'flow._C.one_embedding_embedding_gradient_shuffle', (['embedding_grad', 'num_unique_matrix', 'cur_rank_inverse_indices', 'inverse_unique_partition_indices'], {}), '(embedding_grad,\n num_unique_matrix, cur_rank_inverse_indices,\n inverse_unique_partition_indices)\n', (6285, 6388), True, 'import oneflow as flow\n'), ((7132, 7178), 'numpy.zeros', 'np.zeros', (['cur_rank_unique_embedding_grad.shape'], {}), '(cur_rank_unique_embedding_grad.shape)\n', (7140, 7178), True, 'import numpy as np\n'), ((1887, 1927), 'oneflow.cast', 'flow.cast', (['num_unique_matrix', 'flow.int32'], {}), '(num_unique_matrix, flow.int32)\n', (1896, 1927), True, 'import oneflow as flow\n'), ((1945, 2000), 'oneflow.cast', 'flow.cast', (['inverse_unique_partition_indices', 'flow.int32'], {}), 
'(inverse_unique_partition_indices, flow.int32)\n', (1954, 2000), True, 'import oneflow as flow\n'), ((2018, 2060), 'oneflow.cast', 'flow.cast', (['cur_rank_num_unique', 'flow.int32'], {}), '(cur_rank_num_unique, flow.int32)\n', (2027, 2060), True, 'import oneflow as flow\n'), ((2078, 2120), 'oneflow.cast', 'flow.cast', (['cur_rank_unique_ids', 'flow.int32'], {}), '(cur_rank_unique_ids, flow.int32)\n', (2087, 2120), True, 'import oneflow as flow\n'), ((2138, 2187), 'oneflow.cast', 'flow.cast', (['cur_rank_unique_column_ids', 'flow.int32'], {}), '(cur_rank_unique_column_ids, flow.int32)\n', (2147, 2187), True, 'import oneflow as flow\n'), ((2205, 2252), 'oneflow.cast', 'flow.cast', (['cur_rank_inverse_indices', 'flow.int32'], {}), '(cur_rank_inverse_indices, flow.int32)\n', (2214, 2252), True, 'import oneflow as flow\n'), ((6545, 6587), 'oneflow.cast', 'flow.cast', (['cur_rank_unique_ids', 'flow.int32'], {}), '(cur_rank_unique_ids, flow.int32)\n', (6554, 6587), True, 'import oneflow as flow\n'), ((6605, 6652), 'oneflow.cast', 'flow.cast', (['cur_rank_inverse_indices', 'flow.int32'], {}), '(cur_rank_inverse_indices, flow.int32)\n', (6614, 6652), True, 'import oneflow as flow\n'), ((6670, 6725), 'oneflow.cast', 'flow.cast', (['inverse_unique_partition_indices', 'flow.int32'], {}), '(inverse_unique_partition_indices, flow.int32)\n', (6679, 6725), True, 'import oneflow as flow\n')] |
import oneflow as flow
from oneflow import nn
class OfRecordDataLoader(nn.Module):
    """Module that reads BERT pre-training OFRecord files and decodes the feature fields.

    Each ``forward()`` call returns one decoded batch as a 7-tuple of tensors.
    In consistent mode the placement/sbp of the reader depends on whether there
    are enough data parts for every rank.
    """

    def __init__(
        self,
        ofrecord_dir: str,
        mode: str,
        dataset_size: int,
        batch_size: int,
        data_part_num: int,
        seq_length: int,
        max_predictions_per_seq: int,
        consistent: bool = False,
    ):
        super().__init__()
        self.placement = None
        self.sbp = None
        self.use_consistent = consistent
        self.data_part_num = data_part_num
        if self.use_consistent:
            self.world_size = flow.env.get_world_size()
            if data_part_num < self.world_size:
                # Fewer file parts than ranks: read on rank 0 only and broadcast.
                self.placement = flow.placement("cpu", {0: [0]})
                self.sbp = flow.sbp.broadcast
            else:
                # Enough parts for every rank: split the batch along dim 0.
                self.placement = flow.placement("cpu", {0: range(self.world_size)})
                self.sbp = flow.sbp.split(0)
        self.ofrecord_reader = nn.OfrecordReader(
            ofrecord_dir,
            batch_size=batch_size,
            data_part_num=data_part_num,
            random_shuffle=True if mode == "train" else False,
            shuffle_after_epoch=True if mode == "train" else False,
            placement=self.placement,
            sbp=self.sbp,
        )
        blob_confs = {}

        def _blob_conf(name, shape, dtype=flow.int32):
            # Register one raw decoder per named OFRecord feature field.
            blob_confs[name] = nn.OfrecordRawDecoder(name, shape=shape, dtype=dtype)

        _blob_conf("input_ids", [seq_length])
        _blob_conf("next_sentence_labels", [1])
        _blob_conf("input_mask", [seq_length])
        _blob_conf("segment_ids", [seq_length])
        _blob_conf("masked_lm_ids", [max_predictions_per_seq])
        _blob_conf("masked_lm_positions", [max_predictions_per_seq])
        _blob_conf("masked_lm_weights", [max_predictions_per_seq], flow.float)
        self.blob_confs = blob_confs
        self.batch_size = batch_size
        self.dataset_size = dataset_size

    def __len__(self):
        # Whole batches per epoch; any trailing partial batch is dropped.
        return self.dataset_size // self.batch_size

    def forward(self):
        """Read one record batch and decode every feature field."""
        data_record = self.ofrecord_reader()  # get an item
        input_ids = self.blob_confs["input_ids"](data_record)
        next_sent_labels = self.blob_confs["next_sentence_labels"](data_record)
        input_mask = self.blob_confs["input_mask"](data_record)
        segment_ids = self.blob_confs["segment_ids"](data_record)
        masked_lm_ids = self.blob_confs["masked_lm_ids"](data_record)
        masked_lm_positions = self.blob_confs["masked_lm_positions"](data_record)
        masked_lm_weights = self.blob_confs["masked_lm_weights"](data_record)
        if self.use_consistent and self.data_part_num < self.world_size:
            # Broadcast path: convert every decoded tensor to a consistent
            # tensor with split(0) sbp across all ranks.
            placement = flow.placement("cpu", {0: range(self.world_size)})
            sbp = flow.sbp.split(0)
            input_ids = input_ids.to_consistent(placement=placement, sbp=sbp)
            next_sent_labels = next_sent_labels.to_consistent(
                placement=placement, sbp=sbp
            )
            input_mask = input_mask.to_consistent(placement=placement, sbp=sbp)
            segment_ids = segment_ids.to_consistent(placement=placement, sbp=sbp)
            masked_lm_ids = masked_lm_ids.to_consistent(placement=placement, sbp=sbp)
            masked_lm_positions = masked_lm_positions.to_consistent(
                placement=placement, sbp=sbp
            )
            masked_lm_weights = masked_lm_weights.to_consistent(
                placement=placement, sbp=sbp
            )
        return (
            input_ids,
            next_sent_labels,
            input_mask,
            segment_ids,
            masked_lm_ids,
            masked_lm_positions,
            masked_lm_weights,
        )
| [
"oneflow.nn.OfrecordReader",
"oneflow.env.get_world_size",
"oneflow.placement",
"oneflow.nn.OfrecordRawDecoder",
"oneflow.sbp.split"
] | [((940, 1184), 'oneflow.nn.OfrecordReader', 'nn.OfrecordReader', (['ofrecord_dir'], {'batch_size': 'batch_size', 'data_part_num': 'data_part_num', 'random_shuffle': "(True if mode == 'train' else False)", 'shuffle_after_epoch': "(True if mode == 'train' else False)", 'placement': 'self.placement', 'sbp': 'self.sbp'}), "(ofrecord_dir, batch_size=batch_size, data_part_num=\n data_part_num, random_shuffle=True if mode == 'train' else False,\n shuffle_after_epoch=True if mode == 'train' else False, placement=self.\n placement, sbp=self.sbp)\n", (957, 1184), False, 'from oneflow import nn\n'), ((576, 601), 'oneflow.env.get_world_size', 'flow.env.get_world_size', ([], {}), '()\n', (599, 601), True, 'import oneflow as flow\n'), ((1378, 1431), 'oneflow.nn.OfrecordRawDecoder', 'nn.OfrecordRawDecoder', (['name'], {'shape': 'shape', 'dtype': 'dtype'}), '(name, shape=shape, dtype=dtype)\n', (1399, 1431), False, 'from oneflow import nn\n'), ((2778, 2795), 'oneflow.sbp.split', 'flow.sbp.split', (['(0)'], {}), '(0)\n', (2792, 2795), True, 'import oneflow as flow\n'), ((683, 716), 'oneflow.placement', 'flow.placement', (['"""cpu"""', '{(0): [0]}'], {}), "('cpu', {(0): [0]})\n", (697, 716), True, 'import oneflow as flow\n'), ((890, 907), 'oneflow.sbp.split', 'flow.sbp.split', (['(0)'], {}), '(0)\n', (904, 907), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from typing import Union
import oneflow as flow
from oneflow.framework.tensor import register_tensor_op
@register_tensor_op("negative")
def negative_op(input):
"""This operator computes the negative value of Tensor.
Args:
input (oneflow.Tensor): A Tensor
Returns:
oneflow.Tensor: The result Tensor
For example:
.. code-block:: python
>>> import numpy as np
>>> import oneflow as flow
>>> input = flow.Tensor(
... np.array([1.0, -1.0, 2.3]).astype(np.float32), dtype=flow.float32
... )
>>> out = flow.negative(input)
>>> out
tensor([-1.0000, 1.0000, -2.3000], dtype=oneflow.float32)
"""
return flow._C.negative(input)
@register_tensor_op("type_as")
def type_as_op(input, target):
r"""Returns this tensor cast to the type of the given tensor.
This is a no-op if the tensor is already of the correct type.
Args:
input (Tensor): the input tensor.
target (Tensor): the tensor which has the desired type.
For example:
.. code-block:: python
>>> import oneflow as flow
>>> import numpy as np
>>> input = flow.Tensor(np.random.randn(1, 2, 3), dtype=flow.float32)
>>> target = flow.Tensor(np.random.randn(4, 5, 6), dtype = flow.int32)
>>> input = input.type_as(target)
>>> input.dtype
oneflow.int32
"""
return input.to(dtype=target.dtype)
@register_tensor_op("int")
def int(input):
r"""`Tensor.int()` is equivalent to `Tensor.to(flow.int32)`. See to().
Args:
input (Tensor): the input tensor.
For example:
.. code-block:: python
>>> import oneflow as flow
>>> import numpy as np
>>> input = flow.Tensor(np.random.randn(1, 2, 3), dtype=flow.float32)
>>> input = input.int()
>>> input.dtype
oneflow.int32
"""
return input.to(dtype=flow.int32)
@register_tensor_op("long")
def long(input):
r"""`Tensor.long()` is equivalent to `Tensor.to(flow.int64)`. See to().
Args:
input (Tensor): the input tensor.
For example:
.. code-block:: python
>>> import oneflow as flow
>>> import numpy as np
>>> input = flow.Tensor(np.random.randn(1, 2, 3), dtype=flow.float32)
>>> input = input.long()
>>> input.dtype
oneflow.int64
"""
return input.to(dtype=flow.int64)
@register_tensor_op("float")
def float(input):
r"""`Tensor.float()` is equivalent to `Tensor.to(flow.float32)`. See to().
Args:
input (Tensor): the input tensor.
For example:
.. code-block:: python
>>> import oneflow as flow
>>> import numpy as np
>>> input = flow.Tensor(np.random.randn(1, 2, 3), dtype=flow.int)
>>> input = input.float()
>>> input.dtype
oneflow.float32
"""
return input.to(dtype=flow.float32)
@register_tensor_op("double")
def double(input):
r"""`Tensor.double()` is equivalent to `Tensor.to(flow.float64)`. See to().
Args:
input (Tensor): the input tensor.
For example:
.. code-block:: python
>>> import oneflow as flow
>>> import numpy as np
>>> input = flow.Tensor(np.random.randn(1, 2, 3), dtype=flow.int)
>>> input = input.double()
>>> input.dtype
oneflow.float64
"""
return input.to(dtype=flow.float64)
@register_tensor_op("is_floating_point")
def is_floating_point(input):
r"""Returns True if the data type of input is a floating point data type i.e., one of flow.float64, flow.float32, flow.float16.
Args:
input (Tensor): the input tensor.
For example:
.. code-block:: python
>>> import oneflow as flow
>>> input = flow.Tensor([1, 2, 3, 4, 5], dtype=flow.int)
>>> output = flow.is_floating_point(input)
>>> output
False
"""
if input.dtype in (flow.float, flow.float16, flow.float32, flow.float64):
return True
return False
@register_tensor_op("cpu")
def cpu(input):
r"""Returns a copy of this object in CPU memory.
If this object is already in CPU memory and on the correct device, then no copy is performed and the original object is returned.
For example:
.. code-block:: python
>>> import oneflow as flow
>>> input = flow.Tensor([1, 2, 3, 4, 5], device=flow.device("cuda"))
>>> output = input.cpu()
>>> output.device
device(type='cpu', index=0)
"""
return input.to(device="cpu")
@register_tensor_op("cuda")
def cuda(input, device: Union[int, str, flow.device] = None):
    r"""Returns a copy of this object in CUDA memory.
    If this object is already in CUDA memory and on the correct device, then no copy is performed and the original object is returned.
    Args:
        device (int, str, or flow.device): The destination GPU device. Defaults to the current CUDA device.
    For example:
    .. code-block:: python
        >>> import oneflow as flow
        >>> input = flow.Tensor([1, 2, 3, 4, 5])
        >>> output = input.cuda()
        >>> output.device
        device(type='cuda', index=0)
    """
    if device is None:
        device = "cuda"
    # BUGFIX: the original wrote `device is isinstance(int)`, which raises
    # TypeError (isinstance needs two arguments) whenever device is not None.
    # The intent was to map an integer device index to a "cuda:<idx>" string.
    elif isinstance(device, int):
        device = "cuda:" + str(device)
    return input.to(device=device)
@register_tensor_op("item")
def item_op(input):
    r"""Returns the value of this tensor as a standard Python number. This only works for tensors with one element.
    For other cases, see tolist().
    This operation is not differentiable.
    Args:
        input (Tensor): the input tensor.
    For example:
    .. code-block:: python
        >>> import oneflow as flow
        >>> x = flow.tensor([1.0])
        >>> x.item()
        1.0
    """
    assert input.numel() == 1, "Only a Tensor with 1 element can be converted to Scalar"
    # Materialize on the host via numpy, then unwrap the scalar.
    return input.numpy().item()
@register_tensor_op("tolist")
def tolist_op(input):
    r"""Returns the tensor as a (nested) list. For scalars, a standard Python number is returned,
    just like with `item()`. Tensors are automatically moved to the CPU first if necessary.
    This operation is not differentiable.
    Args:
        input (Tensor): the input tensor.
    For example:
    .. code-block:: python
        >>> import oneflow as flow
        >>> input = flow.tensor([[1,2,3], [4,5,6]])
        >>> input.tolist()
        [[1, 2, 3], [4, 5, 6]]
    """
    # A 0-dim single-element tensor becomes a plain Python scalar, mirroring item().
    if input.numel() == 1 and input.ndim == 0:
        return input.item()
    # Otherwise convert via numpy, which produces nested Python lists.
    return input.numpy().tolist()
# Run the doctests embedded in the docstrings above when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod(raise_on_error=True)
| [
"oneflow._C.negative",
"oneflow.framework.tensor.register_tensor_op"
] | [((698, 728), 'oneflow.framework.tensor.register_tensor_op', 'register_tensor_op', (['"""negative"""'], {}), "('negative')\n", (716, 728), False, 'from oneflow.framework.tensor import register_tensor_op\n'), ((1340, 1369), 'oneflow.framework.tensor.register_tensor_op', 'register_tensor_op', (['"""type_as"""'], {}), "('type_as')\n", (1358, 1369), False, 'from oneflow.framework.tensor import register_tensor_op\n'), ((2074, 2099), 'oneflow.framework.tensor.register_tensor_op', 'register_tensor_op', (['"""int"""'], {}), "('int')\n", (2092, 2099), False, 'from oneflow.framework.tensor import register_tensor_op\n'), ((2572, 2598), 'oneflow.framework.tensor.register_tensor_op', 'register_tensor_op', (['"""long"""'], {}), "('long')\n", (2590, 2598), False, 'from oneflow.framework.tensor import register_tensor_op\n'), ((3074, 3101), 'oneflow.framework.tensor.register_tensor_op', 'register_tensor_op', (['"""float"""'], {}), "('float')\n", (3092, 3101), False, 'from oneflow.framework.tensor import register_tensor_op\n'), ((3582, 3610), 'oneflow.framework.tensor.register_tensor_op', 'register_tensor_op', (['"""double"""'], {}), "('double')\n", (3600, 3610), False, 'from oneflow.framework.tensor import register_tensor_op\n'), ((4094, 4133), 'oneflow.framework.tensor.register_tensor_op', 'register_tensor_op', (['"""is_floating_point"""'], {}), "('is_floating_point')\n", (4112, 4133), False, 'from oneflow.framework.tensor import register_tensor_op\n'), ((4716, 4741), 'oneflow.framework.tensor.register_tensor_op', 'register_tensor_op', (['"""cpu"""'], {}), "('cpu')\n", (4734, 4741), False, 'from oneflow.framework.tensor import register_tensor_op\n'), ((5253, 5279), 'oneflow.framework.tensor.register_tensor_op', 'register_tensor_op', (['"""cuda"""'], {}), "('cuda')\n", (5271, 5279), False, 'from oneflow.framework.tensor import register_tensor_op\n'), ((6043, 6069), 'oneflow.framework.tensor.register_tensor_op', 'register_tensor_op', (['"""item"""'], {}), "('item')\n", (6061, 
6069), False, 'from oneflow.framework.tensor import register_tensor_op\n'), ((6621, 6649), 'oneflow.framework.tensor.register_tensor_op', 'register_tensor_op', (['"""tolist"""'], {}), "('tolist')\n", (6639, 6649), False, 'from oneflow.framework.tensor import register_tensor_op\n'), ((1313, 1336), 'oneflow._C.negative', 'flow._C.negative', (['input'], {}), '(input)\n', (1329, 1336), True, 'import oneflow as flow\n'), ((7322, 7358), 'doctest.testmod', 'doctest.testmod', ([], {'raise_on_error': '(True)'}), '(raise_on_error=True)\n', (7337, 7358), False, 'import doctest\n')] |
import os,time
import numpy as np
import cv2
import oneflow as flow
from .reid_model import resreid
from .restrack_reid import *
## TODO:
# Convert bbox_tlwh and ori_img into numpy matrices, process them in batches,
# and extract the features.
class Extractor():
    """Feature extractor for tracking: either ReID embeddings (via a OneFlow
    model checkpoint) or OpenCV HOG descriptors, selected by `cls`/`feature_type`.

    NOTE(review): `self.hog` is only created when cls != 0, and the model
    checkpoint is only loaded when cls == 0 — calling with the mismatched
    feature_type afterwards will fail; confirm this asymmetry is intended.
    """
    def __init__(self, model_name, load_path, gpu_ids='0', use_gpu=True, height=256, width=128, seed=1, cls=0):
        self.model_name = model_name
        self.load_path = load_path
        self.gpu_ids = gpu_ids
        self.use_gpu = use_gpu
        self.height = height
        self.width = width
        self.seed = seed
        # HOG parameters (pixels): window, block, block stride, cell, histogram bins.
        winSize = (20, 20)
        blockSize = (10, 10)
        blockStride = (5, 5)
        cellSize = (5, 5)
        nbins = 9
        if cls != 0:
            # HOG mode: build the descriptor; no model weights are needed.
            self.hog = cv2.HOGDescriptor(winSize, blockSize, blockStride, cellSize, nbins)
        else:
            # ReID mode: restore model weights from the checkpoint directory.
            assert os.path.isdir(load_path)
            print("Restoring model from {}.".format(load_path))
            check_point = flow.train.CheckPoint()
            check_point.load(load_path)
    def __call__(self, input, batch_size=10, feature_type = 0):
        '''
        :param input: detected images, numpy array
          feature_type = 0 extracts ReID features, 1 extracts HOG features
        :return: image features extracted from input
        '''
        if feature_type == 1:
            # HOG path: images are resized to 100x75 before computing descriptors.
            winStride = (20, 20)
            padding = (0, 0)
            if isinstance(input, list):
                if len(input) == 0:
                    return np.array([])
                features = []
                for ind in input:
                    ind_ = cv2.resize(ind, (100,75), interpolation=cv2.INTER_LINEAR)
                    extracted_feature = self.hog.compute(ind_, winStride, padding)
                    # Transpose so each descriptor is a row vector.
                    extracted_feature = extracted_feature.T
                    features.append(extracted_feature)
            else:
                input_ = cv2.resize(input, (100,75), interpolation=cv2.INTER_LINEAR)
                features = self.hog.compute(input_, winStride, padding)
                features = features.T
            # Stack per-image row vectors into a single 2-D feature matrix.
            features = np.vstack(features)
            #print("hog size: ", (features.shape))
            return features
        else:
            # ReID path: run each image through the eval job one at a time.
            if len(input) == 0:
                return np.array([])
            #print(len(input))
            features = []
            for ind in input:
                # one_batch_image_preprocess / reid_eval_job come from restrack_reid.
                datest = one_batch_image_preprocess(ind, 256, 128)
                outs = reid_eval_job(datest).get()
                features.append(outs.ndarray_list_[0])
            features = np.vstack(features)
            return features
if __name__ == "__main__":
    # BUGFIX: the greeting previously read "hello wolrd!" — typo corrected.
    print("hello world!\n")
    # Example usage (requires a trained checkpoint on disk):
    # etreactor = Extractor(model_name='osnet_x1_0',
    #                       load_path='/home/kcadmin/user/xz/deep-person-reid/checkpoint/model.pth.tar-460',
    #                       gpu_ids='0, 1')
    #
    # feature = etreactor(test_img_numpy)
    # print(feature.shape)
    # print(type(feature))
    # print(feature)
"oneflow.train.CheckPoint"
] | [((732, 799), 'cv2.HOGDescriptor', 'cv2.HOGDescriptor', (['winSize', 'blockSize', 'blockStride', 'cellSize', 'nbins'], {}), '(winSize, blockSize, blockStride, cellSize, nbins)\n', (749, 799), False, 'import cv2\n'), ((835, 859), 'os.path.isdir', 'os.path.isdir', (['load_path'], {}), '(load_path)\n', (848, 859), False, 'import os, time\n'), ((952, 975), 'oneflow.train.CheckPoint', 'flow.train.CheckPoint', ([], {}), '()\n', (973, 975), True, 'import oneflow as flow\n'), ((2073, 2092), 'numpy.vstack', 'np.vstack', (['features'], {}), '(features)\n', (2082, 2092), True, 'import numpy as np\n'), ((2559, 2578), 'numpy.vstack', 'np.vstack', (['features'], {}), '(features)\n', (2568, 2578), True, 'import numpy as np\n'), ((1877, 1937), 'cv2.resize', 'cv2.resize', (['input', '(100, 75)'], {'interpolation': 'cv2.INTER_LINEAR'}), '(input, (100, 75), interpolation=cv2.INTER_LINEAR)\n', (1887, 1937), False, 'import cv2\n'), ((2250, 2262), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2258, 2262), True, 'import numpy as np\n'), ((1466, 1478), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1474, 1478), True, 'import numpy as np\n'), ((1573, 1631), 'cv2.resize', 'cv2.resize', (['ind', '(100, 75)'], {'interpolation': 'cv2.INTER_LINEAR'}), '(ind, (100, 75), interpolation=cv2.INTER_LINEAR)\n', (1583, 1631), False, 'import cv2\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow as flow
from typing import Tuple
from oneflow.python.oneflow_export import oneflow_export
import oneflow.python.framework.remote_blob as remote_blob_util
import oneflow.python.framework.id_util as id_util
import oneflow_api
@oneflow_export("experimental.ssp_variable_proxy")
def ssp_variable_proxy(
    var: oneflow_api.BlobDesc, buffer_size: int = 1, name=None
) -> Tuple[oneflow_api.BlobDesc, oneflow_api.BlobDesc]:
    r"""Build an ssp_variable_proxy op around ``var``.

    Returns a ``(ref_blob, value_blob)`` pair produced by the op's
    "ref" and "value" outputs.
    """
    if name is None:
        name = id_util.UniqueStr("SspVariableProxy_")
    # Assemble the op step by step rather than as one long chained expression.
    builder = flow.user_op_builder(name)
    builder = builder.Op("ssp_variable_proxy")
    builder = builder.Input("var", [var])
    builder = builder.Output("ref").Output("value")
    builder = builder.Attr("buffer_size", buffer_size)
    outputs = builder.Build().InferAndTryRun().RemoteBlobDict()
    return outputs["ref"][0], outputs["value"][0]
| [
"oneflow.user_op_builder",
"oneflow.python.oneflow_export.oneflow_export",
"oneflow.python.framework.id_util.UniqueStr"
] | [((832, 881), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""experimental.ssp_variable_proxy"""'], {}), "('experimental.ssp_variable_proxy')\n", (846, 881), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((1102, 1140), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""SspVariableProxy_"""'], {}), "('SspVariableProxy_')\n", (1119, 1140), True, 'import oneflow.python.framework.id_util as id_util\n'), ((1167, 1193), 'oneflow.user_op_builder', 'flow.user_op_builder', (['name'], {}), '(name)\n', (1187, 1193), True, 'import oneflow as flow\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import numpy as np
import unittest
import oneflow.experimental as flow
from zhusuan_of.distributions.base import *
# Module-level side effect: the tests below exercise eager-mode ops, so
# switch OneFlow to eager execution at import time.
flow.enable_eager_execution()
class Dist(Distribution):
    """Minimal concrete Distribution used to exercise the base-class API.

    Fixed shapes: value shape [5], batch shape [2, 3, 4]. When
    ``shape_fully_defined`` is False, the static shape getters report
    partially/fully unknown shapes.
    """

    def __init__(self, dtype=flow.float32, param_dtype=flow.float32,
                 group_ndims=0, shape_fully_defined=True, **kwargs):
        super().__init__(dtype,
                         param_dtype,
                         is_continuous=True,
                         is_reparameterized=True,
                         group_ndims=group_ndims,
                         **kwargs)
        self._shape_fully_defined = shape_fully_defined

    def _value_shape(self):
        return [5]

    def _get_value_shape(self):
        return [5] if self._shape_fully_defined else None

    def _batch_shape(self):
        return [2, 3, 4]

    def _get_batch_shape(self):
        return [2, 3, 4] if self._shape_fully_defined else [None, 3, 4]

    def _sample(self, n_samples):
        return flow.ones((n_samples, 2, 3, 4, 5))

    def _log_prob(self, given):
        return flow.sum(flow.zeros_like(given), -1)
class TestDistributions(unittest.TestCase):
    # Covers the abstract Distribution base class and the concrete Dist stub.
    def test_baseclass(self):
        # The base class stores constructor flags but implements no shape/sample
        # logic, so every protected hook must raise NotImplementedError.
        dist = Distribution(
            dtype=flow.float32,
            param_dtype=flow.float32,
            is_continuous = True,
            is_reparameterized = True,
            use_path_derivative=False,
            group_ndims=2)
        self.assertEqual(dist.dtype, flow.float32)
        self.assertEqual(dist.param_dtype, flow.float32)
        self.assertEqual(dist.is_continuous, True)
        self.assertEqual(dist.is_reparameterized, True)
        self.assertEqual(dist.group_ndims, 2)
        with self.assertRaises(NotImplementedError):
            dist._value_shape()
        with self.assertRaises(NotImplementedError):
            dist._get_value_shape()
        with self.assertRaises(NotImplementedError):
            dist._batch_shape()
        with self.assertRaises(NotImplementedError):
            dist._get_batch_shape()
        with self.assertRaises(NotImplementedError):
            dist._sample(n_samples=1)
        with self.assertRaises(NotImplementedError):
            dist._log_prob(flow.ones((2, 3, 4, 5)))
        # group_ndims must be a non-negative scalar.
        with self.assertRaisesRegex(ValueError, "must be non-negative"):
            dist2 = Distribution(flow.float32, flow.float32, True, True, False, -1)
    def test_subclass(self):
        dist = Dist(group_ndims=2)
        self.assertEqual(dist.dtype, flow.float32)
        self.assertEqual(dist.is_continuous, True)
        self.assertEqual(dist.is_reparameterized, True)
        self.assertEqual(dist.group_ndims, 2)
        # shape
        get_v_shape = dist.get_value_shape()
        self.assertListEqual(get_v_shape, [5])
        v_shape = dist.value_shape
        self.assertListEqual(v_shape, [5])
        get_b_shape = dist.get_batch_shape()
        self.assertListEqual(get_b_shape, [2, 3, 4])
        b_shape = dist.batch_shape
        self.assertListEqual(b_shape, [2, 3, 4])
        # sample
        samples_1 = dist.sample()
        self.assertListEqual(samples_1.numpy().flatten().astype(np.int32).tolist(),
                             np.ones((2, 3, 4, 5), dtype=np.int32).flatten().tolist())
        for n in [1, 2]:
            samples_2 = dist.sample(n_samples=n)
            self.assertListEqual(samples_2.numpy().flatten().astype(np.int32).tolist(),
                                 np.ones((n, 2, 3, 4, 5), dtype=np.int32).flatten().tolist())
        # log_prob
        given_1 = dist.sample() if False else flow.ones((2, 3, 4, 5))
        log_p_1 = dist.log_prob(given_1)
        self.assertListEqual(log_p_1.numpy().astype(np.int32).tolist(),
                             np.zeros((2)).tolist())
        try:
            dist.log_prob(flow.ones((3, 3, 4, 5)))
        except:
            raise ValueError("broadcast to match batch_shape and value_shape")
        given_2 = flow.ones((1, 2, 3, 4, 5))
        log_p_2 = dist.log_prob(given_2)
        self.assertListEqual(log_p_2.numpy().astype(np.int32).tolist(), np.zeros((1, 2)).tolist())
        given_3 = flow.ones((1, 1, 2, 3, 4, 5))
        log_p_3 = dist.log_prob(given_3)
        self.assertListEqual(log_p_3.numpy().astype(np.int32).tolist(), np.zeros((1, 1, 2)).tolist())
        # legacy keyword must be rejected by the base class
        try:
            Dist(group_event_ndims=1)
        except:
            raise ValueError("has been deprecated")
        # group_ndims must be a scalar, not a list
        try:
            dist2 = Dist(group_ndims=[1, 2])
        except:
            raise TypeError("should be a scalar")
        # shape not fully defined
        dist3 = Dist(shape_fully_defined=False)
        get_v_shape = dist3.get_value_shape()
        self.assertEqual(get_v_shape, None)
        v_shape = dist3.value_shape
        self.assertListEqual(v_shape, [5])
        get_b_shape = dist3.get_batch_shape()
        self.assertListEqual(get_b_shape, [None, 3, 4])
        b_shape = dist3.batch_shape
        self.assertListEqual(b_shape, [2, 3, 4])
        # given type of log_prob and prob
        def _test_log_prob_raise(dtype, given_dtype):
            dist = Dist(dtype=dtype)
            given = flow.cast(flow.Tensor([1]), given_dtype)
            try:
                dist.log_prob(given)
            except:
                ValueError
        _test_log_prob_raise(flow.float32, flow.float64)
        _test_log_prob_raise(flow.float32, flow.int32)
        _test_log_prob_raise(flow.float32, flow.int64)
        _test_log_prob_raise(flow.float64, flow.float32)
        _test_log_prob_raise(flow.float64, flow.int32)
        _test_log_prob_raise(flow.int32, flow.float32)
        _test_log_prob_raise(flow.int32, flow.int64)
        _test_log_prob_raise(flow.int64, flow.int32)
        _test_log_prob_raise(flow.int64, flow.float64)
        # NOTE(<NAME>): not support data type
        # _test_log_prob_raise(flow.float32, flow.float16)
| [
"oneflow.experimental.zeros_like",
"oneflow.experimental.Tensor",
"oneflow.experimental.enable_eager_execution",
"oneflow.experimental.ones"
] | [((274, 303), 'oneflow.experimental.enable_eager_execution', 'flow.enable_eager_execution', ([], {}), '()\n', (301, 303), True, 'import oneflow.experimental as flow\n'), ((1291, 1325), 'oneflow.experimental.ones', 'flow.ones', (['(n_samples, 2, 3, 4, 5)'], {}), '((n_samples, 2, 3, 4, 5))\n', (1300, 1325), True, 'import oneflow.experimental as flow\n'), ((3879, 3902), 'oneflow.experimental.ones', 'flow.ones', (['(2, 3, 4, 5)'], {}), '((2, 3, 4, 5))\n', (3888, 3902), True, 'import oneflow.experimental as flow\n'), ((4248, 4274), 'oneflow.experimental.ones', 'flow.ones', (['(1, 2, 3, 4, 5)'], {}), '((1, 2, 3, 4, 5))\n', (4257, 4274), True, 'import oneflow.experimental as flow\n'), ((4434, 4463), 'oneflow.experimental.ones', 'flow.ones', (['(1, 1, 2, 3, 4, 5)'], {}), '((1, 1, 2, 3, 4, 5))\n', (4443, 4463), True, 'import oneflow.experimental as flow\n'), ((1383, 1405), 'oneflow.experimental.zeros_like', 'flow.zeros_like', (['given'], {}), '(given)\n', (1398, 1405), True, 'import oneflow.experimental as flow\n'), ((2536, 2559), 'oneflow.experimental.ones', 'flow.ones', (['(2, 3, 4, 5)'], {}), '((2, 3, 4, 5))\n', (2545, 2559), True, 'import oneflow.experimental as flow\n'), ((4109, 4132), 'oneflow.experimental.ones', 'flow.ones', (['(3, 3, 4, 5)'], {}), '((3, 3, 4, 5))\n', (4118, 4132), True, 'import oneflow.experimental as flow\n'), ((5458, 5474), 'oneflow.experimental.Tensor', 'flow.Tensor', (['[1]'], {}), '([1])\n', (5469, 5474), True, 'import oneflow.experimental as flow\n'), ((4045, 4056), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (4053, 4056), True, 'import numpy as np\n'), ((4388, 4404), 'numpy.zeros', 'np.zeros', (['(1, 2)'], {}), '((1, 2))\n', (4396, 4404), True, 'import numpy as np\n'), ((4577, 4596), 'numpy.zeros', 'np.zeros', (['(1, 1, 2)'], {}), '((1, 1, 2))\n', (4585, 4596), True, 'import numpy as np\n'), ((3527, 3564), 'numpy.ones', 'np.ones', (['(2, 3, 4, 5)'], {'dtype': 'np.int32'}), '((2, 3, 4, 5), dtype=np.int32)\n', (3534, 3564), True, 
'import numpy as np\n'), ((3780, 3820), 'numpy.ones', 'np.ones', (['(n, 2, 3, 4, 5)'], {'dtype': 'np.int32'}), '((n, 2, 3, 4, 5), dtype=np.int32)\n', (3787, 3820), True, 'import numpy as np\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow as flow
import oneflow.distribute as distribute_util
# Shared weight initializer for every conv/deconv layer in this module.
init = flow.random_normal_initializer(stddev=0.02)
def conv2d(
    input,
    filters,
    size,
    name,
    strides=1,
    padding="same",
    trainable=True,
    reuse=False,
    const_init=False,
    use_bias=True,
):
    """2-D convolution over an NCHW input with a named weight variable.

    NOTE(review): `const_init` is accepted for signature parity with
    deconv2d but is never used here — confirm that is intended.
    """
    op_name = name + "_reuse" if reuse else name
    # Weight layout for NCHW: (out_channels, in_channels, k, k).
    kernel = flow.get_variable(
        name + "-weight",
        shape=(filters, input.shape[1], size, size),
        dtype=input.dtype,
        initializer=init,
        trainable=trainable,
        reuse=reuse,
    )
    out = flow.nn.conv2d(
        input,
        kernel,
        strides=strides,
        padding=padding,
        data_format="NCHW",
        name=op_name,
    )
    if not use_bias:
        return out
    bias = flow.get_variable(
        name + "-bias",
        shape=(filters,),
        dtype=input.dtype,
        initializer=flow.constant_initializer(0.0),
        trainable=trainable,
        reuse=reuse,
    )
    return flow.nn.bias_add(out, bias, "NCHW")
def deconv2d(
    input,
    filters,
    size,
    name,
    strides=2,
    trainable=True,
    reuse=False,
    const_init=False,
    use_bias=False,
):
    """Transposed 2-D convolution (NCHW) that scales H and W by `strides`."""
    op_name = name + "_reuse" if reuse else name
    # Transpose-conv weight layout: [in_channels, out_channels, k, k].
    kernel = flow.get_variable(
        name + "-weight",
        shape=(input.shape[1], filters, size, size),
        dtype=input.dtype,
        initializer=get_const_initializer() if const_init else init,
        trainable=trainable,
    )
    # NOTE(review): the channel dim below is input.shape[1], not `filters`;
    # verify that is the intended output shape.
    out_shape = (
        input.shape[0],
        input.shape[1],
        input.shape[2] * strides,
        input.shape[3] * strides,
    )
    out = flow.nn.conv2d_transpose(
        input,
        kernel,
        strides=[strides, strides],
        output_shape=out_shape,
        padding="SAME",
        data_format="NCHW",
        name=op_name,
    )
    if not use_bias:
        return out
    bias = flow.get_variable(
        name + "-bias",
        shape=(filters,),
        dtype=input.dtype,
        initializer=flow.constant_initializer(0.0),
        trainable=trainable,
    )
    return flow.nn.bias_add(out, bias, "NCHW")
def _batch_norm(inputs, name, trainable=True, training=True):
    """Batch normalization over dim 1 (NCHW channels) built via user_op_builder.

    Creates beta/gamma plus moving statistics inside scope `name`; training
    mode is forced off when the global function is non-trainable or
    `trainable` is False.
    """
    params_shape = [inputs.shape[1]]
    # Float32 required to avoid precision-loss when using fp16 input/output
    params_dtype = flow.float32 if inputs.dtype == flow.float16 else inputs.dtype
    if not flow.current_global_function_desc().IsTrainable() or not trainable:
        training = False
    with flow.scope.namespace(name):
        # Learnable shift, initialized to zero.
        beta = flow.get_variable(
            name="beta",
            shape=params_shape,
            dtype=params_dtype,
            initializer=flow.zeros_initializer(),
            trainable=trainable,
            distribute=distribute_util.broadcast(),
        )
        # Learnable scale, initialized to one.
        gamma = flow.get_variable(
            name="gamma",
            shape=params_shape,
            dtype=params_dtype,
            initializer=flow.ones_initializer(),
            trainable=trainable,
            distribute=distribute_util.broadcast(),
        )
        # Running statistics are never trained directly.
        moving_mean = flow.get_variable(
            name="moving_mean",
            shape=params_shape,
            dtype=params_dtype,
            initializer=flow.zeros_initializer(),
            trainable=False,
            distribute=distribute_util.broadcast(),
        )
        moving_variance = flow.get_variable(
            name="moving_variance",
            shape=params_shape,
            dtype=params_dtype,
            initializer=flow.ones_initializer(),
            trainable=False,
            distribute=distribute_util.broadcast(),
        )
    builder = (
        flow.user_op_builder(name)
        .Op("normalization")
        .Input("x", [inputs])
        .Input("moving_mean", [moving_mean])
        .Input("moving_variance", [moving_variance])
        .Input("gamma", [gamma])
        .Input("beta", [beta])
        .Output("y")
        .Attr("axis", 1)
        .Attr("epsilon", 1.001e-5)
        .Attr("training", training)
        .Attr("momentum", 0.997)
    )
    # Extra outputs (batch mean / inv variance) only exist in training mode.
    if trainable and training:
        builder = builder.Output("mean").Output("inv_variance")
    return builder.Build().InferAndTryRun().RemoteBlobList()[0]
def batch_norm(input, name, axis=1, reuse=False, trainable=True):
    """BN wrapper keeping separate '_reuse'-suffixed statistics for the
    second (reuse) pass, so real and fake batches do not share stats.

    `axis` is accepted for API symmetry; _batch_norm always normalizes dim 1.
    """
    if reuse:
        name = name + '_reuse'
    return _batch_norm(input, name, trainable=trainable)
def avg_pool2d(input, name, size, strides, padding, reuse=False):
    """Average pooling; appends '_reuse' to the op name on reuse passes."""
    if reuse:
        name = name + '_reuse'
    return flow.nn.avg_pool2d(input, ksize=size, strides=strides, padding=padding, name=name)
def max_pool2d(input, size, strides, name, padding="VALID", data_format="NCHW", reuse=False):
    """Max pooling; appends '_reuse' to the op name on reuse passes."""
    if reuse:
        name = name + '_reuse'
    return flow.nn.max_pool2d(input, ksize=size, strides=strides, padding=padding, data_format=data_format, name=name)
def residual_block(inputs, name, filters=64, size=3, trainable=True, reuse=False):
    """Identity-skip residual unit: conv -> BN -> ReLU -> conv -> BN, + input."""
    with flow.scope.namespace(name):
        h = conv2d(inputs, filters=filters, size=size, name="conv1", strides=1, trainable=trainable, reuse=reuse)
        h = batch_norm(h, "bn1", trainable=trainable, reuse=reuse)
        # PReLU was considered here originally; plain ReLU is used.
        h = flow.math.relu(h)
        h = conv2d(h, filters, size=size, name="conv2", strides=1, trainable=trainable, reuse=reuse)
        h = batch_norm(h, "bn2", trainable=trainable, reuse=reuse)
        # The skip-add is created inside the namespace, matching the original.
        return inputs + h
def residual_blocks(inputs, filters, block_num, trainable=True):
    """Chain `block_num` residual blocks, scoped block0 .. block{n-1}."""
    out = inputs
    for idx in range(block_num):
        out = residual_block(out, "block%d" % (idx), filters=filters, trainable=trainable)
    return out
def upsample_blocks(inputs, filters, block_num, trainable=True):
    """Chain `block_num` upsample blocks, scoped block0 .. block{n-1}."""
    out = inputs
    for idx in range(block_num):
        out = upsample_block(out, "block%d" % (idx), filters=filters, trainable=trainable)
    return out
def upsample_block(inputs, name, filters, size=3, trainable=True, reuse=False):
    """Spatial upsample: deconv (x2) -> BN -> ReLU, inside scope `name`."""
    with flow.scope.namespace(name):
        h = deconv2d(inputs, name="deconv", filters=filters, size=size, trainable=trainable, reuse=reuse)
        h = batch_norm(h, name="bn", trainable=trainable)
        # PReLU was considered here originally; plain ReLU is used.
        h = flow.math.relu(h)
    return h
| [
"oneflow.distribute.broadcast",
"oneflow.current_global_function_desc",
"oneflow.nn.conv2d",
"oneflow.scope.namespace",
"oneflow.ones_initializer",
"oneflow.nn.conv2d_transpose",
"oneflow.random_normal_initializer",
"oneflow.constant_initializer",
"oneflow.get_variable",
"oneflow.zeros_initializer... | [((664, 707), 'oneflow.random_normal_initializer', 'flow.random_normal_initializer', ([], {'stddev': '(0.02)'}), '(stddev=0.02)\n', (694, 707), True, 'import oneflow as flow\n'), ((1061, 1191), 'oneflow.get_variable', 'flow.get_variable', (["(name + '-weight')"], {'shape': 'weight_shape', 'dtype': 'input.dtype', 'initializer': 'init', 'trainable': 'trainable', 'reuse': 'reuse'}), "(name + '-weight', shape=weight_shape, dtype=input.dtype,\n initializer=init, trainable=trainable, reuse=reuse)\n", (1078, 1191), True, 'import oneflow as flow\n'), ((1257, 1357), 'oneflow.nn.conv2d', 'flow.nn.conv2d', (['input', 'weight'], {'strides': 'strides', 'padding': 'padding', 'data_format': '"""NCHW"""', 'name': 'name_'}), "(input, weight, strides=strides, padding=padding, data_format\n ='NCHW', name=name_)\n", (1271, 1357), True, 'import oneflow as flow\n'), ((2470, 2616), 'oneflow.nn.conv2d_transpose', 'flow.nn.conv2d_transpose', (['input', 'weight'], {'strides': '[strides, strides]', 'output_shape': 'output_shape', 'padding': '"""SAME"""', 'data_format': '"""NCHW"""', 'name': 'name_'}), "(input, weight, strides=[strides, strides],\n output_shape=output_shape, padding='SAME', data_format='NCHW', name=name_)\n", (2494, 2616), True, 'import oneflow as flow\n'), ((5418, 5504), 'oneflow.nn.avg_pool2d', 'flow.nn.avg_pool2d', (['input'], {'ksize': 'size', 'strides': 'strides', 'padding': 'padding', 'name': 'name'}), '(input, ksize=size, strides=strides, padding=padding,\n name=name)\n', (5436, 5504), True, 'import oneflow as flow\n'), ((5651, 5762), 'oneflow.nn.max_pool2d', 'flow.nn.max_pool2d', (['input'], {'ksize': 'size', 'strides': 'strides', 'padding': 'padding', 'data_format': 'data_format', 'name': 'name'}), '(input, ksize=size, strides=strides, padding=padding,\n data_format=data_format, name=name)\n', (5669, 5762), True, 'import oneflow as flow\n'), ((1691, 1729), 'oneflow.nn.bias_add', 'flow.nn.bias_add', (['output', 'bias', '"""NCHW"""'], 
{}), "(output, bias, 'NCHW')\n", (1707, 1729), True, 'import oneflow as flow\n'), ((2934, 2972), 'oneflow.nn.bias_add', 'flow.nn.bias_add', (['output', 'bias', '"""NCHW"""'], {}), "(output, bias, 'NCHW')\n", (2950, 2972), True, 'import oneflow as flow\n'), ((3363, 3389), 'oneflow.scope.namespace', 'flow.scope.namespace', (['name'], {}), '(name)\n', (3383, 3389), True, 'import oneflow as flow\n'), ((5853, 5879), 'oneflow.scope.namespace', 'flow.scope.namespace', (['name'], {}), '(name)\n', (5873, 5879), True, 'import oneflow as flow\n'), ((6163, 6182), 'oneflow.math.relu', 'flow.math.relu', (['bn1'], {}), '(bn1)\n', (6177, 6182), True, 'import oneflow as flow\n'), ((7128, 7154), 'oneflow.scope.namespace', 'flow.scope.namespace', (['name'], {}), '(name)\n', (7148, 7154), True, 'import oneflow as flow\n'), ((7427, 7445), 'oneflow.math.relu', 'flow.math.relu', (['bn'], {}), '(bn)\n', (7441, 7445), True, 'import oneflow as flow\n'), ((1573, 1603), 'oneflow.constant_initializer', 'flow.constant_initializer', (['(0.0)'], {}), '(0.0)\n', (1598, 1603), True, 'import oneflow as flow\n'), ((2841, 2871), 'oneflow.constant_initializer', 'flow.constant_initializer', (['(0.0)'], {}), '(0.0)\n', (2866, 2871), True, 'import oneflow as flow\n'), ((3538, 3562), 'oneflow.zeros_initializer', 'flow.zeros_initializer', ([], {}), '()\n', (3560, 3562), True, 'import oneflow as flow\n'), ((3620, 3647), 'oneflow.distribute.broadcast', 'distribute_util.broadcast', ([], {}), '()\n', (3645, 3647), True, 'import oneflow.distribute as distribute_util\n'), ((3808, 3831), 'oneflow.ones_initializer', 'flow.ones_initializer', ([], {}), '()\n', (3829, 3831), True, 'import oneflow as flow\n'), ((3889, 3916), 'oneflow.distribute.broadcast', 'distribute_util.broadcast', ([], {}), '()\n', (3914, 3916), True, 'import oneflow.distribute as distribute_util\n'), ((4089, 4113), 'oneflow.zeros_initializer', 'flow.zeros_initializer', ([], {}), '()\n', (4111, 4113), True, 'import oneflow as flow\n'), ((4167, 
4194), 'oneflow.distribute.broadcast', 'distribute_util.broadcast', ([], {}), '()\n', (4192, 4194), True, 'import oneflow.distribute as distribute_util\n'), ((4375, 4398), 'oneflow.ones_initializer', 'flow.ones_initializer', ([], {}), '()\n', (4396, 4398), True, 'import oneflow as flow\n'), ((4452, 4479), 'oneflow.distribute.broadcast', 'distribute_util.broadcast', ([], {}), '()\n', (4477, 4479), True, 'import oneflow.distribute as distribute_util\n'), ((3261, 3296), 'oneflow.current_global_function_desc', 'flow.current_global_function_desc', ([], {}), '()\n', (3294, 3296), True, 'import oneflow as flow\n'), ((4515, 4541), 'oneflow.user_op_builder', 'flow.user_op_builder', (['name'], {}), '(name)\n', (4535, 4541), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import shutil
import argparse
import oneflow as flow
import style_model
def _init_oneflow_env_and_config():
    # Process-global OneFlow setup; env must be initialized before the
    # execution-mode and model-IO flags are set.
    flow.env.init()
    flow.enable_eager_execution(False)  # lazy (graph) mode
    # Legacy model IO — presumably required by flow.train.CheckPoint().load
    # in main(); confirm before removing.
    flow.config.enable_legacy_model_io(True)
def _make_style_transform_predict_func(width, height, backend="gpu"):
    """Build a lazy OneFlow predict function taking a single
    (1, height, width, 3) float32 image (channels-last) placed on
    device "0:0" of `backend` and returning the stylized output."""
    batch_size = 1
    channels = 3
    func_cfg = flow.function_config()
    func_cfg.default_placement_scope(flow.scope.placement(backend, "0:0"))
    @flow.global_function("predict", function_config=func_cfg)
    def predict_fn(
        image: flow.typing.Numpy.Placeholder(
            shape=(batch_size, height, width, channels), dtype=flow.float32
        )
    ) -> flow.typing.Numpy:
        style_out = style_model.styleNet(image, backend=backend)
        return style_out
    return predict_fn
def main(args):
    """Build the style-transfer predict function, load its checkpoint, and
    export it as a versioned OneFlow SavedModel under args.save_dir."""
    _init_oneflow_env_and_config()
    predict_fn = _make_style_transform_predict_func(
        args.image_width, args.image_height, args.backend
    )
    # Legacy checkpoint loader; the newer API would be
    # flow.load_variables(flow.checkpoint.get(args.model_dir)).
    flow.train.CheckPoint().load(args.model_dir)
    print("predict_fn construct finished")
    model_version = args.model_version
    model_version_path = os.path.join(args.save_dir, str(model_version))
    # os.path.isdir already implies existence.
    if os.path.isdir(model_version_path):
        if not args.force_save:
            raise ValueError(
                f"The model version path '{model_version_path}' already exist"
            )
        print(
            f"WARNING: The model version path '{model_version_path}' already exist"
            ", old version directory will be replaced"
        )
        shutil.rmtree(model_version_path)
    builder = flow.saved_model.ModelBuilder(args.save_dir)
    builder = builder.ModelName(args.model_name).Version(model_version)
    builder.AddFunction(predict_fn).Finish()
    builder.Save()
def _parse_args():
parser = argparse.ArgumentParser("flags for save style transform model")
parser.add_argument(
"--backend",
type=str,
default="gpu",
help="gpu or cambricon"
)
parser.add_argument(
"--model_dir",
type=str,
default="stylenet_nhwc",
help="model parameters directory",
)
parser.add_argument(
"--save_dir",
type=str,
default="style_transform_models",
help="directory to save models",
)
parser.add_argument(
"--model_name", type=str, default="style_transform", help="model name"
)
parser.add_argument("--model_version", type=int, default=1, help="model version")
parser.add_argument(
"--force_save",
default=False,
action="store_true",
help="force save model whether already exists or not",
)
parser.add_argument(
"--image_width", type=int, default=640, help="input image width"
)
parser.add_argument(
"--image_height", type=int, default=640, help="input image height"
)
return parser.parse_args()
# Script entry point: parse CLI flags, then export the saved model.
if __name__ == "__main__":
    args = _parse_args()
    main(args)
| [
"oneflow.global_function",
"oneflow.enable_eager_execution",
"oneflow.saved_model.ModelBuilder",
"oneflow.typing.Numpy.Placeholder",
"oneflow.scope.placement",
"oneflow.function_config",
"oneflow.env.init",
"oneflow.config.enable_legacy_model_io",
"oneflow.train.CheckPoint"
] | [((714, 729), 'oneflow.env.init', 'flow.env.init', ([], {}), '()\n', (727, 729), True, 'import oneflow as flow\n'), ((734, 768), 'oneflow.enable_eager_execution', 'flow.enable_eager_execution', (['(False)'], {}), '(False)\n', (761, 768), True, 'import oneflow as flow\n'), ((773, 813), 'oneflow.config.enable_legacy_model_io', 'flow.config.enable_legacy_model_io', (['(True)'], {}), '(True)\n', (807, 813), True, 'import oneflow as flow\n'), ((938, 960), 'oneflow.function_config', 'flow.function_config', ([], {}), '()\n', (958, 960), True, 'import oneflow as flow\n'), ((1042, 1099), 'oneflow.global_function', 'flow.global_function', (['"""predict"""'], {'function_config': 'func_cfg'}), "('predict', function_config=func_cfg)\n", (1062, 1099), True, 'import oneflow as flow\n'), ((2612, 2675), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""flags for save style transform model"""'], {}), "('flags for save style transform model')\n", (2635, 2675), False, 'import argparse\n'), ((998, 1034), 'oneflow.scope.placement', 'flow.scope.placement', (['backend', '"""0:0"""'], {}), "(backend, '0:0')\n", (1018, 1034), True, 'import oneflow as flow\n'), ((1300, 1344), 'style_model.styleNet', 'style_model.styleNet', (['image'], {'backend': 'backend'}), '(image, backend=backend)\n', (1320, 1344), False, 'import style_model\n'), ((1866, 1900), 'os.path.exists', 'os.path.exists', (['model_version_path'], {}), '(model_version_path)\n', (1880, 1900), False, 'import os\n'), ((1905, 1938), 'os.path.isdir', 'os.path.isdir', (['model_version_path'], {}), '(model_version_path)\n', (1918, 1938), False, 'import os\n'), ((1135, 1233), 'oneflow.typing.Numpy.Placeholder', 'flow.typing.Numpy.Placeholder', ([], {'shape': '(batch_size, height, width, channels)', 'dtype': 'flow.float32'}), '(shape=(batch_size, height, width, channels),\n dtype=flow.float32)\n', (1164, 1233), True, 'import oneflow as flow\n'), ((1554, 1577), 'oneflow.train.CheckPoint', 'flow.train.CheckPoint', ([], {}), 
'()\n', (1575, 1577), True, 'import oneflow as flow\n'), ((2160, 2193), 'shutil.rmtree', 'shutil.rmtree', (['model_version_path'], {}), '(model_version_path)\n', (2173, 2193), False, 'import shutil\n'), ((2368, 2415), 'oneflow.saved_model.ModelBuilder', 'flow.saved_model.ModelBuilder', (['saved_model_path'], {}), '(saved_model_path)\n', (2397, 2415), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow as flow
from oneflow.python.nn.module import Module
from oneflow.python.oneflow_export import oneflow_export, experimental_api
from oneflow.python.framework.tensor import register_tensor_op
class BMM(Module):
def __init__(self) -> None:
super().__init__()
def forward(self, input, mat2):
assert (
input.shape[0] == mat2.shape[0] and input.shape[2] == mat2.shape[1]
), f"batch dim or matmul dim not match, please check input!"
return flow.F.batch_matmul(input, mat2)
@oneflow_export("bmm")
@experimental_api
def bmm_op(x, y):
"""
Performs a batch matrix-matrix product of matrices stored in input and mat2.
`input` and `mat2` must be 3-D tensors each containing the same number of matrices.
If input is a (b x n x m) tensor, mat2 is a (b x m x p) tensor, out will be a (b x n x p) tensor.
Args:
input(oneflow.Tensor): the first batch of matrices to be multiplied
mat2(oneflow.Tensor): the second batch of matrices to be multiplied
For example:
.. code-block:: python
>>> import oneflow.experimental as flow
>>> import numpy as np
>>> flow.enable_eager_execution()
>>> input1 = flow.Tensor(np.random.randn(10, 3, 4), dtype=flow.float32)
>>> input2 = flow.Tensor(np.random.randn(10, 4, 5), dtype=flow.float32)
>>> of_out = flow.bmm(input1, input2)
>>> of_out.shape
flow.Size([10, 3, 5])
"""
return BMM()(x, y)
@register_tensor_op("bmm")
@experimental_api
def bmm_op_tensor(x, y):
r"""
bmm() -> Tensor
See :func:`oneflow.experimental.bmm`
"""
return BMM()(x, y)
if __name__ == "__main__":
import doctest
doctest.testmod(raise_on_error=True)
| [
"oneflow.F.batch_matmul",
"oneflow.python.oneflow_export.oneflow_export",
"oneflow.python.framework.tensor.register_tensor_op"
] | [((1129, 1150), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""bmm"""'], {}), "('bmm')\n", (1143, 1150), False, 'from oneflow.python.oneflow_export import oneflow_export, experimental_api\n'), ((2099, 2124), 'oneflow.python.framework.tensor.register_tensor_op', 'register_tensor_op', (['"""bmm"""'], {}), "('bmm')\n", (2117, 2124), False, 'from oneflow.python.framework.tensor import register_tensor_op\n'), ((2325, 2361), 'doctest.testmod', 'doctest.testmod', ([], {'raise_on_error': '(True)'}), '(raise_on_error=True)\n', (2340, 2361), False, 'import doctest\n'), ((1093, 1125), 'oneflow.F.batch_matmul', 'flow.F.batch_matmul', (['input', 'mat2'], {}), '(input, mat2)\n', (1112, 1125), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import numpy as np
from test_util import GenArgList
import oneflow as flow
import oneflow.unittest
from oneflow.test_utils.automated_test_util import *
def _test_meshgrid_forawd(test_case, device, indexing):
input1 = flow.tensor(
np.array([1, 2, 3]), dtype=flow.float32, device=flow.device(device)
)
input2 = flow.tensor(
np.array([4, 5, 6]), dtype=flow.float32, device=flow.device(device)
)
(np_x, np_y) = np.meshgrid(input1.numpy(), input2.numpy(), indexing=indexing)
(of_x, of_y) = flow.meshgrid(input1, input2, indexing=indexing)
test_case.assertTrue(np.allclose(of_x.numpy(), np_x, 0.0001, 0.0001))
def _test_meshgrid_forawd_scalar(test_case, device, indexing):
input1 = flow.tensor(np.array(1.0), dtype=flow.float32, device=flow.device(device))
input2 = flow.tensor(np.array(2.0), dtype=flow.float32, device=flow.device(device))
(np_x, np_y) = np.meshgrid(input1.numpy(), input2.numpy(), indexing=indexing)
(of_x, of_y) = flow.meshgrid(input1, input2, indexing=indexing)
test_case.assertTrue(np.allclose(of_x.numpy(), np_x, 0.0001, 0.0001))
def _test_meshgrid_forawd_3tensor(test_case, device, indexing):
input1 = flow.tensor(
np.array([1, 2, 3]), dtype=flow.float32, device=flow.device(device)
)
input2 = flow.tensor(
np.array([4, 5, 6]), dtype=flow.float32, device=flow.device(device)
)
input3 = flow.tensor(
np.array([7, 8, 9]), dtype=flow.float32, device=flow.device(device)
)
(np_x, np_y, np_z) = np.meshgrid(
input1.numpy(), input2.numpy(), input3.numpy(), indexing=indexing
)
(of_x, of_y, of_z) = flow.meshgrid(input1, input2, input3, indexing=indexing)
test_case.assertTrue(np.allclose(of_x.numpy(), np_x, 0.0001, 0.0001))
@flow.unittest.skip_unless_1n1d()
class TestMeshGridModule(flow.unittest.TestCase):
def test_meshgrid(test_case):
arg_dict = OrderedDict()
arg_dict["test_fun"] = [
_test_meshgrid_forawd,
_test_meshgrid_forawd_scalar,
_test_meshgrid_forawd_3tensor,
]
arg_dict["device"] = ["cpu", "cuda"]
arg_dict["indexing"] = ["ij", "xy"]
for arg in GenArgList(arg_dict):
arg[0](test_case, *arg[1:])
@autotest(auto_backward=False, check_graph=True)
@unittest.skip("pytorch 1.9.0 exist not indexing")
def test_meshgrid_with_random_data(test_case):
device = random_device()
x = random_tensor(ndim=1, dim0=3, requires_grad=False).to(device)
y = random_tensor(ndim=1, dim0=3, requires_grad=False).to(device)
res = torch.meshgrid(x, y)
return res[0], res[1]
@autotest(auto_backward=False)
def test_meshgrid_with_0dim_data(test_case):
device = random_device()
x = random_tensor(ndim=0).to(device)
y = random_tensor(ndim=0).to(device)
res = torch.meshgrid(x, y)
@autotest(auto_backward=True)
@unittest.skip("pytorch 1.9.0 exist not indexing")
def test_meshgrid_with_random_data_xy(test_case):
device = random_device()
x = random_tensor(ndim=1, dim0=random(1, 6)).to(device)
y = random_tensor(ndim=1, dim0=random(1, 6)).to(device)
res = torch.meshgrid(x, y, indexing="xy")
return torch.cat((res[0], res[1]), 0)
@autotest(auto_backward=True)
@unittest.skip("pytorch 1.9.0 exist not indexing")
def test_meshgrid_with_random_data_size(test_case):
device = random_device()
x = random_tensor(ndim=1, dim0=random(1, 6)).to(device)
res = torch.meshgrid(x, indexing="xy")
return res[0]
if __name__ == "__main__":
unittest.main()
| [
"oneflow.meshgrid",
"oneflow.unittest.skip_unless_1n1d",
"oneflow.device"
] | [((2428, 2460), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (2458, 2460), True, 'import oneflow as flow\n'), ((1173, 1221), 'oneflow.meshgrid', 'flow.meshgrid', (['input1', 'input2'], {'indexing': 'indexing'}), '(input1, input2, indexing=indexing)\n', (1186, 1221), True, 'import oneflow as flow\n'), ((1638, 1686), 'oneflow.meshgrid', 'flow.meshgrid', (['input1', 'input2'], {'indexing': 'indexing'}), '(input1, input2, indexing=indexing)\n', (1651, 1686), True, 'import oneflow as flow\n'), ((2294, 2350), 'oneflow.meshgrid', 'flow.meshgrid', (['input1', 'input2', 'input3'], {'indexing': 'indexing'}), '(input1, input2, input3, indexing=indexing)\n', (2307, 2350), True, 'import oneflow as flow\n'), ((2970, 3019), 'unittest.skip', 'unittest.skip', (['"""pytorch 1.9.0 exist not indexing"""'], {}), "('pytorch 1.9.0 exist not indexing')\n", (2983, 3019), False, 'import unittest\n'), ((3600, 3649), 'unittest.skip', 'unittest.skip', (['"""pytorch 1.9.0 exist not indexing"""'], {}), "('pytorch 1.9.0 exist not indexing')\n", (3613, 3649), False, 'import unittest\n'), ((4001, 4050), 'unittest.skip', 'unittest.skip', (['"""pytorch 1.9.0 exist not indexing"""'], {}), "('pytorch 1.9.0 exist not indexing')\n", (4014, 4050), False, 'import unittest\n'), ((4306, 4321), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4319, 4321), False, 'import unittest\n'), ((890, 909), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (898, 909), True, 'import numpy as np\n'), ((998, 1017), 'numpy.array', 'np.array', (['[4, 5, 6]'], {}), '([4, 5, 6])\n', (1006, 1017), True, 'import numpy as np\n'), ((1386, 1399), 'numpy.array', 'np.array', (['(1.0)'], {}), '(1.0)\n', (1394, 1399), True, 'import numpy as np\n'), ((1474, 1487), 'numpy.array', 'np.array', (['(2.0)'], {}), '(2.0)\n', (1482, 1487), True, 'import numpy as np\n'), ((1861, 1880), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (1869, 1880), True, 'import 
numpy as np\n'), ((1969, 1988), 'numpy.array', 'np.array', (['[4, 5, 6]'], {}), '([4, 5, 6])\n', (1977, 1988), True, 'import numpy as np\n'), ((2077, 2096), 'numpy.array', 'np.array', (['[7, 8, 9]'], {}), '([7, 8, 9])\n', (2085, 2096), True, 'import numpy as np\n'), ((2564, 2577), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2575, 2577), False, 'from collections import OrderedDict\n'), ((2849, 2869), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (2859, 2869), False, 'from test_util import GenArgList\n'), ((938, 957), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (949, 957), True, 'import oneflow as flow\n'), ((1046, 1065), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (1057, 1065), True, 'import oneflow as flow\n'), ((1428, 1447), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (1439, 1447), True, 'import oneflow as flow\n'), ((1516, 1535), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (1527, 1535), True, 'import oneflow as flow\n'), ((1909, 1928), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (1920, 1928), True, 'import oneflow as flow\n'), ((2017, 2036), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (2028, 2036), True, 'import oneflow as flow\n'), ((2125, 2144), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (2136, 2144), True, 'import oneflow as flow\n')] |
import oneflow as flow
def conv_encoder(input, trainable=True):
crop_size = 512
if input.shape[2]!=256 or input.shape[3]!=256:
input = flow.experimental.nn.UpsamplingBilinear2d(size=(256, 256))(input)
input = flow.layers.conv2d(input, 64, kernel_size=3, strides=2, padding=1, trainable=trainable)
input = flow.nn.InstanceNorm2d(input, affine=trainable)
input = flow.nn.leaky_relu(input, 2e-1)
input = flow.layers.conv2d(input, 64*2, kernel_size=3, strides=2, padding=1, trainable=trainable)
input = flow.nn.InstanceNorm2d(input, affine=trainable)
input = flow.nn.leaky_relu(input, 2e-1)
input = flow.layers.conv2d(input, 64 * 4, kernel_size=3, strides=2, padding=1, trainable=trainable)
input = flow.nn.InstanceNorm2d(input, affine=trainable)
input = flow.nn.leaky_relu(input, 2e-1)
input = flow.layers.conv2d(input, 64 * 8, kernel_size=3, strides=2, padding=1, trainable=trainable)
input = flow.nn.InstanceNorm2d(input, affine=trainable)
input = flow.nn.leaky_relu(input, 2e-1)
input = flow.layers.conv2d(input, 64 * 8, kernel_size=3, strides=2, padding=1, trainable=trainable)
input = flow.nn.InstanceNorm2d(input, affine=trainable)
if crop_size>256:
input = flow.nn.leaky_relu(input, 2e-1)
input = flow.layers.conv2d(input, 64 * 8, kernel_size=3, strides=2, padding=1, trainable=trainable)
input = flow.nn.InstanceNorm2d(input, affine=trainable)
input = flow.nn.leaky_relu(input, 2e-1)
input = flow.reshape(input, [input.shape[0], -1])
mu = flow.layers.dense(input, 256, trainable=trainable)
logvar = flow.layers.dense(input, 256, trainable=trainable)
return mu, logvar
| [
"oneflow.nn.InstanceNorm2d",
"oneflow.layers.dense",
"oneflow.reshape",
"oneflow.nn.leaky_relu",
"oneflow.layers.conv2d",
"oneflow.experimental.nn.UpsamplingBilinear2d"
] | [((232, 323), 'oneflow.layers.conv2d', 'flow.layers.conv2d', (['input', '(64)'], {'kernel_size': '(3)', 'strides': '(2)', 'padding': '(1)', 'trainable': 'trainable'}), '(input, 64, kernel_size=3, strides=2, padding=1,\n trainable=trainable)\n', (250, 323), True, 'import oneflow as flow\n'), ((332, 379), 'oneflow.nn.InstanceNorm2d', 'flow.nn.InstanceNorm2d', (['input'], {'affine': 'trainable'}), '(input, affine=trainable)\n', (354, 379), True, 'import oneflow as flow\n'), ((393, 423), 'oneflow.nn.leaky_relu', 'flow.nn.leaky_relu', (['input', '(0.2)'], {}), '(input, 0.2)\n', (411, 423), True, 'import oneflow as flow\n'), ((437, 532), 'oneflow.layers.conv2d', 'flow.layers.conv2d', (['input', '(64 * 2)'], {'kernel_size': '(3)', 'strides': '(2)', 'padding': '(1)', 'trainable': 'trainable'}), '(input, 64 * 2, kernel_size=3, strides=2, padding=1,\n trainable=trainable)\n', (455, 532), True, 'import oneflow as flow\n'), ((539, 586), 'oneflow.nn.InstanceNorm2d', 'flow.nn.InstanceNorm2d', (['input'], {'affine': 'trainable'}), '(input, affine=trainable)\n', (561, 586), True, 'import oneflow as flow\n'), ((600, 630), 'oneflow.nn.leaky_relu', 'flow.nn.leaky_relu', (['input', '(0.2)'], {}), '(input, 0.2)\n', (618, 630), True, 'import oneflow as flow\n'), ((644, 739), 'oneflow.layers.conv2d', 'flow.layers.conv2d', (['input', '(64 * 4)'], {'kernel_size': '(3)', 'strides': '(2)', 'padding': '(1)', 'trainable': 'trainable'}), '(input, 64 * 4, kernel_size=3, strides=2, padding=1,\n trainable=trainable)\n', (662, 739), True, 'import oneflow as flow\n'), ((748, 795), 'oneflow.nn.InstanceNorm2d', 'flow.nn.InstanceNorm2d', (['input'], {'affine': 'trainable'}), '(input, affine=trainable)\n', (770, 795), True, 'import oneflow as flow\n'), ((809, 839), 'oneflow.nn.leaky_relu', 'flow.nn.leaky_relu', (['input', '(0.2)'], {}), '(input, 0.2)\n', (827, 839), True, 'import oneflow as flow\n'), ((853, 948), 'oneflow.layers.conv2d', 'flow.layers.conv2d', (['input', '(64 * 8)'], {'kernel_size': 
'(3)', 'strides': '(2)', 'padding': '(1)', 'trainable': 'trainable'}), '(input, 64 * 8, kernel_size=3, strides=2, padding=1,\n trainable=trainable)\n', (871, 948), True, 'import oneflow as flow\n'), ((957, 1004), 'oneflow.nn.InstanceNorm2d', 'flow.nn.InstanceNorm2d', (['input'], {'affine': 'trainable'}), '(input, affine=trainable)\n', (979, 1004), True, 'import oneflow as flow\n'), ((1018, 1048), 'oneflow.nn.leaky_relu', 'flow.nn.leaky_relu', (['input', '(0.2)'], {}), '(input, 0.2)\n', (1036, 1048), True, 'import oneflow as flow\n'), ((1062, 1157), 'oneflow.layers.conv2d', 'flow.layers.conv2d', (['input', '(64 * 8)'], {'kernel_size': '(3)', 'strides': '(2)', 'padding': '(1)', 'trainable': 'trainable'}), '(input, 64 * 8, kernel_size=3, strides=2, padding=1,\n trainable=trainable)\n', (1080, 1157), True, 'import oneflow as flow\n'), ((1166, 1213), 'oneflow.nn.InstanceNorm2d', 'flow.nn.InstanceNorm2d', (['input'], {'affine': 'trainable'}), '(input, affine=trainable)\n', (1188, 1213), True, 'import oneflow as flow\n'), ((1470, 1500), 'oneflow.nn.leaky_relu', 'flow.nn.leaky_relu', (['input', '(0.2)'], {}), '(input, 0.2)\n', (1488, 1500), True, 'import oneflow as flow\n'), ((1515, 1556), 'oneflow.reshape', 'flow.reshape', (['input', '[input.shape[0], -1]'], {}), '(input, [input.shape[0], -1])\n', (1527, 1556), True, 'import oneflow as flow\n'), ((1567, 1617), 'oneflow.layers.dense', 'flow.layers.dense', (['input', '(256)'], {'trainable': 'trainable'}), '(input, 256, trainable=trainable)\n', (1584, 1617), True, 'import oneflow as flow\n'), ((1631, 1681), 'oneflow.layers.dense', 'flow.layers.dense', (['input', '(256)'], {'trainable': 'trainable'}), '(input, 256, trainable=trainable)\n', (1648, 1681), True, 'import oneflow as flow\n'), ((1253, 1283), 'oneflow.nn.leaky_relu', 'flow.nn.leaky_relu', (['input', '(0.2)'], {}), '(input, 0.2)\n', (1271, 1283), True, 'import oneflow as flow\n'), ((1301, 1396), 'oneflow.layers.conv2d', 'flow.layers.conv2d', (['input', '(64 * 8)'], 
{'kernel_size': '(3)', 'strides': '(2)', 'padding': '(1)', 'trainable': 'trainable'}), '(input, 64 * 8, kernel_size=3, strides=2, padding=1,\n trainable=trainable)\n', (1319, 1396), True, 'import oneflow as flow\n'), ((1409, 1456), 'oneflow.nn.InstanceNorm2d', 'flow.nn.InstanceNorm2d', (['input'], {'affine': 'trainable'}), '(input, affine=trainable)\n', (1431, 1456), True, 'import oneflow as flow\n'), ((153, 211), 'oneflow.experimental.nn.UpsamplingBilinear2d', 'flow.experimental.nn.UpsamplingBilinear2d', ([], {'size': '(256, 256)'}), '(size=(256, 256))\n', (194, 211), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from multiprocessing.reduction import ForkingPickler
import numpy as np
import oneflow as flow
from oneflow.nn.parameter import Parameter
from oneflow.framework.tensor import Tensor
from oneflow.multiprocessing import shared_memory
try:
# Early load resource_sharer to prevent a partially initialized instance
# from being inherited in a forked child process. The reduce_storage method
# requires this module indirectly through DupFd(). The built-in mp.Queue
# class pickles arguments in a background thread which may overlap with the
# fork.
import multiprocessing.resource_sharer
except ImportError:
pass
def rebuild_empty_tensor(shape, dtype, requires_grad):
t = flow.tensor([], dtype=dtype)
t.requires_grad = requires_grad
return t.reshape(*shape)
def rebuild_shm_tensor(shm, shape, dtype, requires_grad):
def delete_shm():
shm.close()
try:
shm.unlink()
except:
pass
arr = np.ndarray(shape, dtype=dtype, buffer=shm.buf)
t = flow.from_numpy(arr)
t._register_storage_delete_hook(delete_shm)
t.requires_grad = requires_grad
return t
def rebuild_empty_parameter(shape, dtype, requires_grad):
t = flow.tensor([], dtype=dtype)
t = t.reshape(*shape)
return Parameter(t, requires_grad=requires_grad)
def rebuild_shm_parameter(shm, shape, dtype, requires_grad):
def delete_shm():
shm.close()
shm.unlink()
arr = np.ndarray(shape, dtype=dtype, buffer=shm.buf)
t = flow.from_numpy(arr)
t._register_storage_delete_hook(delete_shm)
return Parameter(t, requires_grad=requires_grad)
def reduce_tensor(tensor):
tensor_data = tensor.numpy()
requires_grad = tensor.requires_grad
if tensor_data.nbytes == 0:
return (rebuild_empty_tensor, (tensor.shape, tensor.dtype, requires_grad))
else:
shm = shared_memory.SharedMemory(create=True, size=tensor_data.nbytes)
shm_numpy = np.ndarray(
tensor_data.shape, dtype=tensor_data.dtype, buffer=shm.buf
)
shm_numpy[:] = tensor_data[:]
return (
rebuild_shm_tensor,
(shm, tensor_data.shape, tensor_data.dtype, requires_grad),
)
def reduce_parameter(tensor):
tensor_data = tensor.numpy()
requires_grad = tensor.requires_grad
if tensor_data.nbytes == 0:
return (rebuild_empty_parameter, (tensor, shape, tensor.dtype, requires_grad))
else:
shm = shared_memory.SharedMemory(create=True, size=tensor_data.nbytes)
shm_numpy = np.ndarray(
tensor_data.shape, dtype=tensor_data.dtype, buffer=shm.buf
)
shm_numpy[:] = tensor_data[:]
return (
rebuild_shm_parameter,
(shm, tensor_data.shape, tensor_data.dtype, requires_grad),
)
def init_reductions():
ForkingPickler.register(Tensor, reduce_tensor)
ForkingPickler.register(flow._oneflow_internal.Tensor, reduce_tensor)
ForkingPickler.register(Parameter, reduce_parameter)
ForkingPickler.register(flow._oneflow_internal.nn.Parameter, reduce_parameter)
| [
"oneflow.from_numpy",
"oneflow.tensor",
"oneflow.nn.parameter.Parameter",
"oneflow.multiprocessing.shared_memory.SharedMemory"
] | [((1294, 1322), 'oneflow.tensor', 'flow.tensor', (['[]'], {'dtype': 'dtype'}), '([], dtype=dtype)\n', (1305, 1322), True, 'import oneflow as flow\n'), ((1572, 1618), 'numpy.ndarray', 'np.ndarray', (['shape'], {'dtype': 'dtype', 'buffer': 'shm.buf'}), '(shape, dtype=dtype, buffer=shm.buf)\n', (1582, 1618), True, 'import numpy as np\n'), ((1627, 1647), 'oneflow.from_numpy', 'flow.from_numpy', (['arr'], {}), '(arr)\n', (1642, 1647), True, 'import oneflow as flow\n'), ((1814, 1842), 'oneflow.tensor', 'flow.tensor', (['[]'], {'dtype': 'dtype'}), '([], dtype=dtype)\n', (1825, 1842), True, 'import oneflow as flow\n'), ((1880, 1921), 'oneflow.nn.parameter.Parameter', 'Parameter', (['t'], {'requires_grad': 'requires_grad'}), '(t, requires_grad=requires_grad)\n', (1889, 1921), False, 'from oneflow.nn.parameter import Parameter\n'), ((2059, 2105), 'numpy.ndarray', 'np.ndarray', (['shape'], {'dtype': 'dtype', 'buffer': 'shm.buf'}), '(shape, dtype=dtype, buffer=shm.buf)\n', (2069, 2105), True, 'import numpy as np\n'), ((2114, 2134), 'oneflow.from_numpy', 'flow.from_numpy', (['arr'], {}), '(arr)\n', (2129, 2134), True, 'import oneflow as flow\n'), ((2194, 2235), 'oneflow.nn.parameter.Parameter', 'Parameter', (['t'], {'requires_grad': 'requires_grad'}), '(t, requires_grad=requires_grad)\n', (2203, 2235), False, 'from oneflow.nn.parameter import Parameter\n'), ((3455, 3501), 'multiprocessing.reduction.ForkingPickler.register', 'ForkingPickler.register', (['Tensor', 'reduce_tensor'], {}), '(Tensor, reduce_tensor)\n', (3478, 3501), False, 'from multiprocessing.reduction import ForkingPickler\n'), ((3506, 3575), 'multiprocessing.reduction.ForkingPickler.register', 'ForkingPickler.register', (['flow._oneflow_internal.Tensor', 'reduce_tensor'], {}), '(flow._oneflow_internal.Tensor, reduce_tensor)\n', (3529, 3575), False, 'from multiprocessing.reduction import ForkingPickler\n'), ((3580, 3632), 'multiprocessing.reduction.ForkingPickler.register', 'ForkingPickler.register', 
(['Parameter', 'reduce_parameter'], {}), '(Parameter, reduce_parameter)\n', (3603, 3632), False, 'from multiprocessing.reduction import ForkingPickler\n'), ((3637, 3715), 'multiprocessing.reduction.ForkingPickler.register', 'ForkingPickler.register', (['flow._oneflow_internal.nn.Parameter', 'reduce_parameter'], {}), '(flow._oneflow_internal.nn.Parameter, reduce_parameter)\n', (3660, 3715), False, 'from multiprocessing.reduction import ForkingPickler\n'), ((2479, 2543), 'oneflow.multiprocessing.shared_memory.SharedMemory', 'shared_memory.SharedMemory', ([], {'create': '(True)', 'size': 'tensor_data.nbytes'}), '(create=True, size=tensor_data.nbytes)\n', (2505, 2543), False, 'from oneflow.multiprocessing import shared_memory\n'), ((2564, 2634), 'numpy.ndarray', 'np.ndarray', (['tensor_data.shape'], {'dtype': 'tensor_data.dtype', 'buffer': 'shm.buf'}), '(tensor_data.shape, dtype=tensor_data.dtype, buffer=shm.buf)\n', (2574, 2634), True, 'import numpy as np\n'), ((3076, 3140), 'oneflow.multiprocessing.shared_memory.SharedMemory', 'shared_memory.SharedMemory', ([], {'create': '(True)', 'size': 'tensor_data.nbytes'}), '(create=True, size=tensor_data.nbytes)\n', (3102, 3140), False, 'from oneflow.multiprocessing import shared_memory\n'), ((3161, 3231), 'numpy.ndarray', 'np.ndarray', (['tensor_data.shape'], {'dtype': 'tensor_data.dtype', 'buffer': 'shm.buf'}), '(tensor_data.shape, dtype=tensor_data.dtype, buffer=shm.buf)\n', (3171, 3231), True, 'import numpy as np\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import numpy as np
import oneflow as flow
import oneflow.unittest
from oneflow.test_utils.automated_test_util import *
def _check_forward_and_backward(test_case, input, of_out, torch_out):
# compare forward
test_case.assertTrue(
np.array_equal(of_out.numpy(), torch_out.cpu().detach().numpy())
)
# compare backward
of_out.sum().backward()
torch_out.sum().backward()
torch_grad_local = input.pytorch.grad.cpu().detach()
test_case.assertTrue(
np.array_equal(input.oneflow.grad.numpy(), torch_grad_local.numpy())
)
def _test_slice_random_data(test_case, placement, sbp):
dims = [random(1, 2) * 8 for _ in range(2)]
input = random_tensor(2, *dims)
x = input.to_global(placement=placement, sbp=sbp)
slice_tup_list = [[None, None, None], [0, 5, 2]]
of_out = flow.slice(x.oneflow, slice_tup_list=slice_tup_list)
torch_out = x.pytorch[:, 0:5:2]
_check_forward_and_backward(test_case, input, of_out, torch_out)
def _test_slice_empty(test_case, placement, sbp):
dims = [random(1, 2) * 8 for _ in range(2)]
input = random_tensor(2, *dims)
x = input.to_global(placement=placement, sbp=sbp)
slice_tup_list = [[3, 3, 1], [None, None, None]]
of_out = flow.slice(x.oneflow, slice_tup_list=slice_tup_list)
torch_out = x.pytorch[3:3:1, :]
_check_forward_and_backward(test_case, input, of_out, torch_out)
def _test_slice_1dim(test_case, placement, sbp):
dims = [random(1, 2) * 8 for _ in range(2)]
input = random_tensor(2, *dims)
x = input.to_global(placement=placement, sbp=sbp)
of_out = x.oneflow[2]
torch_out = x.pytorch[2]
_check_forward_and_backward(test_case, input, of_out, torch_out)
def _test_negative_index(test_case, placement, sbp):
dims = [random(1, 2) * 8 for _ in range(2)]
input = random_tensor(2, *dims)
x = input.to_global(placement=placement, sbp=sbp)
of_out = x.oneflow[-1:-6:1, :]
torch_out = x.pytorch[-1:-6:1, :]
_check_forward_and_backward(test_case, input, of_out, torch_out)
def _test_slice_ellipsis_type(test_case, placement, sbp):
dims = [random(1, 2) * 8 for _ in range(2)]
input = random_tensor(2, *dims)
x = input.to_global(placement=placement, sbp=sbp)
of_out = x.oneflow[..., :]
torch_out = x.pytorch[..., :]
_check_forward_and_backward(test_case, input, of_out, torch_out)
def _test_logical_slice(test_case, placement, sbp):
input = random_tensor(2, 8, 8, requires_grad=True).oneflow
x_numpy = input.detach().cpu().numpy()
x = input.to_global(placement=placement, sbp=sbp)
y = flow.logical_slice(x, slice_tup_list=[[0, 1, 1]])
# forward
test_case.assertTrue(np.array_equal(y.numpy(), x_numpy[0:1:1]))
# backward
y.sum().backward()
input_grad_np = np.zeros((8, 8))
input_grad_np[0:1:1, :] = 1
test_case.assertTrue(np.array_equal(input.grad.numpy(), input_grad_np))
def _test_logical_slice_with_bool(test_case, placement, sbp):
x = random_tensor(2, 8, 8).oneflow > 0.5
x_numpy = x.detach().cpu().numpy()
x = x.to_global(placement=placement, sbp=sbp)
y = flow.logical_slice(x, slice_tup_list=[[0, 1, 1]])
test_case.assertTrue(np.array_equal(y.numpy(), x_numpy[0:1:1]))
def _test_logical_slice_with_grad(test_case, placement, sbp):
x = random_tensor(2, 4, 4, requires_grad=True).oneflow
x_numpy = x.detach().cpu().numpy()
class LogicalSliceWithGrad(flow.nn.Module):
def __init__(self):
super().__init__()
self.input_grad = flow.nn.Parameter(flow.zeros(4, 4))
def forward(self, input):
x = input + self.input_grad
x = x.to_global(placement, sbp)
return x[:, :2]
logical_slice_with_grad = LogicalSliceWithGrad().to_global(
placement, [flow.sbp.broadcast,] * len(sbp)
)
of_sgd = flow.optim.SGD(logical_slice_with_grad.parameters(), lr=1.0, momentum=0.0)
class LogicalSliceTrainGraph(flow.nn.Graph):
def __init__(self):
super().__init__()
self.module = logical_slice_with_grad
self.add_optimizer(of_sgd)
def build(self, x):
out = self.module(x)
z = out.sum()
z.backward()
return out
graph = LogicalSliceTrainGraph()
input = x.to_global(placement=placement, sbp=sbp)
y = graph(input)
# output
test_case.assertTrue(np.array_equal(y.numpy(), x_numpy[:, :2]))
# input_grad
x_grad_np = np.zeros((4, 4))
x_grad_np[:, :2] = 1
test_case.assertTrue(
np.array_equal(-graph.module.input_grad.origin.numpy(), x_grad_np)
)
class TestSlice(flow.unittest.TestCase):
@globaltest
def test_slice(test_case):
for placement in all_placement():
for sbp in all_sbp(placement, max_dim=2):
_test_slice_random_data(test_case, placement, sbp)
_test_slice_empty(test_case, placement, sbp)
_test_slice_1dim(test_case, placement, sbp)
_test_negative_index(test_case, placement, sbp)
_test_slice_ellipsis_type(test_case, placement, sbp)
class TestLogicalSlice(flow.unittest.TestCase):
@globaltest
def test_logical_slice(test_case):
for placement in all_placement():
for sbp in all_sbp(placement, max_dim=2):
_test_logical_slice(test_case, placement, sbp)
_test_logical_slice_with_bool(test_case, placement, sbp)
_test_logical_slice_with_grad(test_case, placement, sbp)
if __name__ == "__main__":
unittest.main()
| [
"oneflow.logical_slice",
"oneflow.zeros",
"oneflow.slice"
] | [((1437, 1489), 'oneflow.slice', 'flow.slice', (['x.oneflow'], {'slice_tup_list': 'slice_tup_list'}), '(x.oneflow, slice_tup_list=slice_tup_list)\n', (1447, 1489), True, 'import oneflow as flow\n'), ((1852, 1904), 'oneflow.slice', 'flow.slice', (['x.oneflow'], {'slice_tup_list': 'slice_tup_list'}), '(x.oneflow, slice_tup_list=slice_tup_list)\n', (1862, 1904), True, 'import oneflow as flow\n'), ((3217, 3266), 'oneflow.logical_slice', 'flow.logical_slice', (['x'], {'slice_tup_list': '[[0, 1, 1]]'}), '(x, slice_tup_list=[[0, 1, 1]])\n', (3235, 3266), True, 'import oneflow as flow\n'), ((3409, 3425), 'numpy.zeros', 'np.zeros', (['(8, 8)'], {}), '((8, 8))\n', (3417, 3425), True, 'import numpy as np\n'), ((3741, 3790), 'oneflow.logical_slice', 'flow.logical_slice', (['x'], {'slice_tup_list': '[[0, 1, 1]]'}), '(x, slice_tup_list=[[0, 1, 1]])\n', (3759, 3790), True, 'import oneflow as flow\n'), ((5118, 5134), 'numpy.zeros', 'np.zeros', (['(4, 4)'], {}), '((4, 4))\n', (5126, 5134), True, 'import numpy as np\n'), ((6217, 6232), 'unittest.main', 'unittest.main', ([], {}), '()\n', (6230, 6232), False, 'import unittest\n'), ((4178, 4194), 'oneflow.zeros', 'flow.zeros', (['(4)', '(4)'], {}), '(4, 4)\n', (4188, 4194), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import os
from collections import OrderedDict
import numpy as np
import oneflow as flow
import tensorflow as tf
import test_global_storage
from test_util import GenArgList, type_name_to_flow_type
gpus = tf.config.experimental.list_physical_devices("GPU")
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
def compare_with_tensorflow(device_type, x_shape, data_type, axis):
assert device_type in ["gpu", "cpu"]
flow.clear_default_session()
func_config = flow.FunctionConfig()
if data_type == "float16":
dtype = flow.float
else:
dtype = type_name_to_flow_type[data_type]
@flow.global_function(type="train", function_config=func_config)
def SoftmaxJob():
with flow.scope.placement(device_type, "0:0"):
x = flow.get_variable(
"x",
shape=x_shape,
dtype=dtype,
initializer=flow.random_uniform_initializer(minval=-1.0, maxval=1.0),
trainable=True,
)
x1 = x
x = flow.identity(x)
if data_type == "float16":
loss = flow.cast(
flow.nn.softmax(flow.cast(x, dtype=flow.float16), axis=axis),
dtype=flow.float,
)
else:
loss = flow.nn.softmax(x, axis=axis)
flow.watch(x, test_global_storage.Setter("x"))
flow.watch_diff(x, test_global_storage.Setter("x_diff"))
flow.watch(loss, test_global_storage.Setter("loss"))
flow.watch_diff(loss, test_global_storage.Setter("loss_diff"))
total_loss = loss * x1
flow.optimizer.SGD(
flow.optimizer.PiecewiseConstantScheduler([], [1e-4]), momentum=0
).minimize(total_loss)
return loss
# OneFlow
check_point = flow.train.CheckPoint()
check_point.init()
of_out = SoftmaxJob().get()
# TensorFlow
with tf.GradientTape(persistent=True) as tape:
x = tf.Variable(test_global_storage.Get("x"))
tf_out = tf.nn.softmax(x, axis=axis)
loss_diff = test_global_storage.Get("loss_diff")
tf_x_diff = tape.gradient(tf_out, x, loss_diff)
if data_type == "float16":
tolerance = 1e-3
else:
tolerance = 1e-5
assert np.allclose(of_out.numpy(), tf_out.numpy(), rtol=tolerance, atol=tolerance)
assert np.allclose(
test_global_storage.Get("x_diff"),
tf_x_diff.numpy(),
rtol=tolerance,
atol=tolerance,
)
@flow.unittest.skip_unless_1n1d()
class TestSoftmax(flow.unittest.TestCase):
    """Parameterized softmax tests comparing OneFlow against TensorFlow.

    float16 is skipped on CPU because the CPU kernel does not support it.
    """

    def test_softmax_shape(test_case):
        """Sweep a range of input shapes, always reducing over the last axis."""
        if flow.eager_execution_enabled():
            # Fixed typo: "erger" -> "eager". Lazy-graph jobs cannot run eagerly.
            print("\nSkip under eager mode!")
            return
        arg_dict = OrderedDict()
        arg_dict["device_type"] = ["gpu", "cpu"]
        arg_dict["x_shape"] = [
            (10, 10, 20, 30),
            (10, 20, 13),
            (10, 20, 30),
            (10, 20),
            (10, 60),
            (32, 12, 128),
            (10, 960),
            (12, 2001),
            (10, 4096),
            (10, 8092),
            (256, 1001),
            (100, 65536),
            (10, 65535),
        ]
        arg_dict["data_type"] = ["float32", "double", "float16"]
        arg_dict["axis"] = [-1]
        for arg in GenArgList(arg_dict):
            if arg[0] == "cpu" and arg[2] == "float16":
                continue
            compare_with_tensorflow(*arg)

    def test_softmax_axis(test_case):
        """Sweep every valid (positive and negative) axis for a 4-D input."""
        if flow.eager_execution_enabled():
            # Fixed typo: "erger" -> "eager".
            print("\nSkip under eager mode!")
            return
        arg_dict = OrderedDict()
        arg_dict["device_type"] = ["gpu", "cpu"]
        arg_dict["x_shape"] = [(10, 20, 30, 40)]
        arg_dict["data_type"] = ["float32", "double", "float16"]
        arg_dict["axis"] = [-4, -3, -2, -1, 0, 1, 2, 3]
        for arg in GenArgList(arg_dict):
            if arg[0] == "cpu" and arg[2] == "float16":
                continue
            compare_with_tensorflow(*arg)
if __name__ == "__main__":
    # `unittest` is not imported at the top of this file; import it here so
    # running the file as a script does not raise NameError. The local import
    # keeps plain module import unaffected.
    import unittest

    unittest.main()
| [
"oneflow.eager_execution_enabled",
"oneflow.clear_default_session",
"oneflow.unittest.skip_unless_1n1d",
"oneflow.optimizer.PiecewiseConstantScheduler",
"oneflow.nn.softmax",
"oneflow.FunctionConfig",
"oneflow.random_uniform_initializer",
"oneflow.global_function",
"oneflow.identity",
"oneflow.sco... | [((811, 862), 'tensorflow.config.experimental.list_physical_devices', 'tf.config.experimental.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (855, 862), True, 'import tensorflow as tf\n'), ((3162, 3194), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (3192, 3194), True, 'import oneflow as flow\n'), ((884, 935), 'tensorflow.config.experimental.set_memory_growth', 'tf.config.experimental.set_memory_growth', (['gpu', '(True)'], {}), '(gpu, True)\n', (924, 935), True, 'import tensorflow as tf\n'), ((1051, 1079), 'oneflow.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (1077, 1079), True, 'import oneflow as flow\n'), ((1098, 1119), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (1117, 1119), True, 'import oneflow as flow\n'), ((1245, 1308), 'oneflow.global_function', 'flow.global_function', ([], {'type': '"""train"""', 'function_config': 'func_config'}), "(type='train', function_config=func_config)\n", (1265, 1308), True, 'import oneflow as flow\n'), ((2481, 2504), 'oneflow.train.CheckPoint', 'flow.train.CheckPoint', ([], {}), '()\n', (2502, 2504), True, 'import oneflow as flow\n'), ((2744, 2780), 'test_global_storage.Get', 'test_global_storage.Get', (['"""loss_diff"""'], {}), "('loss_diff')\n", (2767, 2780), False, 'import test_global_storage\n'), ((4690, 4705), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4703, 4705), False, 'import unittest\n'), ((2586, 2618), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {'persistent': '(True)'}), '(persistent=True)\n', (2601, 2618), True, 'import tensorflow as tf\n'), ((2699, 2726), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['x'], {'axis': 'axis'}), '(x, axis=axis)\n', (2712, 2726), True, 'import tensorflow as tf\n'), ((3043, 3076), 'test_global_storage.Get', 'test_global_storage.Get', (['"""x_diff"""'], {}), "('x_diff')\n", (3066, 3076), False, 'import test_global_storage\n'), ((3288, 3318), 
'oneflow.eager_execution_enabled', 'flow.eager_execution_enabled', ([], {}), '()\n', (3316, 3318), True, 'import oneflow as flow\n'), ((3404, 3417), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3415, 3417), False, 'from collections import OrderedDict\n'), ((3949, 3969), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (3959, 3969), False, 'from test_util import GenArgList, type_name_to_flow_type\n'), ((4144, 4174), 'oneflow.eager_execution_enabled', 'flow.eager_execution_enabled', ([], {}), '()\n', (4172, 4174), True, 'import oneflow as flow\n'), ((4260, 4273), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (4271, 4273), False, 'from collections import OrderedDict\n'), ((4512, 4532), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (4522, 4532), False, 'from test_util import GenArgList, type_name_to_flow_type\n'), ((1344, 1384), 'oneflow.scope.placement', 'flow.scope.placement', (['device_type', '"""0:0"""'], {}), "(device_type, '0:0')\n", (1364, 1384), True, 'import oneflow as flow\n'), ((1669, 1685), 'oneflow.identity', 'flow.identity', (['x'], {}), '(x)\n', (1682, 1685), True, 'import oneflow as flow\n'), ((2652, 2680), 'test_global_storage.Get', 'test_global_storage.Get', (['"""x"""'], {}), "('x')\n", (2675, 2680), False, 'import test_global_storage\n'), ((1938, 1967), 'oneflow.nn.softmax', 'flow.nn.softmax', (['x'], {'axis': 'axis'}), '(x, axis=axis)\n', (1953, 1967), True, 'import oneflow as flow\n'), ((1995, 2026), 'test_global_storage.Setter', 'test_global_storage.Setter', (['"""x"""'], {}), "('x')\n", (2021, 2026), False, 'import test_global_storage\n'), ((2059, 2095), 'test_global_storage.Setter', 'test_global_storage.Setter', (['"""x_diff"""'], {}), "('x_diff')\n", (2085, 2095), False, 'import test_global_storage\n'), ((2126, 2160), 'test_global_storage.Setter', 'test_global_storage.Setter', (['"""loss"""'], {}), "('loss')\n", (2152, 2160), False, 'import 
test_global_storage\n'), ((2196, 2235), 'test_global_storage.Setter', 'test_global_storage.Setter', (['"""loss_diff"""'], {}), "('loss_diff')\n", (2222, 2235), False, 'import test_global_storage\n'), ((1530, 1586), 'oneflow.random_uniform_initializer', 'flow.random_uniform_initializer', ([], {'minval': '(-1.0)', 'maxval': '(1.0)'}), '(minval=-1.0, maxval=1.0)\n', (1561, 1586), True, 'import oneflow as flow\n'), ((1795, 1827), 'oneflow.cast', 'flow.cast', (['x'], {'dtype': 'flow.float16'}), '(x, dtype=flow.float16)\n', (1804, 1827), True, 'import oneflow as flow\n'), ((2322, 2377), 'oneflow.optimizer.PiecewiseConstantScheduler', 'flow.optimizer.PiecewiseConstantScheduler', (['[]', '[0.0001]'], {}), '([], [0.0001])\n', (2363, 2377), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from collections import OrderedDict
import numpy as np
import oneflow as flow
import oneflow.typing as oft
import tensorflow as tf
from test_util import Args, CompareOpWithTensorFlow, GenArgDict
# Shared job configuration for every global function in this file:
# all jobs default to float32.
func_config = flow.FunctionConfig()
func_config.default_data_type(flow.float)
def test_naive(test_case):
    """squared_difference on two same-shape (5, 2) inputs equals (x - y) ** 2."""

    @flow.global_function(function_config=func_config)
    def SqrDiffJob(a: oft.Numpy.Placeholder((5, 2)), b: oft.Numpy.Placeholder((5, 2))):
        return flow.math.squared_difference(a, b)

    x = np.random.rand(5, 2).astype(np.float32)
    y = np.random.rand(5, 2).astype(np.float32)
    # Removed the dead `z = None` pre-assignment; the job result is bound directly.
    z = SqrDiffJob(x, y).get().numpy()
    test_case.assertTrue(np.allclose(z, (x - y) * (x - y)))
def test_broadcast(test_case):
    """squared_difference broadcasts a (1, 2) operand against a (5, 2) one."""

    @flow.global_function(function_config=func_config)
    def SqrDiffJob(a: oft.Numpy.Placeholder((5, 2)), b: oft.Numpy.Placeholder((1, 2))):
        return flow.math.squared_difference(a, b)

    x = np.random.rand(5, 2).astype(np.float32)
    y = np.random.rand(1, 2).astype(np.float32)
    # Removed the dead `z = None` pre-assignment; the job result is bound directly.
    z = SqrDiffJob(x, y).get().numpy()
    test_case.assertTrue(np.allclose(z, (x - y) * (x - y)))
def test_xy_sqr_diff_x1(test_case):
    # (64, 64) vs (64, 1): broadcast the second operand along the last axis.
    GenerateTest(test_case, (64, 64), (64, 1))
def test_xy_sqr_diff_1y(test_case):
    # (64, 64) vs (1, 64): broadcast the second operand along the first axis.
    GenerateTest(test_case, (64, 64), (1, 64))
def test_xyz_sqr_diff_x1z(test_case):
    # (64, 64, 64) vs (64, 1, 64): broadcast over the middle axis.
    GenerateTest(test_case, (64, 64, 64), (64, 1, 64))
def test_xyz_sqr_diff_1y1(test_case):
    # (64, 64, 64) vs (1, 64, 1): broadcast over the first and last axes.
    GenerateTest(test_case, (64, 64, 64), (1, 64, 1))
def GenerateTest(test_case, a_shape, b_shape):
    """Check flow.math.squared_difference on two broadcast-compatible shapes
    against the numpy reference (a - b) ** 2."""

    @flow.global_function(function_config=func_config)
    def SqrDiffJob(
        a: oft.Numpy.Placeholder(a_shape), b: oft.Numpy.Placeholder(b_shape)
    ):
        return flow.math.squared_difference(a, b)

    a = np.random.rand(*a_shape).astype(np.float32)
    b = np.random.rand(*b_shape).astype(np.float32)
    out = SqrDiffJob(a, b).get().numpy()
    diff = a - b
    test_case.assertTrue(np.allclose(out, diff * diff))
def test_scalar_sqr_diff(test_case):
    """Compare scalar-operand squared_difference with TensorFlow on gpu and cpu."""
    arg_dict = OrderedDict()
    arg_dict["device_type"] = ["gpu", "cpu"]
    arg_dict["flow_op"] = [flow.math.squared_difference]
    arg_dict["tf_op"] = [tf.math.squared_difference]
    arg_dict["input_shape"] = [(10, 10, 10)]
    # Cover positive/negative integers and large positive/negative floats.
    scalars = [1, -1, 84223.19348, -3284.139]
    arg_dict["op_args"] = [Args([s]) for s in scalars]
    for arg in GenArgDict(arg_dict):
        CompareOpWithTensorFlow(**arg)
| [
"oneflow.global_function",
"oneflow.typing.Numpy.Placeholder",
"oneflow.FunctionConfig",
"oneflow.math.squared_difference"
] | [((801, 822), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (820, 822), True, 'import oneflow as flow\n'), ((899, 948), 'oneflow.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (919, 948), True, 'import oneflow as flow\n'), ((1334, 1383), 'oneflow.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (1354, 1383), True, 'import oneflow as flow\n'), ((2144, 2193), 'oneflow.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (2164, 2193), True, 'import oneflow as flow\n'), ((2606, 2619), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2617, 2619), False, 'from collections import OrderedDict\n'), ((2964, 2984), 'test_util.GenArgDict', 'GenArgDict', (['arg_dict'], {}), '(arg_dict)\n', (2974, 2984), False, 'from test_util import Args, CompareOpWithTensorFlow, GenArgDict\n'), ((1052, 1086), 'oneflow.math.squared_difference', 'flow.math.squared_difference', (['a', 'b'], {}), '(a, b)\n', (1080, 1086), True, 'import oneflow as flow\n'), ((1261, 1294), 'numpy.allclose', 'np.allclose', (['z', '((x - y) * (x - y))'], {}), '(z, (x - y) * (x - y))\n', (1272, 1294), True, 'import numpy as np\n'), ((1487, 1521), 'oneflow.math.squared_difference', 'flow.math.squared_difference', (['a', 'b'], {}), '(a, b)\n', (1515, 1521), True, 'import oneflow as flow\n'), ((1696, 1729), 'numpy.allclose', 'np.allclose', (['z', '((x - y) * (x - y))'], {}), '(z, (x - y) * (x - y))\n', (1707, 1729), True, 'import numpy as np\n'), ((2313, 2347), 'oneflow.math.squared_difference', 'flow.math.squared_difference', (['a', 'b'], {}), '(a, b)\n', (2341, 2347), True, 'import oneflow as flow\n'), ((2517, 2550), 'numpy.allclose', 'np.allclose', (['y', '((a - b) * (a - b))'], {}), '(y, (a - b) * (a - b))\n', (2528, 2550), True, 'import numpy as np\n'), ((2856, 2865), 
'test_util.Args', 'Args', (['[1]'], {}), '([1])\n', (2860, 2865), False, 'from test_util import Args, CompareOpWithTensorFlow, GenArgDict\n'), ((2875, 2885), 'test_util.Args', 'Args', (['[-1]'], {}), '([-1])\n', (2879, 2885), False, 'from test_util import Args, CompareOpWithTensorFlow, GenArgDict\n'), ((2895, 2914), 'test_util.Args', 'Args', (['[84223.19348]'], {}), '([84223.19348])\n', (2899, 2914), False, 'from test_util import Args, CompareOpWithTensorFlow, GenArgDict\n'), ((2924, 2941), 'test_util.Args', 'Args', (['[-3284.139]'], {}), '([-3284.139])\n', (2928, 2941), False, 'from test_util import Args, CompareOpWithTensorFlow, GenArgDict\n'), ((2994, 3024), 'test_util.CompareOpWithTensorFlow', 'CompareOpWithTensorFlow', ([], {}), '(**arg)\n', (3017, 3024), False, 'from test_util import Args, CompareOpWithTensorFlow, GenArgDict\n'), ((971, 1000), 'oneflow.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['(5, 2)'], {}), '((5, 2))\n', (992, 1000), True, 'import oneflow.typing as oft\n'), ((1005, 1034), 'oneflow.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['(5, 2)'], {}), '((5, 2))\n', (1026, 1034), True, 'import oneflow.typing as oft\n'), ((1096, 1116), 'numpy.random.rand', 'np.random.rand', (['(5)', '(2)'], {}), '(5, 2)\n', (1110, 1116), True, 'import numpy as np\n'), ((1144, 1164), 'numpy.random.rand', 'np.random.rand', (['(5)', '(2)'], {}), '(5, 2)\n', (1158, 1164), True, 'import numpy as np\n'), ((1406, 1435), 'oneflow.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['(5, 2)'], {}), '((5, 2))\n', (1427, 1435), True, 'import oneflow.typing as oft\n'), ((1440, 1469), 'oneflow.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['(1, 2)'], {}), '((1, 2))\n', (1461, 1469), True, 'import oneflow.typing as oft\n'), ((1531, 1551), 'numpy.random.rand', 'np.random.rand', (['(5)', '(2)'], {}), '(5, 2)\n', (1545, 1551), True, 'import numpy as np\n'), ((1579, 1599), 'numpy.random.rand', 'np.random.rand', (['(1)', '(2)'], {}), '(1, 2)\n', (1593, 1599), 
True, 'import numpy as np\n'), ((2225, 2255), 'oneflow.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['a_shape'], {}), '(a_shape)\n', (2246, 2255), True, 'import oneflow.typing as oft\n'), ((2260, 2290), 'oneflow.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['b_shape'], {}), '(b_shape)\n', (2281, 2290), True, 'import oneflow.typing as oft\n'), ((2357, 2381), 'numpy.random.rand', 'np.random.rand', (['*a_shape'], {}), '(*a_shape)\n', (2371, 2381), True, 'import numpy as np\n'), ((2409, 2433), 'numpy.random.rand', 'np.random.rand', (['*b_shape'], {}), '(*b_shape)\n', (2423, 2433), True, 'import numpy as np\n')] |
import numpy as np
import cv2
import oneflow as flow
import oneflow.typing as tp
def gram_matrix(y):
    """Return the Gram matrix of a batched feature map y of shape (b, ch, h, w),
    normalized by the number of elements per channel map."""
    b, ch, h, w = y.shape
    feats = y.reshape((b, ch, h * w))
    gram = flow.matmul(feats, feats.transpose(1, 2))
    return gram / (ch * h * w)
def normalize_batch(batch):
    """Normalize an NCHW image batch with per-channel ImageNet mean/std
    (values are on the 0-255 pixel scale)."""
    shape = (1, 3, 1, 1)
    mean = flow.Tensor([119.90508914, 113.98250597, 103.85173186]).reshape(shape).to("cuda")
    std = flow.Tensor([58.393, 57.12, 57.375]).reshape(shape).to("cuda")
    return (batch - mean) / std
def load_image(image_path):
    """Read an image file and return a contiguous (1, 3, 256, 256) float32
    NCHW RGB array."""
    img = cv2.imread(image_path)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img = cv2.resize(img, (256, 256))
    chw = np.transpose(img, (2, 0, 1))
    batched = chw[np.newaxis, ...]
    return np.ascontiguousarray(batched, "float32")
def load_image_eval(image_path):
    """Read an image file at its native resolution and return a contiguous
    (1, 3, H, W) float32 NCHW RGB array (no resize, unlike load_image)."""
    img = cv2.imread(image_path)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    chw = np.transpose(img, (2, 0, 1))
    return np.ascontiguousarray(chw[np.newaxis, ...], "float32")
def recover_image(im):
    """Convert a (1, 3, H, W) or (3, H, W) float RGB array back to an
    HWC uint8 BGR image suitable for cv2.imwrite."""
    hwc = np.transpose(np.squeeze(im), (1, 2, 0))
    bgr = cv2.cvtColor(np.float32(hwc), cv2.COLOR_RGB2BGR)
    return bgr.astype(np.uint8)
| [
"oneflow.Tensor",
"oneflow.matmul"
] | [((650, 672), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (660, 672), False, 'import cv2\n'), ((682, 717), 'cv2.cvtColor', 'cv2.cvtColor', (['im', 'cv2.COLOR_BGR2RGB'], {}), '(im, cv2.COLOR_BGR2RGB)\n', (694, 717), False, 'import cv2\n'), ((727, 753), 'cv2.resize', 'cv2.resize', (['im', '(256, 256)'], {}), '(im, (256, 256))\n', (737, 753), False, 'import cv2\n'), ((763, 790), 'numpy.transpose', 'np.transpose', (['im', '(2, 0, 1)'], {}), '(im, (2, 0, 1))\n', (775, 790), True, 'import numpy as np\n'), ((800, 826), 'numpy.expand_dims', 'np.expand_dims', (['im'], {'axis': '(0)'}), '(im, axis=0)\n', (814, 826), True, 'import numpy as np\n'), ((838, 873), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['im', '"""float32"""'], {}), "(im, 'float32')\n", (858, 873), True, 'import numpy as np\n'), ((918, 940), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (928, 940), False, 'import cv2\n'), ((950, 985), 'cv2.cvtColor', 'cv2.cvtColor', (['im', 'cv2.COLOR_BGR2RGB'], {}), '(im, cv2.COLOR_BGR2RGB)\n', (962, 985), False, 'import cv2\n'), ((995, 1022), 'numpy.transpose', 'np.transpose', (['im', '(2, 0, 1)'], {}), '(im, (2, 0, 1))\n', (1007, 1022), True, 'import numpy as np\n'), ((1032, 1058), 'numpy.expand_dims', 'np.expand_dims', (['im'], {'axis': '(0)'}), '(im, axis=0)\n', (1046, 1058), True, 'import numpy as np\n'), ((1070, 1105), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['im', '"""float32"""'], {}), "(im, 'float32')\n", (1090, 1105), True, 'import numpy as np\n'), ((1140, 1154), 'numpy.squeeze', 'np.squeeze', (['im'], {}), '(im)\n', (1150, 1154), True, 'import numpy as np\n'), ((1164, 1191), 'numpy.transpose', 'np.transpose', (['im', '(1, 2, 0)'], {}), '(im, (1, 2, 0))\n', (1176, 1191), True, 'import numpy as np\n'), ((226, 259), 'oneflow.matmul', 'flow.matmul', (['features', 'features_t'], {}), '(features, features_t)\n', (237, 259), True, 'import oneflow as flow\n'), ((1214, 1228), 'numpy.float32', 
'np.float32', (['im'], {}), '(im)\n', (1224, 1228), True, 'import numpy as np\n'), ((386, 441), 'oneflow.Tensor', 'flow.Tensor', (['[119.90508914, 113.98250597, 103.85173186]'], {}), '([119.90508914, 113.98250597, 103.85173186])\n', (397, 441), True, 'import oneflow as flow\n'), ((509, 545), 'oneflow.Tensor', 'flow.Tensor', (['[58.393, 57.12, 57.375]'], {}), '([58.393, 57.12, 57.375])\n', (520, 545), True, 'import oneflow as flow\n')] |
import argparse
import cv2
import numpy as np
import oneflow as flow
from flowvision.models import ModelCreator
def load_image(image_path):
    """Read an image file and return a contiguous (1, 3, H, W) float32
    NCHW RGB array."""
    bgr = cv2.imread(image_path)
    rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
    chw = np.transpose(rgb, (2, 0, 1))
    return np.ascontiguousarray(chw[np.newaxis, ...], "float32")
def recover_image(im):
    """Convert a model-output float RGB array (leading singleton batch dim)
    to an HWC uint8 BGR image for cv2.imwrite."""
    squeezed = np.squeeze(im)
    hwc = np.transpose(squeezed, (1, 2, 0))
    bgr = cv2.cvtColor(np.float32(hwc), cv2.COLOR_RGB2BGR)
    return bgr.astype(np.uint8)
def stylize(args):
    """Run the selected pretrained style-transfer model on args.content_image
    and write the stylized result to args.output_image."""
    content = load_image(args.content_image)
    model = ModelCreator.create_model(
        "neural_style_transfer", pretrained=True, style_model=args.style_model
    )
    with flow.no_grad():
        model.to("cuda")
        # Pixel values are clamped to the valid [0, 255] range before inference.
        styled = model(flow.Tensor(content).clamp(0, 255).to("cuda"))
    cv2.imwrite(args.output_image, recover_image(styled.numpy()))
def main():
    """Parse command-line arguments and run stylization."""
    arg_parser = argparse.ArgumentParser(description="parser for fast-neural-style")
    arg_parser.add_argument(
        "--content-image",
        type=str,
        required=True,
        help="path to content image you want to stylize",
    )
    arg_parser.add_argument(
        "--style-model",
        type=str,
        required=True,
        # Removed dead `default="sketch"`: argparse never uses a default for a
        # required option. Fixed the help text, which was copy-pasted from
        # --content-image.
        help="name of the pretrained style model to apply (e.g. sketch)",
    )
    arg_parser.add_argument(
        "--output-image",
        type=str,
        required=True,
        help="path for saving the output image",
    )
    args = arg_parser.parse_args()
    stylize(args)
# Script entry point: parse CLI arguments and run the style transfer.
if __name__ == "__main__":
    main()
| [
"oneflow.Tensor",
"oneflow.no_grad"
] | [((152, 174), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (162, 174), False, 'import cv2\n'), ((184, 219), 'cv2.cvtColor', 'cv2.cvtColor', (['im', 'cv2.COLOR_BGR2RGB'], {}), '(im, cv2.COLOR_BGR2RGB)\n', (196, 219), False, 'import cv2\n'), ((229, 256), 'numpy.transpose', 'np.transpose', (['im', '(2, 0, 1)'], {}), '(im, (2, 0, 1))\n', (241, 256), True, 'import numpy as np\n'), ((266, 292), 'numpy.expand_dims', 'np.expand_dims', (['im'], {'axis': '(0)'}), '(im, axis=0)\n', (280, 292), True, 'import numpy as np\n'), ((304, 339), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['im', '"""float32"""'], {}), "(im, 'float32')\n", (324, 339), True, 'import numpy as np\n'), ((374, 388), 'numpy.squeeze', 'np.squeeze', (['im'], {}), '(im)\n', (384, 388), True, 'import numpy as np\n'), ((398, 425), 'numpy.transpose', 'np.transpose', (['im', '(1, 2, 0)'], {}), '(im, (1, 2, 0))\n', (410, 425), True, 'import numpy as np\n'), ((604, 705), 'flowvision.models.ModelCreator.create_model', 'ModelCreator.create_model', (['"""neural_style_transfer"""'], {'pretrained': '(True)', 'style_model': 'args.style_model'}), "('neural_style_transfer', pretrained=True,\n style_model=args.style_model)\n", (629, 705), False, 'from flowvision.models import ModelCreator\n'), ((951, 1018), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""parser for fast-neural-style"""'}), "(description='parser for fast-neural-style')\n", (974, 1018), False, 'import argparse\n'), ((448, 462), 'numpy.float32', 'np.float32', (['im'], {}), '(im)\n', (458, 462), True, 'import numpy as np\n'), ((725, 739), 'oneflow.no_grad', 'flow.no_grad', ([], {}), '()\n', (737, 739), True, 'import oneflow as flow\n'), ((801, 827), 'oneflow.Tensor', 'flow.Tensor', (['content_image'], {}), '(content_image)\n', (812, 827), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from typing import Optional
import oneflow as flow
from oneflow.framework.tensor import Tensor
from oneflow.nn.module import Module
from oneflow.nn.modules.constant import _ConstantBase
class _Loss(Module):
    """Common base for loss modules: validates and stores the `reduction` mode."""

    def __init__(self, reduction: str = "mean") -> None:
        super(_Loss, self).__init__()
        # Only these three reduction modes are supported by the subclasses.
        assert reduction in ["none", "mean", "sum"]
        self.reduction = reduction
class _WeightedLoss(_Loss):
    """Base for losses that accept an optional rescaling `weight` tensor."""

    def __init__(
        self, weight: Optional[Tensor] = None, reduction: str = "mean"
    ) -> None:
        super(_WeightedLoss, self).__init__(reduction=reduction)
        # May be None, meaning no rescaling is applied.
        self.weight = weight
class L1Loss(_Loss):
    """This operator computes the L1 Loss between each element in `input` and `target`.
    The equation is:
    if reduction = "none":
    .. math::
        output = |Target - Input|
    if reduction = "mean":
    .. math::
        output = \\frac{1}{n}\\sum_{i=1}^n|Target_i - Input_i|
    if reduction = "sum":
    .. math::
        output = \\sum_{i=1}^n|Target_i - Input_i|
    Args:
        input (oneflow.Tensor): the input Tensor.
        target (oneflow.Tensor): The target Tensor.
        reduction (str): The reduce type, it can be one of "none", "mean", "sum". Defaults to "mean".
    Returns:
        oneflow.Tensor: The result Tensor.
    For example:
    .. code-block:: python
        >>> import oneflow as flow
        >>> import numpy as np
        >>> input = flow.tensor([[1, 1, 1], [2, 2, 2], [7, 7, 7]], dtype = flow.float32)
        >>> target = flow.tensor([[4, 4, 4], [4, 4, 4], [4, 4, 4]], dtype = flow.float32)
        >>> m = flow.nn.L1Loss(reduction="none")
        >>> out = m(input, target)
        >>> out
        tensor([[3., 3., 3.],
                [2., 2., 2.],
                [3., 3., 3.]], dtype=oneflow.float32)
        >>> m_mean = flow.nn.L1Loss(reduction="mean")
        >>> out = m_mean(input, target)
        >>> out
        tensor(2.6667, dtype=oneflow.float32)
        >>> m_mean = flow.nn.L1Loss(reduction="sum")
        >>> out = m_mean(input, target)
        >>> out
        tensor(24., dtype=oneflow.float32)
    """

    def __init__(self, reduction: str = "mean") -> None:
        super(L1Loss, self).__init__(reduction)

    def forward(self, input: Tensor, target: Tensor) -> Tensor:
        # Delegate to the functional C implementation with the stored reduction.
        return flow._C.l1_loss(input, target, self.reduction)
class CrossEntropyLoss(_WeightedLoss):
    """This criterion combines :class:`~flow.nn.LogSoftmax` and :class:`~flow.nn.NLLLoss` in one single class.
    It is useful when training a classification problem with `C` classes.
    The `input` is expected to contain raw, unnormalized scores for each class.
    `input` has to be a Tensor of size either :math:`(minibatch, C)` or
    :math:`(minibatch, C, d_1, d_2, ..., d_K)`
    with :math:`K \\geq 1` for the `K`-dimensional case (described later).
    This criterion expects a class index in the range :math:`[0, C-1]` as the
    `target` for each value of a 1D tensor of size `minibatch`;
    The loss can be described as:
    .. math::
        \\text{loss}(x, class) = -\\log\\left(\\frac{\\exp(x[class])}{\\sum_j \\exp(x[j])}\\right)
                       = -x[class] + \\log\\left(\\sum_j \\exp(x[j])\\right)
    Can also be used for higher dimension inputs, such as 2D images, by providing
    an input of size :math:`(minibatch, C, d_1, d_2, ..., d_K)` with :math:`K \\geq 1`,
    where :math:`K` is the number of dimensions, and a target of appropriate shape
    (see below).
    Args:
        reduction (string, optional): Specifies the reduction to apply to the output:
            ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will
            be applied, ``'mean'``: the weighted mean of the output is taken,
            ``'sum'``: the output will be summed. Default: ``'mean'``
    For example:
    .. code-block:: python
        >>> import oneflow as flow
        >>> import numpy as np
        >>> input = flow.tensor(
        ...    [[-0.1664078, -1.7256707, -0.14690138],
        ...        [-0.21474946, 0.53737473, 0.99684894],
        ...        [-1.135804, -0.50371903, 0.7645404]], dtype=flow.float32)
        >>> target = flow.tensor(np.array([0, 1, 2]), dtype=flow.int32)
        >>> out = flow.nn.CrossEntropyLoss(reduction="none")(input, target)
        >>> out
        tensor([0.8020, 1.1167, 0.3583], dtype=oneflow.float32)
        >>> out_sum = flow.nn.CrossEntropyLoss(reduction="sum")(input, target)
        >>> out_sum
        tensor(2.2769, dtype=oneflow.float32)
        >>> out_mean = flow.nn.CrossEntropyLoss(reduction="mean")(input, target)
        >>> out_mean
        tensor(0.7590, dtype=oneflow.float32)
    """

    def __init__(
        self,
        weight: Optional[Tensor] = None,
        ignore_index: int = -100,
        reduction: str = "mean",
    ) -> None:
        super(CrossEntropyLoss, self).__init__(weight, reduction)
        # Targets equal to this index contribute nothing to the loss.
        self.ignore_index = ignore_index

    def forward(self, input: Tensor, target: Tensor) -> Tensor:
        # Delegate to the fused functional implementation.
        return flow._C.cross_entropy(
            input, target, self.weight, self.ignore_index, self.reduction
        )
class BCELoss(_WeightedLoss):
    """This operator computes the binary cross entropy loss.
    The equation is:
    if reduction = "none":
    .. math::
        out = -(Target_i*log(Input_i) + (1-Target_i)*log(1-Input_i))
    if reduction = "mean":
    .. math::
        out = -\\frac{1}{n}\\sum_{i=1}^n(Target_i*log(Input_i) + (1-Target_i)*log(1-Input_i))
    if reduction = "sum":
    .. math::
        out = -\\sum_{i=1}^n(Target_i*log(Input_i) + (1-Target_i)*log(1-Input_i))
    Args:
        weight (oneflow.Tensor, optional): The manual rescaling weight to the loss. Default to None, whose corresponding weight value is 1.
        reduction (str, optional): The reduce type, it can be one of "none", "mean", "sum". Defaults to "mean".
    Attention:
        The input value must be in the range of (0, 1). Or the loss function may return `nan` value.
    Returns:
        oneflow.Tensor: The result Tensor.
    For example:
    .. code-block:: python
        >>> import oneflow as flow
        >>> import numpy as np
        >>> input = flow.Tensor(np.array([[1.2, 0.2, -0.3], [0.7, 0.6, -2]]).astype(np.float32))
        >>> target = flow.Tensor(np.array([[0, 1, 0], [1, 0, 1]]).astype(np.float32))
        >>> weight = flow.Tensor(np.array([[2, 2, 2], [2, 2, 2]]).astype(np.float32))
        >>> activation = flow.nn.Sigmoid()
        >>> sigmoid_input = activation(input)
        >>> m = flow.nn.BCELoss(weight, reduction="none")
        >>> out = m(sigmoid_input, target)
        >>> out
        tensor([[2.9266, 1.1963, 1.1087],
                [0.8064, 2.0750, 4.2539]], dtype=oneflow.float32)
        >>> m_sum = flow.nn.BCELoss(weight, reduction="sum")
        >>> out = m_sum(sigmoid_input, target)
        >>> out
        tensor(12.3668, dtype=oneflow.float32)
        >>> m_mean = flow.nn.BCELoss(weight, reduction="mean")
        >>> out = m_mean(sigmoid_input, target)
        >>> out
        tensor(2.0611, dtype=oneflow.float32)
        >>> m_none = flow.nn.BCELoss()
        >>> out = m_none(sigmoid_input, target)
        >>> out
        tensor(1.0306, dtype=oneflow.float32)
    """

    def __init__(
        self, weight: Optional[Tensor] = None, reduction: str = "mean"
    ) -> None:
        super(BCELoss, self).__init__(weight, reduction)

    def forward(self, input: Tensor, target: Tensor) -> Tensor:
        # Expects `input` already in (0, 1) (e.g. sigmoid output); see Attention above.
        return flow._C.binary_cross_entropy_loss(
            input, target, self.weight, self.reduction
        )
class NLLLoss(_WeightedLoss):
    """ The negative log likelihood loss. It is useful to train a classification
    problem with `C` classes.
    The `input` given through a forward call is expected to contain
    log-probabilities of each class. `input` has to be a Tensor of size either
    :math:`(minibatch, C)` or :math:`(minibatch, C, d_1, d_2, ..., d_K)`
    with :math:`K \\geq 1` for the `K`-dimensional case (described later).
    Obtaining log-probabilities in a neural network is easily achieved by
    adding a `LogSoftmax` layer in the last layer of your network.
    You may use `CrossEntropyLoss` instead, if you prefer not to add an extra
    layer.
    The `target` that this loss expects should be a class index in the range :math:`[0, C-1]`
    where `C = number of classes`;
    The unreduced (i.e. with :attr:`reduction` set to ``'none'``) loss can be described as:
    .. math::
        \\ell(x, y) = L = \\{l_1,\\dots,l_N\\}^\\top, \\quad
        l_n = - w_{y_n} x_{n,y_n}, \\quad
        w_{c} = \\mathbb{1},
    where :math:`x` is the input, :math:`y` is the target, :math:`w` is the weight, and
    :math:`N` is the batch size. If :attr:`reduction` is not ``'none'``
    (default ``'mean'``), then
    .. math::
        \\ell(x, y) = \\begin{cases}
            \\sum_{n=1}^N \\frac{1}{N} l_n, &
            \\text{if reduction} = \\text{`mean';}\\\\
            \\sum_{n=1}^N l_n, &
            \\text{if reduction} = \\text{`sum'.}
        \\end{cases}
    Can also be used for higher dimension inputs, such as 2D images, by providing
    an input of size :math:`(minibatch, C, d_1, d_2, ..., d_K)` with :math:`K \\geq 1`,
    where :math:`K` is the number of dimensions, and a target of appropriate shape
    (see below). In the case of images, it computes NLL loss per-pixel.
    Args:
        reduction (string, optional): Specifies the reduction to apply to the output:
            ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will
            be applied, ``'mean'``: the weighted mean of the output is taken,
            ``'sum'``: the output will be summed. Default: ``'mean'``
    For example:
    .. code-block:: python
        >>> import oneflow as flow
        >>> import numpy as np
        >>> input = flow.tensor(
        ...    [[-0.1664078, -1.7256707, -0.14690138],
        ...        [-0.21474946, 0.53737473, 0.99684894],
        ...        [-1.135804, -0.50371903, 0.7645404]], dtype=flow.float32)
        >>> target = flow.tensor(np.array([0, 1, 2]), dtype=flow.int32)
        >>> m = flow.nn.NLLLoss(reduction="none")
        >>> out = m(input, target)
        >>> out
        tensor([ 0.1664, -0.5374, -0.7645], dtype=oneflow.float32)
        >>> m = flow.nn.NLLLoss(reduction="sum")
        >>> out = m(input, target)
        >>> out
        tensor(-1.1355, dtype=oneflow.float32)
        >>> m = flow.nn.NLLLoss(reduction="mean")
        >>> out = m(input, target)
        >>> out
        tensor(-0.3785, dtype=oneflow.float32)
    """

    def __init__(
        self,
        weight: Optional[Tensor] = None,
        ignore_index: int = -100,
        reduction: str = "mean",
    ) -> None:
        super(NLLLoss, self).__init__(weight, reduction)
        # Targets equal to this index contribute nothing to the loss.
        self.ignore_index = ignore_index

    def forward(self, input: Tensor, target: Tensor) -> Tensor:
        # Delegate to the functional implementation; `input` must hold log-probabilities.
        return flow._C.nll_loss(
            input, target, self.weight, self.ignore_index, self.reduction
        )
class KLDivLoss(_Loss):
    """The interface is consistent with PyTorch.
    The documentation is referenced from:
    https://pytorch.org/docs/1.10/generated/torch.nn.KLDivLoss.html.
    The Kullback-Leibler divergence loss measure
    `Kullback-Leibler divergence`_ is a useful distance measure for continuous
    distributions and is often useful when performing direct regression over
    the space of (discretely sampled) continuous output distributions.
    As with :class:`~torch.nn.NLLLoss`, the `input` given is expected to contain
    *log-probabilities* and is not restricted to a 2D Tensor.
    The targets are interpreted as *probabilities* by default, but could be considered
    as *log-probabilities* with :attr:`log_target` set to ``True``.
    This criterion expects a `target` `Tensor` of the same size as the
    `input` `Tensor`.
    The unreduced (i.e. with :attr:`reduction` set to ``'none'``) loss can be described as:
    .. math::
        l(x,y) = L = \\{ l_1,\\dots,l_N \\}, \\quad
        l_n = y_n \\cdot \\left( \\log y_n - x_n \\right)
    where the index :math:`N` spans all dimensions of ``input`` and :math:`L` has the same
    shape as ``input``. If :attr:`reduction` is not ``'none'`` (default ``'mean'``), then:
    .. math::
        \\ell(x, y) = \\begin{cases}
            \\operatorname{mean}(L), & \\text{if reduction} = \\text{`mean';} \\\\
            \\operatorname{sum}(L), & \\text{if reduction} = \\text{`sum'.}
        \\end{cases}
    In default :attr:`reduction` mode ``'mean'``, the losses are averaged for each minibatch over observations
    **as well as** over dimensions. ``'batchmean'`` mode gives the correct KL divergence where losses
    are averaged over batch dimension only. ``'mean'`` mode's behavior will be changed to the same as
    ``'batchmean'`` in the next major release.
    .. _`kullback-leibler divergence`: https://en.wikipedia.org/wiki/Kullback-Leibler_divergence
    Args:
        reduction (string, optional): Specifies the reduction to apply to the output:
            ``'none'`` | ``'batchmean'`` | ``'sum'`` | ``'mean'``.
            ``'none'``: no reduction will be applied.
            ``'batchmean'``: the sum of the output will be divided by batchsize.
            ``'sum'``: the output will be summed.
            ``'mean'``: the output will be divided by the number of elements in the output.
            Default: ``'mean'``
        log_target (bool, optional): Specifies whether `target` is passed in the log space.
            Default: ``False``
    .. note::
        :attr:`reduction` = ``'mean'`` doesn't return the true kl divergence value, please use
        :attr:`reduction` = ``'batchmean'`` which aligns with KL math definition.
        In the next major release, ``'mean'`` will be changed to be the same as ``'batchmean'``.
    Shape:
        - Input: :math:`(N, *)` where :math:`*` means, any number of additional
          dimensions
        - Target: :math:`(N, *)`, same shape as the input
        - Output: scalar by default. If :attr:``reduction`` is ``'none'``, then :math:`(N, *)`,
          the same shape as the input
    For example:
    .. code-block:: python
        >>> import oneflow as flow
        >>> import numpy as np
        >>> input = flow.tensor([-0.9021705, 0.08798598, 1.04686249], dtype=flow.float32)
        >>> target = flow.tensor([1.22386942, -0.89729659, 0.01615712], dtype=flow.float32)
        >>> m = flow.nn.KLDivLoss(reduction="none", log_target=False)
        >>> out = m(input, target)
        >>> out
        tensor([ 1.3514, 0.0000, -0.0836], dtype=oneflow.float32)
        >>> m = flow.nn.KLDivLoss(reduction="mean", log_target=False)
        >>> out = m(input, target)
        >>> out
        tensor(0.4226, dtype=oneflow.float32)
        >>> m = flow.nn.KLDivLoss(reduction="sum", log_target=True)
        >>> out = m(input, target)
        >>> out
        tensor(5.7801, dtype=oneflow.float32)
    """

    def __init__(self, reduction: str = "mean", log_target: bool = False) -> None:
        # Modern zero-arg super() for consistency with the Module-based
        # losses below; the base class stores ``reduction``.
        super().__init__(reduction)
        self.log_target = log_target

    def forward(self, input: Tensor, target: Tensor) -> Tensor:
        # Single fused functional kernel; ``log_target`` selects whether the
        # target is interpreted in log space.
        return flow._C.kl_div_loss(input, target, self.log_target, self.reduction)
class MSELoss(_Loss):
    """The interface is consistent with PyTorch.
    The documentation is referenced from:
    https://pytorch.org/docs/1.10/generated/torch.nn.MSELoss.html.
    Creates a criterion that measures the mean squared error (squared L2 norm) between
    each element in the input :math:`x` and target :math:`y`.
    The unreduced (i.e. with :attr:`reduction` set to ``'none'``) loss can be described as:
    .. math::
        \\ell(x, y) = L = \\{l_1,\\dots,l_N\\}^\\top, \\quad
        l_n = \\left( x_n - y_n \\right)^2,
    where :math:`N` is the batch size. If :attr:`reduction` is not ``'none'``
    (default ``'mean'``), then:
    .. math::
        \\ell(x, y) =
        \\begin{cases}
            \\operatorname{mean}(L), & \\text{if reduction} = \\text{`mean';}\\\\
            \\operatorname{sum}(L), & \\text{if reduction} = \\text{`sum'.}
        \\end{cases}
    :math:`x` and :math:`y` are tensors of arbitrary shapes with a total
    of :math:`n` elements each.
    The mean operation still operates over all the elements, and divides by :math:`n`.
    The division by :math:`n` can be avoided if one sets ``reduction = 'sum'``.
    Args:
        reduction (string, optional): Specifies the reduction to apply to the output:
            ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
            ``'mean'``: the sum of the output will be divided by the number of
            elements in the output, ``'sum'``: the output will be summed. Default: ``'mean'``
    Shape:
        - Input: :math:`(N, *)` where :math:`*` means, any number of additional
          dimensions
        - Target: :math:`(N, *)`, same shape as the input
    For example:
    .. code-block:: python
        >>> import oneflow as flow
        >>> import numpy as np
        >>> input = flow.tensor(
        ... [[-0.02557137, 0.03101675, 1.37493674],
        ... [0.25599439, -1.08372561, -0.21006816]], dtype=flow.float32)
        >>> target = flow.tensor(
        ... [[-1.53105064, -0.68137555, 0.5931354],
        ... [-0.49158347, 0.93673637, 0.1324141]], dtype=flow.float32)
        >>> m = flow.nn.MSELoss(reduction="none")
        >>> out = m(input, target)
        >>> out
        tensor([[2.2665, 0.5075, 0.6112],
                [0.5589, 4.0823, 0.1173]], dtype=oneflow.float32)
        >>> m = flow.nn.MSELoss(reduction="mean")
        >>> out = m(input, target)
        >>> out
        tensor(1.3573, dtype=oneflow.float32)
        >>> m = flow.nn.MSELoss(reduction="sum")
        >>> out = m(input, target)
        >>> out
        tensor(8.1436, dtype=oneflow.float32)
    """

    def __init__(self, reduction: str = "mean") -> None:
        # Modern zero-arg super() (was legacy two-arg form); the base class
        # stores ``reduction`` — this class holds no state of its own.
        super().__init__(reduction)

    def forward(self, input: Tensor, target: Tensor) -> Tensor:
        # Straight pass-through to the functional MSE kernel.
        return flow._C.mse_loss(input, target, self.reduction)
class MarginRankingLoss(_Loss):
    """Creates a criterion that measures the loss given
    inputs :math:`x1`, :math:`x2`, two 1D mini-batch `Tensors`,
    and a label 1D mini-batch tensor :math:`y` (containing 1 or -1).
    If :math:`y = 1` then it assumed the first input should be ranked higher
    (have a larger value) than the second input, and vice-versa for :math:`y = -1`.
    The loss function for each sample in the mini-batch is:
    .. math::
        \\text{loss}(x1, x2, y) = \\max(0, -y * (x1 - x2) + \\text{margin})
    Args:
        margin (float, optional): Has a default value of :math:`0`.
        reduction (string, optional): Specifies the reduction to apply to the output:
            ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
            ``'mean'``: the sum of the output will be divided by the number of
            elements in the output, ``'sum'``: the output will be summed. Default: ``'mean'``
    Shape:
        - `x1` : :math:`(N, D)` where `N` is the batch size and `D` is the size of a sample.
        - `x2` : :math:`(N, D)` where `N` is the batch size and `D` is the size of a sample.
        - Target: :math:`(N)`
        - Output: scalar. If :attr:`reduction` is ``'none'``, then :math:`(N)`.
    For example:
    .. code-block:: python
        >>> import oneflow as flow
        >>> import numpy as np
        >>> x1 = flow.tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), dtype=flow.float32)
        >>> x2 = flow.tensor(np.array([[2, 2, 2], [2, 2, 2], [2, 2, 2]]), dtype=flow.float32)
        >>> target = flow.tensor(np.array([[1, -1, 1],[-1, 1, -1], [1, 1, 1]]), dtype=flow.float32)
        >>> m = flow.nn.MarginRankingLoss(margin =1.0, reduction="none")
        >>> out = m(x1, x2, target)
        >>> out
        tensor([[2., 1., 0.],
                [3., 0., 5.],
                [0., 0., 0.]], dtype=oneflow.float32)
        >>> m = flow.nn.MarginRankingLoss(margin = 0.3, reduction="sum")
        >>> out = m(x1, x2, target)
        >>> out
        tensor(8.2000, dtype=oneflow.float32)
        >>> m = flow.nn.MarginRankingLoss(margin = 10, reduction="mean")
        >>> out = m(x1, x2, target)
        >>> out
        tensor(8.3333, dtype=oneflow.float32)
    """

    def __init__(self, margin: float = 0.0, reduction: str = "mean") -> None:
        # Modern zero-arg super() (was legacy two-arg form).
        super().__init__(reduction)
        self.margin = margin

    def forward(self, input1: Tensor, input2: Tensor, target: Tensor) -> Tensor:
        # Delegate to the functional kernel; margin/reduction come from self.
        return flow._C.margin_ranking_loss(
            input1, input2, target, self.margin, self.reduction
        )
class CTCLoss(_Loss):
    """The Connectionist Temporal Classification loss.
    The interface is consistent with PyTorch.
    The documentation is referenced from:
    https://pytorch.org/docs/1.10/generated/torch.nn.CTCLoss.html.
    Calculates loss between a continuous (unsegmented) time series and a target sequence. CTCLoss sums over the
    probability of possible alignments of input to target, producing a loss value which is differentiable
    with respect to each input node. The alignment of input to target is assumed to be "many-to-one", which
    limits the length of the target sequence such that it must be :math:`\\leq` the input length.
    Args:
        blank (int, optional): blank label. Default :math:`0`.
        reduction (string, optional): Specifies the reduction to apply to the output:
            ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
            ``'mean'``: the output losses will be divided by the target lengths and
            then the mean over the batch is taken. Default: ``'mean'``
        zero_infinity (bool, optional):
            Whether to zero infinite losses and the associated gradients.
            Default: ``False``
            Infinite losses mainly occur when the inputs are too short
            to be aligned to the targets.
    Shape:
        - Log_probs: Tensor of size :math:`(T, N, C)`,
          where :math:`T = \\text{input length}`,
          :math:`N = \\text{batch size}`, and
          :math:`C = \\text{number of classes (including blank)}`.
        - Targets: Tensor of size :math:`(N, S)` or
          :math:`(\\operatorname{sum}(\\text{target_lengths}))`,
          where :math:`N = \\text{batch size}` and
          :math:`S = \\text{max target length, if shape is } (N, S)`.
          It represent the target sequences. Each element in the target
          sequence is a class index. And the target index cannot be blank (default=0).
          In the :math:`(N, S)` form, targets are padded to the
          length of the longest sequence, and stacked.
          In the :math:`(\\operatorname{sum}(\\text{target_lengths}))` form,
          the targets are assumed to be un-padded and
          concatenated within 1 dimension.
        - Input_lengths: Tuple or tensor of size :math:`(N)`,
          where :math:`N = \\text{batch size}`. It represent the lengths of the
          inputs (must each be :math:`\\leq T`). And the lengths are specified
          for each sequence to achieve masking under the assumption that sequences
          are padded to equal lengths.
        - Target_lengths: Tuple or tensor of size :math:`(N)`,
          where :math:`N = \\text{batch size}`. It represent lengths of the targets.
          Lengths are specified for each sequence to achieve masking under the
          assumption that sequences are padded to equal lengths. If target shape is
          :math:`(N,S)`, target_lengths are effectively the stop index
          :math:`s_n` for each target sequence, such that ``target_n = targets[n,0:s_n]`` for
          each target in a batch. Lengths must each be :math:`\\leq S`
          If the targets are given as a 1d tensor that is the concatenation of individual
          targets, the target_lengths must add up to the total length of the tensor.
    Reference:
        <NAME> et al.: Connectionist Temporal Classification:
        Labelling Unsegmented Sequence Data with Recurrent Neural Networks:
        https://www.cs.toronto.edu/~graves/icml_2006.pdf
    For example:
    .. code-block:: python
        >>> import oneflow as flow
        >>> log_probs = flow.tensor(
        ... [
        ... [[-1.1031, -0.7998, -1.5200], [-0.9808, -1.1363, -1.1908]],
        ... [[-1.2258, -1.0665, -1.0153], [-1.1135, -1.2331, -0.9671]],
        ... [[-1.3348, -0.6611, -1.5118], [-0.9823, -1.2355, -1.0941]],
        ... [[-1.3850, -1.3273, -0.7247], [-0.8235, -1.4783, -1.0994]],
        ... [[-0.9049, -0.8867, -1.6962], [-1.4938, -1.3630, -0.6547]],
        ... ], dtype=flow.float32)
        >>> targets = flow.tensor([[1, 2, 2], [1, 2, 2]], dtype=flow.int32)
        >>> input_lengths = flow.tensor([5, 5], dtype=flow.int32)
        >>> target_lengths = flow.tensor([3, 3], dtype=flow.int32)
        >>> loss_mean = flow.nn.CTCLoss()
        >>> out = loss_mean(log_probs, targets, input_lengths, target_lengths)
        >>> out
        tensor(1.1376, dtype=oneflow.float32)
        >>> loss_sum = flow.nn.CTCLoss(blank=0, reduction="sum")
        >>> out = loss_sum(log_probs, targets, input_lengths, target_lengths)
        >>> out
        tensor(6.8257, dtype=oneflow.float32)
    """

    def __init__(
        self, blank: int = 0, reduction: str = "mean", zero_infinity: bool = False
    ) -> None:
        # Modern zero-arg super() (was legacy two-arg form); base stores
        # ``reduction``.
        super().__init__(reduction)
        self.blank = blank
        self.zero_infinity = zero_infinity

    def forward(
        self,
        log_probs: Tensor,
        targets: Tensor,
        input_lengths: Tensor,
        target_lengths: Tensor,
    ) -> Tensor:
        # The kernel needs the padded target width up front: for a 2D (N, S)
        # target it is S; for a 1D concatenated target it is the longest
        # individual length.  Any other rank falls through to 0, exactly as
        # the original dispatch did.
        if targets.ndim == 2:
            max_target_length = targets.shape[1]
        elif targets.ndim == 1:
            max_target_length = target_lengths.max().item()
        else:
            max_target_length = 0
        return flow._C.ctc_loss(
            log_probs,
            targets,
            input_lengths,
            target_lengths,
            max_target_length,
            self.blank,
            self.zero_infinity,
            self.reduction,
        )
class BCEWithLogitsLoss(_WeightedLoss):
    """This operator combines the `Sigmoid` and `BCELoss` together. For numerical stability,
    we apply some math tricks instead of using `Sigmoid` layer with `BCELoss`.
    The equation is:
    if :attr:`reduction` = ``"none"``:
    .. math::
        out = -weight*[Pos\\_weight*y*log\\sigma({x}) + (1-y)*log(1-\\sigma(x))]
    if :attr:`reduction` = ``"mean"``:
    .. math::
        out = -\\frac{weight}{n}\\sum_{i=1}^n[Pos\\_weight*y*log\\sigma({x}) + (1-y)*log(1-\\sigma(x))]
    if :attr:`reduction` = ``"sum"``:
    .. math::
        out = -weight*\\sum_{i=1}^n[Pos\\_weight*y*log\\sigma({x}) + (1-y)*log(1-\\sigma(x))]
    Args:
        weight (Tensor, optional): The manual rescaling weight to the loss. Default: ``None``
        size_average (bool, optional): Deprecated (see :attr:`reduction`). Default: ``True``
        reduce (bool, optional): Deprecated (see :attr:`reduction`). Default: ``True``
        reduction (str, optional): The reduce type, it can be one of ``"none"``, ``"mean"``, ``"sum"``.
            ``'none'``: no reduction will be applied, ``'mean'``: the sum of the output will be divided
            by the number of elements in the output, ``'sum'``: the output will be summed. Default: ``"mean"``
        pos_weight (Tensor, optional): The manual rescaling weight to the positive examples.
            Default: ``None``
    Shape:
        - Input: :math:`(N,*)` where `*` means, any number of additional dimensions
        - Target: :math:`(N,*)`, same shape as the input
        - Output: scalar. If :attr:`reduction` is ``"none"``, then :math:`(N,*)`, same shape as input.
    For example:
    .. code-block:: python
        >>> import oneflow as flow
        >>> input = flow.tensor([[1.2, 0.2, -0.3], [0.7, 0.6, -2], [0.7, 0.6, -2]], dtype=flow.float32)
        >>> target = flow.tensor([[0, 1, 0], [1, 0, 1], [1, 0, 1]], dtype=flow.float32)
        >>> weight = flow.tensor([[2, 2, 2], [2, 2, 2], [2, 2, 2]], dtype=flow.float32)
        >>> pos_weight = flow.tensor([1.2, 1.3, 1.4], dtype=flow.float32)
        >>> m = flow.nn.BCEWithLogitsLoss(weight=weight, pos_weight=pos_weight, reduction="none")
        >>> out = m(input, target)
        >>> out
        tensor([[2.9266, 1.5552, 1.1087],
                [0.9676, 2.0750, 5.9554],
                [0.9676, 2.0750, 5.9554]], dtype=oneflow.float32)
        >>> m = flow.nn.BCEWithLogitsLoss(weight=weight, pos_weight=pos_weight, reduction="mean")
        >>> out = m(input, target)
        >>> out
        tensor(2.6207, dtype=oneflow.float32)
        >>> m = flow.nn.BCEWithLogitsLoss(weight=weight, pos_weight=pos_weight, reduction="sum")
        >>> out = m(input, target)
        >>> out
        tensor(23.5865, dtype=oneflow.float32)
    """

    def __init__(
        self,
        weight: Optional[Tensor] = None,
        reduction: str = "mean",
        pos_weight: Optional[Tensor] = None,
    ) -> None:
        # The _WeightedLoss base stores ``weight`` and ``reduction`` (the
        # sibling NLLLoss relies on exactly that), so the redundant
        # ``self.reduction = reduction`` re-assignment was dropped.
        super().__init__(weight, reduction)
        self.pos_weight = pos_weight

    def forward(self, input: Tensor, target: Tensor) -> Tensor:
        # Fused sigmoid + BCE kernel for numerical stability.
        return flow._C.binary_cross_entropy_with_logits_loss(
            input, target, self.weight, self.pos_weight, self.reduction
        )
class SmoothL1Loss(_Loss):
    """Creates a criterion that uses a squared term if the absolute
    element-wise error falls below beta and an L1 term otherwise.
    The interface is consistent with PyTorch.
    The documentation is referenced from:
    https://pytorch.org/docs/1.10/generated/torch.nn.SmoothL1Loss.html.
    It is less sensitive to outliers than :class:`torch.nn.MSELoss` and in some cases
    prevents exploding gradients (e.g. see the paper `Fast R-CNN <https://openaccess.thecvf.com/content_iccv_2015/papers/Girshick_Fast_R-CNN_ICCV_2015_paper.pdf>`__ by <NAME>)..
    For a batch of size :math:`N`, the unreduced loss can be described as:
    .. math::
        \\ell(x, y) = L = \\{l_1, ..., l_N\\}^T
    with
    .. math::
        l_n = \\begin{cases}
        0.5 (x_n - y_n)^2 / beta, & \\text{if } |x_n - y_n| < beta \\\\
        |x_n - y_n| - 0.5 * beta, & \\text{otherwise }
        \\end{cases}
    If `reduction` is not `none`, then:
    .. math::
        \\ell(x, y) =
        \\begin{cases}
            \\operatorname{mean}(L), & \\text{if reduction} = \\text{`mean';}\\\\
            \\operatorname{sum}(L), & \\text{if reduction} = \\text{`sum'.}
        \\end{cases}
    .. note::
        Smooth L1 loss can be seen as exactly :class:`L1Loss`, but with the :math:`|x - y| < beta`
        portion replaced with a quadratic function such that its slope is 1 at :math:`|x - y| = beta`.
        The quadratic segment smooths the L1 loss near :math:`|x - y| = 0`.
    .. note::
        Smooth L1 loss is closely related to :class:`HuberLoss`, being
        equivalent to :math:`huber(x, y) / beta` (note that Smooth L1's beta hyper-parameter is
        also known as delta for Huber). This leads to the following differences:
        * As beta -> 0, Smooth L1 loss converges to :class:`L1Loss`, while :class:`HuberLoss`
          converges to a constant 0 loss.
        * As beta -> :math:`+\\infty`, Smooth L1 loss converges to a constant 0 loss, while
          :class:`HuberLoss` converges to :class:`MSELoss`.
        * For Smooth L1 loss, as beta varies, the L1 segment of the loss has a constant slope of 1.
          For :class:`HuberLoss`, the slope of the L1 segment is beta.
    Args:
        size_average (bool, optional): Deprecated (see :attr:`reduction`). By default,
            the losses are averaged over each loss element in the batch. Note that for
            some losses, there are multiple elements per sample. If the field :attr:`size_average`
            is set to ``False``, the losses are instead summed for each minibatch. Ignored
            when :attr:`reduce` is ``False``. Default: ``True``
        reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the
            losses are averaged or summed over observations for each minibatch depending
            on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per
            batch element instead and ignores :attr:`size_average`. Default: ``True``
        reduction (string, optional): Specifies the reduction to apply to the output:
            ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
            ``'mean'``: the sum of the output will be divided by the number of
            elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average`
            and :attr:`reduce` are in the process of being deprecated, and in the meantime,
            specifying either of those two args will override :attr:`reduction`. Default: ``'mean'``
        beta (float, optional): Specifies the threshold at which to change between L1 and L2 loss.
            The value must be non-negative. Default: 1.0
    Shape:
        - Input: :math:`(N, *)` where :math:`*` means any number of additional dimensions
        - Target: :math:`(N, *)`; same shape as the input
        - Output: scalar. If :attr:`reduction` is ``'none'``, then :math:`(N, *)`; same shape as the input
    For example:
    .. code-block:: python
        >>> import oneflow as flow
        >>> import numpy as np
        >>> x = flow.tensor(np.array([0.1, 0.4, 0.3, 0.5, 0.9]).astype(np.float32), dtype=flow.float32)
        >>> y = flow.tensor(np.array([0.3, 0.9, 2.5, 0.4, 0.3]).astype(np.float32), dtype=flow.float32)
        >>> m = flow.nn.SmoothL1Loss(reduction="none")
        >>> out = m(x, y)
        >>> out
        tensor([0.0200, 0.1250, 1.7000, 0.0050, 0.1800], dtype=oneflow.float32)
        >>> m = flow.nn.SmoothL1Loss(reduction="mean")
        >>> out = m(x, y)
        >>> out
        tensor(0.4060, dtype=oneflow.float32)
        >>> m = flow.nn.SmoothL1Loss(reduction="sum")
        >>> out = m(x, y)
        >>> out
        tensor(2.0300, dtype=oneflow.float32)
    """

    def __init__(self, reduction: str = "mean", beta: float = 1.0) -> None:
        # Modern zero-arg super() (was legacy two-arg form).
        super().__init__(reduction)
        self.beta = beta

    def forward(self, input: Tensor, target: Tensor) -> Tensor:
        # ``beta`` is the L1/L2 switch-over threshold documented above.
        return flow._C.smooth_l1_loss(input, target, self.beta, self.reduction)
class CombinedMarginLoss(Module):
    """The operation implements "margin_softmax" in InsightFace:
    https://github.com/deepinsight/insightface/blob/master/recognition/arcface_mxnet/train.py
    The implementation of margin_softmax in InsightFace is composed of multiple operators.
    We fuse them for speed up.
    Args:
        x (oneflow.Tensor): A Tensor
        label (oneflow.Tensor): label with integer data type
        m1 (float): loss m1 parameter
        m2 (float): loss m2 parameter
        m3 (float): loss m3 parameter
    Returns:
        oneflow.Tensor: A Tensor
    For example:
    .. code-block:: python
        >>> import numpy as np
        >>> import oneflow as flow
        >>> np_x = np.array([[-0.7027179, 0.0230609], [-0.02721931, -0.16056311], [-0.4565852, -0.64471215]])
        >>> np_label = np.array([0, 1, 1])
        >>> x = flow.tensor(np_x, dtype=flow.float32)
        >>> label = flow.tensor(np_label, dtype=flow.int32)
        >>> loss_func = flow.nn.CombinedMarginLoss(0.3, 0.5, 0.4)
        >>> out = loss_func(x, label)
        >>> out
        tensor([[-0.0423, 0.0231],
                [-0.0272, 0.1237],
                [-0.4566, -0.0204]], dtype=oneflow.float32)
    """

    def __init__(self, m1: float = 1.0, m2: float = 0.0, m3: float = 0.0) -> None:
        super().__init__()
        # The three margin coefficients of the fused margin-softmax op.
        self.m1, self.m2, self.m3 = m1, m2, m3

    def forward(self, x: Tensor, label: Tensor) -> Tensor:
        # One fused kernel call replaces the multi-op InsightFace recipe.
        margins = dict(m1=self.m1, m2=self.m2, m3=self.m3)
        return flow._C.combined_margin_loss(x, label, **margins)
class TripletMarginLoss(Module):
    r"""Creates a criterion that measures the triplet loss given an input
    tensors :math:`x1`, :math:`x2`, :math:`x3` and a margin with a value greater than :math:`0`.
    This is used for measuring a relative similarity between samples. A triplet
    is composed by `a`, `p` and `n` (i.e., `anchor`, `positive examples` and `negative
    examples` respectively). The shapes of all input tensors should be
    :math:`(N, D)`.
    The distance swap is described in detail in the paper `Learning shallow
    convolutional feature descriptors with triplet losses <http://www.bmva.org/bmvc/2016/papers/paper119/index.html>`__ by
    <NAME>, <NAME> et al.
    The loss function for each sample in the mini-batch is:
    .. math::
        L(a, p, n) = \max \{d(a_i, p_i) - d(a_i, n_i) + {\rm margin}, 0\}
    where
    .. math::
        d(x_i, y_i) = \left\lVert {\bf x}_i - {\bf y}_i \right\rVert_p
    Args:
        margin (float, optional): Default: :math:`1`.
        p (float, optional): The norm degree for pairwise distance. Default: :math:`2.0`.
        swap (bool, optional): The distance swap is described in detail in the paper
            `Learning shallow convolutional feature descriptors with triplet losses` by
            <NAME>, <NAME> et al. Default: ``False``.
        reduction (string, optional): Specifies the reduction to apply to the output:
            ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
            ``'mean'``: the sum of the output will be divided by the number of
            elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average`
            and :attr:`reduce` are in the process of being deprecated, and in the meantime,
            specifying either of those two args will override :attr:`reduction`. Default: ``'mean'``
    Shape:
        - Input: :math:`(N, D)` where :math:`D` is the vector dimension.
        - Output: A Tensor of shape :math:`(N)` if :attr:`reduction` is ``'none'``, or a scalar
          otherwise.
    For example:
    .. code-block:: python
        >>> import oneflow as flow
        >>> import numpy as np
        >>> triplet_loss = flow.nn.TripletMarginLoss(margin=1.0, p=2)
        >>> anchor = np.array([[1, -1, 1],[-1, 1, -1], [1, 1, 1]])
        >>> positive = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
        >>> negative = np.array([[2, 2, 2], [2, 2, 2], [2, 2, 2]])
        >>> output = triplet_loss(flow.Tensor(anchor), flow.Tensor(positive), flow.Tensor(negative))
        >>> output
        tensor(6.2971, dtype=oneflow.float32)
    """

    def __init__(
        self,
        margin: float = 1.0,
        p: float = 2.0,
        eps: float = 1e-6,
        swap: bool = False,
        size_average=None,
        reduce=None,
        reduction: str = "mean",
    ) -> None:
        super().__init__()
        # ``size_average`` and ``reduce`` are accepted for PyTorch API
        # parity but never read anywhere in this class.
        self.margin, self.p, self.eps, self.swap = margin, p, eps, swap
        self.reduction = reduction

    def forward(self, anchor, positive, negative):
        # Hand the whole triplet straight to the functional kernel.
        return flow._C.triplet_margin_loss(
            anchor,
            positive,
            negative,
            margin=self.margin,
            p=self.p,
            eps=self.eps,
            swap=self.swap,
            reduction=self.reduction,
        )
if __name__ == "__main__":
    import doctest

    # Run the doctest examples embedded in the loss-class docstrings above.
    # raise_on_error=True aborts at the first failing example instead of
    # collecting a summary, making failures easy to localize.
    doctest.testmod(raise_on_error=True)
| [
"oneflow._C.nll_loss",
"oneflow._C.cross_entropy",
"oneflow._C.mse_loss",
"oneflow._C.margin_ranking_loss",
"oneflow._C.combined_margin_loss",
"oneflow._C.kl_div_loss",
"oneflow._C.smooth_l1_loss",
"oneflow._C.triplet_margin_loss",
"oneflow._C.binary_cross_entropy_loss",
"oneflow._C.ctc_loss",
"... | [((40404, 40440), 'doctest.testmod', 'doctest.testmod', ([], {'raise_on_error': '(True)'}), '(raise_on_error=True)\n', (40419, 40440), False, 'import doctest\n'), ((2902, 2948), 'oneflow._C.l1_loss', 'flow._C.l1_loss', (['input', 'target', 'self.reduction'], {}), '(input, target, self.reduction)\n', (2917, 2948), True, 'import oneflow as flow\n'), ((5608, 5697), 'oneflow._C.cross_entropy', 'flow._C.cross_entropy', (['input', 'target', 'self.weight', 'self.ignore_index', 'self.reduction'], {}), '(input, target, self.weight, self.ignore_index, self.\n reduction)\n', (5629, 5697), True, 'import oneflow as flow\n'), ((8083, 8160), 'oneflow._C.binary_cross_entropy_loss', 'flow._C.binary_cross_entropy_loss', (['input', 'target', 'self.weight', 'self.reduction'], {}), '(input, target, self.weight, self.reduction)\n', (8116, 8160), True, 'import oneflow as flow\n'), ((11525, 11604), 'oneflow._C.nll_loss', 'flow._C.nll_loss', (['input', 'target', 'self.weight', 'self.ignore_index', 'self.reduction'], {}), '(input, target, self.weight, self.ignore_index, self.reduction)\n', (11541, 11604), True, 'import oneflow as flow\n'), ((15845, 15912), 'oneflow._C.kl_div_loss', 'flow._C.kl_div_loss', (['input', 'target', 'self.log_target', 'self.reduction'], {}), '(input, target, self.log_target, self.reduction)\n', (15864, 15912), True, 'import oneflow as flow\n'), ((18744, 18791), 'oneflow._C.mse_loss', 'flow._C.mse_loss', (['input', 'target', 'self.reduction'], {}), '(input, target, self.reduction)\n', (18760, 18791), True, 'import oneflow as flow\n'), ((21324, 21409), 'oneflow._C.margin_ranking_loss', 'flow._C.margin_ranking_loss', (['input1', 'input2', 'target', 'self.margin', 'self.reduction'], {}), '(input1, input2, target, self.margin, self.reduction\n )\n', (21351, 21409), True, 'import oneflow as flow\n'), ((26746, 26884), 'oneflow._C.ctc_loss', 'flow._C.ctc_loss', (['log_probs', 'targets', 'input_lengths', 'target_lengths', 'max_target_length', 'self.blank', 
'self.zero_infinity', 'self.reduction'], {}), '(log_probs, targets, input_lengths, target_lengths,\n max_target_length, self.blank, self.zero_infinity, self.reduction)\n', (26762, 26884), True, 'import oneflow as flow\n'), ((30177, 30287), 'oneflow._C.binary_cross_entropy_with_logits_loss', 'flow._C.binary_cross_entropy_with_logits_loss', (['input', 'target', 'self.weight', 'self.pos_weight', 'self.reduction'], {}), '(input, target, self.weight,\n self.pos_weight, self.reduction)\n', (30222, 30287), True, 'import oneflow as flow\n'), ((35334, 35398), 'oneflow._C.smooth_l1_loss', 'flow._C.smooth_l1_loss', (['input', 'target', 'self.beta', 'self.reduction'], {}), '(input, target, self.beta, self.reduction)\n', (35356, 35398), True, 'import oneflow as flow\n'), ((36876, 36950), 'oneflow._C.combined_margin_loss', 'flow._C.combined_margin_loss', (['x', 'label'], {'m1': 'self.m1', 'm2': 'self.m2', 'm3': 'self.m3'}), '(x, label, m1=self.m1, m2=self.m2, m3=self.m3)\n', (36904, 36950), True, 'import oneflow as flow\n'), ((40074, 40219), 'oneflow._C.triplet_margin_loss', 'flow._C.triplet_margin_loss', (['anchor', 'positive', 'negative'], {'margin': 'self.margin', 'p': 'self.p', 'eps': 'self.eps', 'swap': 'self.swap', 'reduction': 'self.reduction'}), '(anchor, positive, negative, margin=self.margin,\n p=self.p, eps=self.eps, swap=self.swap, reduction=self.reduction)\n', (40101, 40219), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow as flow
import oneflow.typing as oft
import numpy as np
from typing import Tuple, Dict
def test_annotation_return_None(test_case):
    """A job annotated with ``-> None`` hands back ``None`` to the caller."""
    flow.config.gpu_device_num(1)

    @flow.global_function()
    def noop_job(x: oft.Numpy.Placeholder((10,))) -> None:
        pass

    arr = np.ones((10,), dtype=np.float32)
    test_case.assertTrue(noop_job(arr) is None)
def test_annotation_Numpy(test_case):
    """An ``oft.Numpy`` return annotation yields a plain ndarray result."""
    flow.config.gpu_device_num(1)

    @flow.global_function()
    def identity_job(x: oft.Numpy.Placeholder((10,))) -> oft.Numpy:
        return x

    arr = np.ones((10,), dtype=np.float32)
    test_case.assertTrue(np.array_equal(identity_job(arr), arr))
def test_annotation_ListNumpy(test_case):
    """A mirrored job annotated ``oft.ListNumpy`` round-trips a list of arrays."""
    flow.config.gpu_device_num(1)
    func_config = flow.FunctionConfig()
    func_config.default_logical_view(flow.scope.mirrored_view())

    @flow.global_function(function_config=func_config)
    def identity_job(x: oft.ListNumpy.Placeholder((10,))) -> oft.ListNumpy:
        return x

    arr = np.ones((10,), dtype=np.float32)
    test_case.assertTrue(np.array_equal(identity_job([arr])[0], arr))
def test_annotation_ListListNumpy(test_case):
    """A mirrored job annotated ``oft.ListListNumpy`` round-trips a nested list."""
    flow.config.gpu_device_num(1)
    func_config = flow.FunctionConfig()
    func_config.default_logical_view(flow.scope.mirrored_view())

    @flow.global_function(function_config=func_config)
    def identity_job(x: oft.ListListNumpy.Placeholder((10,))) -> oft.ListListNumpy:
        return x

    arr = np.ones((10,), dtype=np.float32)
    test_case.assertTrue(np.array_equal(identity_job([[arr]])[0][0], arr))
def test_annotation_watch_Numpy(test_case):
    """``flow.watch`` delivers the blob to a callback annotated ``oft.Numpy``."""
    arr = np.ones((10,), dtype=np.float32)

    def check_blob(x: oft.Numpy):
        # The callback sees exactly the array that was fed in.
        test_case.assertTrue(np.array_equal(x, arr))

    flow.config.gpu_device_num(1)

    @flow.global_function()
    def watch_job(x: oft.Numpy.Placeholder((10,))) -> oft.Numpy:
        flow.watch(x, check_blob)
        return x

    watch_job(arr)
def test_annotation_watch_ListNumpy(test_case):
    """``flow.watch`` delivers a list-of-arrays to an ``oft.ListNumpy`` callback."""
    arr = np.ones((10,), dtype=np.float32)

    def check_blob(x: oft.ListNumpy):
        # Mirrored view: the callback receives one array per device slice.
        test_case.assertTrue(np.array_equal(x[0], arr))

    flow.config.gpu_device_num(1)
    func_config = flow.FunctionConfig()
    func_config.default_logical_view(flow.scope.mirrored_view())

    @flow.global_function(function_config=func_config)
    def watch_job(x: oft.ListNumpy.Placeholder((10,))) -> oft.ListNumpy:
        flow.watch(x, check_blob)
        return x

    watch_job([arr])
def test_annotation_watch_ListListNumpy(test_case):
    """Watch callback annotated ``oft.ListListNumpy`` (currently disabled).

    The early ``return`` below intentionally skips the entire body per the
    TODO; delete that one line to re-enable the test once the underlying
    bug is fixed.
    """
    # TODO(lixinqi): fixed bugs
    # NOTE(review): everything past this ``return`` is deliberately dead
    # code, kept verbatim so the test can be re-enabled later.
    return
    data = np.ones((10,), dtype=np.float32)
    def Watch(x: oft.ListListNumpy):
        test_case.assertTrue(np.array_equal(x[0][0], data))
    flow.config.gpu_device_num(1)
    func_config = flow.FunctionConfig()
    func_config.default_logical_view(flow.scope.mirrored_view())
    @flow.global_function(function_config=func_config)
    def foo(x: oft.ListListNumpy.Placeholder((10,))) -> oft.ListListNumpy:
        flow.watch(x, Watch)
        return x
    foo([[data]])
def test_annotation_Dict_Numpy(test_case):
    """A ``Dict[str, oft.Numpy]`` return annotation maps to a dict of ndarrays."""
    flow.config.gpu_device_num(1)

    @flow.global_function()
    def dict_job(x: oft.Numpy.Placeholder((10,))) -> Dict[str, oft.Numpy]:
        return {"x": x}

    arr = np.ones((10,), dtype=np.float32)
    test_case.assertTrue(np.array_equal(dict_job(arr)["x"], arr))
def test_annotation_Dict_ListNumpy(test_case):
    """A ``Dict[str, oft.ListNumpy]`` return annotation round-trips per key."""
    flow.config.gpu_device_num(1)
    func_config = flow.FunctionConfig()
    func_config.default_logical_view(flow.scope.mirrored_view())

    @flow.global_function(function_config=func_config)
    def dict_job(x: oft.ListNumpy.Placeholder((10,))) -> Dict[str, oft.ListNumpy]:
        return {"x": x}

    arr = np.ones((10,), dtype=np.float32)
    test_case.assertTrue(np.array_equal(dict_job([arr])["x"][0], arr))
def test_annotation_Dict_ListListNumpy(test_case):
    """Dict[str, oft.ListListNumpy] return annotations round-trip a doubly nested input."""
    flow.config.gpu_device_num(1)
    conf = flow.FunctionConfig()
    conf.default_logical_view(flow.scope.mirrored_view())

    @flow.global_function(function_config=conf)
    def job(x: oft.ListListNumpy.Placeholder((10,))) -> Dict[str, oft.ListListNumpy]:
        return {"x": x}

    arr = np.ones((10,), dtype=np.float32)
    # Value under "x" is a list of lists of ndarrays.
    test_case.assertTrue(np.array_equal(job([[arr]])["x"][0][0], arr))
def test_annotation_Dict_Nesting_Numpy(test_case):
    """Nested Dict[str, Dict[str, oft.Numpy]] return annotations are supported."""
    flow.config.gpu_device_num(1)

    @flow.global_function()
    def job(x: oft.Numpy.Placeholder((10,))) -> Dict[str, Dict[str, oft.Numpy]]:
        return {"x": {"x": x}}

    arr = np.ones((10,), dtype=np.float32)
    test_case.assertTrue(np.array_equal(job(arr)["x"]["x"], arr))
def test_annotation_Dict_Nesting_ListNumpy(test_case):
    """Nested Dict[str, Dict[str, oft.ListNumpy]] return annotations are supported."""
    flow.config.gpu_device_num(1)
    conf = flow.FunctionConfig()
    conf.default_logical_view(flow.scope.mirrored_view())

    @flow.global_function(function_config=conf)
    def job(x: oft.ListNumpy.Placeholder((10,))) -> Dict[str, Dict[str, oft.ListNumpy]]:
        return {"x": {"x": x}}

    arr = np.ones((10,), dtype=np.float32)
    test_case.assertTrue(np.array_equal(job([arr])["x"]["x"][0], arr))
def test_annotation_Dict_Nesting_ListListNumpy(test_case):
    """Nested Dict[str, Dict[str, oft.ListListNumpy]] return annotations are supported."""
    flow.config.gpu_device_num(1)
    conf = flow.FunctionConfig()
    conf.default_logical_view(flow.scope.mirrored_view())

    @flow.global_function(function_config=conf)
    def job(
        x: oft.ListListNumpy.Placeholder((10,))
    ) -> Dict[str, Dict[str, oft.ListListNumpy]]:
        return {"x": {"x": x}}

    arr = np.ones((10,), dtype=np.float32)
    test_case.assertTrue(np.array_equal(job([[arr]])["x"]["x"][0][0], arr))
def test_annotation_Tuple_Numpy(test_case):
    """Tuple placeholder input and Tuple[oft.Numpy] return annotations round-trip."""
    flow.config.gpu_device_num(1)

    @flow.global_function()
    def job(x: Tuple[oft.Numpy.Placeholder((10,))]) -> Tuple[oft.Numpy]:
        return x

    arr = np.ones((10,), dtype=np.float32)
    # Feed a 1-tuple and expect the same array back in position 0.
    test_case.assertTrue(np.array_equal(job((arr,))[0], arr))
def test_annotation_Tuple_ListNumpy(test_case):
    """Tuple[oft.ListNumpy] annotations round-trip a mirrored 1-tuple input."""
    flow.config.gpu_device_num(1)
    conf = flow.FunctionConfig()
    conf.default_logical_view(flow.scope.mirrored_view())

    @flow.global_function(function_config=conf)
    def job(x: Tuple[oft.ListNumpy.Placeholder((10,))]) -> Tuple[oft.ListNumpy]:
        return x

    arr = np.ones((10,), dtype=np.float32)
    test_case.assertTrue(np.array_equal(job(([arr],))[0][0], arr))
def test_annotation_Tuple_ListListNumpy(test_case):
    """Tuple[oft.ListListNumpy] annotations round-trip a doubly nested 1-tuple input."""
    flow.config.gpu_device_num(1)
    conf = flow.FunctionConfig()
    conf.default_logical_view(flow.scope.mirrored_view())

    @flow.global_function(function_config=conf)
    def job(x: Tuple[oft.ListListNumpy.Placeholder((10,))]) -> Tuple[oft.ListListNumpy]:
        return x

    arr = np.ones((10,), dtype=np.float32)
    test_case.assertTrue(np.array_equal(job(([[arr]],))[0][0][0], arr))
def test_annotation_Callback_Numpy(test_case):
    """oft.Callback[oft.Numpy]: the job returns a callable that feeds its result to a callback."""
    expected = np.ones((10,), dtype=np.float32)

    def check(x: oft.Numpy):
        test_case.assertTrue(np.array_equal(x, expected))

    flow.config.gpu_device_num(1)

    @flow.global_function()
    def job(x: oft.Numpy.Placeholder((10,))) -> oft.Callback[oft.Numpy]:
        return x

    # Calling the job yields a dispatcher; invoking it registers the callback.
    job(expected)(check)
def test_annotation_Callback_ListNumpy(test_case):
    """oft.Callback[oft.ListNumpy]: the callback receives a list holding the fed ndarray."""
    expected = np.ones((10,), dtype=np.float32)

    def check(x: oft.ListNumpy):
        test_case.assertTrue(np.array_equal(x[0], expected))

    flow.config.gpu_device_num(1)
    conf = flow.FunctionConfig()
    conf.default_logical_view(flow.scope.mirrored_view())

    @flow.global_function(function_config=conf)
    def job(x: oft.ListNumpy.Placeholder((10,))) -> oft.Callback[oft.ListNumpy]:
        return x

    job([expected])(check)
def test_annotation_Callback_ListListNumpy(test_case):
    """oft.Callback[oft.ListListNumpy]: the callback receives the doubly nested input."""
    expected = np.ones((10,), dtype=np.float32)

    def check(x: oft.ListListNumpy):
        test_case.assertTrue(np.array_equal(x[0][0], expected))

    flow.config.gpu_device_num(1)
    conf = flow.FunctionConfig()
    conf.default_logical_view(flow.scope.mirrored_view())

    @flow.global_function(function_config=conf)
    def job(x: oft.ListListNumpy.Placeholder((10,))) -> oft.Callback[oft.ListListNumpy]:
        return x

    job([[expected]])(check)
def test_annotation_Callback_Tuple_Numpy(test_case):
    """oft.Callback[Tuple[oft.Numpy]]: the callback receives a 1-tuple of ndarrays."""
    expected = np.ones((10,), dtype=np.float32)

    def check(x: Tuple[oft.Numpy]):
        test_case.assertTrue(np.array_equal(x[0], expected))

    flow.config.gpu_device_num(1)
    conf = flow.FunctionConfig()
    conf.default_logical_view(flow.scope.mirrored_view())

    @flow.global_function(function_config=conf)
    def job(x: oft.Numpy.Placeholder((10,))) -> oft.Callback[Tuple[oft.Numpy]]:
        return (x,)

    job(expected)(check)
def test_annotation_Callback_Tuple_ListNumpy(test_case):
    """oft.Callback[Tuple[oft.ListNumpy]]: the callback receives a 1-tuple of ndarray lists."""
    expected = np.ones((10,), dtype=np.float32)

    def check(x: Tuple[oft.ListNumpy]):
        test_case.assertTrue(np.array_equal(x[0][0], expected))

    flow.config.gpu_device_num(1)
    conf = flow.FunctionConfig()
    conf.default_logical_view(flow.scope.mirrored_view())

    @flow.global_function(function_config=conf)
    def job(x: oft.ListNumpy.Placeholder((10,))) -> oft.Callback[Tuple[oft.ListNumpy]]:
        return (x,)

    job([expected])(check)
def test_annotation_Callback_Tuple_ListListNumpy(test_case):
    """oft.Callback[Tuple[oft.ListListNumpy]]: the callback receives the nested 1-tuple."""
    expected = np.ones((10,), dtype=np.float32)

    def check(x: Tuple[oft.ListListNumpy]):
        test_case.assertTrue(np.array_equal(x[0][0][0], expected))

    flow.config.gpu_device_num(1)
    conf = flow.FunctionConfig()
    conf.default_logical_view(flow.scope.mirrored_view())

    @flow.global_function(function_config=conf)
    def job(
        x: oft.ListListNumpy.Placeholder((10,))
    ) -> oft.Callback[Tuple[oft.ListListNumpy]]:
        return (x,)

    job([[expected]])(check)
| [
"oneflow.global_function",
"oneflow.typing.Numpy.Placeholder",
"oneflow.FunctionConfig",
"oneflow.watch",
"oneflow.typing.ListListNumpy.Placeholder",
"oneflow.config.gpu_device_num",
"oneflow.typing.ListNumpy.Placeholder",
"oneflow.scope.mirrored_view"
] | [((742, 771), 'oneflow.config.gpu_device_num', 'flow.config.gpu_device_num', (['(1)'], {}), '(1)\n', (768, 771), True, 'import oneflow as flow\n'), ((778, 800), 'oneflow.global_function', 'flow.global_function', ([], {}), '()\n', (798, 800), True, 'import oneflow as flow\n'), ((880, 912), 'numpy.ones', 'np.ones', (['(10,)'], {'dtype': 'np.float32'}), '((10,), dtype=np.float32)\n', (887, 912), True, 'import numpy as np\n'), ((1001, 1030), 'oneflow.config.gpu_device_num', 'flow.config.gpu_device_num', (['(1)'], {}), '(1)\n', (1027, 1030), True, 'import oneflow as flow\n'), ((1037, 1059), 'oneflow.global_function', 'flow.global_function', ([], {}), '()\n', (1057, 1059), True, 'import oneflow as flow\n'), ((1148, 1180), 'numpy.ones', 'np.ones', (['(10,)'], {'dtype': 'np.float32'}), '((10,), dtype=np.float32)\n', (1155, 1180), True, 'import numpy as np\n'), ((1287, 1316), 'oneflow.config.gpu_device_num', 'flow.config.gpu_device_num', (['(1)'], {}), '(1)\n', (1313, 1316), True, 'import oneflow as flow\n'), ((1336, 1357), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (1355, 1357), True, 'import oneflow as flow\n'), ((1429, 1478), 'oneflow.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (1449, 1478), True, 'import oneflow as flow\n'), ((1575, 1607), 'numpy.ones', 'np.ones', (['(10,)'], {'dtype': 'np.float32'}), '((10,), dtype=np.float32)\n', (1582, 1607), True, 'import numpy as np\n'), ((1723, 1752), 'oneflow.config.gpu_device_num', 'flow.config.gpu_device_num', (['(1)'], {}), '(1)\n', (1749, 1752), True, 'import oneflow as flow\n'), ((1772, 1793), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (1791, 1793), True, 'import oneflow as flow\n'), ((1865, 1914), 'oneflow.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (1885, 1914), True, 'import oneflow as flow\n'), ((2019, 2051), 'numpy.ones', 
'np.ones', (['(10,)'], {'dtype': 'np.float32'}), '((10,), dtype=np.float32)\n', (2026, 2051), True, 'import numpy as np\n'), ((2177, 2209), 'numpy.ones', 'np.ones', (['(10,)'], {'dtype': 'np.float32'}), '((10,), dtype=np.float32)\n', (2184, 2209), True, 'import numpy as np\n'), ((2299, 2328), 'oneflow.config.gpu_device_num', 'flow.config.gpu_device_num', (['(1)'], {}), '(1)\n', (2325, 2328), True, 'import oneflow as flow\n'), ((2335, 2357), 'oneflow.global_function', 'flow.global_function', ([], {}), '()\n', (2355, 2357), True, 'import oneflow as flow\n'), ((2539, 2571), 'numpy.ones', 'np.ones', (['(10,)'], {'dtype': 'np.float32'}), '((10,), dtype=np.float32)\n', (2546, 2571), True, 'import numpy as np\n'), ((2668, 2697), 'oneflow.config.gpu_device_num', 'flow.config.gpu_device_num', (['(1)'], {}), '(1)\n', (2694, 2697), True, 'import oneflow as flow\n'), ((2717, 2738), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (2736, 2738), True, 'import oneflow as flow\n'), ((2810, 2859), 'oneflow.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (2830, 2859), True, 'import oneflow as flow\n'), ((3098, 3130), 'numpy.ones', 'np.ones', (['(10,)'], {'dtype': 'np.float32'}), '((10,), dtype=np.float32)\n', (3105, 3130), True, 'import numpy as np\n'), ((3234, 3263), 'oneflow.config.gpu_device_num', 'flow.config.gpu_device_num', (['(1)'], {}), '(1)\n', (3260, 3263), True, 'import oneflow as flow\n'), ((3282, 3303), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (3301, 3303), True, 'import oneflow as flow\n'), ((3375, 3424), 'oneflow.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (3395, 3424), True, 'import oneflow as flow\n'), ((3614, 3643), 'oneflow.config.gpu_device_num', 'flow.config.gpu_device_num', (['(1)'], {}), '(1)\n', (3640, 3643), True, 'import oneflow as flow\n'), ((3650, 3672), 
'oneflow.global_function', 'flow.global_function', ([], {}), '()\n', (3670, 3672), True, 'import oneflow as flow\n'), ((3779, 3811), 'numpy.ones', 'np.ones', (['(10,)'], {'dtype': 'np.float32'}), '((10,), dtype=np.float32)\n', (3786, 3811), True, 'import numpy as np\n'), ((3928, 3957), 'oneflow.config.gpu_device_num', 'flow.config.gpu_device_num', (['(1)'], {}), '(1)\n', (3954, 3957), True, 'import oneflow as flow\n'), ((3977, 3998), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (3996, 3998), True, 'import oneflow as flow\n'), ((4070, 4119), 'oneflow.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (4090, 4119), True, 'import oneflow as flow\n'), ((4234, 4266), 'numpy.ones', 'np.ones', (['(10,)'], {'dtype': 'np.float32'}), '((10,), dtype=np.float32)\n', (4241, 4266), True, 'import numpy as np\n'), ((4392, 4421), 'oneflow.config.gpu_device_num', 'flow.config.gpu_device_num', (['(1)'], {}), '(1)\n', (4418, 4421), True, 'import oneflow as flow\n'), ((4440, 4461), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (4459, 4461), True, 'import oneflow as flow\n'), ((4533, 4582), 'oneflow.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (4553, 4582), True, 'import oneflow as flow\n'), ((4705, 4737), 'numpy.ones', 'np.ones', (['(10,)'], {'dtype': 'np.float32'}), '((10,), dtype=np.float32)\n', (4712, 4737), True, 'import numpy as np\n'), ((4868, 4897), 'oneflow.config.gpu_device_num', 'flow.config.gpu_device_num', (['(1)'], {}), '(1)\n', (4894, 4897), True, 'import oneflow as flow\n'), ((4904, 4926), 'oneflow.global_function', 'flow.global_function', ([], {}), '()\n', (4924, 4926), True, 'import oneflow as flow\n'), ((5051, 5083), 'numpy.ones', 'np.ones', (['(10,)'], {'dtype': 'np.float32'}), '((10,), dtype=np.float32)\n', (5058, 5083), True, 'import numpy as np\n'), ((5213, 5242), 
'oneflow.config.gpu_device_num', 'flow.config.gpu_device_num', (['(1)'], {}), '(1)\n', (5239, 5242), True, 'import oneflow as flow\n'), ((5262, 5283), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (5281, 5283), True, 'import oneflow as flow\n'), ((5355, 5404), 'oneflow.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (5375, 5404), True, 'import oneflow as flow\n'), ((5537, 5569), 'numpy.ones', 'np.ones', (['(10,)'], {'dtype': 'np.float32'}), '((10,), dtype=np.float32)\n', (5544, 5569), True, 'import numpy as np\n'), ((5708, 5737), 'oneflow.config.gpu_device_num', 'flow.config.gpu_device_num', (['(1)'], {}), '(1)\n', (5734, 5737), True, 'import oneflow as flow\n'), ((5756, 5777), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (5775, 5777), True, 'import oneflow as flow\n'), ((5849, 5898), 'oneflow.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (5869, 5898), True, 'import oneflow as flow\n'), ((6053, 6085), 'numpy.ones', 'np.ones', (['(10,)'], {'dtype': 'np.float32'}), '((10,), dtype=np.float32)\n', (6060, 6085), True, 'import numpy as np\n'), ((6214, 6243), 'oneflow.config.gpu_device_num', 'flow.config.gpu_device_num', (['(1)'], {}), '(1)\n', (6240, 6243), True, 'import oneflow as flow\n'), ((6250, 6272), 'oneflow.global_function', 'flow.global_function', ([], {}), '()\n', (6270, 6272), True, 'import oneflow as flow\n'), ((6375, 6407), 'numpy.ones', 'np.ones', (['(10,)'], {'dtype': 'np.float32'}), '((10,), dtype=np.float32)\n', (6382, 6407), True, 'import numpy as np\n'), ((6526, 6555), 'oneflow.config.gpu_device_num', 'flow.config.gpu_device_num', (['(1)'], {}), '(1)\n', (6552, 6555), True, 'import oneflow as flow\n'), ((6575, 6596), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (6594, 6596), True, 'import oneflow as flow\n'), ((6668, 6717), 
'oneflow.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (6688, 6717), True, 'import oneflow as flow\n'), ((6828, 6860), 'numpy.ones', 'np.ones', (['(10,)'], {'dtype': 'np.float32'}), '((10,), dtype=np.float32)\n', (6835, 6860), True, 'import numpy as np\n'), ((6988, 7017), 'oneflow.config.gpu_device_num', 'flow.config.gpu_device_num', (['(1)'], {}), '(1)\n', (7014, 7017), True, 'import oneflow as flow\n'), ((7036, 7057), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (7055, 7057), True, 'import oneflow as flow\n'), ((7129, 7178), 'oneflow.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (7149, 7178), True, 'import oneflow as flow\n'), ((7297, 7329), 'numpy.ones', 'np.ones', (['(10,)'], {'dtype': 'np.float32'}), '((10,), dtype=np.float32)\n', (7304, 7329), True, 'import numpy as np\n'), ((7464, 7496), 'numpy.ones', 'np.ones', (['(10,)'], {'dtype': 'np.float32'}), '((10,), dtype=np.float32)\n', (7471, 7496), True, 'import numpy as np\n'), ((7585, 7614), 'oneflow.config.gpu_device_num', 'flow.config.gpu_device_num', (['(1)'], {}), '(1)\n', (7611, 7614), True, 'import oneflow as flow\n'), ((7621, 7643), 'oneflow.global_function', 'flow.global_function', ([], {}), '()\n', (7641, 7643), True, 'import oneflow as flow\n'), ((7819, 7851), 'numpy.ones', 'np.ones', (['(10,)'], {'dtype': 'np.float32'}), '((10,), dtype=np.float32)\n', (7826, 7851), True, 'import numpy as np\n'), ((7947, 7976), 'oneflow.config.gpu_device_num', 'flow.config.gpu_device_num', (['(1)'], {}), '(1)\n', (7973, 7976), True, 'import oneflow as flow\n'), ((7996, 8017), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (8015, 8017), True, 'import oneflow as flow\n'), ((8089, 8138), 'oneflow.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (8109, 8138), True, 
'import oneflow as flow\n'), ((8328, 8360), 'numpy.ones', 'np.ones', (['(10,)'], {'dtype': 'np.float32'}), '((10,), dtype=np.float32)\n', (8335, 8360), True, 'import numpy as np\n'), ((8463, 8492), 'oneflow.config.gpu_device_num', 'flow.config.gpu_device_num', (['(1)'], {}), '(1)\n', (8489, 8492), True, 'import oneflow as flow\n'), ((8511, 8532), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (8530, 8532), True, 'import oneflow as flow\n'), ((8604, 8653), 'oneflow.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (8624, 8653), True, 'import oneflow as flow\n'), ((8851, 8883), 'numpy.ones', 'np.ones', (['(10,)'], {'dtype': 'np.float32'}), '((10,), dtype=np.float32)\n', (8858, 8883), True, 'import numpy as np\n'), ((8982, 9011), 'oneflow.config.gpu_device_num', 'flow.config.gpu_device_num', (['(1)'], {}), '(1)\n', (9008, 9011), True, 'import oneflow as flow\n'), ((9030, 9051), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (9049, 9051), True, 'import oneflow as flow\n'), ((9123, 9172), 'oneflow.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (9143, 9172), True, 'import oneflow as flow\n'), ((9364, 9396), 'numpy.ones', 'np.ones', (['(10,)'], {'dtype': 'np.float32'}), '((10,), dtype=np.float32)\n', (9371, 9396), True, 'import numpy as np\n'), ((9502, 9531), 'oneflow.config.gpu_device_num', 'flow.config.gpu_device_num', (['(1)'], {}), '(1)\n', (9528, 9531), True, 'import oneflow as flow\n'), ((9550, 9571), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (9569, 9571), True, 'import oneflow as flow\n'), ((9643, 9692), 'oneflow.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (9663, 9692), True, 'import oneflow as flow\n'), ((9898, 9930), 'numpy.ones', 'np.ones', (['(10,)'], {'dtype': 'np.float32'}), '((10,), 
dtype=np.float32)\n', (9905, 9930), True, 'import numpy as np\n'), ((10043, 10072), 'oneflow.config.gpu_device_num', 'flow.config.gpu_device_num', (['(1)'], {}), '(1)\n', (10069, 10072), True, 'import oneflow as flow\n'), ((10092, 10113), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (10111, 10113), True, 'import oneflow as flow\n'), ((10185, 10234), 'oneflow.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (10205, 10234), True, 'import oneflow as flow\n'), ((1395, 1421), 'oneflow.scope.mirrored_view', 'flow.scope.mirrored_view', ([], {}), '()\n', (1419, 1421), True, 'import oneflow as flow\n'), ((1831, 1857), 'oneflow.scope.mirrored_view', 'flow.scope.mirrored_view', ([], {}), '()\n', (1855, 1857), True, 'import oneflow as flow\n'), ((2425, 2445), 'oneflow.watch', 'flow.watch', (['x', 'Watch'], {}), '(x, Watch)\n', (2435, 2445), True, 'import oneflow as flow\n'), ((2776, 2802), 'oneflow.scope.mirrored_view', 'flow.scope.mirrored_view', ([], {}), '()\n', (2800, 2802), True, 'import oneflow as flow\n'), ((2935, 2955), 'oneflow.watch', 'flow.watch', (['x', 'Watch'], {}), '(x, Watch)\n', (2945, 2955), True, 'import oneflow as flow\n'), ((3341, 3367), 'oneflow.scope.mirrored_view', 'flow.scope.mirrored_view', ([], {}), '()\n', (3365, 3367), True, 'import oneflow as flow\n'), ((3508, 3528), 'oneflow.watch', 'flow.watch', (['x', 'Watch'], {}), '(x, Watch)\n', (3518, 3528), True, 'import oneflow as flow\n'), ((4036, 4062), 'oneflow.scope.mirrored_view', 'flow.scope.mirrored_view', ([], {}), '()\n', (4060, 4062), True, 'import oneflow as flow\n'), ((4499, 4525), 'oneflow.scope.mirrored_view', 'flow.scope.mirrored_view', ([], {}), '()\n', (4523, 4525), True, 'import oneflow as flow\n'), ((5321, 5347), 'oneflow.scope.mirrored_view', 'flow.scope.mirrored_view', ([], {}), '()\n', (5345, 5347), True, 'import oneflow as flow\n'), ((5815, 5841), 'oneflow.scope.mirrored_view', 
'flow.scope.mirrored_view', ([], {}), '()\n', (5839, 5841), True, 'import oneflow as flow\n'), ((6634, 6660), 'oneflow.scope.mirrored_view', 'flow.scope.mirrored_view', ([], {}), '()\n', (6658, 6660), True, 'import oneflow as flow\n'), ((7095, 7121), 'oneflow.scope.mirrored_view', 'flow.scope.mirrored_view', ([], {}), '()\n', (7119, 7121), True, 'import oneflow as flow\n'), ((8055, 8081), 'oneflow.scope.mirrored_view', 'flow.scope.mirrored_view', ([], {}), '()\n', (8079, 8081), True, 'import oneflow as flow\n'), ((8570, 8596), 'oneflow.scope.mirrored_view', 'flow.scope.mirrored_view', ([], {}), '()\n', (8594, 8596), True, 'import oneflow as flow\n'), ((9089, 9115), 'oneflow.scope.mirrored_view', 'flow.scope.mirrored_view', ([], {}), '()\n', (9113, 9115), True, 'import oneflow as flow\n'), ((9609, 9635), 'oneflow.scope.mirrored_view', 'flow.scope.mirrored_view', ([], {}), '()\n', (9633, 9635), True, 'import oneflow as flow\n'), ((10151, 10177), 'oneflow.scope.mirrored_view', 'flow.scope.mirrored_view', ([], {}), '()\n', (10175, 10177), True, 'import oneflow as flow\n'), ((816, 844), 'oneflow.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['(10,)'], {}), '((10,))\n', (837, 844), True, 'import oneflow.typing as oft\n'), ((1075, 1103), 'oneflow.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['(10,)'], {}), '((10,))\n', (1096, 1103), True, 'import oneflow.typing as oft\n'), ((1494, 1526), 'oneflow.typing.ListNumpy.Placeholder', 'oft.ListNumpy.Placeholder', (['(10,)'], {}), '((10,))\n', (1519, 1526), True, 'import oneflow.typing as oft\n'), ((1930, 1966), 'oneflow.typing.ListListNumpy.Placeholder', 'oft.ListListNumpy.Placeholder', (['(10,)'], {}), '((10,))\n', (1959, 1966), True, 'import oneflow.typing as oft\n'), ((2269, 2292), 'numpy.array_equal', 'np.array_equal', (['x', 'data'], {}), '(x, data)\n', (2283, 2292), True, 'import numpy as np\n'), ((2373, 2401), 'oneflow.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['(10,)'], {}), '((10,))\n', (2394, 
2401), True, 'import oneflow.typing as oft\n'), ((2635, 2661), 'numpy.array_equal', 'np.array_equal', (['x[0]', 'data'], {}), '(x[0], data)\n', (2649, 2661), True, 'import numpy as np\n'), ((2875, 2907), 'oneflow.typing.ListNumpy.Placeholder', 'oft.ListNumpy.Placeholder', (['(10,)'], {}), '((10,))\n', (2900, 2907), True, 'import oneflow.typing as oft\n'), ((3198, 3227), 'numpy.array_equal', 'np.array_equal', (['x[0][0]', 'data'], {}), '(x[0][0], data)\n', (3212, 3227), True, 'import numpy as np\n'), ((3440, 3476), 'oneflow.typing.ListListNumpy.Placeholder', 'oft.ListListNumpy.Placeholder', (['(10,)'], {}), '((10,))\n', (3469, 3476), True, 'import oneflow.typing as oft\n'), ((3688, 3716), 'oneflow.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['(10,)'], {}), '((10,))\n', (3709, 3716), True, 'import oneflow.typing as oft\n'), ((4135, 4167), 'oneflow.typing.ListNumpy.Placeholder', 'oft.ListNumpy.Placeholder', (['(10,)'], {}), '((10,))\n', (4160, 4167), True, 'import oneflow.typing as oft\n'), ((4598, 4634), 'oneflow.typing.ListListNumpy.Placeholder', 'oft.ListListNumpy.Placeholder', (['(10,)'], {}), '((10,))\n', (4627, 4634), True, 'import oneflow.typing as oft\n'), ((4942, 4970), 'oneflow.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['(10,)'], {}), '((10,))\n', (4963, 4970), True, 'import oneflow.typing as oft\n'), ((5420, 5452), 'oneflow.typing.ListNumpy.Placeholder', 'oft.ListNumpy.Placeholder', (['(10,)'], {}), '((10,))\n', (5445, 5452), True, 'import oneflow.typing as oft\n'), ((5923, 5959), 'oneflow.typing.ListListNumpy.Placeholder', 'oft.ListListNumpy.Placeholder', (['(10,)'], {}), '((10,))\n', (5952, 5959), True, 'import oneflow.typing as oft\n'), ((7555, 7578), 'numpy.array_equal', 'np.array_equal', (['x', 'data'], {}), '(x, data)\n', (7569, 7578), True, 'import numpy as np\n'), ((7659, 7687), 'oneflow.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['(10,)'], {}), '((10,))\n', (7680, 7687), True, 'import oneflow.typing as oft\n'), ((7914, 
7940), 'numpy.array_equal', 'np.array_equal', (['x[0]', 'data'], {}), '(x[0], data)\n', (7928, 7940), True, 'import numpy as np\n'), ((8154, 8186), 'oneflow.typing.ListNumpy.Placeholder', 'oft.ListNumpy.Placeholder', (['(10,)'], {}), '((10,))\n', (8179, 8186), True, 'import oneflow.typing as oft\n'), ((8427, 8456), 'numpy.array_equal', 'np.array_equal', (['x[0][0]', 'data'], {}), '(x[0][0], data)\n', (8441, 8456), True, 'import numpy as np\n'), ((8669, 8705), 'oneflow.typing.ListListNumpy.Placeholder', 'oft.ListListNumpy.Placeholder', (['(10,)'], {}), '((10,))\n', (8698, 8705), True, 'import oneflow.typing as oft\n'), ((8949, 8975), 'numpy.array_equal', 'np.array_equal', (['x[0]', 'data'], {}), '(x[0], data)\n', (8963, 8975), True, 'import numpy as np\n'), ((9188, 9216), 'oneflow.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['(10,)'], {}), '((10,))\n', (9209, 9216), True, 'import oneflow.typing as oft\n'), ((9466, 9495), 'numpy.array_equal', 'np.array_equal', (['x[0][0]', 'data'], {}), '(x[0][0], data)\n', (9480, 9495), True, 'import numpy as np\n'), ((9708, 9740), 'oneflow.typing.ListNumpy.Placeholder', 'oft.ListNumpy.Placeholder', (['(10,)'], {}), '((10,))\n', (9733, 9740), True, 'import oneflow.typing as oft\n'), ((10004, 10036), 'numpy.array_equal', 'np.array_equal', (['x[0][0][0]', 'data'], {}), '(x[0][0][0], data)\n', (10018, 10036), True, 'import numpy as np\n'), ((10259, 10295), 'oneflow.typing.ListListNumpy.Placeholder', 'oft.ListListNumpy.Placeholder', (['(10,)'], {}), '((10,))\n', (10288, 10295), True, 'import oneflow.typing as oft\n'), ((6294, 6322), 'oneflow.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['(10,)'], {}), '((10,))\n', (6315, 6322), True, 'import oneflow.typing as oft\n'), ((6739, 6771), 'oneflow.typing.ListNumpy.Placeholder', 'oft.ListNumpy.Placeholder', (['(10,)'], {}), '((10,))\n', (6764, 6771), True, 'import oneflow.typing as oft\n'), ((7200, 7236), 'oneflow.typing.ListListNumpy.Placeholder', 
'oft.ListListNumpy.Placeholder', (['(10,)'], {}), '((10,))\n', (7229, 7236), True, 'import oneflow.typing as oft\n')] |
"""
Modified from https://github.com/pytorch/vision/blob/main/torchvision/models/detection/transform.py
"""
import math
import oneflow as flow
import random
from oneflow import nn, Tensor
from typing import List, Tuple, Dict, Optional
from .image_list import ImageList
from .roi_heads import paste_masks_in_image
def _resize_image_and_masks(
    image: Tensor,
    self_min_size: float,
    self_max_size: float,
    target: Optional[Dict[str, Tensor]] = None,
    fixed_size: Optional[Tuple[int, int]] = None,
) -> Tuple[Tensor, Optional[Dict[str, Tensor]]]:
    """Resize ``image`` (and any ``target["masks"]``) either to ``fixed_size``
    or so its short side approaches ``self_min_size`` without the long side
    exceeding ``self_max_size``.

    Exactly one of ``size`` / ``scale_factor`` is passed to interpolate():
    ``size`` when ``fixed_size`` is given, ``scale_factor`` otherwise.
    Returns the resized image and the (mutated in place) target dict.
    """
    im_shape = flow.tensor(image.shape[-2:])
    size: Optional[List[int]] = None
    scale_factor: Optional[float] = None
    recompute_scale_factor: Optional[bool] = None
    if fixed_size is not None:
        # interpolate() expects [H, W]; fixed_size is stored in the other
        # order — presumably (w, h), matching torchvision. TODO confirm.
        size = [fixed_size[1], fixed_size[0]]
    else:
        min_size = flow.min(im_shape).to(dtype=flow.float32)
        max_size = flow.max(im_shape).to(dtype=flow.float32)
        # Pick the scale that honors both constraints: short side -> self_min_size,
        # capped so the long side never exceeds self_max_size.
        scale = flow.minimum(self_min_size / min_size, self_max_size / max_size)
        scale_factor = scale.item()
        recompute_scale_factor = True
    # image[None] adds a batch dim for interpolate; [0] strips it again.
    image = flow.nn.functional.interpolate(
        image[None],
        size=size,
        scale_factor=scale_factor,
        mode="bilinear",
        recompute_scale_factor=recompute_scale_factor,
        align_corners=False,
    )[0]
    if target is None:
        return image, target
    if "masks" in target:
        mask = target["masks"]
        # Masks are resized with the default (nearest) mode to keep them binary,
        # then cast back to uint8.
        mask = flow.nn.functional.interpolate(
            mask[:, None].float(),
            size=size,
            scale_factor=scale_factor,
            recompute_scale_factor=recompute_scale_factor,
        )[:, 0].to(flow.uint8)
        target["masks"] = mask
    return image, target
def _resize_keypoints(
    keypoints: Tensor, original_size: List[int], new_size: List[int]
) -> Tensor:
    """Rescale keypoint (x, y) coordinates from ``original_size`` to ``new_size``.

    Both sizes are (height, width) pairs; the input tensor is not modified.
    """
    dev = keypoints.device
    # Per-axis scale factors, computed on the keypoints' device.
    ratio_h, ratio_w = (
        flow.tensor(new_dim, dtype=flow.float32, device=dev)
        / flow.tensor(old_dim, dtype=flow.float32, device=dev)
        for new_dim, old_dim in zip(new_size, original_size)
    )
    scaled = keypoints.clone()
    scaled[..., 0] *= ratio_w  # x follows the width ratio
    scaled[..., 1] *= ratio_h  # y follows the height ratio
    return scaled
def _resize_boxes(
    boxes: Tensor, original_size: List[int], new_size: List[int]
) -> Tensor:
    """Rescale ``boxes`` (N x 4, xmin/ymin/xmax/ymax) from ``original_size`` to ``new_size``.

    Both sizes are (height, width) pairs; a new tensor is returned.
    """
    dev = boxes.device
    ratio_height, ratio_width = (
        flow.tensor(new_dim, dtype=flow.float32, device=dev)
        / flow.tensor(old_dim, dtype=flow.float32, device=dev)
        for new_dim, old_dim in zip(new_size, original_size)
    )
    # TODO (<NAME>): use Tensor.unbind
    xmin, ymin, xmax, ymax = boxes.split(1, dim=1)
    scaled_cols = (
        xmin * ratio_width,
        ymin * ratio_height,
        xmax * ratio_width,
        ymax * ratio_height,
    )
    return flow.concat(scaled_cols, dim=1)
class GeneralizedRCNNTransform(nn.Module):
    """
    Performs input / target transformation before feeding the data to a
    GeneralizedRCNN model.

    The transformations it performs are:
        - input normalization (mean subtraction and std division)
        - input / target resizing to match min_size / max_size

    It returns an ImageList for the inputs, and a List[Dict[str, Tensor]]
    for the targets.
    """

    def __init__(
        self,
        min_size: int,
        max_size: int,
        image_mean: List[float],
        image_std: List[float],
        size_divisible: int = 32,
        fixed_size: Optional[Tuple[int, int]] = None,
    ):
        super(GeneralizedRCNNTransform, self).__init__()
        if not isinstance(min_size, (list, tuple)):
            # Normalize the scalar form so random.choice() in resize() always
            # operates on a sequence of candidate sizes.
            min_size = (min_size,)
        self.min_size = min_size
        self.max_size = max_size
        self.image_mean = image_mean
        self.image_std = image_std
        # Batched image tensors are zero-padded so H and W are multiples of this.
        self.size_divisible = size_divisible
        # If set, every image is resized to this exact size instead of being
        # scaled by min_size / max_size.
        self.fixed_size = fixed_size

    def forward(
        self, images: List[Tensor], targets: Optional[List[Dict[str, Tensor]]] = None
    ) -> Tuple[ImageList, Optional[List[Dict[str, Tensor]]]]:
        """Normalize and resize each image (and its target), then batch them.

        Args:
            images: list of [C, H, W] float tensors.
            targets: optional per-image dicts ("boxes", "masks", "keypoints", ...).

        Returns:
            (ImageList, targets): padded batch plus per-image pre-padding sizes,
            and the (possibly resized) targets.

        Raises:
            ValueError: if any image is not a 3-D tensor.
        """
        images = [img for img in images]
        if targets is not None:
            # Shallow-copy each target dict so resize() does not mutate the
            # caller's dictionaries.
            targets_copy: List[Dict[str, Tensor]] = []
            for t in targets:
                data: Dict[str, Tensor] = {}
                for k, v in t.items():
                    data[k] = v
                targets_copy.append(data)
            targets = targets_copy
        for i in range(len(images)):
            image = images[i]
            target_index = targets[i] if targets is not None else None
            if image.dim() != 3:
                raise ValueError(
                    "image is expected to be a list of 3d tensors "
                    "of shape [C, H, W], got {}".format(image.shape)
                )
            image = self.normalize(image)
            image, target_index = self.resize(image, target_index)
            images[i] = image
            if targets is not None and target_index is not None:
                targets[i] = target_index
        # Record post-resize (pre-padding) sizes; postprocess() needs them.
        image_sizes = [img.shape[-2:] for img in images]
        images = self.batch_images(images, size_divisible=self.size_divisible)
        image_sizes_list: List[Tuple[int, int]] = []
        for image_size in image_sizes:
            assert len(image_size) == 2
            image_sizes_list.append((image_size[0], image_size[1]))
        image_list = ImageList(images, image_sizes_list)
        return image_list, targets

    def normalize(self, image: Tensor) -> Tensor:
        """Channel-wise (image - mean) / std for a float [C, H, W] tensor.

        Raises:
            TypeError: if the image is not of a floating dtype.
        """
        if not image.is_floating_point():
            raise TypeError(
                f"Expected input images to be of floating type (in range [0, 1]), "
                f"but found type {image.dtype} instead"
            )
        dtype, device = image.dtype, image.device
        mean = flow.as_tensor(self.image_mean, dtype=dtype, device=device)
        std = flow.as_tensor(self.image_std, dtype=dtype, device=device)
        return (image - mean[:, None, None]) / std[:, None, None]

    def resize(
        self, image: Tensor, target: Optional[Dict[str, Tensor]] = None,
    ) -> Tuple[Tensor, Optional[Dict[str, Tensor]]]:
        """Resize one image plus its boxes/masks/keypoints to the configured scale."""
        h, w = image.shape[-2:]
        if self.training:
            # Multi-scale training: sample one of the candidate min sizes.
            size = float(random.choice(self.min_size))
        else:
            # assume for now that testing uses the largest scale
            size = float(self.min_size[-1])
        image, target = _resize_image_and_masks(
            image, size, float(self.max_size), target, self.fixed_size
        )
        if target is None:
            return image, target
        bbox = target["boxes"]
        bbox = _resize_boxes(bbox, (h, w), image.shape[-2:])
        target["boxes"] = bbox
        if "keypoints" in target:
            keypoints = target["keypoints"]
            keypoints = _resize_keypoints(keypoints, (h, w), image.shape[-2:])
            target["keypoints"] = keypoints
        return image, target

    def max_by_axis(self, the_list: List[List[int]]) -> List[int]:
        """Element-wise maximum across a list of equal-length int lists.

        The input is never modified.
        """
        # BUGFIX: copy the first entry. The previous version aliased
        # the_list[0] and mutated the caller's sublist in place while
        # accumulating the maxima.
        maxes = list(the_list[0])
        for sublist in the_list[1:]:
            for index, item in enumerate(sublist):
                maxes[index] = max(maxes[index], item)
        return maxes

    def batch_images(self, images: List[Tensor], size_divisible: int = 32) -> Tensor:
        """Zero-pad all images to a common [C, H, W] and stack into one tensor.

        H and W of the padded shape are rounded up to a multiple of
        ``size_divisible``.
        """
        max_size = self.max_by_axis([list(img.shape) for img in images])
        stride = float(size_divisible)
        max_size[1] = int(math.ceil(float(max_size[1]) / stride) * stride)
        max_size[2] = int(math.ceil(float(max_size[2]) / stride) * stride)
        batch_shape = [len(images)] + max_size
        # TODO (<NAME>): use tensor.new_full
        batched_imgs = flow.full(
            batch_shape, 0, dtype=images[0].dtype, device=images[0].device
        )
        for i in range(batched_imgs.shape[0]):
            img = images[i]
            # Copy each image into the top-left corner of its padded slot.
            batched_imgs[i, : img.shape[0], : img.shape[1], : img.shape[2]] = img
        return batched_imgs

    def postprocess(
        self,
        result: List[Dict[str, Tensor]],
        image_shapes: List[Tuple[int, int]],
        original_image_sizes: List[Tuple[int, int]],
    ) -> List[Dict[str, Tensor]]:
        """Map predictions from resized-image coordinates back to the originals.

        No-op during training; mutates and returns ``result``.
        """
        if self.training:
            return result
        for i, (pred, im_s, o_im_s) in enumerate(
            zip(result, image_shapes, original_image_sizes)
        ):
            boxes = pred["boxes"]
            boxes = _resize_boxes(boxes, im_s, o_im_s)
            result[i]["boxes"] = boxes
            if "masks" in pred:
                masks = pred["masks"]
                masks = paste_masks_in_image(masks, boxes, o_im_s)
                result[i]["masks"] = masks
            if "keypoints" in pred:
                keypoints = pred["keypoints"]
                keypoints = _resize_keypoints(keypoints, im_s, o_im_s)
                result[i]["keypoints"] = keypoints
        return result

    def __repr__(self) -> str:
        format_string = self.__class__.__name__ + "("
        _indent = "\n    "
        format_string += "{0}Normalize(mean={1}, std={2})".format(
            _indent, self.image_mean, self.image_std
        )
        format_string += "{0}Resize(min_size={1}, max_size={2}, mode='bilinear')".format(
            _indent, self.min_size, self.max_size
        )
        format_string += "\n)"
        return format_string
| [
"oneflow.as_tensor",
"oneflow.concat",
"oneflow.minimum",
"oneflow.tensor",
"oneflow.max",
"oneflow.nn.functional.interpolate",
"oneflow.full",
"oneflow.min"
] | [((579, 608), 'oneflow.tensor', 'flow.tensor', (['image.shape[-2:]'], {}), '(image.shape[-2:])\n', (590, 608), True, 'import oneflow as flow\n'), ((2801, 2845), 'oneflow.concat', 'flow.concat', (['(xmin, ymin, xmax, ymax)'], {'dim': '(1)'}), '((xmin, ymin, xmax, ymax), dim=1)\n', (2812, 2845), True, 'import oneflow as flow\n'), ((963, 1027), 'oneflow.minimum', 'flow.minimum', (['(self_min_size / min_size)', '(self_max_size / max_size)'], {}), '(self_min_size / min_size, self_max_size / max_size)\n', (975, 1027), True, 'import oneflow as flow\n'), ((1115, 1291), 'oneflow.nn.functional.interpolate', 'flow.nn.functional.interpolate', (['image[None]'], {'size': 'size', 'scale_factor': 'scale_factor', 'mode': '"""bilinear"""', 'recompute_scale_factor': 'recompute_scale_factor', 'align_corners': '(False)'}), "(image[None], size=size, scale_factor=\n scale_factor, mode='bilinear', recompute_scale_factor=\n recompute_scale_factor, align_corners=False)\n", (1145, 1291), True, 'import oneflow as flow\n'), ((5758, 5817), 'oneflow.as_tensor', 'flow.as_tensor', (['self.image_mean'], {'dtype': 'dtype', 'device': 'device'}), '(self.image_mean, dtype=dtype, device=device)\n', (5772, 5817), True, 'import oneflow as flow\n'), ((5832, 5890), 'oneflow.as_tensor', 'flow.as_tensor', (['self.image_std'], {'dtype': 'dtype', 'device': 'device'}), '(self.image_std, dtype=dtype, device=device)\n', (5846, 5890), True, 'import oneflow as flow\n'), ((7641, 7714), 'oneflow.full', 'flow.full', (['batch_shape', '(0)'], {'dtype': 'images[0].dtype', 'device': 'images[0].device'}), '(batch_shape, 0, dtype=images[0].dtype, device=images[0].device)\n', (7650, 7714), True, 'import oneflow as flow\n'), ((1871, 1930), 'oneflow.tensor', 'flow.tensor', (['s'], {'dtype': 'flow.float32', 'device': 'keypoints.device'}), '(s, dtype=flow.float32, device=keypoints.device)\n', (1882, 1930), True, 'import oneflow as flow\n'), ((1941, 2005), 'oneflow.tensor', 'flow.tensor', (['s_orig'], {'dtype': 'flow.float32', 
'device': 'keypoints.device'}), '(s_orig, dtype=flow.float32, device=keypoints.device)\n', (1952, 2005), True, 'import oneflow as flow\n'), ((2351, 2406), 'oneflow.tensor', 'flow.tensor', (['s'], {'dtype': 'flow.float32', 'device': 'boxes.device'}), '(s, dtype=flow.float32, device=boxes.device)\n', (2362, 2406), True, 'import oneflow as flow\n'), ((2417, 2477), 'oneflow.tensor', 'flow.tensor', (['s_orig'], {'dtype': 'flow.float32', 'device': 'boxes.device'}), '(s_orig, dtype=flow.float32, device=boxes.device)\n', (2428, 2477), True, 'import oneflow as flow\n'), ((844, 862), 'oneflow.min', 'flow.min', (['im_shape'], {}), '(im_shape)\n', (852, 862), True, 'import oneflow as flow\n'), ((905, 923), 'oneflow.max', 'flow.max', (['im_shape'], {}), '(im_shape)\n', (913, 923), True, 'import oneflow as flow\n'), ((6183, 6211), 'random.choice', 'random.choice', (['self.min_size'], {}), '(self.min_size)\n', (6196, 6211), False, 'import random\n')] |
"""
Modified from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/layers/cbam.py
"""
import oneflow as flow
import oneflow.nn as nn
import oneflow.nn.functional as F
from flowvision.layers.blocks import ConvBnAct
from flowvision.layers.helpers import make_divisible
class ChannelAttn(nn.Module):
""" Original CBAM channel attention module, currently avg + max pool variant only.
"""
def __init__(
self,
channels,
rd_ratio=1.0 / 16,
rd_channels=None,
rd_divisor=1,
act_layer=nn.ReLU,
gate_layer=nn.Sigmoid,
mlp_bias=False,
):
super(ChannelAttn, self).__init__()
if not rd_channels:
rd_channels = make_divisible(
channels * rd_ratio, rd_divisor, round_limit=0.0
)
self.fc1 = nn.Conv2d(channels, rd_channels, 1, bias=mlp_bias)
self.act = act_layer(inplace=True)
self.fc2 = nn.Conv2d(rd_channels, channels, 1, bias=mlp_bias)
self.gate = gate_layer()
def forward(self, x):
x_avg = self.fc2(self.act(self.fc1(x.mean((2, 3), keepdim=True))))
# TODO: switch F.max_pool2d to amax
x_max = self.fc2(
self.act(
self.fc1(
F.max_pool2d(
x,
kernel_size=(x.size(2), x.size(3)),
stride=(x.size(2), x.size(3)),
)
)
)
)
return x * self.gate(x_avg + x_max)
class SpatialAttn(nn.Module):
""" Original CBAM spatial attention module
"""
def __init__(self, kernel_size=7, gate_layer=nn.Sigmoid):
super(SpatialAttn, self).__init__()
# TODO: update ConvBnAct
self.conv = ConvBnAct(
2, 1, kernel_size=kernel_size, padding=kernel_size // 2, act_layer=None
)
self.gate = gate_layer()
def forward(self, x):
# TODO: switch flow.max to tensor.amax
x_attn = flow.cat(
[x.mean(dim=1, keepdim=True), flow.max(x, dim=1, keepdim=True)[0]], dim=1
)
x_attn = self.conv(x_attn)
return x * self.gate(x_attn)
class CbamModule(nn.Module):
def __init__(
self,
channels,
rd_ratio=1.0 / 16,
rd_channels=None,
rd_divisor=1,
spatial_kernel_size=7,
act_layer=nn.ReLU,
gate_layer=nn.Sigmoid,
mlp_bias=False,
):
super(CbamModule, self).__init__()
self.channel = ChannelAttn(
channels,
rd_ratio=rd_ratio,
rd_channels=rd_channels,
rd_divisor=rd_divisor,
act_layer=act_layer,
gate_layer=gate_layer,
mlp_bias=mlp_bias,
)
self.spatial = SpatialAttn(spatial_kernel_size, gate_layer=gate_layer)
def forward(self, x):
x = self.channel(x)
x = self.spatial(x)
return x
| [
"oneflow.nn.Conv2d",
"oneflow.max"
] | [((848, 898), 'oneflow.nn.Conv2d', 'nn.Conv2d', (['channels', 'rd_channels', '(1)'], {'bias': 'mlp_bias'}), '(channels, rd_channels, 1, bias=mlp_bias)\n', (857, 898), True, 'import oneflow.nn as nn\n'), ((961, 1011), 'oneflow.nn.Conv2d', 'nn.Conv2d', (['rd_channels', 'channels', '(1)'], {'bias': 'mlp_bias'}), '(rd_channels, channels, 1, bias=mlp_bias)\n', (970, 1011), True, 'import oneflow.nn as nn\n'), ((1796, 1882), 'flowvision.layers.blocks.ConvBnAct', 'ConvBnAct', (['(2)', '(1)'], {'kernel_size': 'kernel_size', 'padding': '(kernel_size // 2)', 'act_layer': 'None'}), '(2, 1, kernel_size=kernel_size, padding=kernel_size // 2,\n act_layer=None)\n', (1805, 1882), False, 'from flowvision.layers.blocks import ConvBnAct\n'), ((734, 798), 'flowvision.layers.helpers.make_divisible', 'make_divisible', (['(channels * rd_ratio)', 'rd_divisor'], {'round_limit': '(0.0)'}), '(channels * rd_ratio, rd_divisor, round_limit=0.0)\n', (748, 798), False, 'from flowvision.layers.helpers import make_divisible\n'), ((2077, 2109), 'oneflow.max', 'flow.max', (['x'], {'dim': '(1)', 'keepdim': '(True)'}), '(x, dim=1, keepdim=True)\n', (2085, 2109), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow
from oneflow.framework.docstr.utils import add_docstr
add_docstr(
oneflow._C.cosine_similarity,
r"""
cosine_similarity(x1, x2, dim=1, eps=1e-8) -> Tensor
The documentation is referenced from: https://pytorch.org/docs/stable/generated/torch.nn.functional.cosine_similarity.html#torch.nn.functional.cosine_similarity
Returns cosine similarity between ``x1`` and ``x2``, computed along dim. ``x1`` and ``x2`` must be broadcastable
to a common shape. ``dim`` refers to the dimension in this common shape. Dimension ``dim`` of the output is
squeezed (see :func:`oneflow.squeeze`), resulting in the
output tensor having 1 fewer dimension.
.. math ::
\text{similarity} = \dfrac{x_1 \cdot x_2}{\max(\Vert x_1 \Vert _2 \cdot \Vert x_2 \Vert _2, \epsilon)}
Args:
x1 (Tensor): First input.
x2 (Tensor): Second input.
dim (int, optional): Dimension along which cosine similarity is computed. Default: 1
eps (float, optional): Small value to avoid division by zero.
Default: 1e-8
For examples:
.. code-block:: python
>>> import oneflow as flow
>>> import oneflow.nn.functional as F
>>> input1 = flow.randn(100, 128)
>>> input2 = flow.randn(100, 128)
>>> output = F.cosine_similarity(input1, input2)
""",
)
| [
"oneflow.framework.docstr.utils.add_docstr"
] | [((660, 1960), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow._C.cosine_similarity', '"""\n cosine_similarity(x1, x2, dim=1, eps=1e-8) -> Tensor\n\n The documentation is referenced from: https://pytorch.org/docs/stable/generated/torch.nn.functional.cosine_similarity.html#torch.nn.functional.cosine_similarity\n\n Returns cosine similarity between ``x1`` and ``x2``, computed along dim. ``x1`` and ``x2`` must be broadcastable\n to a common shape. ``dim`` refers to the dimension in this common shape. Dimension ``dim`` of the output is\n squeezed (see :func:`oneflow.squeeze`), resulting in the\n output tensor having 1 fewer dimension.\n\n .. math ::\n \\\\text{similarity} = \\\\dfrac{x_1 \\\\cdot x_2}{\\\\max(\\\\Vert x_1 \\\\Vert _2 \\\\cdot \\\\Vert x_2 \\\\Vert _2, \\\\epsilon)}\n \n Args:\n x1 (Tensor): First input.\n x2 (Tensor): Second input.\n dim (int, optional): Dimension along which cosine similarity is computed. Default: 1\n eps (float, optional): Small value to avoid division by zero.\n Default: 1e-8\n\n For examples:\n\n .. code-block:: python\n\n >>> import oneflow as flow\n >>> import oneflow.nn.functional as F\n >>> input1 = flow.randn(100, 128)\n >>> input2 = flow.randn(100, 128)\n >>> output = F.cosine_similarity(input1, input2)\n """'], {}), '(oneflow._C.cosine_similarity,\n """\n cosine_similarity(x1, x2, dim=1, eps=1e-8) -> Tensor\n\n The documentation is referenced from: https://pytorch.org/docs/stable/generated/torch.nn.functional.cosine_similarity.html#torch.nn.functional.cosine_similarity\n\n Returns cosine similarity between ``x1`` and ``x2``, computed along dim. ``x1`` and ``x2`` must be broadcastable\n to a common shape. ``dim`` refers to the dimension in this common shape. Dimension ``dim`` of the output is\n squeezed (see :func:`oneflow.squeeze`), resulting in the\n output tensor having 1 fewer dimension.\n\n .. 
math ::\n \\\\text{similarity} = \\\\dfrac{x_1 \\\\cdot x_2}{\\\\max(\\\\Vert x_1 \\\\Vert _2 \\\\cdot \\\\Vert x_2 \\\\Vert _2, \\\\epsilon)}\n \n Args:\n x1 (Tensor): First input.\n x2 (Tensor): Second input.\n dim (int, optional): Dimension along which cosine similarity is computed. Default: 1\n eps (float, optional): Small value to avoid division by zero.\n Default: 1e-8\n\n For examples:\n\n .. code-block:: python\n\n >>> import oneflow as flow\n >>> import oneflow.nn.functional as F\n >>> input1 = flow.randn(100, 128)\n >>> input2 = flow.randn(100, 128)\n >>> output = F.cosine_similarity(input1, input2)\n """\n )\n', (670, 1960), False, 'from oneflow.framework.docstr.utils import add_docstr\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow
from oneflow.framework.docstr.utils import add_docstr
add_docstr(
oneflow.F.adaptive_avg_pool1d,
r"""
adaptive_avg_pool1d(input, output_size) -> Tensor
Applies a 1D adaptive average pooling over an input signal composed of
several input planes.
See :class:`~oneflow.nn.AdaptiveAvgPool1d` for details and output shape.
Args:
input: the input tensor
output_size: the target output size (single integer)
For examples:
.. code-block:: python
>>> import oneflow as flow
>>> import numpy as np
>>> arr = np.array([[[ 0.0558, -0.6875, -1.6544, -0.6226, 0.1018, 0.0502, -1.2538, 0.1491]]])
>>> input = flow.Tensor(arr, dtype=flow.float32)
>>> flow.nn.functional.adaptive_avg_pool1d(input, output_size=[4])
tensor([[[-0.3158, -1.1385, 0.0760, -0.5524]]], dtype=oneflow.float32)
""",
)
add_docstr(
oneflow.F.adaptive_avg_pool2d,
r"""
adaptive_avg_pool2d(input, output_size) -> Tensor
Applies a 2D adaptive average pooling over an input signal composed of several input planes.
See :class:`~oneflow.nn.AdaptiveAvgPool2d` for details and output shape.
Args:
input: the input tensor
output_size: the target output size (single integer or double-integer tuple)
For examples:
.. code-block:: python
>>> import oneflow as flow
>>> import numpy as np
>>> arr = np.array([[[[ 0.1004, 0.0488, -1.0515, 0.9466],[ 0.4538, 0.2361, 1.3437, 0.398 ],[ 0.0558, -0.6875, -1.6544, -0.6226],[ 0.1018, 0.0502, -1.2538, 0.1491]]]])
>>> input = flow.Tensor(arr, dtype=flow.float32)
>>> outputs = flow.nn.functional.adaptive_avg_pool2d(input, (2, 2))
""",
)
add_docstr(
oneflow.F.adaptive_avg_pool3d,
r"""
adaptive_avg_pool3d(input, output_size) -> Tensor
Applies a 3D adaptive average pooling over an input signal composed of several input planes.
See :class:`~oneflow.nn.AdaptiveAvgPool3d` for details and output shape.
Args:
input: the input tensor
output_size: the target output size (single integer or triple-integer tuple)
For examples:
.. code-block:: python
>>> import oneflow as flow
>>> import numpy as np
>>> input = flow.Tensor(np.random.randn(1, 1, 4, 4, 4), dtype=flow.float32)
>>> output = flow.nn.functional.adaptive_avg_pool3d(input, (2, 2, 2))
""",
)
| [
"oneflow.framework.docstr.utils.add_docstr"
] | [((660, 1493), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.F.adaptive_avg_pool1d', '"""\n adaptive_avg_pool1d(input, output_size) -> Tensor\n\n Applies a 1D adaptive average pooling over an input signal composed of\n several input planes.\n\n See :class:`~oneflow.nn.AdaptiveAvgPool1d` for details and output shape.\n\n Args:\n input: the input tensor\n output_size: the target output size (single integer)\n\n For examples:\n\n .. code-block:: python\n\n >>> import oneflow as flow\n >>> import numpy as np\n\n >>> arr = np.array([[[ 0.0558, -0.6875, -1.6544, -0.6226, 0.1018, 0.0502, -1.2538, 0.1491]]])\n >>> input = flow.Tensor(arr, dtype=flow.float32)\n >>> flow.nn.functional.adaptive_avg_pool1d(input, output_size=[4])\n tensor([[[-0.3158, -1.1385, 0.0760, -0.5524]]], dtype=oneflow.float32)\n\n """'], {}), '(oneflow.F.adaptive_avg_pool1d,\n """\n adaptive_avg_pool1d(input, output_size) -> Tensor\n\n Applies a 1D adaptive average pooling over an input signal composed of\n several input planes.\n\n See :class:`~oneflow.nn.AdaptiveAvgPool1d` for details and output shape.\n\n Args:\n input: the input tensor\n output_size: the target output size (single integer)\n\n For examples:\n\n .. 
code-block:: python\n\n >>> import oneflow as flow\n >>> import numpy as np\n\n >>> arr = np.array([[[ 0.0558, -0.6875, -1.6544, -0.6226, 0.1018, 0.0502, -1.2538, 0.1491]]])\n >>> input = flow.Tensor(arr, dtype=flow.float32)\n >>> flow.nn.functional.adaptive_avg_pool1d(input, output_size=[4])\n tensor([[[-0.3158, -1.1385, 0.0760, -0.5524]]], dtype=oneflow.float32)\n\n """\n )\n', (670, 1493), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((1497, 2348), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.F.adaptive_avg_pool2d', '"""\n adaptive_avg_pool2d(input, output_size) -> Tensor\n\n Applies a 2D adaptive average pooling over an input signal composed of several input planes.\n\n See :class:`~oneflow.nn.AdaptiveAvgPool2d` for details and output shape.\n\n Args:\n input: the input tensor\n output_size: the target output size (single integer or double-integer tuple)\n\n For examples:\n\n .. code-block:: python\n\n >>> import oneflow as flow\n >>> import numpy as np\n\n >>> arr = np.array([[[[ 0.1004, 0.0488, -1.0515, 0.9466],[ 0.4538, 0.2361, 1.3437, 0.398 ],[ 0.0558, -0.6875, -1.6544, -0.6226],[ 0.1018, 0.0502, -1.2538, 0.1491]]]])\n >>> input = flow.Tensor(arr, dtype=flow.float32)\n >>> outputs = flow.nn.functional.adaptive_avg_pool2d(input, (2, 2))\n """'], {}), '(oneflow.F.adaptive_avg_pool2d,\n """\n adaptive_avg_pool2d(input, output_size) -> Tensor\n\n Applies a 2D adaptive average pooling over an input signal composed of several input planes.\n\n See :class:`~oneflow.nn.AdaptiveAvgPool2d` for details and output shape.\n\n Args:\n input: the input tensor\n output_size: the target output size (single integer or double-integer tuple)\n\n For examples:\n\n .. 
code-block:: python\n\n >>> import oneflow as flow\n >>> import numpy as np\n\n >>> arr = np.array([[[[ 0.1004, 0.0488, -1.0515, 0.9466],[ 0.4538, 0.2361, 1.3437, 0.398 ],[ 0.0558, -0.6875, -1.6544, -0.6226],[ 0.1018, 0.0502, -1.2538, 0.1491]]]])\n >>> input = flow.Tensor(arr, dtype=flow.float32)\n >>> outputs = flow.nn.functional.adaptive_avg_pool2d(input, (2, 2))\n """\n )\n', (1507, 2348), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((2353, 3060), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.F.adaptive_avg_pool3d', '"""\n adaptive_avg_pool3d(input, output_size) -> Tensor\n\n Applies a 3D adaptive average pooling over an input signal composed of several input planes.\n\n See :class:`~oneflow.nn.AdaptiveAvgPool3d` for details and output shape.\n\n Args:\n input: the input tensor\n output_size: the target output size (single integer or triple-integer tuple)\n\n For examples:\n\n .. code-block:: python\n\n >>> import oneflow as flow \n >>> import numpy as np\n\n >>> input = flow.Tensor(np.random.randn(1, 1, 4, 4, 4), dtype=flow.float32)\n >>> output = flow.nn.functional.adaptive_avg_pool3d(input, (2, 2, 2))\n """'], {}), '(oneflow.F.adaptive_avg_pool3d,\n """\n adaptive_avg_pool3d(input, output_size) -> Tensor\n\n Applies a 3D adaptive average pooling over an input signal composed of several input planes.\n\n See :class:`~oneflow.nn.AdaptiveAvgPool3d` for details and output shape.\n\n Args:\n input: the input tensor\n output_size: the target output size (single integer or triple-integer tuple)\n\n For examples:\n\n .. code-block:: python\n\n >>> import oneflow as flow \n >>> import numpy as np\n\n >>> input = flow.Tensor(np.random.randn(1, 1, 4, 4, 4), dtype=flow.float32)\n >>> output = flow.nn.functional.adaptive_avg_pool3d(input, (2, 2, 2))\n """\n )\n', (2363, 3060), False, 'from oneflow.framework.docstr.utils import add_docstr\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import time
import unittest
import oneflow as flow
import oneflow.nn as nn
import oneflow.unittest
import oneflow.utils.vision.transforms as transforms
# reference: http://tangshusen.me/Dive-into-DL-PyTorch/#/chapter05_CNN/5.5_lenet
class LeNet(nn.Module):
def __init__(self):
super(LeNet, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(1, 6, kernel_size=5), # in_channels, out_channels, kernel_size
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2), # kernel_size, stride
nn.Conv2d(6, 16, 5),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2),
)
self.fc = nn.Sequential(
nn.Linear(16 * 4 * 4, 120),
nn.ReLU(),
nn.Linear(120, 84),
nn.ReLU(),
nn.Linear(84, 10),
)
def forward(self, img):
feature = self.conv(img)
feature = feature.flatten(start_dim=1)
output = self.fc(feature)
return output
def load_data_fashion_mnist(
batch_size,
resize=None,
root="./data-test/fashion-mnist",
download=True,
source_url=None,
num_workers=0,
):
"""Download the Fashion-MNIST dataset and then load into memory."""
root = os.path.expanduser(root)
trans = []
if resize:
trans.append(transforms.Resize(resize))
trans.append(transforms.ToTensor())
transform = transforms.Compose(trans)
mnist_train = flow.utils.vision.datasets.FashionMNIST(
root=root,
train=True,
transform=transform,
download=download,
source_url=source_url,
)
mnist_test = flow.utils.vision.datasets.FashionMNIST(
root=root,
train=False,
transform=transform,
download=download,
source_url=source_url,
)
train_iter = flow.utils.data.DataLoader(
mnist_train, batch_size, shuffle=True, num_workers=num_workers
)
test_iter = flow.utils.data.DataLoader(
mnist_test, batch_size, shuffle=False, num_workers=num_workers
)
return train_iter, test_iter
def evaluate_accuracy(data_iter, net, device=None):
if device is None and isinstance(net, nn.Module):
device = list(net.parameters())[0].device
acc_sum, n = 0.0, 0
net.eval()
with flow.no_grad():
for X, y in data_iter:
X = X.to(device=device)
y = y.to(device=device)
acc_sum += (net(X).argmax(dim=1).numpy() == y.numpy()).sum()
n += y.shape[0]
net.train()
return acc_sum / n
def test_train_and_eval(test_case):
device = flow.device("cuda")
net = LeNet()
net.to(device)
batch_size = 256
data_dir = os.getenv("ONEFLOW_TEST_CACHE_DIR") + "/data-test/fashion-mnist"
source_url = "https://oneflow-public.oss-cn-beijing.aliyuncs.com/datasets/mnist/Fashion-MNIST/"
train_iter, test_iter = load_data_fashion_mnist(
batch_size=batch_size,
resize=None,
root=data_dir,
download=True,
source_url=source_url,
num_workers=0,
)
loss = nn.CrossEntropyLoss()
loss.to(device)
lr, num_epochs = 0.02, 1
optimizer = flow.optim.SGD(net.parameters(), lr=lr, momentum=0.9)
final_accuracy = 0
for epoch in range(num_epochs):
train_l_sum, train_acc_sum, n, batch_count, start = 0.0, 0.0, 0, 0, time.time()
for X, y in train_iter:
X = X.to(device=device)
y = y.to(device=device)
# forward
y_hat = net(X)
l = loss(y_hat, y).sum()
# backward
l.backward()
optimizer.step()
optimizer.zero_grad()
train_l_sum += l.numpy()
train_acc_sum += (y_hat.argmax(dim=1).numpy() == y.numpy()).sum()
n += y.shape[0]
batch_count += 1
test_acc = evaluate_accuracy(test_iter, net)
final_accuracy = train_acc_sum / n
print(
"epoch %d, loss %.4f, train acc %.3f, test acc %.3f, time %.1f sec"
% (
epoch + 1,
train_l_sum / batch_count,
final_accuracy,
test_acc,
time.time() - start,
)
)
test_case.assertLess(0.52, final_accuracy)
@flow.unittest.skip_unless_1n1d()
class TestLenet(flow.unittest.TestCase):
def test_lenet(test_case):
test_train_and_eval(test_case)
if __name__ == "__main__":
unittest.main()
# 1 epoch training log
# epoch 1, loss 1.1473, train acc 0.569, test acc 0.742, time 162.4 sec
# epoch 2, loss 0.5736, train acc 0.784, test acc 0.796, time 158.1 sec
# epoch 3, loss 0.4761, train acc 0.826, test acc 0.821, time 154.0 sec
# epoch 4, loss 0.4215, train acc 0.848, test acc 0.855, time 160.3 sec
| [
"oneflow.utils.vision.transforms.Resize",
"oneflow.nn.Linear",
"oneflow.utils.vision.transforms.ToTensor",
"oneflow.no_grad",
"oneflow.nn.ReLU",
"oneflow.utils.vision.datasets.FashionMNIST",
"oneflow.utils.vision.transforms.Compose",
"oneflow.nn.CrossEntropyLoss",
"oneflow.nn.Conv2d",
"oneflow.uni... | [((4915, 4947), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (4945, 4947), True, 'import oneflow as flow\n'), ((1861, 1885), 'os.path.expanduser', 'os.path.expanduser', (['root'], {}), '(root)\n', (1879, 1885), False, 'import os\n'), ((2020, 2045), 'oneflow.utils.vision.transforms.Compose', 'transforms.Compose', (['trans'], {}), '(trans)\n', (2038, 2045), True, 'import oneflow.utils.vision.transforms as transforms\n'), ((2065, 2195), 'oneflow.utils.vision.datasets.FashionMNIST', 'flow.utils.vision.datasets.FashionMNIST', ([], {'root': 'root', 'train': '(True)', 'transform': 'transform', 'download': 'download', 'source_url': 'source_url'}), '(root=root, train=True, transform=\n transform, download=download, source_url=source_url)\n', (2104, 2195), True, 'import oneflow as flow\n'), ((2255, 2386), 'oneflow.utils.vision.datasets.FashionMNIST', 'flow.utils.vision.datasets.FashionMNIST', ([], {'root': 'root', 'train': '(False)', 'transform': 'transform', 'download': 'download', 'source_url': 'source_url'}), '(root=root, train=False, transform=\n transform, download=download, source_url=source_url)\n', (2294, 2386), True, 'import oneflow as flow\n'), ((2447, 2541), 'oneflow.utils.data.DataLoader', 'flow.utils.data.DataLoader', (['mnist_train', 'batch_size'], {'shuffle': '(True)', 'num_workers': 'num_workers'}), '(mnist_train, batch_size, shuffle=True,\n num_workers=num_workers)\n', (2473, 2541), True, 'import oneflow as flow\n'), ((2568, 2662), 'oneflow.utils.data.DataLoader', 'flow.utils.data.DataLoader', (['mnist_test', 'batch_size'], {'shuffle': '(False)', 'num_workers': 'num_workers'}), '(mnist_test, batch_size, shuffle=False,\n num_workers=num_workers)\n', (2594, 2662), True, 'import oneflow as flow\n'), ((3222, 3241), 'oneflow.device', 'flow.device', (['"""cuda"""'], {}), "('cuda')\n", (3233, 3241), True, 'import oneflow as flow\n'), ((3704, 3725), 'oneflow.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', 
([], {}), '()\n', (3723, 3725), True, 'import oneflow.nn as nn\n'), ((5092, 5107), 'unittest.main', 'unittest.main', ([], {}), '()\n', (5105, 5107), False, 'import unittest\n'), ((1981, 2002), 'oneflow.utils.vision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2000, 2002), True, 'import oneflow.utils.vision.transforms as transforms\n'), ((2912, 2926), 'oneflow.no_grad', 'flow.no_grad', ([], {}), '()\n', (2924, 2926), True, 'import oneflow as flow\n'), ((3316, 3351), 'os.getenv', 'os.getenv', (['"""ONEFLOW_TEST_CACHE_DIR"""'], {}), "('ONEFLOW_TEST_CACHE_DIR')\n", (3325, 3351), False, 'import os\n'), ((969, 999), 'oneflow.nn.Conv2d', 'nn.Conv2d', (['(1)', '(6)'], {'kernel_size': '(5)'}), '(1, 6, kernel_size=5)\n', (978, 999), True, 'import oneflow.nn as nn\n'), ((1055, 1064), 'oneflow.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1062, 1064), True, 'import oneflow.nn as nn\n'), ((1078, 1115), 'oneflow.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(2)', 'stride': '(2)'}), '(kernel_size=2, stride=2)\n', (1090, 1115), True, 'import oneflow.nn as nn\n'), ((1152, 1171), 'oneflow.nn.Conv2d', 'nn.Conv2d', (['(6)', '(16)', '(5)'], {}), '(6, 16, 5)\n', (1161, 1171), True, 'import oneflow.nn as nn\n'), ((1185, 1194), 'oneflow.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1192, 1194), True, 'import oneflow.nn as nn\n'), ((1208, 1245), 'oneflow.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(2)', 'stride': '(2)'}), '(kernel_size=2, stride=2)\n', (1220, 1245), True, 'import oneflow.nn as nn\n'), ((1302, 1328), 'oneflow.nn.Linear', 'nn.Linear', (['(16 * 4 * 4)', '(120)'], {}), '(16 * 4 * 4, 120)\n', (1311, 1328), True, 'import oneflow.nn as nn\n'), ((1342, 1351), 'oneflow.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1349, 1351), True, 'import oneflow.nn as nn\n'), ((1365, 1383), 'oneflow.nn.Linear', 'nn.Linear', (['(120)', '(84)'], {}), '(120, 84)\n', (1374, 1383), True, 'import oneflow.nn as nn\n'), ((1397, 1406), 'oneflow.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', 
(1404, 1406), True, 'import oneflow.nn as nn\n'), ((1420, 1437), 'oneflow.nn.Linear', 'nn.Linear', (['(84)', '(10)'], {}), '(84, 10)\n', (1429, 1437), True, 'import oneflow.nn as nn\n'), ((1937, 1962), 'oneflow.utils.vision.transforms.Resize', 'transforms.Resize', (['resize'], {}), '(resize)\n', (1954, 1962), True, 'import oneflow.utils.vision.transforms as transforms\n'), ((3982, 3993), 'time.time', 'time.time', ([], {}), '()\n', (3991, 3993), False, 'import time\n'), ((4820, 4831), 'time.time', 'time.time', ([], {}), '()\n', (4829, 4831), False, 'import time\n')] |
import oneflow as flow
import oneflow.typing as tp
from typing import Tuple
import numpy as np
import time
import os
from yolov3_tiny import Yolov3_tiny
from dataset import Dataset
from config import cfg
np.set_printoptions(threshold=np.inf)
train_label_sbbox_input_size = int(cfg.TRAIN.INPUT_SIZE[0]/cfg.YOLO.STRIDES[0])
train_label_lbbox_input_size = int(cfg.TRAIN.INPUT_SIZE[0]/cfg.YOLO.STRIDES[1])
train_output_channel = (cfg.YOLO.CLASS_NUM+5)*cfg.YOLO.ANCHOR_PER_SCALE
dataset = Dataset('train')
cfg.TRAIN.BATCH_NUM_PER_EPOCH = dataset.num_batchs
train_images = tp.Numpy.Placeholder((cfg.TRAIN.BATCH_SIZE, 3, cfg.TRAIN.INPUT_SIZE[0], cfg.TRAIN.INPUT_SIZE[0]))
train_label_sbbox = tp.Numpy.Placeholder((cfg.TRAIN.BATCH_SIZE, train_label_sbbox_input_size,
train_label_sbbox_input_size, cfg.YOLO.ANCHOR_PER_SCALE, cfg.YOLO.CLASS_NUM+5))
train_label_lbbox = tp.Numpy.Placeholder((cfg.TRAIN.BATCH_SIZE, train_label_lbbox_input_size,
train_label_lbbox_input_size, cfg.YOLO.ANCHOR_PER_SCALE, cfg.YOLO.CLASS_NUM+5))
train_true_sbbox = tp.Numpy.Placeholder((cfg.TRAIN.BATCH_SIZE, cfg.TRAIN.MAX_BBOX_PER_SCALE, 4))
train_true_lbbox = tp.Numpy.Placeholder((cfg.TRAIN.BATCH_SIZE, cfg.TRAIN.MAX_BBOX_PER_SCALE, 4))
anchors_s = tp.Numpy.Placeholder((cfg.YOLO.ANCHOR_PER_SCALE, 2))
anchors_l = tp.Numpy.Placeholder((cfg.YOLO.ANCHOR_PER_SCALE, 2))
func_config = flow.FunctionConfig()
model = Yolov3_tiny(cfg, trainable=True)
@flow.global_function(type="train", function_config=func_config)
def train_job(images: train_images, label_sbbox: train_label_sbbox, label_lbbox: train_label_lbbox,
true_sbbox: train_true_sbbox, true_lbbox: train_true_lbbox,
anchors_s: anchors_s, anchors_l: anchors_l
) -> Tuple[tp.Numpy, tp.Numpy, tp.Numpy, tp.Numpy]:
total_loss, giou_loss, conf_loss, prob_loss = model.train(images, label_sbbox, label_lbbox, true_sbbox, true_lbbox, anchors_s, anchors_l)
wramup_steps = cfg.TRAIN.WARMUP_EPOCHS * cfg.TRAIN.BATCH_NUM_PER_EPOCH
warmup_scheduler = flow.optimizer.warmup.linear(wramup_steps, cfg.TRAIN.LEARN_RATE_INIT)
end_steps = (cfg.TRAIN.EPOCHS-cfg.TRAIN.WARMUP_EPOCHS) * cfg.TRAIN.BATCH_NUM_PER_EPOCH
lr_scheduler = flow.optimizer.CosineScheduler(base_lr=cfg.TRAIN.LEARN_RATE_INIT, steps=end_steps, alpha=0, warmup=warmup_scheduler)
flow.optimizer.Adam(lr_scheduler).minimize(total_loss)
# flow.optimizer.SGD(lr_scheduler).minimize([giou_loss, conf_loss, prob_loss])
return total_loss, giou_loss, conf_loss, prob_loss
if __name__ == "__main__":
check_point = flow.train.CheckPoint()
if not cfg.TRAIN.INITIAL_WEIGHT:
check_point.init()
else:
check_point.load(cfg.TRAIN.INITIAL_WEIGHT)
fmt_str = "{:>12} {:>12} {:>12.3f} {:>12.4f} {:>12.4f} {:>12.4f} {:>12.4f}"
print("{:>12} {:>12} {:>12} {:>12} {:>12} {:>12} {:>12}".format('epoch','iter', 'time', 'giou_loss',
'conf_loss', 'prob_loss',
'total_loss'))
global cur_time
cur_time = time.time()
for epoch in range(cfg.TRAIN.EPOCHS):
for iter_, train_data in enumerate(dataset):
total_loss, giou_loss, conf_loss, prob_loss = train_job(*train_data)
if iter_%10==0:
print(fmt_str.format(epoch, iter_, time.time() - cur_time,
np.abs(giou_loss).mean(), np.abs(conf_loss).mean(),
np.abs(prob_loss).mean(), np.abs(total_loss).mean()))
cur_time = time.time()
# check_point.save(os.path.join(cfg.TRAIN.SAVE_MODEL_PATH, "yolov3_snapshot_") + str(epoch + 1))
if (epoch+1)%50==0:
check_point.save(os.path.join(cfg.TRAIN.SAVE_MODEL_PATH, "yolov3_snapshot_") + str(epoch + 1))
| [
"oneflow.global_function",
"oneflow.optimizer.warmup.linear",
"oneflow.optimizer.CosineScheduler",
"oneflow.typing.Numpy.Placeholder",
"oneflow.FunctionConfig",
"oneflow.optimizer.Adam",
"oneflow.train.CheckPoint"
] | [((205, 242), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'threshold': 'np.inf'}), '(threshold=np.inf)\n', (224, 242), True, 'import numpy as np\n'), ((487, 503), 'dataset.Dataset', 'Dataset', (['"""train"""'], {}), "('train')\n", (494, 503), False, 'from dataset import Dataset\n'), ((571, 673), 'oneflow.typing.Numpy.Placeholder', 'tp.Numpy.Placeholder', (['(cfg.TRAIN.BATCH_SIZE, 3, cfg.TRAIN.INPUT_SIZE[0], cfg.TRAIN.INPUT_SIZE[0])'], {}), '((cfg.TRAIN.BATCH_SIZE, 3, cfg.TRAIN.INPUT_SIZE[0], cfg\n .TRAIN.INPUT_SIZE[0]))\n', (591, 673), True, 'import oneflow.typing as tp\n'), ((689, 853), 'oneflow.typing.Numpy.Placeholder', 'tp.Numpy.Placeholder', (['(cfg.TRAIN.BATCH_SIZE, train_label_sbbox_input_size,\n train_label_sbbox_input_size, cfg.YOLO.ANCHOR_PER_SCALE, cfg.YOLO.\n CLASS_NUM + 5)'], {}), '((cfg.TRAIN.BATCH_SIZE, train_label_sbbox_input_size,\n train_label_sbbox_input_size, cfg.YOLO.ANCHOR_PER_SCALE, cfg.YOLO.\n CLASS_NUM + 5))\n', (709, 853), True, 'import oneflow.typing as tp\n'), ((905, 1069), 'oneflow.typing.Numpy.Placeholder', 'tp.Numpy.Placeholder', (['(cfg.TRAIN.BATCH_SIZE, train_label_lbbox_input_size,\n train_label_lbbox_input_size, cfg.YOLO.ANCHOR_PER_SCALE, cfg.YOLO.\n CLASS_NUM + 5)'], {}), '((cfg.TRAIN.BATCH_SIZE, train_label_lbbox_input_size,\n train_label_lbbox_input_size, cfg.YOLO.ANCHOR_PER_SCALE, cfg.YOLO.\n CLASS_NUM + 5))\n', (925, 1069), True, 'import oneflow.typing as tp\n'), ((1120, 1197), 'oneflow.typing.Numpy.Placeholder', 'tp.Numpy.Placeholder', (['(cfg.TRAIN.BATCH_SIZE, cfg.TRAIN.MAX_BBOX_PER_SCALE, 4)'], {}), '((cfg.TRAIN.BATCH_SIZE, cfg.TRAIN.MAX_BBOX_PER_SCALE, 4))\n', (1140, 1197), True, 'import oneflow.typing as tp\n'), ((1217, 1294), 'oneflow.typing.Numpy.Placeholder', 'tp.Numpy.Placeholder', (['(cfg.TRAIN.BATCH_SIZE, cfg.TRAIN.MAX_BBOX_PER_SCALE, 4)'], {}), '((cfg.TRAIN.BATCH_SIZE, cfg.TRAIN.MAX_BBOX_PER_SCALE, 4))\n', (1237, 1294), True, 'import oneflow.typing as tp\n'), ((1307, 1359), 
'oneflow.typing.Numpy.Placeholder', 'tp.Numpy.Placeholder', (['(cfg.YOLO.ANCHOR_PER_SCALE, 2)'], {}), '((cfg.YOLO.ANCHOR_PER_SCALE, 2))\n', (1327, 1359), True, 'import oneflow.typing as tp\n'), ((1372, 1424), 'oneflow.typing.Numpy.Placeholder', 'tp.Numpy.Placeholder', (['(cfg.YOLO.ANCHOR_PER_SCALE, 2)'], {}), '((cfg.YOLO.ANCHOR_PER_SCALE, 2))\n', (1392, 1424), True, 'import oneflow.typing as tp\n'), ((1440, 1461), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (1459, 1461), True, 'import oneflow as flow\n'), ((1470, 1502), 'yolov3_tiny.Yolov3_tiny', 'Yolov3_tiny', (['cfg'], {'trainable': '(True)'}), '(cfg, trainable=True)\n', (1481, 1502), False, 'from yolov3_tiny import Yolov3_tiny\n'), ((1505, 1568), 'oneflow.global_function', 'flow.global_function', ([], {'type': '"""train"""', 'function_config': 'func_config'}), "(type='train', function_config=func_config)\n", (1525, 1568), True, 'import oneflow as flow\n'), ((2106, 2175), 'oneflow.optimizer.warmup.linear', 'flow.optimizer.warmup.linear', (['wramup_steps', 'cfg.TRAIN.LEARN_RATE_INIT'], {}), '(wramup_steps, cfg.TRAIN.LEARN_RATE_INIT)\n', (2134, 2175), True, 'import oneflow as flow\n'), ((2286, 2407), 'oneflow.optimizer.CosineScheduler', 'flow.optimizer.CosineScheduler', ([], {'base_lr': 'cfg.TRAIN.LEARN_RATE_INIT', 'steps': 'end_steps', 'alpha': '(0)', 'warmup': 'warmup_scheduler'}), '(base_lr=cfg.TRAIN.LEARN_RATE_INIT, steps=\n end_steps, alpha=0, warmup=warmup_scheduler)\n', (2316, 2407), True, 'import oneflow as flow\n'), ((2646, 2669), 'oneflow.train.CheckPoint', 'flow.train.CheckPoint', ([], {}), '()\n', (2667, 2669), True, 'import oneflow as flow\n'), ((3241, 3252), 'time.time', 'time.time', ([], {}), '()\n', (3250, 3252), False, 'import time\n'), ((2407, 2440), 'oneflow.optimizer.Adam', 'flow.optimizer.Adam', (['lr_scheduler'], {}), '(lr_scheduler)\n', (2426, 2440), True, 'import oneflow as flow\n'), ((3739, 3750), 'time.time', 'time.time', ([], {}), '()\n', (3748, 3750), False, 
'import time\n'), ((3913, 3972), 'os.path.join', 'os.path.join', (['cfg.TRAIN.SAVE_MODEL_PATH', '"""yolov3_snapshot_"""'], {}), "(cfg.TRAIN.SAVE_MODEL_PATH, 'yolov3_snapshot_')\n", (3925, 3972), False, 'import os\n'), ((3508, 3519), 'time.time', 'time.time', ([], {}), '()\n', (3517, 3519), False, 'import time\n'), ((3569, 3586), 'numpy.abs', 'np.abs', (['giou_loss'], {}), '(giou_loss)\n', (3575, 3586), True, 'import numpy as np\n'), ((3595, 3612), 'numpy.abs', 'np.abs', (['conf_loss'], {}), '(conf_loss)\n', (3601, 3612), True, 'import numpy as np\n'), ((3658, 3675), 'numpy.abs', 'np.abs', (['prob_loss'], {}), '(prob_loss)\n', (3664, 3675), True, 'import numpy as np\n'), ((3684, 3702), 'numpy.abs', 'np.abs', (['total_loss'], {}), '(total_loss)\n', (3690, 3702), True, 'import numpy as np\n')] |
from utils.dataset import *
from utils.utils_oneflow import *
from models.GRU_oneflow import *
import oneflow.experimental.F as F
class EncoderRNN_oneflow(nn.Module):
def __init__(self, input_size, hidden_size):
super().__init__()
self.hidden_size = hidden_size
self.embedding = nn.Embedding(
num_embeddings=input_size, embedding_dim=hidden_size
)
self.gru = GRU_oneflow(input_size=hidden_size, hidden_size=hidden_size)
def forward(self, input, hidden):
embedded = self.embedding(input).reshape([1, 1, -1])
output = embedded
output, hidden = self.gru(output, hidden)
return output, hidden
def init_Hidden(self):
return flow.zeros((1, self.hidden_size))
class AttnDecoderRNN_oneflow(nn.Module):
def __init__(self, hidden_size, output_size, dropout_p=0.1, max_length=MAX_LENGTH):
super().__init__()
self.hidden_size = hidden_size
self.output_size = output_size
self.dropout_p = dropout_p
self.max_length = max_length
self.embedding = nn.Embedding(self.output_size, self.hidden_size)
self.attn = nn.Linear(self.hidden_size * 2, self.max_length)
self.attn_combine = nn.Linear(self.hidden_size * 2, self.hidden_size)
self.dropout = nn.Dropout(self.dropout_p)
self.gru = GRU_oneflow(self.hidden_size, self.hidden_size)
self.out = nn.Linear(self.hidden_size, self.output_size)
self.logsoftmax = flow.nn.LogSoftmax(dim=1)
def forward(self, input, hidden, encoder_outputs):
embedded = self.embedding(input).reshape([1, 1, -1])
embedded = self.dropout(embedded)
attn_weights = F.softmax(self.attn(flow.cat((embedded[0], hidden), -1)))
attn_applied = flow.matmul(
attn_weights.unsqueeze(0), encoder_outputs.unsqueeze(0)
)
output = flow.cat((embedded[0], attn_applied[0]), 1)
output = self.attn_combine(output).unsqueeze(0)
output = F.relu(output)
output, hidden = self.gru(output, hidden)
output = self.logsoftmax(self.out(output[0]))
return output, hidden, attn_weights
def init_Hidden(self):
return flow.zeros([1, self.hidden_size])
| [
"oneflow.experimental.F.relu"
] | [((2016, 2030), 'oneflow.experimental.F.relu', 'F.relu', (['output'], {}), '(output)\n', (2022, 2030), True, 'import oneflow.experimental.F as F\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import os
from collections import OrderedDict
import numpy as np
import oneflow as flow
import tensorflow as tf
import test_global_storage
from test_util import (
GenArgDict,
GenArgList,
type_name_to_flow_type,
type_name_to_np_type,
)
import oneflow.typing as oft
gpus = tf.config.experimental.list_physical_devices("GPU")
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
def RunOneflowOp(device_type, flow_op, x, y, data_type):
flow.clear_default_session()
func_config = flow.FunctionConfig()
func_config.default_data_type(flow.float)
flow_type = type_name_to_flow_type[data_type]
@flow.global_function(type="train", function_config=func_config)
def FlowJob(
x: oft.Numpy.Placeholder(x.shape, dtype=flow_type),
y: oft.Numpy.Placeholder(y.shape, dtype=flow_type),
):
with flow.scope.placement(device_type, "0:0"):
x += flow.get_variable(
name="x",
shape=x.shape,
dtype=flow_type,
initializer=flow.zeros_initializer(),
trainable=True,
)
y += flow.get_variable(
name="y",
shape=y.shape,
dtype=flow_type,
initializer=flow.zeros_initializer(),
trainable=True,
)
loss = flow_op(x, y)
flow.optimizer.SGD(
flow.optimizer.PiecewiseConstantScheduler([], [1e-4]), momentum=0
).minimize(loss)
flow.watch_diff(x, test_global_storage.Setter("x_diff"))
flow.watch_diff(y, test_global_storage.Setter("y_diff"))
return loss
# Oneflow
check_point = flow.train.CheckPoint()
check_point.init()
out = FlowJob(x, y).get().numpy()
x_diff = test_global_storage.Get("x_diff")
y_diff = test_global_storage.Get("y_diff")
return out, x_diff, y_diff
def RunTensorFlowOp(tf_op, x, y):
# TensorFlow
with tf.GradientTape(persistent=True) as tape:
x = tf.Variable(x)
y = tf.Variable(y)
out = tf_op(x, y)
x_diff = tape.gradient(out, x)
y_diff = tape.gradient(out, y)
return out.numpy(), x_diff.numpy(), y_diff.numpy()
def compare_with_tensorflow_grad(
device_type,
flow_op,
tf_op,
x_shape,
y_shape,
data_type,
input_minval=-10,
input_maxval=10,
out_rtol=1e-5,
out_atol=1e-5,
diff_rtol=1e-5,
diff_atol=1e-5,
):
assert device_type in ["gpu", "cpu"]
np_type = type_name_to_np_type[data_type]
x = np.random.uniform(low=input_minval, high=input_maxval, size=x_shape).astype(
np_type
)
y = np.random.uniform(low=input_minval, high=input_maxval, size=y_shape).astype(
np_type
)
if flow_op in (flow.math.divide, flow.math.mod):
y[np.where(y == 0)] += 1
of_out, of_x_diff, of_y_diff, = RunOneflowOp(device_type, flow_op, x, y, data_type)
tf_out, tf_x_diff, tf_y_diff = RunTensorFlowOp(tf_op, x, y)
assert np.allclose(of_out, tf_out, rtol=out_rtol, atol=out_atol, equal_nan=True)
assert np.allclose(
of_x_diff, tf_x_diff, rtol=diff_rtol, atol=diff_atol, equal_nan=True
)
assert np.allclose(
of_y_diff, tf_y_diff, rtol=diff_rtol, atol=diff_atol, equal_nan=True
)
flow.clear_default_session()
def compare_with_tensorflow(
device_type,
flow_op,
tf_op,
x_shape,
y_shape,
data_type,
input_minval=-10,
input_maxval=10,
out_rtol=1e-5,
out_atol=1e-5,
):
assert device_type in ["gpu", "cpu"]
flow.clear_default_session()
func_config = flow.FunctionConfig()
func_config.default_data_type(flow.float)
flow_type = type_name_to_flow_type[data_type]
@flow.global_function(function_config=func_config)
def FlowJob(
x: oft.Numpy.Placeholder(x_shape, dtype=flow_type),
y: oft.Numpy.Placeholder(y_shape, dtype=flow_type),
):
with flow.scope.placement(device_type, "0:0"):
return flow_op(x, y)
np_type = type_name_to_np_type[data_type]
if np_type in (np.int8, np.int32, np.int64):
x = np.random.randint(low=input_minval, high=input_maxval, size=x_shape).astype(
np_type
)
y = np.random.randint(low=input_minval, high=input_maxval, size=y_shape).astype(
np_type
)
else:
x = np.random.uniform(low=input_minval, high=input_maxval, size=x_shape).astype(
np_type
)
y = np.random.uniform(low=input_minval, high=input_maxval, size=y_shape).astype(
np_type
)
if flow_op in (flow.math.divide, flow.math.mod):
y[np.where(y == 0)] += 1
# Oneflow
of_out = FlowJob(x, y).get().numpy()
# Tensorflow
tf_out = tf_op(x, y).numpy()
assert np.allclose(of_out, tf_out, rtol=out_rtol, atol=out_atol, equal_nan=True)
flow.clear_default_session()
@flow.unittest.skip_unless_1n1d()
class TestBroadcastNormal(flow.unittest.TestCase):
def test_broadcast_add(test_case):
arg_dict = OrderedDict()
arg_dict["device_type"] = ["cpu", "gpu"]
arg_dict["flow_op"] = [flow.math.add]
arg_dict["tf_op"] = [tf.math.add]
arg_dict["x_shape"] = [(3, 1, 4, 1)]
arg_dict["y_shape"] = [(4, 1, 6)]
arg_dict["data_type"] = ["float32", "double"]
for arg in GenArgList(arg_dict):
compare_with_tensorflow_grad(*arg)
def test_broadcast_sub(test_case):
arg_dict = OrderedDict()
arg_dict["device_type"] = ["cpu", "gpu"]
arg_dict["flow_op"] = [flow.math.subtract]
arg_dict["tf_op"] = [tf.math.subtract]
arg_dict["x_shape"] = [(3, 1, 4, 1)]
arg_dict["y_shape"] = [(4, 1, 6)]
arg_dict["data_type"] = ["float32", "double"]
for arg in GenArgList(arg_dict):
compare_with_tensorflow(*arg)
def test_broadcast_mul(test_case):
arg_dict = OrderedDict()
arg_dict["device_type"] = ["gpu", "cpu"]
arg_dict["flow_op"] = [flow.math.multiply]
arg_dict["tf_op"] = [tf.math.multiply]
arg_dict["x_shape"] = [(3, 1, 4, 5, 1)]
arg_dict["y_shape"] = [(1, 4, 1, 1, 5)]
arg_dict["data_type"] = ["float32", "double"]
for arg in GenArgList(arg_dict):
compare_with_tensorflow_grad(*arg)
def test_broadcast_div(test_case):
arg_dict = OrderedDict()
arg_dict["device_type"] = ["cpu", "gpu"]
arg_dict["flow_op"] = [flow.math.divide]
arg_dict["tf_op"] = [tf.math.divide]
arg_dict["x_shape"] = [(3, 1, 4, 5, 1)]
arg_dict["y_shape"] = [(3, 4, 1, 1, 5)]
arg_dict["data_type"] = ["float32", "double"]
for arg in GenArgList(arg_dict):
compare_with_tensorflow_grad(*arg)
def test_broadcast_floormod(test_case):
arg_dict = OrderedDict()
arg_dict["device_type"] = ["gpu"]
arg_dict["flow_op"] = [flow.math.mod]
arg_dict["tf_op"] = [tf.math.floormod]
arg_dict["x_shape"] = [(3, 1, 4, 5, 1)]
arg_dict["y_shape"] = [(1, 4, 1, 1, 5)]
arg_dict["data_type"] = ["float32", "double", "int32", "int64"]
for arg in GenArgList(arg_dict):
compare_with_tensorflow(*arg)
def test_broadcast_maximum(test_case):
arg_dict = OrderedDict()
arg_dict["device_type"] = ["cpu", "gpu"]
arg_dict["flow_op"] = [flow.math.maximum]
arg_dict["tf_op"] = [tf.math.maximum]
arg_dict["x_shape"] = [(3, 1, 4, 5, 1)]
arg_dict["y_shape"] = [(1, 4, 1, 1, 5)]
arg_dict["data_type"] = ["float32", "double", "int32", "int64"]
for arg in GenArgList(arg_dict):
compare_with_tensorflow(*arg)
def test_broadcast_minimum(test_case):
arg_dict = OrderedDict()
arg_dict["device_type"] = ["cpu", "gpu"]
arg_dict["flow_op"] = [flow.math.minimum]
arg_dict["tf_op"] = [tf.math.minimum]
arg_dict["x_shape"] = [(3, 1, 4, 5, 1)]
arg_dict["y_shape"] = [(1, 4, 1, 1, 5)]
arg_dict["data_type"] = ["float32", "double", "int32", "int64"]
for arg in GenArgList(arg_dict):
compare_with_tensorflow(*arg)
if __name__ == "__main__":
unittest.main()
| [
"oneflow.typing.Numpy.Placeholder",
"oneflow.clear_default_session",
"oneflow.unittest.skip_unless_1n1d",
"oneflow.optimizer.PiecewiseConstantScheduler",
"oneflow.FunctionConfig",
"oneflow.zeros_initializer",
"oneflow.global_function",
"oneflow.scope.placement",
"oneflow.train.CheckPoint"
] | [((895, 946), 'tensorflow.config.experimental.list_physical_devices', 'tf.config.experimental.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (939, 946), True, 'import tensorflow as tf\n'), ((5568, 5600), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (5598, 5600), True, 'import oneflow as flow\n'), ((968, 1019), 'tensorflow.config.experimental.set_memory_growth', 'tf.config.experimental.set_memory_growth', (['gpu', '(True)'], {}), '(gpu, True)\n', (1008, 1019), True, 'import tensorflow as tf\n'), ((1083, 1111), 'oneflow.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (1109, 1111), True, 'import oneflow as flow\n'), ((1130, 1151), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (1149, 1151), True, 'import oneflow as flow\n'), ((1255, 1318), 'oneflow.global_function', 'flow.global_function', ([], {'type': '"""train"""', 'function_config': 'func_config'}), "(type='train', function_config=func_config)\n", (1275, 1318), True, 'import oneflow as flow\n'), ((2342, 2365), 'oneflow.train.CheckPoint', 'flow.train.CheckPoint', ([], {}), '()\n', (2363, 2365), True, 'import oneflow as flow\n'), ((2440, 2473), 'test_global_storage.Get', 'test_global_storage.Get', (['"""x_diff"""'], {}), "('x_diff')\n", (2463, 2473), False, 'import test_global_storage\n'), ((2487, 2520), 'test_global_storage.Get', 'test_global_storage.Get', (['"""y_diff"""'], {}), "('y_diff')\n", (2510, 2520), False, 'import test_global_storage\n'), ((3656, 3729), 'numpy.allclose', 'np.allclose', (['of_out', 'tf_out'], {'rtol': 'out_rtol', 'atol': 'out_atol', 'equal_nan': '(True)'}), '(of_out, tf_out, rtol=out_rtol, atol=out_atol, equal_nan=True)\n', (3667, 3729), True, 'import numpy as np\n'), ((3741, 3827), 'numpy.allclose', 'np.allclose', (['of_x_diff', 'tf_x_diff'], {'rtol': 'diff_rtol', 'atol': 'diff_atol', 'equal_nan': '(True)'}), '(of_x_diff, tf_x_diff, rtol=diff_rtol, atol=diff_atol, equal_nan\n =True)\n', 
(3752, 3827), True, 'import numpy as np\n'), ((3848, 3934), 'numpy.allclose', 'np.allclose', (['of_y_diff', 'tf_y_diff'], {'rtol': 'diff_rtol', 'atol': 'diff_atol', 'equal_nan': '(True)'}), '(of_y_diff, tf_y_diff, rtol=diff_rtol, atol=diff_atol, equal_nan\n =True)\n', (3859, 3934), True, 'import numpy as np\n'), ((3948, 3976), 'oneflow.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (3974, 3976), True, 'import oneflow as flow\n'), ((4219, 4247), 'oneflow.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (4245, 4247), True, 'import oneflow as flow\n'), ((4266, 4287), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (4285, 4287), True, 'import oneflow as flow\n'), ((4391, 4440), 'oneflow.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (4411, 4440), True, 'import oneflow as flow\n'), ((5458, 5531), 'numpy.allclose', 'np.allclose', (['of_out', 'tf_out'], {'rtol': 'out_rtol', 'atol': 'out_atol', 'equal_nan': '(True)'}), '(of_out, tf_out, rtol=out_rtol, atol=out_atol, equal_nan=True)\n', (5469, 5531), True, 'import numpy as np\n'), ((5536, 5564), 'oneflow.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (5562, 5564), True, 'import oneflow as flow\n'), ((8889, 8904), 'unittest.main', 'unittest.main', ([], {}), '()\n', (8902, 8904), False, 'import unittest\n'), ((2614, 2646), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {'persistent': '(True)'}), '(persistent=True)\n', (2629, 2646), True, 'import tensorflow as tf\n'), ((2668, 2682), 'tensorflow.Variable', 'tf.Variable', (['x'], {}), '(x)\n', (2679, 2682), True, 'import tensorflow as tf\n'), ((2695, 2709), 'tensorflow.Variable', 'tf.Variable', (['y'], {}), '(y)\n', (2706, 2709), True, 'import tensorflow as tf\n'), ((5710, 5723), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (5721, 5723), False, 'from collections import OrderedDict\n'), ((6021, 
6041), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (6031, 6041), False, 'from test_util import GenArgDict, GenArgList, type_name_to_flow_type, type_name_to_np_type\n'), ((6149, 6162), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (6160, 6162), False, 'from collections import OrderedDict\n'), ((6470, 6490), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (6480, 6490), False, 'from test_util import GenArgDict, GenArgList, type_name_to_flow_type, type_name_to_np_type\n'), ((6593, 6606), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (6604, 6606), False, 'from collections import OrderedDict\n'), ((6923, 6943), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (6933, 6943), False, 'from test_util import GenArgDict, GenArgList, type_name_to_flow_type, type_name_to_np_type\n'), ((7051, 7064), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (7062, 7064), False, 'from collections import OrderedDict\n'), ((7377, 7397), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (7387, 7397), False, 'from test_util import GenArgDict, GenArgList, type_name_to_flow_type, type_name_to_np_type\n'), ((7510, 7523), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (7521, 7523), False, 'from collections import OrderedDict\n'), ((7846, 7866), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (7856, 7866), False, 'from test_util import GenArgDict, GenArgList, type_name_to_flow_type, type_name_to_np_type\n'), ((7973, 7986), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (7984, 7986), False, 'from collections import OrderedDict\n'), ((8319, 8339), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (8329, 8339), False, 'from test_util import GenArgDict, GenArgList, type_name_to_flow_type, type_name_to_np_type\n'), ((8446, 8459), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', 
(8457, 8459), False, 'from collections import OrderedDict\n'), ((8792, 8812), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (8802, 8812), False, 'from test_util import GenArgDict, GenArgList, type_name_to_flow_type, type_name_to_np_type\n'), ((1347, 1394), 'oneflow.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['x.shape'], {'dtype': 'flow_type'}), '(x.shape, dtype=flow_type)\n', (1368, 1394), True, 'import oneflow.typing as oft\n'), ((1407, 1454), 'oneflow.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['y.shape'], {'dtype': 'flow_type'}), '(y.shape, dtype=flow_type)\n', (1428, 1454), True, 'import oneflow.typing as oft\n'), ((1476, 1516), 'oneflow.scope.placement', 'flow.scope.placement', (['device_type', '"""0:0"""'], {}), "(device_type, '0:0')\n", (1496, 1516), True, 'import oneflow as flow\n'), ((3199, 3267), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': 'input_minval', 'high': 'input_maxval', 'size': 'x_shape'}), '(low=input_minval, high=input_maxval, size=x_shape)\n', (3216, 3267), True, 'import numpy as np\n'), ((3306, 3374), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': 'input_minval', 'high': 'input_maxval', 'size': 'y_shape'}), '(low=input_minval, high=input_maxval, size=y_shape)\n', (3323, 3374), True, 'import numpy as np\n'), ((3468, 3484), 'numpy.where', 'np.where', (['(y == 0)'], {}), '(y == 0)\n', (3476, 3484), True, 'import numpy as np\n'), ((4469, 4516), 'oneflow.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['x_shape'], {'dtype': 'flow_type'}), '(x_shape, dtype=flow_type)\n', (4490, 4516), True, 'import oneflow.typing as oft\n'), ((4529, 4576), 'oneflow.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['y_shape'], {'dtype': 'flow_type'}), '(y_shape, dtype=flow_type)\n', (4550, 4576), True, 'import oneflow.typing as oft\n'), ((4598, 4638), 'oneflow.scope.placement', 'flow.scope.placement', (['device_type', '"""0:0"""'], {}), "(device_type, '0:0')\n", (4618, 4638), True, 
'import oneflow as flow\n'), ((5318, 5334), 'numpy.where', 'np.where', (['(y == 0)'], {}), '(y == 0)\n', (5326, 5334), True, 'import numpy as np\n'), ((2177, 2213), 'test_global_storage.Setter', 'test_global_storage.Setter', (['"""x_diff"""'], {}), "('x_diff')\n", (2203, 2213), False, 'import test_global_storage\n'), ((2246, 2282), 'test_global_storage.Setter', 'test_global_storage.Setter', (['"""y_diff"""'], {}), "('y_diff')\n", (2272, 2282), False, 'import test_global_storage\n'), ((4781, 4849), 'numpy.random.randint', 'np.random.randint', ([], {'low': 'input_minval', 'high': 'input_maxval', 'size': 'x_shape'}), '(low=input_minval, high=input_maxval, size=x_shape)\n', (4798, 4849), True, 'import numpy as np\n'), ((4900, 4968), 'numpy.random.randint', 'np.random.randint', ([], {'low': 'input_minval', 'high': 'input_maxval', 'size': 'y_shape'}), '(low=input_minval, high=input_maxval, size=y_shape)\n', (4917, 4968), True, 'import numpy as np\n'), ((5029, 5097), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': 'input_minval', 'high': 'input_maxval', 'size': 'x_shape'}), '(low=input_minval, high=input_maxval, size=x_shape)\n', (5046, 5097), True, 'import numpy as np\n'), ((5148, 5216), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': 'input_minval', 'high': 'input_maxval', 'size': 'y_shape'}), '(low=input_minval, high=input_maxval, size=y_shape)\n', (5165, 5216), True, 'import numpy as np\n'), ((1672, 1696), 'oneflow.zeros_initializer', 'flow.zeros_initializer', ([], {}), '()\n', (1694, 1696), True, 'import oneflow as flow\n'), ((1898, 1922), 'oneflow.zeros_initializer', 'flow.zeros_initializer', ([], {}), '()\n', (1920, 1922), True, 'import oneflow as flow\n'), ((2051, 2106), 'oneflow.optimizer.PiecewiseConstantScheduler', 'flow.optimizer.PiecewiseConstantScheduler', (['[]', '[0.0001]'], {}), '([], [0.0001])\n', (2092, 2106), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import math
import numpy as np
from oneflow.test_utils.automated_test_util import *
from oneflow.nn.modules import min_max_observer
from test_util import GenArgList
from test_util import GenArgList, type_name_to_flow_type, type_name_to_np_type
import oneflow as flow
import oneflow.unittest
def gen_quant_scale_for_min_max_symmetric(weight, quantization_bit):
weight_max = np.max(np.abs(weight))
denominator = 2.0 ** (quantization_bit - 1) - 1
return (weight_max / denominator, 0)
def gen_quant_scale_for_min_max_affine(weight, quantization_bit):
weight_max = np.max(weight)
weight_min = np.min(weight)
denominator = 2.0 ** quantization_bit - 1
scale = (weight_max - weight_min) / denominator
zero_point = -np.round(weight_min / scale)
return (scale, zero_point)
def gen_quant_scale_for_min_max_cambricon(weight, quantization_bit):
weight_max = np.max(np.abs(weight))
scale = math.floor(math.log2(weight_max)) - (quantization_bit - 2)
return (scale, 0)
def product(tu):
return np.prod(tu).astype(np.int).item()
def _check_min_max_observer(
test_case,
weight,
scale_of,
zero_point_of,
quantization_bit,
quantization_scheme,
quantization_formula,
per_layer_quantization,
):
if per_layer_quantization or quantization_formula == "cambricon":
outer_num = 1
inner_num = product(weight.shape[0:])
else:
outer_num = weight.shape[0]
inner_num = product(weight.shape[1:])
scale_np = np.zeros((outer_num,))
zero_point_np = np.zeros((outer_num,))
weight_flatten = weight.flatten()
if quantization_formula == "google":
if quantization_scheme == "symmetric":
for c in range(outer_num):
(scale_np[c], zero_point_np[c]) = gen_quant_scale_for_min_max_symmetric(
weight_flatten[c * inner_num : (c + 1) * inner_num],
quantization_bit,
)
else:
for c in range(outer_num):
(scale_np[c], zero_point_np[c]) = gen_quant_scale_for_min_max_affine(
weight_flatten[c * inner_num : (c + 1) * inner_num],
quantization_bit,
)
else:
(scale_np[0], zero_point_np[0]) = gen_quant_scale_for_min_max_cambricon(
weight_flatten, quantization_bit
)
test_case.assertTrue(np.allclose(scale_of, scale_np, rtol=0.001))
rmse = np.sqrt(np.mean((zero_point_of - zero_point_np) ** 2))
assert rmse <= 1.0, "min_max_observer op zero_point calculate has bug!"
def _run_test_min_max_observer(
test_case,
device_type,
weight_shape,
quantization_bit,
quantization_scheme,
quantization_formula,
per_layer_quantization,
):
weight = (np.random.random(weight_shape) - 0.5).astype(np.float32)
tensor_weight = flow.tensor(
weight, device=flow.device(device_type), dtype=flow.float32
)
min_max_observer = flow.nn.MinMaxObserver(
quantization_formula=quantization_formula,
quantization_bit=quantization_bit,
quantization_scheme=quantization_scheme,
per_layer_quantization=per_layer_quantization,
)
scale, zero_point = min_max_observer(tensor_weight)
_check_min_max_observer(
test_case,
weight,
scale.numpy(),
zero_point.numpy(),
quantization_bit,
quantization_scheme,
quantization_formula,
per_layer_quantization,
)
class TestMinMaxObserver(flow.unittest.TestCase):
def test_min_max_observer(test_case):
arg_dict = OrderedDict()
arg_dict["test_case"] = [test_case]
arg_dict["device_type"] = ["cpu", "cuda"]
arg_dict["weight_shape"] = [(9, 40, 20, 10)]
arg_dict["quantization_bit"] = [8, 2]
arg_dict["quantization_scheme"] = ["symmetric", "affine"]
arg_dict["quantization_formula"] = ["google"]
arg_dict["per_layer_quantization"] = [True, False]
for arg in GenArgList(arg_dict):
if arg[-2] == "cambricon" and arg[-1] == False:
continue
_run_test_min_max_observer(*arg)
if __name__ == "__main__":
unittest.main()
| [
"oneflow.nn.modules.min_max_observer",
"oneflow.device",
"oneflow.nn.MinMaxObserver"
] | [((1226, 1240), 'numpy.max', 'np.max', (['weight'], {}), '(weight)\n', (1232, 1240), True, 'import numpy as np\n'), ((1258, 1272), 'numpy.min', 'np.min', (['weight'], {}), '(weight)\n', (1264, 1272), True, 'import numpy as np\n'), ((2157, 2179), 'numpy.zeros', 'np.zeros', (['(outer_num,)'], {}), '((outer_num,))\n', (2165, 2179), True, 'import numpy as np\n'), ((2200, 2222), 'numpy.zeros', 'np.zeros', (['(outer_num,)'], {}), '((outer_num,))\n', (2208, 2222), True, 'import numpy as np\n'), ((3622, 3819), 'oneflow.nn.MinMaxObserver', 'flow.nn.MinMaxObserver', ([], {'quantization_formula': 'quantization_formula', 'quantization_bit': 'quantization_bit', 'quantization_scheme': 'quantization_scheme', 'per_layer_quantization': 'per_layer_quantization'}), '(quantization_formula=quantization_formula,\n quantization_bit=quantization_bit, quantization_scheme=\n quantization_scheme, per_layer_quantization=per_layer_quantization)\n', (3644, 3819), True, 'import oneflow as flow\n'), ((3874, 3905), 'oneflow.nn.modules.min_max_observer', 'min_max_observer', (['tensor_weight'], {}), '(tensor_weight)\n', (3890, 3905), False, 'from oneflow.nn.modules import min_max_observer\n'), ((4847, 4862), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4860, 4862), False, 'import unittest\n'), ((1032, 1046), 'numpy.abs', 'np.abs', (['weight'], {}), '(weight)\n', (1038, 1046), True, 'import numpy as np\n'), ((1389, 1417), 'numpy.round', 'np.round', (['(weight_min / scale)'], {}), '(weight_min / scale)\n', (1397, 1417), True, 'import numpy as np\n'), ((1544, 1558), 'numpy.abs', 'np.abs', (['weight'], {}), '(weight)\n', (1550, 1558), True, 'import numpy as np\n'), ((3045, 3088), 'numpy.allclose', 'np.allclose', (['scale_of', 'scale_np'], {'rtol': '(0.001)'}), '(scale_of, scale_np, rtol=0.001)\n', (3056, 3088), True, 'import numpy as np\n'), ((3110, 3155), 'numpy.mean', 'np.mean', (['((zero_point_of - zero_point_np) ** 2)'], {}), '((zero_point_of - zero_point_np) ** 2)\n', (3117, 3155), 
True, 'import numpy as np\n'), ((4257, 4270), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (4268, 4270), False, 'from collections import OrderedDict\n'), ((4662, 4682), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (4672, 4682), False, 'from test_util import GenArgList, type_name_to_flow_type, type_name_to_np_type\n'), ((1583, 1604), 'math.log2', 'math.log2', (['weight_max'], {}), '(weight_max)\n', (1592, 1604), False, 'import math\n'), ((3548, 3572), 'oneflow.device', 'flow.device', (['device_type'], {}), '(device_type)\n', (3559, 3572), True, 'import oneflow as flow\n'), ((3435, 3465), 'numpy.random.random', 'np.random.random', (['weight_shape'], {}), '(weight_shape)\n', (3451, 3465), True, 'import numpy as np\n'), ((1683, 1694), 'numpy.prod', 'np.prod', (['tu'], {}), '(tu)\n', (1690, 1694), True, 'import numpy as np\n')] |
"""
Modified from https://github.com/pytorch/vision/blob/main/torchvision/utils.py
"""
from typing import Union, Optional, List, Tuple, Text, BinaryIO
import pathlib
import oneflow as flow
import math
irange = range
def make_grid(
    tensor: Union[flow.Tensor, List[flow.Tensor]],
    nrow: int = 8,
    padding: int = 2,
    normalize: bool = False,
    range: Optional[Tuple[int, int]] = None,
    scale_each: bool = False,
    pad_value: int = 0,
) -> flow.Tensor:
    """Make a grid of images.
    Args:
        tensor (Tensor or list): 4D mini-batch Tensor of shape (B x C x H x W)
            or a list of images all of the same size.
        nrow (int, optional): Number of images displayed in each row of the grid.
            The final grid size is ``(B / nrow, nrow)``. Default: ``8``.
        padding (int, optional): amount of padding. Default: ``2``.
        normalize (bool, optional): If True, shift the image to the range (0, 1),
            by the min and max values specified by :attr:`range`. Default: ``False``.
        range (tuple, optional): tuple (min, max) where min and max are numbers,
            then these numbers are used to normalize the image. By default, min and max
            are computed from the tensor.
        scale_each (bool, optional): If ``True``, scale each image in the batch of
            images separately rather than the (min, max) over all images. Default: ``False``.
        pad_value (float, optional): Value for the padded pixels. Default: ``0``.
    Example:
        See this notebook `here <https://gist.github.com/anonymous/bf16430f7750c023141c562f3e9f2a91>`_
    """
    # NOTE: the ``range`` parameter shadows the builtin of the same name, so the
    # module-level alias ``irange`` is used for iteration inside this function.
    if not (
        isinstance(tensor, flow.Tensor)
        or (
            isinstance(tensor, list) and all(isinstance(t, flow.Tensor) for t in tensor)
        )
    ):
        raise TypeError(
            "tensor or list of tensors expected, got {}".format(type(tensor))
        )
    # if list of tensors, convert to a 4D mini-batch Tensor
    if isinstance(tensor, list):
        tensor = flow.stack(tensor, dim=0)
    if tensor.dim() == 2:  # single image H x W: add a channel dimension
        tensor = tensor.unsqueeze(0)
    if tensor.dim() == 3:  # single image C x H x W
        if tensor.size(0) == 1:  # if single-channel, convert to 3-channel
            tensor = flow.cat([tensor, tensor, tensor], 0)
        tensor = tensor.unsqueeze(0)
    if tensor.dim() == 4 and tensor.size(1) == 1:  # single-channel images
        tensor = flow.cat([tensor, tensor, tensor], 1)
    if normalize is True:
        tensor = tensor.clone()  # avoid modifying tensor in-place
        if range is not None:
            assert isinstance(
                range, tuple
            ), "range has to be a tuple (min, max) if specified. min and max are numbers"
        def norm_ip(img, min, max):
            # Clamp to [min, max], then rescale to [0, 1]; the epsilon in the
            # denominator guards against division by zero when min == max.
            img = img.clamp(min=min, max=max)
            img = (img - min) / (max - min + 1e-5)
            return img
        def norm_range(t, range):
            # Use the explicit (min, max) when given, otherwise the tensor's own extrema.
            if range is not None:
                img = norm_ip(t, range[0], range[1])
            else:
                img = norm_ip(t, float(t.min().item()), float(t.max().item()))
            return img
        if scale_each is True:
            bs = tensor.shape[0]  # loop over mini-batch dimension
            for t in irange(bs):
                tensor[t] = norm_range(tensor[t], range)
        else:
            tensor = norm_range(tensor, range)
    # Single image: no grid to build, return it directly (as C x H x W).
    if tensor.size(0) == 1:
        return tensor.squeeze(0)
    # make the mini-batch of images into a grid
    nmaps = tensor.size(0)
    xmaps = min(nrow, nmaps)
    ymaps = int(math.ceil(float(nmaps) / xmaps))
    height, width = int(tensor.size(2) + padding), int(tensor.size(3) + padding)
    num_channels = tensor.size(1)
    # Background canvas filled with pad_value; each image is pasted into it below.
    grid = (
        flow.zeros((num_channels, height * ymaps + padding, width * xmaps + padding))
        + pad_value
    )
    k = 0
    for y in irange(ymaps):
        for x in irange(xmaps):
            if k >= nmaps:
                break
            # Copy image k into cell (y, x), offset by the padding border.
            grid[
                :,
                y * height + padding : (y + 1) * height,
                x * width + padding : (x + 1) * width,
            ] = tensor[k]
            k = k + 1
    return grid
def save_image(
    tensor: Union[flow.Tensor, List[flow.Tensor]],
    fp: Union[Text, pathlib.Path, BinaryIO],
    nrow: int = 8,
    padding: int = 2,
    normalize: bool = False,
    range: Optional[Tuple[int, int]] = None,
    scale_each: bool = False,
    pad_value: int = 0,
    format: Optional[str] = None,
) -> None:
    """Write ``tensor`` to an image file.

    A mini-batch is first arranged into a single grid image via ``make_grid``;
    the grid-layout keyword arguments are forwarded to it unchanged.

    Args:
        tensor (Tensor or list): The image (or batch of images) to save.
        fp: A file path or a writable binary file object.
        format (Optional[str]): Explicit image format. When ``None`` the format
            is determined from the filename extension, so it should always be
            given when ``fp`` is a file object rather than a path.
    """
    from PIL import Image
    # Round-trip through numpy to obtain a plain tensor copy.
    tensor = flow.tensor(tensor.numpy())
    grid = make_grid(
        tensor,
        nrow=nrow,
        padding=padding,
        pad_value=pad_value,
        normalize=normalize,
        range=range,
        scale_each=scale_each,
    )
    # Map to [0, 255]; the +0.5 rounds to the nearest integer before clamping.
    scaled = grid.mul(255).add(0.5).clamp(0, 255)
    channels_last = scaled.permute(1, 2, 0)
    ndarr = channels_last.to("cpu", flow.uint8).numpy()
    Image.fromarray(ndarr).save(fp, format=format)
| [
"oneflow.cat",
"oneflow.stack",
"oneflow.zeros"
] | [((5584, 5606), 'PIL.Image.fromarray', 'Image.fromarray', (['ndarr'], {}), '(ndarr)\n', (5599, 5606), False, 'from PIL import Image\n'), ((2030, 2055), 'oneflow.stack', 'flow.stack', (['tensor'], {'dim': '(0)'}), '(tensor, dim=0)\n', (2040, 2055), True, 'import oneflow as flow\n'), ((2448, 2485), 'oneflow.cat', 'flow.cat', (['[tensor, tensor, tensor]', '(1)'], {}), '([tensor, tensor, tensor], 1)\n', (2456, 2485), True, 'import oneflow as flow\n'), ((3761, 3838), 'oneflow.zeros', 'flow.zeros', (['(num_channels, height * ymaps + padding, width * xmaps + padding)'], {}), '((num_channels, height * ymaps + padding, width * xmaps + padding))\n', (3771, 3838), True, 'import oneflow as flow\n'), ((2280, 2317), 'oneflow.cat', 'flow.cat', (['[tensor, tensor, tensor]', '(0)'], {}), '([tensor, tensor, tensor], 0)\n', (2288, 2317), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow as flow
import numpy as np
import oneflow.typing as tp
from test_util import GenArgList
import unittest
from collections import OrderedDict
from typing import Dict
import os
import random
def _compare_hardswish_with_np(
    input_shape, device_type, value_type, machine_ids, device_counts
):
    """Compare oneflow's hardswish forward/backward against a NumPy reference.

    Builds a train-type global function that feeds a random input through
    ``flow.nn.hardswish`` and checks both the forward output and the gradient
    w.r.t. the input against hand-written NumPy implementations.

    Args:
        input_shape: Shape of the random test input.
        device_type: "cpu" or "gpu".
        value_type: Pair of (numpy dtype, oneflow dtype) to test.
        machine_ids: Placement string, e.g. "0:0".
        device_counts: Number of devices to configure.
    """
    # Sample a range straddling the hardswish kinks at -3 and +3.
    min_val = random.randint(-4, -1)
    max_val = random.randint(0, 4)
    assert min_val < max_val
    if value_type[1] == flow.float16:
        input_1 = np.random.uniform(
            min_val - 0.5, max_val + 0.5, size=input_shape
        ).astype(np.float16)
        input_1 += np.random.randn(*input_shape).astype(
            np.float16
        )  # add standard-normal noise
        input_1 = np.array(input_1, dtype=value_type[0])
    else:
        input_1 = np.random.uniform(
            min_val - 0.5, max_val + 0.5, size=input_shape
        ).astype(value_type[0])
        input_1 += np.random.randn(*input_shape).astype(
            value_type[0]
        )  # add standard-normal noise
    assert device_type in ["cpu", "gpu"]
    flow.clear_default_session()
    if device_type == "cpu":
        flow.config.cpu_device_num(device_counts)
    else:
        flow.config.gpu_device_num(device_counts)
    func_config = flow.FunctionConfig()
    func_config.default_placement_scope(flow.scope.placement(device_type, machine_ids))
    # global function needs float32 as type of argument and return value
    if value_type[1] == flow.float16:
        func_config.default_data_type(flow.float32)
    else:
        func_config.default_data_type(value_type[1])
    def np_hardswish(input):
        # Reference forward: x            for x >= 3
        #                    0            for x <= -3
        #                    x*(x+3)/6    otherwise
        elem_cnt = input.size
        init_shape = input.shape
        input = input.flatten()
        out = np.zeros_like(input)
        for i in range(elem_cnt):
            if input[i] >= 3:
                out[i] = input[i]
            elif input[i] <= -3:
                pass  # out[i] stays 0; `out` was zero-initialized
            else:
                out[i] = input[i] * (input[i] + 3) / 6
        out = np.reshape(out, init_shape)
        return np.array(out).astype(value_type[0])
    np_out_hardswish = np_hardswish(input_1)
    def np_diff(input):
        # Reference gradient: 1            for x >= 3
        #                     0            for x <= -3
        #                     x/3 + 0.5    otherwise
        input_shape = input.shape
        input = input.flatten()
        elem_cnt = input.size
        diff = np.zeros(shape=(elem_cnt,))
        for i in range(elem_cnt):
            if input[i] > -3 and input[i] < 3:
                diff[i] = input[i] / 3 + 0.5
            elif input[i] >= 3:
                diff[i] = 1
            else:
                # input[i] <= -3: the gradient is zero; `diff` is zero-initialized
                pass
        diff = np.reshape(diff, newshape=input_shape)
        diff = np.array(diff, dtype=value_type[0])
        return diff
    _np_grad = np_diff(input_1)
    def assert_prediction_grad(blob: tp.Numpy):
        # Looser tolerance for half precision.
        if value_type[1] == flow.float16:
            assert np.allclose(blob, _np_grad, atol=1e-3)
        else:
            assert np.allclose(blob, _np_grad, atol=1e-5)
    if value_type[1] == flow.float16:
        @flow.global_function(
            type="train", function_config=func_config,
        )
        def oneflow_hardswish(
            of_input_1: tp.Numpy.Placeholder(shape=input_1.shape, dtype=flow.float32),
        ) -> tp.Numpy:
            with flow.scope.placement(device_type, "0:0"):
                # Zero variable added to the input so a gradient can flow back to it.
                v = flow.get_variable(
                    shape=input_1.shape,
                    dtype=flow.float32,
                    initializer=flow.zeros_initializer(),
                    name="x_var",
                )
                x_var = of_input_1 + v
            # Cast to fp16 for the op, back to fp32 for the return value.
            x_f16 = flow.cast(x_var, flow.float16)
            of_hardswish_out_f16 = flow.nn.hardswish(x_f16)
            of_hardswish_out_f32 = flow.cast(of_hardswish_out_f16, flow.float32)
            with flow.scope.placement(device_type, "0:0"):
                flow.optimizer.SGD(
                    flow.optimizer.PiecewiseConstantScheduler([], [1e-3]), momentum=0
                ).minimize(of_hardswish_out_f32)
            flow.watch_diff(x_var, assert_prediction_grad)
            return of_hardswish_out_f32
    # Test float32/64
    else:
        @flow.global_function(
            type="train", function_config=func_config,
        )
        def oneflow_hardswish(
            of_input_1: tp.Numpy.Placeholder(shape=input_1.shape, dtype=value_type[1]),
        ) -> tp.Numpy:
            with flow.scope.placement(device_type, "0:0"):
                # Zero variable added to the input so a gradient can flow back to it.
                v = flow.get_variable(
                    shape=input_1.shape,
                    dtype=value_type[1],
                    initializer=flow.zeros_initializer(),
                    name="x_var",
                )
                x_var = of_input_1 + v
            flow.watch_diff(x_var, assert_prediction_grad)
            of_hardswish_out = flow.nn.hardswish(x_var)
            with flow.scope.placement(device_type, "0:0"):
                flow.optimizer.SGD(
                    flow.optimizer.PiecewiseConstantScheduler([], [1e-3]), momentum=0
                ).minimize(of_hardswish_out)
            return of_hardswish_out
    of_out_hardswish = oneflow_hardswish(input_1)
    # Forward check; the gradient check runs inside assert_prediction_grad.
    if value_type[1] == flow.float16:
        assert np.allclose(of_out_hardswish, np_out_hardswish, atol=1e-3)
    else:
        assert np.allclose(of_out_hardswish, np_out_hardswish, atol=1e-5)
def _gen_arg_dict(shape, device_type, value_type, machine_ids, device_counts):
    """Build the OrderedDict of parameter lists consumed by GenArgList."""
    # The CPU float configuration skips float16; every other configuration
    # additionally exercises the half-precision path.
    if value_type == "float" and device_type == "cpu":
        dtype_pairs = [
            (np.float32, flow.float32),
            (np.float64, flow.float64),
        ]
    else:
        dtype_pairs = [
            (np.float32, flow.float16),
            (np.float32, flow.float32),
            (np.float64, flow.float64),
        ]
    arg_dict = OrderedDict()
    arg_dict["input_shape"] = [shape]
    arg_dict["device_type"] = [device_type]
    arg_dict["value_type"] = dtype_pairs
    arg_dict["machine_ids"] = [machine_ids]
    arg_dict["device_counts"] = [device_counts]
    return arg_dict
@flow.unittest.skip_unless_1n1d()
class Testhardswish1n1d(flow.unittest.TestCase):
    """Single-node, single-device hardswish tests."""

    def test_hardswish_cpu(test_case):
        # 2-D input on one CPU device.
        args = _gen_arg_dict((3, 3), "cpu", "float", "0:0", 1)
        for case in GenArgList(args):
            _compare_hardswish_with_np(*case)

    @unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
    def test_hardswish_gpu(test_case):
        # 3-D input on one GPU device.
        args = _gen_arg_dict((4, 4, 4), "gpu", "float", "0:0", 1)
        for case in GenArgList(args):
            _compare_hardswish_with_np(*case)
@flow.unittest.skip_unless_1n2d()
class Testhardswish1n2d(flow.unittest.TestCase):
    """Single-node, two-device hardswish tests."""

    @unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
    def test_hardswish_gpu_1n2d(test_case):
        # 3-D input spread over two GPU devices.
        args = _gen_arg_dict((4, 8, 4), "gpu", "float", "0:0-1", 2)
        for case in GenArgList(args):
            _compare_hardswish_with_np(*case)
if __name__ == "__main__":
    # Entry point: let unittest discover and run the TestCase classes above.
    unittest.main()
| [
"oneflow.typing.Numpy.Placeholder",
"oneflow.clear_default_session",
"oneflow.config.cpu_device_num",
"oneflow.unittest.skip_unless_1n1d",
"oneflow.nn.hardswish",
"oneflow.unittest.skip_unless_1n2d",
"oneflow.config.gpu_device_num",
"oneflow.watch_diff",
"oneflow.optimizer.PiecewiseConstantScheduler... | [((6703, 6735), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (6733, 6735), True, 'import oneflow as flow\n'), ((7508, 7540), 'oneflow.unittest.skip_unless_1n2d', 'flow.unittest.skip_unless_1n2d', ([], {}), '()\n', (7538, 7540), True, 'import oneflow as flow\n'), ((913, 935), 'random.randint', 'random.randint', (['(-4)', '(-1)'], {}), '(-4, -1)\n', (927, 935), False, 'import random\n'), ((950, 970), 'random.randint', 'random.randint', (['(0)', '(4)'], {}), '(0, 4)\n', (964, 970), False, 'import random\n'), ((1669, 1697), 'oneflow.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (1695, 1697), True, 'import oneflow as flow\n'), ((1856, 1877), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (1875, 1877), True, 'import oneflow as flow\n'), ((6137, 6150), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (6148, 6150), False, 'from collections import OrderedDict\n'), ((8031, 8046), 'unittest.main', 'unittest.main', ([], {}), '()\n', (8044, 8046), False, 'import unittest\n'), ((1312, 1350), 'numpy.array', 'np.array', (['input_1'], {'dtype': 'value_type[0]'}), '(input_1, dtype=value_type[0])\n', (1320, 1350), True, 'import numpy as np\n'), ((1735, 1776), 'oneflow.config.cpu_device_num', 'flow.config.cpu_device_num', (['device_counts'], {}), '(device_counts)\n', (1761, 1776), True, 'import oneflow as flow\n'), ((1795, 1836), 'oneflow.config.gpu_device_num', 'flow.config.gpu_device_num', (['device_counts'], {}), '(device_counts)\n', (1821, 1836), True, 'import oneflow as flow\n'), ((1918, 1964), 'oneflow.scope.placement', 'flow.scope.placement', (['device_type', 'machine_ids'], {}), '(device_type, machine_ids)\n', (1938, 1964), True, 'import oneflow as flow\n'), ((2331, 2351), 'numpy.zeros_like', 'np.zeros_like', (['input'], {}), '(input)\n', (2344, 2351), True, 'import numpy as np\n'), ((2646, 2673), 'numpy.reshape', 'np.reshape', 
(['out', 'init_shape'], {}), '(out, init_shape)\n', (2656, 2673), True, 'import numpy as np\n'), ((2908, 2935), 'numpy.zeros', 'np.zeros', ([], {'shape': '(elem_cnt,)'}), '(shape=(elem_cnt,))\n', (2916, 2935), True, 'import numpy as np\n'), ((3260, 3298), 'numpy.reshape', 'np.reshape', (['diff'], {'newshape': 'input_shape'}), '(diff, newshape=input_shape)\n', (3270, 3298), True, 'import numpy as np\n'), ((3314, 3349), 'numpy.array', 'np.array', (['diff'], {'dtype': 'value_type[0]'}), '(diff, dtype=value_type[0])\n', (3322, 3349), True, 'import numpy as np\n'), ((3673, 3736), 'oneflow.global_function', 'flow.global_function', ([], {'type': '"""train"""', 'function_config': 'func_config'}), "(type='train', function_config=func_config)\n", (3693, 3736), True, 'import oneflow as flow\n'), ((4801, 4864), 'oneflow.global_function', 'flow.global_function', ([], {'type': '"""train"""', 'function_config': 'func_config'}), "(type='train', function_config=func_config)\n", (4821, 4864), True, 'import oneflow as flow\n'), ((5845, 5904), 'numpy.allclose', 'np.allclose', (['of_out_hardswish', 'np_out_hardswish'], {'atol': '(0.001)'}), '(of_out_hardswish, np_out_hardswish, atol=0.001)\n', (5856, 5904), True, 'import numpy as np\n'), ((5929, 5988), 'numpy.allclose', 'np.allclose', (['of_out_hardswish', 'np_out_hardswish'], {'atol': '(1e-05)'}), '(of_out_hardswish, np_out_hardswish, atol=1e-05)\n', (5940, 5988), True, 'import numpy as np\n'), ((7036, 7056), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (7046, 7056), False, 'from test_util import GenArgList\n'), ((7438, 7458), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (7448, 7458), False, 'from test_util import GenArgList\n'), ((7125, 7159), 'os.getenv', 'os.getenv', (['"""ONEFLOW_TEST_CPU_ONLY"""'], {}), "('ONEFLOW_TEST_CPU_ONLY')\n", (7134, 7159), False, 'import os\n'), ((7931, 7951), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (7941, 7951), 
False, 'from test_util import GenArgList\n'), ((7611, 7645), 'os.getenv', 'os.getenv', (['"""ONEFLOW_TEST_CPU_ONLY"""'], {}), "('ONEFLOW_TEST_CPU_ONLY')\n", (7620, 7645), False, 'import os\n'), ((3513, 3552), 'numpy.allclose', 'np.allclose', (['blob', '_np_grad'], {'atol': '(0.001)'}), '(blob, _np_grad, atol=0.001)\n', (3524, 3552), True, 'import numpy as np\n'), ((3585, 3624), 'numpy.allclose', 'np.allclose', (['blob', '_np_grad'], {'atol': '(1e-05)'}), '(blob, _np_grad, atol=1e-05)\n', (3596, 3624), True, 'import numpy as np\n'), ((4320, 4344), 'oneflow.nn.hardswish', 'flow.nn.hardswish', (['x_f16'], {}), '(x_f16)\n', (4337, 4344), True, 'import oneflow as flow\n'), ((4380, 4425), 'oneflow.cast', 'flow.cast', (['of_hardswish_out_f16', 'flow.float32'], {}), '(of_hardswish_out_f16, flow.float32)\n', (4389, 4425), True, 'import oneflow as flow\n'), ((4670, 4716), 'oneflow.watch_diff', 'flow.watch_diff', (['x_var', 'assert_prediction_grad'], {}), '(x_var, assert_prediction_grad)\n', (4685, 4716), True, 'import oneflow as flow\n'), ((5372, 5418), 'oneflow.watch_diff', 'flow.watch_diff', (['x_var', 'assert_prediction_grad'], {}), '(x_var, assert_prediction_grad)\n', (5387, 5418), True, 'import oneflow as flow\n'), ((5451, 5475), 'oneflow.nn.hardswish', 'flow.nn.hardswish', (['x_var'], {}), '(x_var)\n', (5468, 5475), True, 'import oneflow as flow\n'), ((1056, 1121), 'numpy.random.uniform', 'np.random.uniform', (['(min_val - 0.5)', '(max_val + 0.5)'], {'size': 'input_shape'}), '(min_val - 0.5, max_val + 0.5, size=input_shape)\n', (1073, 1121), True, 'import numpy as np\n'), ((1182, 1211), 'numpy.random.randn', 'np.random.randn', (['*input_shape'], {}), '(*input_shape)\n', (1197, 1211), True, 'import numpy as np\n'), ((1379, 1444), 'numpy.random.uniform', 'np.random.uniform', (['(min_val - 0.5)', '(max_val + 0.5)'], {'size': 'input_shape'}), '(min_val - 0.5, max_val + 0.5, size=input_shape)\n', (1396, 1444), True, 'import numpy as np\n'), ((1508, 1537), 
'numpy.random.randn', 'np.random.randn', (['*input_shape'], {}), '(*input_shape)\n', (1523, 1537), True, 'import numpy as np\n'), ((2690, 2703), 'numpy.array', 'np.array', (['out'], {}), '(out)\n', (2698, 2703), True, 'import numpy as np\n'), ((3815, 3876), 'oneflow.typing.Numpy.Placeholder', 'tp.Numpy.Placeholder', ([], {'shape': 'input_1.shape', 'dtype': 'flow.float32'}), '(shape=input_1.shape, dtype=flow.float32)\n', (3835, 3876), True, 'import oneflow.typing as tp\n'), ((3918, 3958), 'oneflow.scope.placement', 'flow.scope.placement', (['device_type', '"""0:0"""'], {}), "(device_type, '0:0')\n", (3938, 3958), True, 'import oneflow as flow\n'), ((4253, 4283), 'oneflow.cast', 'flow.cast', (['x_var', 'flow.float16'], {}), '(x_var, flow.float16)\n', (4262, 4283), True, 'import oneflow as flow\n'), ((4444, 4484), 'oneflow.scope.placement', 'flow.scope.placement', (['device_type', '"""0:0"""'], {}), "(device_type, '0:0')\n", (4464, 4484), True, 'import oneflow as flow\n'), ((4943, 5005), 'oneflow.typing.Numpy.Placeholder', 'tp.Numpy.Placeholder', ([], {'shape': 'input_1.shape', 'dtype': 'value_type[1]'}), '(shape=input_1.shape, dtype=value_type[1])\n', (4963, 5005), True, 'import oneflow.typing as tp\n'), ((5047, 5087), 'oneflow.scope.placement', 'flow.scope.placement', (['device_type', '"""0:0"""'], {}), "(device_type, '0:0')\n", (5067, 5087), True, 'import oneflow as flow\n'), ((5494, 5534), 'oneflow.scope.placement', 'flow.scope.placement', (['device_type', '"""0:0"""'], {}), "(device_type, '0:0')\n", (5514, 5534), True, 'import oneflow as flow\n'), ((4112, 4136), 'oneflow.zeros_initializer', 'flow.zeros_initializer', ([], {}), '()\n', (4134, 4136), True, 'import oneflow as flow\n'), ((5242, 5266), 'oneflow.zeros_initializer', 'flow.zeros_initializer', ([], {}), '()\n', (5264, 5266), True, 'import oneflow as flow\n'), ((4542, 4596), 'oneflow.optimizer.PiecewiseConstantScheduler', 'flow.optimizer.PiecewiseConstantScheduler', (['[]', '[0.001]'], {}), '([], 
[0.001])\n', (4583, 4596), True, 'import oneflow as flow\n'), ((5592, 5646), 'oneflow.optimizer.PiecewiseConstantScheduler', 'flow.optimizer.PiecewiseConstantScheduler', (['[]', '[0.001]'], {}), '([], [0.001])\n', (5633, 5646), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from typing import Tuple, Optional
from oneflow.compatible.single_client.python.oneflow_export import oneflow_export
from oneflow.compatible import single_client as flow
from oneflow.compatible.single_client.python.framework import id_util as id_util
from oneflow.compatible.single_client.python.framework import (
remote_blob as remote_blob_util,
)
import oneflow._oneflow_internal
@oneflow_export("quantization.min_max_observer")
def min_max_observer(
    input: oneflow._oneflow_internal.BlobDesc,
    quantization_bit: int = 8,
    quantization_scheme: str = "symmetric",
    quantization_formula: str = "google",
    per_layer_quantization: bool = True,
    name: Optional[str] = None,
) -> Tuple[oneflow._oneflow_internal.BlobDesc, oneflow._oneflow_internal.BlobDesc]:
    r"""Compute the quantization parameters of the input tensor.
    First compute the max and min values of input tensor:
    .. math::
        & max\_value = max(input)
        & min\_value = min(input)
    Then compute the scale and zero_point with the following equations:
    if quantization_scheme == "symmetric":
    .. math::
        & denom = 2^{quantization\_bit - 1} - 1
        & scale = max(|max\_value|,|min\_value|) / denom
        & zero\_point = 0
    elif quantization_scheme == "affine":
    .. math::
        & denom = 2^{quantization\_bit} - 1
        & scale = (max\_value - min\_value) / denom
        & zero\_point = -min\_value / scale
    If per_layer_quantization is False, then the shape of scale and zero_point will be (input.shape[0],).
    Args:
        input (oneflow._oneflow_internal.BlobDesc): input tensor.
        quantization_bit (int): Quantize input to uintX / intX, X can be in range [2, 8]. Defaults to 8.
        quantization_scheme (str): "symmetric" or "affine", quantize to signed / unsigned integer. Defaults to "symmetric".
        quantization_formula (str): Support "google" or "cambricon".
        per_layer_quantization (bool): True or False, means per-layer / per-channel quantization. Defaults to True.
        name (Optional[str]): This operator's name. Defaults to None.
    Returns:
        Tuple[oneflow._oneflow_internal.BlobDesc, oneflow._oneflow_internal.BlobDesc]: The scale and zero_point of input tensor.
    For example:
    .. code-block:: python
        import oneflow.compatible.single_client as flow
        import numpy as np
        import oneflow.compatible.single_client.typing as tp
        @flow.global_function(type="predict", function_config=flow.FunctionConfig())
        def QuantizeJob(
            input: tp.Numpy.Placeholder(input_shape, dtype=type_name_to_flow_type[dtype])
        ) -> tp.Numpy:
            with flow.scope.placement(device_type, "0:0"):
                scale, zero_point = flow.quantization.min_max_observer(
                    input, quantization_bit=8,
                    quantization_scheme="symmetric",
                    quantization_formula="google",
                    per_layer_quantization=True
                )
            return scale, zero_point
        input = (np.random.random(input_shape) - 0.5).astype(type_name_to_np_type[dtype])
        scale, zero_point = QuantizeJob(input)
    """
    # The cambricon formula only implements per-layer quantization.
    if quantization_formula == "cambricon" and not per_layer_quantization:
        raise NotImplementedError(
            "per-channel mode is not supported in cambricon scheme"
        )
    # Build and run the "min_max_observer" user op; it emits two output blobs.
    scale, zero_point = (
        flow.user_op_builder(
            name if name is not None else id_util.UniqueStr("MinMaxObserver_")
        )
        .Op("min_max_observer")
        .Input("in", [input])
        .Output("scale")
        .Output("zero_point")
        .Attr("quantization_bit", quantization_bit)
        .Attr("quantization_scheme", quantization_scheme)
        .Attr("quantization_formula", quantization_formula)
        .Attr("per_layer_quantization", per_layer_quantization)
        .Build()
        .InferAndTryRun()
        .RemoteBlobList()
    )
    return scale, zero_point
@oneflow_export("quantization.moving_average_min_max_observer")
def moving_average_min_max_observer(
    input: oneflow._oneflow_internal.BlobDesc,
    quantization_bit: int = 8,
    quantization_scheme: str = "symmetric",
    quantization_formula: str = "google",
    momentum: float = 0.95,
    name: Optional[str] = None,
) -> Tuple[oneflow._oneflow_internal.BlobDesc, oneflow._oneflow_internal.BlobDesc]:
    r"""Compute the quantization parameters based on the moving average of the input tensor's min and max values.
    First compute the moving\_max and moving\_min value of input tensor:
    if quantization_scheme == "symmetric":
    .. math::
        & moving\_max = moving\_max * momentum + |max(input)| * (1 - momentum)
        & moving\_min = moving\_max
    elif quantization_scheme == "affine":
    .. math::
        & moving\_max = moving\_max * momentum + max(input) * (1 - momentum)
        & moving\_min = moving\_min * momentum + min(input) * (1 - momentum)
    The moving average of min and max values are initialized as the first batch of input `Blob`'s min and max.
    Then compute the scale and zero_point with the following equations:
    if quantization_scheme == "symmetric":
    .. math::
        & denom = 2^{quantization\_bit - 1} - 1
        & scale = moving\_max / denom
        & zero\_point = 0
    elif quantization_scheme == "affine":
    .. math::
        & denom = 2^{quantization\_bit} - 1
        & scale = (moving\_max - moving\_min) / denom
        & zero\_point = -moving\_min / scale
    Args:
        input (oneflow._oneflow_internal.BlobDesc): input tensor.
        quantization_bit (int): Quantize input to uintX / intX, X can be in range [2, 8]. Defaults to 8.
        quantization_scheme (str): "symmetric" or "affine", quantize to signed / unsigned integer. Defaults to "symmetric".
        quantization_formula (str): Support "google" or "cambricon".
        momentum (float): Smoothing parameter for exponential moving average operation. Defaults to 0.95.
        name (Optional[str]): This operator's name. Defaults to None.
    Returns:
        Tuple[oneflow._oneflow_internal.BlobDesc, oneflow._oneflow_internal.BlobDesc]: The scale and zero_point of input tensor.
    For example:
    .. code-block:: python
        import oneflow.compatible.single_client as flow
        import numpy as np
        import oneflow.compatible.single_client.typing as tp
        @flow.global_function(type="predict", function_config=flow.FunctionConfig())
        def QuantizeJob(
            input: tp.Numpy.Placeholder(input_shape, dtype=type_name_to_flow_type[dtype])
        ) -> tp.Numpy:
            with flow.scope.placement(device_type, "0:0"):
                scale, zero_point = flow.quantization.moving_average_min_max_observer(
                    input, quantization_bit=8,
                    quantization_scheme="symmetric",
                    quantization_formula="google",
                    momentum=0.95
                )
            return scale, zero_point
        input = (np.random.random(input_shape) - 0.5).astype(type_name_to_np_type[dtype])
        scale, zero_point = QuantizeJob(input)
    """
    op_name = (
        name if name is not None else id_util.UniqueStr("MovingAverageMinMaxObserver_")
    )
    # The moving averages are only updated when the enclosing global function trains.
    training = True if flow.current_global_function_desc().IsTrainable() else False
    # State variables (moving extrema and step counter) live in the op's namespace;
    # all are zero-initialized and excluded from gradient updates.
    with flow.scope.namespace(op_name):
        moving_max = flow.get_variable(
            "moving_max",
            shape=(1,),
            dtype=input.dtype,
            initializer=flow.zeros_initializer(input.dtype),
            trainable=False,
        )
        moving_min = flow.get_variable(
            "moving_min",
            shape=(1,),
            dtype=input.dtype,
            initializer=flow.zeros_initializer(input.dtype),
            trainable=False,
        )
        current_train_step = flow.get_variable(
            "current_train_step",
            shape=(1,),
            dtype=flow.int64,
            initializer=flow.zeros_initializer(flow.int64),
            trainable=False,
        )
    stop_update_after_iters = 1
    # Build and run the "moving_average_min_max_observer" user op.
    scale, zero_point = (
        flow.user_op_builder(op_name)
        .Op("moving_average_min_max_observer")
        .Input("in", [input])
        .Input("current_train_step", [current_train_step])
        .Input("moving_max", [moving_max])
        .Input("moving_min", [moving_min])
        .Output("scale")
        .Output("zero_point")
        .Attr("training", training)
        .Attr("stop_update_after_iters", stop_update_after_iters)
        .Attr("quantization_bit", quantization_bit)
        .Attr("quantization_scheme", quantization_scheme)
        .Attr("quantization_formula", quantization_formula)
        .Attr("momentum", momentum)
        .Build()
        .InferAndTryRun()
        .RemoteBlobList()
    )
    return scale, zero_point
@oneflow_export("quantization.fake_quantization")
def fake_quantization(
    input: oneflow._oneflow_internal.BlobDesc,
    scale: oneflow._oneflow_internal.BlobDesc,
    zero_point: oneflow._oneflow_internal.BlobDesc,
    quantization_bit: int = 8,
    quantization_scheme: str = "symmetric",
    quantization_formula: str = "google",
    name: Optional[str] = None,
) -> oneflow._oneflow_internal.BlobDesc:
    r"""Simulate the quantize and dequantize operations in training time.
    The output will be computed as:
    if quantization_scheme == "symmetric":
    .. math::
        & quant\_max = 2^{quantization\_bit - 1} - 1
        & quant\_min = -quant\_max
        & clamp(round(x / scale), quant\_min, quant\_max) * scale
    elif quantization_scheme == "affine":
    .. math::
        & quant\_max = 2^{quantization\_bit} - 1
        & quant\_min = 0
        & (clamp(round(x / scale + zero\_point), quant\_min, quant\_max) - zero\_point) * scale
    Args:
        input (oneflow._oneflow_internal.BlobDesc): input tensor.
        scale (oneflow._oneflow_internal.BlobDesc): Computed by min_max_observer or moving_average_min_max_observer op.
        zero_point (oneflow._oneflow_internal.BlobDesc): Computed by min_max_observer or moving_average_min_max_observer op.
        quantization_bit (int): Quantize input to uintX / intX, X can be in range [2, 8]. Defaults to 8.
        quantization_scheme (str): "symmetric" or "affine", quantize to signed / unsigned integer. Defaults to "symmetric".
        quantization_formula (str): Support "google" or "cambricon".
        name (Optional[str]): This operator's name. Defaults to None.
    Returns:
        oneflow._oneflow_internal.BlobDesc: Input tensor after quantize and dequantize operations.
    For example:
    .. code-block:: python
        import oneflow.compatible.single_client as flow
        import numpy as np
        import oneflow.compatible.single_client.typing as tp
        @flow.global_function(type="predict", function_config=flow.FunctionConfig())
        def QuantizeJob(
            input: tp.Numpy.Placeholder(input_shape, dtype=type_name_to_flow_type[dtype])
        ) -> tp.Numpy:
            with flow.scope.placement(device_type, "0:0"):
                scale, zero_point = flow.quantization.min_max_observer(
                    input, quantization_bit=8,
                    quantization_scheme="symmetric",
                    quantization_formula="google",
                    per_layer_quantization=True
                )
                fake_quantize_out = flow.quantization.fake_quantization(
                    input, scale, zero_point,
                    quantization_bit=8,
                    quantization_scheme="symmetric",
                    quantization_formula="google"
                )
            return fake_quantize_out
        input = (np.random.random(input_shape) - 0.5).astype(type_name_to_np_type[dtype])
        fake_quantize_out = QuantizeJob(input)
    """
    # Build and run the "fake_quantization" user op; it has a single output blob.
    return (
        flow.user_op_builder(
            name if name is not None else id_util.UniqueStr("Fake_Quantization_")
        )
        .Op("fake_quantization")
        .Input("in", [input])
        .Input("scale", [scale])
        .Input("zero_point", [zero_point])
        .Output("out")
        .Attr("quantization_bit", quantization_bit)
        .Attr("quantization_scheme", quantization_scheme)
        .Attr("quantization_formula", quantization_formula)
        .Build()
        .InferAndTryRun()
        .SoleOutputBlob()
    )
| [
"oneflow.compatible.single_client.user_op_builder",
"oneflow.compatible.single_client.scope.namespace",
"oneflow.compatible.single_client.python.oneflow_export.oneflow_export",
"oneflow.compatible.single_client.current_global_function_desc",
"oneflow.compatible.single_client.python.framework.id_util.UniqueS... | [((1020, 1067), 'oneflow.compatible.single_client.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""quantization.min_max_observer"""'], {}), "('quantization.min_max_observer')\n", (1034, 1067), False, 'from oneflow.compatible.single_client.python.oneflow_export import oneflow_export\n'), ((4683, 4745), 'oneflow.compatible.single_client.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""quantization.moving_average_min_max_observer"""'], {}), "('quantization.moving_average_min_max_observer')\n", (4697, 4745), False, 'from oneflow.compatible.single_client.python.oneflow_export import oneflow_export\n'), ((9653, 9701), 'oneflow.compatible.single_client.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""quantization.fake_quantization"""'], {}), "('quantization.fake_quantization')\n", (9667, 9701), False, 'from oneflow.compatible.single_client.python.oneflow_export import oneflow_export\n'), ((8005, 8054), 'oneflow.compatible.single_client.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""MovingAverageMinMaxObserver_"""'], {}), "('MovingAverageMinMaxObserver_')\n", (8022, 8054), True, 'from oneflow.compatible.single_client.python.framework import id_util as id_util\n'), ((8156, 8185), 'oneflow.compatible.single_client.scope.namespace', 'flow.scope.namespace', (['op_name'], {}), '(op_name)\n', (8176, 8185), True, 'from oneflow.compatible import single_client as flow\n'), ((8085, 8120), 'oneflow.compatible.single_client.current_global_function_desc', 'flow.current_global_function_desc', ([], {}), '()\n', (8118, 8120), True, 'from oneflow.compatible import single_client as flow\n'), ((8332, 8367), 'oneflow.compatible.single_client.zeros_initializer', 'flow.zeros_initializer', (['input.dtype'], {}), '(input.dtype)\n', (8354, 8367), True, 'from oneflow.compatible import single_client as flow\n'), ((8553, 8588), 
'oneflow.compatible.single_client.zeros_initializer', 'flow.zeros_initializer', (['input.dtype'], {}), '(input.dtype)\n', (8575, 8588), True, 'from oneflow.compatible import single_client as flow\n'), ((8789, 8823), 'oneflow.compatible.single_client.zeros_initializer', 'flow.zeros_initializer', (['flow.int64'], {}), '(flow.int64)\n', (8811, 8823), True, 'from oneflow.compatible import single_client as flow\n'), ((4177, 4213), 'oneflow.compatible.single_client.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""MinMaxObserver_"""'], {}), "('MinMaxObserver_')\n", (4194, 4213), True, 'from oneflow.compatible.single_client.python.framework import id_util as id_util\n'), ((12779, 12818), 'oneflow.compatible.single_client.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""Fake_Quantization_"""'], {}), "('Fake_Quantization_')\n", (12796, 12818), True, 'from oneflow.compatible.single_client.python.framework import id_util as id_util\n'), ((8930, 8959), 'oneflow.compatible.single_client.user_op_builder', 'flow.user_op_builder', (['op_name'], {}), '(op_name)\n', (8950, 8959), True, 'from oneflow.compatible import single_client as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from typing import Optional
import oneflow as flow
import oneflow.python.framework.id_util as id_util
import oneflow.python.framework.remote_blob as remote_blob_util
from oneflow.python.oneflow_export import oneflow_export
@oneflow_export("sort")
def sort(
    input: remote_blob_util.BlobDef,
    direction: str = "ASCENDING",
    name: Optional[str] = None,
) -> remote_blob_util.BlobDef:
    """Sort the elements of `input` along its last dimension.

    Args:
        input: the blob whose values are sorted.
        direction: either "ASCENDING" or "DESCENDING".
        name: optional op name; a unique one is generated when omitted.

    Returns:
        A blob containing the sorted values.
    """
    assert direction in ["ASCENDING", "DESCENDING"]
    op_name = name if name is not None else id_util.UniqueStr("Sort_")
    op = (
        flow.user_op_builder(op_name)
        .Op("sort")
        .Input("in", [input])
        .Output("out")
        .Attr("direction", direction)
        .Build()
    )
    return op.InferAndTryRun().RemoteBlobList()[0]
@oneflow_export("argsort")
def argsort(
    input: remote_blob_util.BlobDef,
    direction: str = "ASCENDING",
    name: Optional[str] = None,
) -> remote_blob_util.BlobDef:
    """Return the indices that would sort `input` along its last dimension.

    Args:
        input: the blob to be (index-)sorted.
        direction: either "ASCENDING" or "DESCENDING".
        name: optional op name; a unique one is generated when omitted.

    Returns:
        A blob of indices giving the sorted order.
    """
    assert direction in ["ASCENDING", "DESCENDING"]
    op_name = name if name is not None else id_util.UniqueStr("ArgSort_")
    op = (
        flow.user_op_builder(op_name)
        .Op("arg_sort")
        .Input("in", [input])
        .Output("out")
        .Attr("direction", direction)
        .Build()
    )
    return op.InferAndTryRun().RemoteBlobList()[0]
| [
"oneflow.python.oneflow_export.oneflow_export",
"oneflow.python.framework.id_util.UniqueStr"
] | [((857, 879), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""sort"""'], {}), "('sort')\n", (871, 879), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((1368, 1393), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""argsort"""'], {}), "('argsort')\n", (1382, 1393), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((1148, 1174), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""Sort_"""'], {}), "('Sort_')\n", (1165, 1174), True, 'import oneflow.python.framework.id_util as id_util\n'), ((1678, 1707), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""ArgSort_"""'], {}), "('ArgSort_')\n", (1695, 1707), True, 'import oneflow.python.framework.id_util as id_util\n')] |
import oneflow as flow
from oneflow_gpt.config import get_args
_DIST_UTIL = None
def _merge_devices(devices):
node_devices = dict()
for node_id, device_id in devices:
if node_id not in node_devices:
node_devices[node_id] = []
node_devices[node_id].append(device_id)
return node_devices
class _DistributeUtil(object):
def __init__(self):
args = get_args()
self._init_parallel_size(args)
self._init_placement_group(args)
self._init_parallel_hierarchy()
def _init_parallel_size(self, args):
self.world_size_ = args.num_gpus_per_node * args.num_nodes
# tensor model parallel size.
self.tmp_size_ = min(args.tensor_model_parallel_size, self.world_size_)
assert self.world_size_ % self.tmp_size_ == 0, (
f"world size ({self.world_size_}) is not divisible by"
f" tensor model parallel size ({self.tmp_size_})"
)
ws = self.world_size_ // args.tensor_model_parallel_size
# pipeline model parallel size.
self.pmp_size_ = min(args.pipeline_model_parallel_size, ws)
self.mp_size_ = self.pmp_size_ * self.tmp_size_
assert self.world_size_ % self.mp_size_ == 0, (
f"world size ({self.world_size_}) is not divisible by"
f" tensor model parallel size ({self.tmp_size_}) times"
f" pipeline model paralle size ({self.pmp_size_})"
)
# data parallel size
self.dp_size_ = self.world_size_ // self.mp_size_
def _init_placement_group(self, args):
node_ids = [i // args.num_gpus_per_node for i in range(self.world_size_)]
device_ids = list(range(args.num_gpus_per_node)) * args.num_nodes
# [(0, 0), (0, 1), (0, 2), (0, 3), (1, 0), (1, 1), (1, 2), (1, 3)]
devices = [(n, d) for n, d in zip(node_ids, device_ids)]
num_devices_per_stage = self.world_size_ // self.pmp_size_
stages_devices = [
_merge_devices(devices[i : (i + num_devices_per_stage)])
for i in range(0, self.world_size_, num_devices_per_stage)
]
assert args.num_layers % self.pmp_size_ == 0, (
f"number of layers ({args.num_layers}) is not divisible by"
f" pipeline model parallel size ({self.pmp_size_})"
)
num_layers_per_stage = args.num_layers // self.pmp_size_
self.layers_stage_ids_ = [
i // num_layers_per_stage for i in range(args.num_layers)
]
self.layers_devices_ = [
stages_devices[stage_id] for stage_id in self.layers_stage_ids_
]
def _init_parallel_hierarchy(self):
if self.is_data_model_parallel():
self.parallel_hierarchy_ = (self.dp_size_, self.tmp_size_)
else:
self.parallel_hierarchy_ = None
@property
def parallel_hierarchy(self):
return self.parallel_hierarchy_
@property
def tensor_model_parallel_size(self):
return self.tmp_size_
@property
def pipeline_model_parallel_size(self):
return self.pmp_size_
@property
def model_parallel_size(self):
return self.tmp_size_ * self.pmp_size_
@property
def data_parallel_size(self):
return self.dp_size_
def get_layer_devices(self, layer_idx):
return self.layers_devices_[layer_idx]
def get_layer_stage_id(self, layer_idx):
return self.layers_stage_ids_[layer_idx]
def is_tensor_model_parallel(self):
return self.tensor_model_parallel_size > 1
def is_data_parallel(self):
return self.data_parallel_size > 1
def is_pipeline_model_parallel(self):
return self.pipeline_model_parallel_size > 1
def is_data_model_parallel(self):
return self.is_tensor_model_parallel() and self.is_data_parallel()
def is_non_data_model_parallel(self):
return not self.is_tensor_model_parallel() and not self.is_data_parallel()
def get_dist_util():
    """Return the process-wide ``_DistributeUtil`` singleton, creating it lazily."""
    global _DIST_UTIL
    if _DIST_UTIL is not None:
        return _DIST_UTIL
    _DIST_UTIL = _DistributeUtil()
    return _DIST_UTIL
def get_layer_placement(layer_idx, device_type="cuda"):
    """Build a ``flow.placement`` over the devices hosting layer `layer_idx`."""
    dist_util = get_dist_util()
    layer_devices = dist_util.get_layer_devices(layer_idx)
    return flow.placement(device_type, layer_devices, dist_util.parallel_hierarchy)
def get_nd_sbp(sbp_list):
    """Project a 2-entry [data_sbp, model_sbp] list onto the active parallel mode.

    Returns both entries under data+model parallelism, only the first under
    pure data parallelism, only the second under pure tensor-model
    parallelism, and a single broadcast when neither is active.
    """
    assert isinstance(sbp_list, list)
    assert len(sbp_list) == 2
    assert all(isinstance(sbp, flow.sbp.sbp) for sbp in sbp_list)
    dist_util = get_dist_util()
    if dist_util.is_data_model_parallel():
        return sbp_list
    if dist_util.is_data_parallel():
        return sbp_list[:1]
    if dist_util.is_tensor_model_parallel():
        return sbp_list[1:]
    if dist_util.is_non_data_model_parallel():
        return [flow.sbp.broadcast]
    raise NotImplementedError
def get_hidden_sbp():
    """Nd-sbp for hidden states: split the batch dim for data, broadcast for model."""
    hidden_sbp = [flow.sbp.split(0), flow.sbp.broadcast]
    return get_nd_sbp(hidden_sbp)
| [
"oneflow.sbp.split"
] | [((404, 414), 'oneflow_gpt.config.get_args', 'get_args', ([], {}), '()\n', (412, 414), False, 'from oneflow_gpt.config import get_args\n'), ((4916, 4933), 'oneflow.sbp.split', 'flow.sbp.split', (['(0)'], {}), '(0)\n', (4930, 4933), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import numpy as np
import oneflow as flow
import oneflow.unittest
import torch as pytorch
import torchvision
from oneflow.test_utils.automated_test_util import *
def _get_np_rois():
    """Generate 200 random RoIs as float32 rows [img_idx, h1, w1, h2, w2].

    Box corners are drawn as flat positions in a 64x64 grid, ordered, then
    halved so they land on the 0.5 coordinate grid.
    """
    img_idx = np.asarray(
        [random(0, 2).to(int).value() for _ in range(200)]
    ).reshape((200, 1))
    box_idx = np.asarray(
        [random(0, 64 * 64).to(float).value() for _ in range(400)]
    ).reshape((200, 2))

    def flat_pair_to_box(idx1, idx2):
        # Decode two flat 64x64 positions into an ordered (h1, w1, h2, w2).
        lo, hi = (idx1, idx2) if idx1 <= idx2 else (idx2, idx1)
        return [lo // 64 / 2, lo % 64 / 2, hi // 64 / 2, hi % 64 / 2]

    boxes = np.array(
        [flat_pair_to_box(a, b) for a, b in zip(box_idx[:, 0], box_idx[:, 1])]
    )
    return np.hstack((img_idx, boxes)).astype(np.float32)
def _test_roi_align(test_case, placement, rois_sbp):
    """Compare oneflow roi_align against torchvision on random global tensors.

    Checks both the forward output and the gradient w.r.t. the input feature
    map, with the RoIs distributed under `rois_sbp`.
    (The unused inner helper `get_h_w` — a dead copy of the one inside
    `_get_np_rois` — has been removed.)
    """
    dims = [8, 8, 64, 64]
    x = random_tensor(4, *dims).to_global(
        placement=placement,
        sbp=[flow.sbp.broadcast for _ in range(len(placement.ranks.shape))],
    )
    # Detach so each framework tracks its own independent gradient.
    x.oneflow = x.oneflow.detach().requires_grad_()
    x.pytorch = x.pytorch.detach().requires_grad_()
    np_rois = _get_np_rois()
    of_rois = (
        flow.tensor(np_rois, dtype=flow.float)
        .to_global(
            placement=flow.env.all_device_placement("cpu"), sbp=[flow.sbp.broadcast,]
        )
        .to_global(placement, rois_sbp)
    )
    torch_rois = pytorch.tensor(np_rois)
    of_out = flow.roi_align(x.oneflow, of_rois, 2.0, 14, 14, 2, True)
    torch_out = torchvision.ops.roi_align(
        x.pytorch,
        torch_rois,
        spatial_scale=2.0,
        output_size=[14, 14],
        sampling_ratio=2,
        aligned=True,
    )
    # compare forward output (gathered back to a local CPU tensor)
    of_local = of_out.to_global(
        placement=flow.env.all_device_placement("cpu"), sbp=[flow.sbp.broadcast,]
    ).to_local()
    test_case.assertTrue(
        np.allclose(
            of_local.numpy(), torch_out.detach().cpu().numpy(), rtol=1e-04, atol=1e-4
        )
    )
    # compare backward: gradient of sum(out) w.r.t. the feature map
    of_out.sum().backward()
    torch_out.sum().backward()
    of_input_grad = x.oneflow.grad.to_global(
        placement=flow.env.all_device_placement("cpu"), sbp=[flow.sbp.broadcast,]
    ).to_local()
    torch_input_grad = x.pytorch.grad.detach().cpu()
    test_case.assertTrue(
        np.allclose(
            of_input_grad.numpy(), torch_input_grad.numpy(), rtol=1e-04, atol=1e-4
        )
    )
def _test_roi_align_in_fixed_data_impl(test_case, placement, sbp):
    """Check roi_align's input gradient against precomputed reference data."""
    from test_roi_align import input_np, rois_np, input_grad_np

    cpu_placement = flow.env.all_device_placement("cpu")
    broadcast_sbp = [flow.sbp.broadcast]
    feat = (
        flow.tensor(input_np, dtype=flow.float32)
        .to_global(cpu_placement, broadcast_sbp)
        .to_global(placement, sbp)
        .requires_grad_()
    )
    rois = flow.tensor(rois_np, dtype=flow.float32).to_global(
        cpu_placement, broadcast_sbp
    )
    rois = rois.to_global(
        placement, [flow.sbp.broadcast for _ in range(len(placement.ranks.shape))]
    )
    out = flow.roi_align(feat, rois, 2.0, 5, 5, 2, True)
    out.sum().backward()
    test_case.assertTrue(
        np.allclose(feat.grad.numpy(), input_grad_np, rtol=1e-04, atol=1e-4)
    )
class TestConsistentRoiAlign(flow.unittest.TestCase):
    # TODO(wyg): pytorch-1.9.0 / torchvision-0.10.0 / python-3.7.10 hit a bug
    # on this path; re-enable the random-input comparison once CI upgrades
    # its pytorch version.
    # @globaltest
    # def test_consistent_roi_align(test_case):
    #     for placement in all_placement():
    #         # TODO: roi_align only support gpu
    #         if placement.type == "cpu":
    #             continue
    #         for rois_sbp in all_sbp(placement, max_dim=0, except_partial_sum=True):
    #             _test_roi_align(test_case, placement, rois_sbp)

    def test_consistent_roi_align_in_fixed_data(test_case):
        for placement in all_placement():
            if placement.type == "cpu":
                # TODO: roi_align only has a GPU kernel for now.
                continue
            for rois_sbp in all_sbp(placement, max_dim=0, except_partial_sum=True):
                _test_roi_align_in_fixed_data_impl(test_case, placement, rois_sbp)
if __name__ == "__main__":
unittest.main()
| [
"oneflow.roi_align",
"oneflow.tensor",
"oneflow.env.all_device_placement"
] | [((1406, 1424), 'numpy.array', 'np.array', (['concated'], {}), '(concated)\n', (1414, 1424), True, 'import numpy as np\n'), ((2354, 2377), 'torch.tensor', 'pytorch.tensor', (['np_rois'], {}), '(np_rois)\n', (2368, 2377), True, 'import torch as pytorch\n'), ((2392, 2448), 'oneflow.roi_align', 'flow.roi_align', (['x.oneflow', 'of_rois', '(2.0)', '(14)', '(14)', '(2)', '(True)'], {}), '(x.oneflow, of_rois, 2.0, 14, 14, 2, True)\n', (2406, 2448), True, 'import oneflow as flow\n'), ((2465, 2590), 'torchvision.ops.roi_align', 'torchvision.ops.roi_align', (['x.pytorch', 'torch_rois'], {'spatial_scale': '(2.0)', 'output_size': '[14, 14]', 'sampling_ratio': '(2)', 'aligned': '(True)'}), '(x.pytorch, torch_rois, spatial_scale=2.0,\n output_size=[14, 14], sampling_ratio=2, aligned=True)\n', (2490, 2590), False, 'import torchvision\n'), ((3995, 4042), 'oneflow.roi_align', 'flow.roi_align', (['input', 'rois', '(2.0)', '(5)', '(5)', '(2)', '(True)'], {}), '(input, rois, 2.0, 5, 5, 2, True)\n', (4009, 4042), True, 'import oneflow as flow\n'), ((5203, 5218), 'unittest.main', 'unittest.main', ([], {}), '()\n', (5216, 5218), False, 'import unittest\n'), ((1436, 1473), 'numpy.hstack', 'np.hstack', (['(random_img_idx, concated)'], {}), '((random_img_idx, concated))\n', (1445, 1473), True, 'import numpy as np\n'), ((3798, 3834), 'oneflow.env.all_device_placement', 'flow.env.all_device_placement', (['"""cpu"""'], {}), "('cpu')\n", (3827, 3834), True, 'import oneflow as flow\n'), ((2136, 2174), 'oneflow.tensor', 'flow.tensor', (['np_rois'], {'dtype': 'flow.float'}), '(np_rois, dtype=flow.float)\n', (2147, 2174), True, 'import oneflow as flow\n'), ((2217, 2253), 'oneflow.env.all_device_placement', 'flow.env.all_device_placement', (['"""cpu"""'], {}), "('cpu')\n", (2246, 2253), True, 'import oneflow as flow\n'), ((2715, 2751), 'oneflow.env.all_device_placement', 'flow.env.all_device_placement', (['"""cpu"""'], {}), "('cpu')\n", (2744, 2751), True, 'import oneflow as flow\n'), ((3092, 
3128), 'oneflow.env.all_device_placement', 'flow.env.all_device_placement', (['"""cpu"""'], {}), "('cpu')\n", (3121, 3128), True, 'import oneflow as flow\n'), ((3738, 3778), 'oneflow.tensor', 'flow.tensor', (['rois_np'], {'dtype': 'flow.float32'}), '(rois_np, dtype=flow.float32)\n', (3749, 3778), True, 'import oneflow as flow\n'), ((3589, 3625), 'oneflow.env.all_device_placement', 'flow.env.all_device_placement', (['"""cpu"""'], {}), "('cpu')\n", (3618, 3625), True, 'import oneflow as flow\n'), ((3528, 3569), 'oneflow.tensor', 'flow.tensor', (['input_np'], {'dtype': 'flow.float32'}), '(input_np, dtype=flow.float32)\n', (3539, 3569), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import numpy as np
import oneflow as flow
from test_util import GenArgList, type_name_to_flow_type, type_name_to_np_type
import oneflow.typing as tp
import os
ninf = -float("inf")
def _logsumexp(a, b):
    """Numerically stable log(exp(a) + exp(b)) for two scalars."""
    hi, lo = (a, b) if a >= b else (b, a)
    if lo == ninf:
        # exp(-inf) contributes nothing.
        return hi
    return hi + np.log(1 + np.exp(lo - hi))
def logsumexp(*args):
    """Fold any number of scalars with pairwise ``_logsumexp``."""
    it = iter(args)
    acc = next(it)
    for term in it:
        acc = _logsumexp(acc, term)
    return acc
def log_softmax(logits, axis=0):
    """Log of softmax of `logits` along `axis`, computed stably.

    Subtracts the per-slice maximum before exponentiating to avoid overflow.
    """
    shifted = np.exp(logits - np.max(logits, axis, keepdims=True))
    probs = shifted / np.sum(shifted, axis, keepdims=True)
    return np.log(probs)
def get_target_prime(targets, b, s, blank):
    """Label at position `s` of the blank-interleaved target sequence of batch `b`.

    Even positions are the blank symbol; odd position s maps to targets[b, s // 2].
    """
    return blank if s % 2 == 0 else targets[b, s // 2]
def ctc_loss_np(log_probs, targets, input_lengths, target_lengths, blank=0):
    """NumPy reference implementation of the CTC loss forward pass.

    Args:
        log_probs: (max_input_length, batch, num_labels) log-probabilities.
        targets: (batch, max_target_length) integer label ids.
        input_lengths: per-sample number of valid time steps.
        target_lengths: per-sample number of valid target labels.
        blank: id of the CTC blank symbol.

    Returns:
        (loss, alpha): per-sample negative log-likelihood and the forward
        (alpha) lattice, reused by ``ctc_loss_grad_np``.
    """
    max_input_length, batch_size, _ = log_probs.shape
    _, max_target_length = targets.shape
    loss = np.zeros(batch_size)
    # alpha[b, t, s]: log-prob of all alignments consuming the first s symbols
    # of the blank-extended target by time t.
    alpha = np.zeros([batch_size, max_input_length, 2 * max_target_length + 1])
    alpha[:, 0] = ninf
    for b in range(0, batch_size):
        input_length = input_lengths[b]
        target_length = target_lengths[b]
        # Initialization: a path may start with a blank or the first label.
        alpha[b, 0, 0] = log_probs[0, b, blank]
        if target_length > 0:
            current_target_prime = get_target_prime(targets, b, 1, blank)
            alpha[b, 0, 1] = log_probs[0, b, current_target_prime]
        for t in range(1, input_length):
            for s in range(0, 2 * target_length + 1):
                current_target_prime = get_target_prime(targets, b, s, blank)
                # Standard CTC recursion: stay (la1), advance one position
                # (la2), or skip a blank when adjacent labels differ (la3).
                la1 = alpha[b, t - 1, s]
                if s > 0:
                    la2 = alpha[b, t - 1, s - 1]
                else:
                    la2 = ninf
                if (
                    s > 1
                    and get_target_prime(targets, b, s - 2, blank)
                    != current_target_prime
                ):
                    la3 = alpha[b, t - 1, s - 2]
                else:
                    la3 = ninf
                alpha[b, t, s] = (
                    logsumexp(la1, la2, la3) + log_probs[t, b, current_target_prime]
                )
        if target_length == 0:
            # Empty target: the only valid path is all blanks.
            loss[b] = -alpha[b, input_length - 1, 0]
        else:
            # A path may end on the last label or the trailing blank.
            l1 = alpha[b, input_length - 1, target_length * 2]
            l2 = alpha[b, input_length - 1, target_length * 2 - 1]
            loss[b] = -logsumexp(l1, l2)
    return loss, alpha
def ctc_loss_grad_np(
    grad_out,
    loss,
    alpha,
    log_probs,
    targets,
    input_lengths,
    target_lengths,
    blank=0,
    zero_infinity=False,
):
    """NumPy reference gradient of the CTC loss w.r.t. ``log_probs``.

    Args:
        grad_out: upstream gradient, one scalar per batch sample.
        loss: per-sample loss from ``ctc_loss_np``.
        alpha: forward lattice from ``ctc_loss_np``.
        log_probs / targets / input_lengths / target_lengths / blank:
            same meaning as in ``ctc_loss_np``.
        zero_infinity: if True, samples whose loss is +inf get zero gradient.

    Returns:
        Gradient array of the same shape and dtype as ``log_probs``.
    """
    max_input_length, batch_size, num_labels = log_probs.shape
    _, max_target_length = targets.shape
    # beta[b, t, s]: backward lattice, mirror of alpha.
    beta = np.zeros([batch_size, max_input_length, 2 * max_target_length + 1])
    grad = np.zeros(log_probs.shape, dtype=log_probs.dtype)
    # grad holds log-space accumulators until the final conversion below.
    grad.fill(ninf)
    for b in range(0, batch_size):
        input_length = input_lengths[b]
        target_length = target_lengths[b]
        nll = loss[b]
        if zero_infinity and nll == float("inf"):
            # Infinite loss: contribute no gradient for this sample.
            grad[:, b, :] = 0
            continue
        if input_length > 0:
            # Initialize beta at the last valid time step: paths end on the
            # trailing blank or (if any) the last label.
            beta[b, input_length - 1, :] = ninf
            beta[b, input_length - 1, 2 * target_length] = log_probs[
                input_length - 1, b, blank
            ]
            grad[input_length - 1, b, blank] = (
                alpha[b, input_length - 1, 2 * target_length]
                + beta[b, input_length - 1, 2 * target_length]
            )
            if target_length > 0:
                current_target_prime = get_target_prime(
                    targets, b, 2 * target_length - 1, blank
                )
                beta[b, input_length - 1, 2 * target_length - 1] = log_probs[
                    input_length - 1, b, current_target_prime
                ]
                grad[input_length - 1, b, current_target_prime] = (
                    alpha[b, input_length - 1, 2 * target_length - 1]
                    + beta[b, input_length - 1, 2 * target_length - 1]
                )
        # Backward recursion, accumulating alpha+beta into grad in log space.
        for t in range(input_length - 2, -1, -1):
            for s in range(2 * target_length, -1, -1):
                current_target_prime = get_target_prime(targets, b, s, blank)
                lb1 = beta[b, t + 1, s]
                if s < 2 * target_length:
                    lb2 = beta[b, t + 1, s + 1]
                else:
                    lb2 = ninf
                if (
                    s < 2 * target_length - 1
                    and get_target_prime(targets, b, s + 2, blank)
                    != current_target_prime
                ):
                    lb3 = beta[b, t + 1, s + 2]
                else:
                    lb3 = ninf
                beta[b, t, s] = (
                    logsumexp(lb1, lb2, lb3) + log_probs[t, b, current_target_prime]
                )
                alpha_beta = alpha[b, t, s] + beta[b, t, s]
                lcab = grad[t, b, current_target_prime]
                if lcab == ninf:
                    grad[t, b, current_target_prime] = alpha_beta
                else:
                    grad[t, b, current_target_prime] = logsumexp(lcab, alpha_beta)
        # Convert the log-space accumulators into the actual gradient.
        for t in range(0, input_length):
            for c in range(0, num_labels):
                res = grad[t, b, c]
                lp = log_probs[t, b, c]
                grad[t, b, c] = (np.exp(lp) - np.exp(res + nll - lp)) * grad_out[b]
        # Time steps beyond this sample's length receive zero gradient.
        if input_length < max_input_length:
            grad[input_length:max_input_length, b] = 0
    return grad
def compare_with_np(
    device_type,
    device_num,
    data_type,
    max_input_length,
    batch_size,
    num_classes,
    max_target_length,
    blank,
    reduction,
    zero_infinity,
):
    """Run oneflow's ctc_loss on random data and compare against the NumPy reference.

    Builds a v1 global-function training job whose input is perturbed by a
    zero-initialized variable so that the gradient w.r.t. the log-probs can be
    captured with ``flow.watch_diff`` and checked against ``ctc_loss_grad_np``;
    the forward output is checked against ``ctc_loss_np`` (with the requested
    reduction applied).
    """
    assert data_type in ["float32", "double"]
    assert flow.is_valid_device_tag(device_type)
    assert reduction in ["none", "mean", "sum"]
    assert zero_infinity in [False, True]
    flow.clear_default_session()
    if device_type == "cpu":
        flow.config.cpu_device_num(device_num)
    else:
        flow.config.gpu_device_num(device_num)
    flow_data_type = type_name_to_flow_type[data_type]
    np_data_type = type_name_to_np_type[data_type]
    func_config = flow.FunctionConfig()
    func_config.default_logical_view(flow.scope.consistent_view())
    func_config.default_data_type(flow_data_type)
    func_config.default_placement_scope(
        flow.scope.placement(device_type, "0:0-{}".format(device_num - 1))
    )
    # Random but valid inputs: normalized log-probs, labels in [1, C),
    # and lengths in the upper half of their maximum.
    log_probs = np.random.random(
        size=(max_input_length, batch_size, num_classes)
    ).astype(np_data_type)
    log_probs = log_softmax(log_probs, axis=2)
    targets = np.random.randint(
        1, high=num_classes, size=(batch_size, max_target_length), dtype=np.int32
    )
    input_lengths = np.random.randint(
        max_input_length / 2, high=max_input_length, size=(batch_size,), dtype=np.int32
    )
    target_lengths = np.random.randint(
        max_target_length / 2,
        high=max_target_length,
        size=(batch_size,),
        dtype=np.int32,
    )
    np_loss, np_alpha = ctc_loss_np(
        log_probs, targets, input_lengths, target_lengths, blank
    )
    # Apply zero_infinity and the reduction to the reference loss.
    np_out = np.where(np_loss == float("inf"), 0, np_loss) if zero_infinity else np_loss
    if reduction == "mean":
        np_out = np.mean(
            np.divide(
                np_out, np.clip(target_lengths, 1, a_max=None).astype(np_data_type)
            )
        )
    elif reduction == "sum":
        np_out = np.sum(np_out)
    # Upstream gradient matching the chosen reduction.
    np_grad_out = np.ones_like(np_loss, dtype=np_data_type)
    if reduction == "mean":
        np_grad_out = np.divide(
            np_grad_out, np.clip(target_lengths, 1, a_max=None).astype(np_data_type)
        )
        np_grad_out /= target_lengths.size
    np_grad = ctc_loss_grad_np(
        np_grad_out,
        np_loss,
        np_alpha,
        log_probs,
        targets,
        input_lengths,
        target_lengths,
        blank,
        zero_infinity,
    )

    def assert_loss_grad(blob: tp.Numpy):
        # Called by flow.watch_diff with the gradient w.r.t. x_var.
        assert np.allclose(blob, np_grad, atol=1e-5, equal_nan=True)

    @flow.global_function(type="train", function_config=func_config)
    def ctc_loss_job(
        log_probs: tp.Numpy.Placeholder(
            shape=(max_input_length, batch_size, num_classes), dtype=flow_data_type
        ),
        targets: tp.Numpy.Placeholder(
            shape=(batch_size, max_target_length), dtype=flow.int32
        ),
        input_lengths: tp.Numpy.Placeholder(shape=(batch_size,), dtype=flow.int32),
        target_lengths: tp.Numpy.Placeholder(shape=(batch_size,), dtype=flow.int32),
    ) -> tp.Numpy:
        with flow.scope.placement(device_type, "0:0"):
            # Zero variable added to the input so a gradient path exists.
            v = flow.get_variable(
                shape=log_probs.shape,
                dtype=flow_data_type,
                initializer=flow.zeros_initializer(),
                name="x_var",
            )
            x_var = log_probs + v
        flow.watch_diff(x_var, assert_loss_grad)
        loss = flow.ctc_loss(
            x_var,
            targets,
            input_lengths,
            target_lengths,
            blank,
            reduction,
            zero_infinity,
        )
        with flow.scope.placement(device_type, "0:0"):
            flow.optimizer.SGD(
                flow.optimizer.PiecewiseConstantScheduler([], [1e-3]), momentum=0
            ).minimize(loss)
        return loss

    of_out = ctc_loss_job(log_probs, targets, input_lengths, target_lengths)
    assert np.allclose(of_out, np_out, atol=1e-5)
def gen_arg_list(type):
    """Return a GenArgList covering the CTC-loss test configurations.

    `type` selects the device layout: "1n2d" runs on two GPUs, anything else
    on a single device (cpu and gpu).
    """
    arg_dict = OrderedDict()
    if type == "1n2d":
        arg_dict["device_type"], arg_dict["device_num"] = ["gpu"], [2]
    else:
        arg_dict["device_type"], arg_dict["device_num"] = ["cpu", "gpu"], [1]
    arg_dict.update(
        data_type=["float32"],
        max_input_length=[20],
        batch_size=[4],
        num_classes=[5],
        max_target_length=[10],
        blank=[0, 4],  # 0 <= blank < num_classes
        reduction=["mean", "none"],
        zero_infinity=[False, True],
    )
    return GenArgList(arg_dict)
@flow.unittest.skip_unless_1n1d()
class TestCTCLoss1n1d(flow.unittest.TestCase):
    def test_ctc_loss(test_case):
        """Run every single-device CTC configuration against the NumPy reference."""
        for case_args in gen_arg_list("1n1d"):
            compare_with_np(*case_args)
@flow.unittest.skip_unless_1n2d()
class TestCTCLoss1n2d(flow.unittest.TestCase):
    @unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
    def test_ctc_loss(test_case):
        """Run the two-GPU CTC configurations (skipped in CPU-only CI)."""
        for case_args in gen_arg_list("1n2d"):
            compare_with_np(*case_args)
if __name__ == "__main__":
unittest.main()
| [
"oneflow.scope.consistent_view",
"oneflow.typing.Numpy.Placeholder",
"oneflow.is_valid_device_tag",
"oneflow.clear_default_session",
"oneflow.unittest.skip_unless_1n1d",
"oneflow.config.cpu_device_num",
"oneflow.unittest.skip_unless_1n2d",
"oneflow.config.gpu_device_num",
"oneflow.watch_diff",
"on... | [((10850, 10882), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (10880, 10882), True, 'import oneflow as flow\n'), ((11042, 11074), 'oneflow.unittest.skip_unless_1n2d', 'flow.unittest.skip_unless_1n2d', ([], {}), '()\n', (11072, 11074), True, 'import oneflow as flow\n'), ((1136, 1171), 'numpy.max', 'np.max', (['logits', 'axis'], {'keepdims': '(True)'}), '(logits, axis, keepdims=True)\n', (1142, 1171), True, 'import numpy as np\n'), ((1182, 1208), 'numpy.exp', 'np.exp', (['(logits - max_value)'], {}), '(logits - max_value)\n', (1188, 1208), True, 'import numpy as np\n'), ((1223, 1255), 'numpy.sum', 'np.sum', (['exp', 'axis'], {'keepdims': '(True)'}), '(exp, axis, keepdims=True)\n', (1229, 1255), True, 'import numpy as np\n'), ((1292, 1304), 'numpy.log', 'np.log', (['dist'], {}), '(dist)\n', (1298, 1304), True, 'import numpy as np\n'), ((1621, 1641), 'numpy.zeros', 'np.zeros', (['batch_size'], {}), '(batch_size)\n', (1629, 1641), True, 'import numpy as np\n'), ((1654, 1721), 'numpy.zeros', 'np.zeros', (['[batch_size, max_input_length, 2 * max_target_length + 1]'], {}), '([batch_size, max_input_length, 2 * max_target_length + 1])\n', (1662, 1721), True, 'import numpy as np\n'), ((3419, 3486), 'numpy.zeros', 'np.zeros', (['[batch_size, max_input_length, 2 * max_target_length + 1]'], {}), '([batch_size, max_input_length, 2 * max_target_length + 1])\n', (3427, 3486), True, 'import numpy as np\n'), ((3498, 3546), 'numpy.zeros', 'np.zeros', (['log_probs.shape'], {'dtype': 'log_probs.dtype'}), '(log_probs.shape, dtype=log_probs.dtype)\n', (3506, 3546), True, 'import numpy as np\n'), ((6494, 6531), 'oneflow.is_valid_device_tag', 'flow.is_valid_device_tag', (['device_type'], {}), '(device_type)\n', (6518, 6531), True, 'import oneflow as flow\n'), ((6627, 6655), 'oneflow.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (6653, 6655), True, 'import oneflow as flow\n'), ((6913, 6934), 
'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (6932, 6934), True, 'import oneflow as flow\n'), ((7354, 7450), 'numpy.random.randint', 'np.random.randint', (['(1)'], {'high': 'num_classes', 'size': '(batch_size, max_target_length)', 'dtype': 'np.int32'}), '(1, high=num_classes, size=(batch_size, max_target_length),\n dtype=np.int32)\n', (7371, 7450), True, 'import numpy as np\n'), ((7481, 7584), 'numpy.random.randint', 'np.random.randint', (['(max_input_length / 2)'], {'high': 'max_input_length', 'size': '(batch_size,)', 'dtype': 'np.int32'}), '(max_input_length / 2, high=max_input_length, size=(\n batch_size,), dtype=np.int32)\n', (7498, 7584), True, 'import numpy as np\n'), ((7615, 7720), 'numpy.random.randint', 'np.random.randint', (['(max_target_length / 2)'], {'high': 'max_target_length', 'size': '(batch_size,)', 'dtype': 'np.int32'}), '(max_target_length / 2, high=max_target_length, size=(\n batch_size,), dtype=np.int32)\n', (7632, 7720), True, 'import numpy as np\n'), ((8219, 8260), 'numpy.ones_like', 'np.ones_like', (['np_loss'], {'dtype': 'np_data_type'}), '(np_loss, dtype=np_data_type)\n', (8231, 8260), True, 'import numpy as np\n'), ((8794, 8857), 'oneflow.global_function', 'flow.global_function', ([], {'type': '"""train"""', 'function_config': 'func_config'}), "(type='train', function_config=func_config)\n", (8814, 8857), True, 'import oneflow as flow\n'), ((10184, 10223), 'numpy.allclose', 'np.allclose', (['of_out', 'np_out'], {'atol': '(1e-05)'}), '(of_out, np_out, atol=1e-05)\n', (10195, 10223), True, 'import numpy as np\n'), ((10264, 10277), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (10275, 10277), False, 'from collections import OrderedDict\n'), ((10826, 10846), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (10836, 10846), False, 'from test_util import GenArgList, type_name_to_flow_type, type_name_to_np_type\n'), ((11344, 11359), 'unittest.main', 'unittest.main', ([], {}), '()\n', 
(11357, 11359), False, 'import unittest\n'), ((6693, 6731), 'oneflow.config.cpu_device_num', 'flow.config.cpu_device_num', (['device_num'], {}), '(device_num)\n', (6719, 6731), True, 'import oneflow as flow\n'), ((6750, 6788), 'oneflow.config.gpu_device_num', 'flow.config.gpu_device_num', (['device_num'], {}), '(device_num)\n', (6776, 6788), True, 'import oneflow as flow\n'), ((6972, 7000), 'oneflow.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (6998, 7000), True, 'import oneflow as flow\n'), ((8734, 8788), 'numpy.allclose', 'np.allclose', (['blob', 'np_grad'], {'atol': '(1e-05)', 'equal_nan': '(True)'}), '(blob, np_grad, atol=1e-05, equal_nan=True)\n', (8745, 8788), True, 'import numpy as np\n'), ((9630, 9670), 'oneflow.watch_diff', 'flow.watch_diff', (['x_var', 'assert_loss_grad'], {}), '(x_var, assert_loss_grad)\n', (9645, 9670), True, 'import oneflow as flow\n'), ((9686, 9783), 'oneflow.ctc_loss', 'flow.ctc_loss', (['x_var', 'targets', 'input_lengths', 'target_lengths', 'blank', 'reduction', 'zero_infinity'], {}), '(x_var, targets, input_lengths, target_lengths, blank,\n reduction, zero_infinity)\n', (9699, 9783), True, 'import oneflow as flow\n'), ((11143, 11177), 'os.getenv', 'os.getenv', (['"""ONEFLOW_TEST_CPU_ONLY"""'], {}), "('ONEFLOW_TEST_CPU_ONLY')\n", (11152, 11177), False, 'import os\n'), ((7191, 7257), 'numpy.random.random', 'np.random.random', ([], {'size': '(max_input_length, batch_size, num_classes)'}), '(size=(max_input_length, batch_size, num_classes))\n', (7207, 7257), True, 'import numpy as np\n'), ((8185, 8199), 'numpy.sum', 'np.sum', (['np_out'], {}), '(np_out)\n', (8191, 8199), True, 'import numpy as np\n'), ((8899, 8996), 'oneflow.typing.Numpy.Placeholder', 'tp.Numpy.Placeholder', ([], {'shape': '(max_input_length, batch_size, num_classes)', 'dtype': 'flow_data_type'}), '(shape=(max_input_length, batch_size, num_classes),\n dtype=flow_data_type)\n', (8919, 8996), True, 'import oneflow.typing as tp\n'), ((9033, 
9110), 'oneflow.typing.Numpy.Placeholder', 'tp.Numpy.Placeholder', ([], {'shape': '(batch_size, max_target_length)', 'dtype': 'flow.int32'}), '(shape=(batch_size, max_target_length), dtype=flow.int32)\n', (9053, 9110), True, 'import oneflow.typing as tp\n'), ((9157, 9216), 'oneflow.typing.Numpy.Placeholder', 'tp.Numpy.Placeholder', ([], {'shape': '(batch_size,)', 'dtype': 'flow.int32'}), '(shape=(batch_size,), dtype=flow.int32)\n', (9177, 9216), True, 'import oneflow.typing as tp\n'), ((9242, 9301), 'oneflow.typing.Numpy.Placeholder', 'tp.Numpy.Placeholder', ([], {'shape': '(batch_size,)', 'dtype': 'flow.int32'}), '(shape=(batch_size,), dtype=flow.int32)\n', (9262, 9301), True, 'import oneflow.typing as tp\n'), ((9335, 9375), 'oneflow.scope.placement', 'flow.scope.placement', (['device_type', '"""0:0"""'], {}), "(device_type, '0:0')\n", (9355, 9375), True, 'import oneflow as flow\n'), ((9889, 9929), 'oneflow.scope.placement', 'flow.scope.placement', (['device_type', '"""0:0"""'], {}), "(device_type, '0:0')\n", (9909, 9929), True, 'import oneflow as flow\n'), ((957, 970), 'numpy.exp', 'np.exp', (['(b - a)'], {}), '(b - a)\n', (963, 970), True, 'import numpy as np\n'), ((8347, 8385), 'numpy.clip', 'np.clip', (['target_lengths', '(1)'], {'a_max': 'None'}), '(target_lengths, 1, a_max=None)\n', (8354, 8385), True, 'import numpy as np\n'), ((9517, 9541), 'oneflow.zeros_initializer', 'flow.zeros_initializer', ([], {}), '()\n', (9539, 9541), True, 'import oneflow as flow\n'), ((6074, 6084), 'numpy.exp', 'np.exp', (['lp'], {}), '(lp)\n', (6080, 6084), True, 'import numpy as np\n'), ((6087, 6109), 'numpy.exp', 'np.exp', (['(res + nll - lp)'], {}), '(res + nll - lp)\n', (6093, 6109), True, 'import numpy as np\n'), ((8055, 8093), 'numpy.clip', 'np.clip', (['target_lengths', '(1)'], {'a_max': 'None'}), '(target_lengths, 1, a_max=None)\n', (8062, 8093), True, 'import numpy as np\n'), ((9979, 10033), 'oneflow.optimizer.PiecewiseConstantScheduler', 
'flow.optimizer.PiecewiseConstantScheduler', (['[]', '[0.001]'], {}), '([], [0.001])\n', (10020, 10033), True, 'import oneflow as flow\n')] |
# coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Callable, Optional
import oneflow as flow
from flowvision import datasets
from libai.data.structures import DistTensorData, Instance
class CIFAR10Dataset(datasets.CIFAR10):
    r"""`CIFAR10 <https://www.cs.toronto.edu/~kriz/cifar.html>`_ dataset wrapped for LiBai.

    Items are returned as :class:`Instance` objects whose image tensor is
    placed on placement group 0 and whose label tensor on the last group.

    Args:
        root (string): Root directory where ``cifar-10-batches-py`` exists or
            will be saved to if ``download`` is set to True.
        train (bool, optional): If True, load the training split, otherwise
            the test split.
        transform (callable, optional): Transform applied to each PIL image,
            e.g. ``transforms.RandomCrop``.
        download (bool, optional): If True, download the dataset when it is
            not already present under ``root``.
    """

    def __init__(
        self,
        root: str,
        train: bool = True,
        transform: Optional[Callable] = None,
        download: bool = False,
        **kwargs
    ):
        super().__init__(
            root=root, train=train, transform=transform, download=download, **kwargs
        )

    def __getitem__(self, index: int):
        img, target = super().__getitem__(index)
        label_tensor = flow.tensor(target, dtype=flow.long)
        return Instance(
            images=DistTensorData(img, placement_idx=0),
            labels=DistTensorData(label_tensor, placement_idx=-1),
        )
class CIFAR100Dataset(datasets.CIFAR100):
    """CIFAR-100 wrapper that yields LiBai ``Instance`` samples.

    Same contract as the CIFAR-10 variant: dataset mechanics come from
    :class:`flowvision.datasets.CIFAR100`, and each retrieved
    (image, label) pair is wrapped into distributed tensors with the
    image on placement stage 0 and the label on the final stage.

    Args:
        root: directory holding (or receiving) the CIFAR archive.
        train: use the training split when True, otherwise the test split.
        transform: optional callable applied to each PIL image.
        download: fetch the archive if it is not already present.
    """

    def __init__(
        self,
        root: str,
        train: bool = True,
        transform: Optional[Callable] = None,
        download: bool = False,
        **kwargs
    ):
        super().__init__(
            root=root, train=train, transform=transform, download=download, **kwargs
        )

    def __getitem__(self, index: int):
        image, label = super().__getitem__(index)
        label_tensor = flow.tensor(label, dtype=flow.long)
        return Instance(
            images=DistTensorData(image, placement_idx=0),
            labels=DistTensorData(label_tensor, placement_idx=-1),
        )
| [
"oneflow.tensor"
] | [((2035, 2071), 'libai.data.structures.DistTensorData', 'DistTensorData', (['img'], {'placement_idx': '(0)'}), '(img, placement_idx=0)\n', (2049, 2071), False, 'from libai.data.structures import DistTensorData, Instance\n'), ((3560, 3596), 'libai.data.structures.DistTensorData', 'DistTensorData', (['img'], {'placement_idx': '(0)'}), '(img, placement_idx=0)\n', (3574, 3596), False, 'from libai.data.structures import DistTensorData, Instance\n'), ((2107, 2143), 'oneflow.tensor', 'flow.tensor', (['target'], {'dtype': 'flow.long'}), '(target, dtype=flow.long)\n', (2118, 2143), True, 'import oneflow as flow\n'), ((3632, 3668), 'oneflow.tensor', 'flow.tensor', (['target'], {'dtype': 'flow.long'}), '(target, dtype=flow.long)\n', (3643, 3668), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import numpy as np
import oneflow as flow
import oneflow.unittest
from oneflow.test_utils.automated_test_util import *
from oneflow.test_utils.automated_test_util.util import broadcast
def _test_batch_gather(test_case, ndim, placement, sbp):
    """Check that global flow.batch_gather matches the local computation,
    comparing the gradients w.r.t. the input after a sum().backward()."""
    # Every dim is a multiple of 8 so any split sbp divides it evenly.
    dims = [random(1, 3).to(int).value() * 8 for _ in range(ndim)]
    x = random_tensor(ndim, *dims, requires_grad=True)
    local_x = flow.tensor(x.pytorch.detach().cpu().numpy(), requires_grad=True)
    global_x = x.oneflow.to_global(placement=placement, sbp=sbp)
    global_x.retain_grad()
    # Random index tensor: leading dims match x, last dim shrunk randomly.
    indices_ndim = random(1, ndim + 1).to(int).value()
    index_shape = dims[:indices_ndim]
    index_shape[-1] = random(1, dims[indices_ndim - 1]).to(int).value()
    index_np = np.random.choice(dims[indices_ndim - 1], index_shape)
    index_np = broadcast(index_np)  # keep every rank on identical indices
    local_indices = flow.tensor(index_np)
    global_indices = local_indices.to_global(
        placement=placement, sbp=[flow.sbp.broadcast for _ in range(len(sbp))]
    )
    flow.batch_gather(global_x, global_indices).sum().backward()
    flow.batch_gather(local_x, local_indices).sum().backward()
    global_grad = global_x.grad.detach().cpu().numpy()
    local_grad = local_x.grad.detach().cpu().numpy()
    test_case.assertTrue(np.allclose(global_grad, local_grad, atol=1e-5, rtol=1e-5))
class TestBatchGather(flow.unittest.TestCase):
    """Exercise _test_batch_gather under every global placement/sbp layout."""

    @globaltest
    def test_batch_gather(test_case):
        ndim = 2
        combos = (
            (placement, sbp)
            for placement in all_placement()
            for sbp in all_sbp(placement, max_dim=ndim)
        )
        for placement, sbp in combos:
            _test_batch_gather(test_case, ndim, placement, sbp)
# Entry point so this test file can also be run directly via `python <file>`.
if __name__ == "__main__":
    unittest.main()
| [
"oneflow.batch_gather",
"oneflow.test_utils.automated_test_util.util.broadcast",
"oneflow.tensor"
] | [((1348, 1402), 'numpy.random.choice', 'np.random.choice', (['dims[indices_ndim - 1]', 'indices_dims'], {}), '(dims[indices_ndim - 1], indices_dims)\n', (1364, 1402), True, 'import numpy as np\n'), ((1417, 1435), 'oneflow.test_utils.automated_test_util.util.broadcast', 'broadcast', (['indices'], {}), '(indices)\n', (1426, 1435), False, 'from oneflow.test_utils.automated_test_util.util import broadcast\n'), ((1456, 1476), 'oneflow.tensor', 'flow.tensor', (['indices'], {}), '(indices)\n', (1467, 1476), True, 'import oneflow as flow\n'), ((1626, 1669), 'oneflow.batch_gather', 'flow.batch_gather', (['global_x', 'global_indices'], {}), '(global_x, global_indices)\n', (1643, 1669), True, 'import oneflow as flow\n'), ((1718, 1759), 'oneflow.batch_gather', 'flow.batch_gather', (['local_x', 'local_indices'], {}), '(local_x, local_indices)\n', (1735, 1759), True, 'import oneflow as flow\n'), ((2319, 2334), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2332, 2334), False, 'import unittest\n')] |
import oneflow as flow
def load_checkpoint(
    model, path_to_checkpoint,
):
    """
    Load the weights stored at ``path_to_checkpoint`` into ``model``.
    Args:
        model (model): model to load the weights into.
        path_to_checkpoint (string): path to the checkpoint to load.
    Returns:
        model: the same model instance with the checkpoint state applied.
    Raises:
        RuntimeError: if the object loaded from the checkpoint is not a dict.
    """
    checkpoint = flow.load(path_to_checkpoint)
    if not isinstance(checkpoint, dict):
        raise RuntimeError(
            "No state_dict found in checkpoint file {}".format(path_to_checkpoint)
        )
    # Accept both a raw state_dict and a wrapper dict with a "state_dict" key.
    if "state_dict" in checkpoint:
        state_dict = checkpoint["state_dict"]
    else:
        state_dict = checkpoint
    model.load_state_dict(state_dict)
    return model
| [
"oneflow.load"
] | [((393, 422), 'oneflow.load', 'flow.load', (['path_to_checkpoint'], {}), '(path_to_checkpoint)\n', (402, 422), True, 'import oneflow as flow\n')] |
import oneflow as flow
import os
class OFRecordDataLoader(object):
    """OFRecord input pipeline for 299x299 image classification.

    The reader/decoder/augmentation ops are built once in ``__init__``;
    ``get_batch`` executes them to produce one (image, label) batch.

    Args:
        ofrecord_root: directory containing the ``train``/``val`` OFRecord parts.
        mode: "train" enables shuffling, random crop and horizontal flip;
            "val" uses a deterministic shorter-side resize + center crop.
        dataset_size: total number of samples, used only by ``__len__``.
        batch_size: samples per batch.
    """

    def __init__(
        self,
        ofrecord_root: str = "./ofrecord",
        mode: str = "train",  # "val"
        dataset_size: int = 9469,
        batch_size: int = 1,
    ):
        is_train = mode == "train"
        channel_last = False
        output_layout = "NHWC" if channel_last else "NCHW"
        self.train_record_reader = flow.nn.OfrecordReader(
            os.path.join(ofrecord_root, mode),
            batch_size=batch_size,
            data_part_num=1,
            part_name_suffix_length=5,
            random_shuffle=is_train,
            shuffle_after_epoch=is_train,
        )
        self.record_label_decoder = flow.nn.OfrecordRawDecoder(
            "class/label", shape=(), dtype=flow.int32
        )
        color_space = "RGB"
        height = 299
        width = 299
        # Random crop only for training; eval decodes the full image.
        self.record_image_decoder = (
            flow.nn.OFRecordImageDecoderRandomCrop("encoded", color_space=color_space)
            if is_train
            else flow.nn.OFRecordImageDecoder("encoded", color_space=color_space)
        )
        self.resize = (
            flow.nn.image.Resize(target_size=[height, width])
            if is_train
            else flow.nn.image.Resize(
                resize_side="shorter", keep_aspect_ratio=True, target_size=299
            )
        )
        # Horizontal-flip RNG is only needed during training.
        self.flip = flow.nn.CoinFlip(batch_size=batch_size) if is_train else None
        rgb_mean = [123.68, 116.779, 103.939]
        rgb_std = [58.393, 57.12, 57.375]
        self.crop_mirror_norm = (
            flow.nn.CropMirrorNormalize(
                color_space=color_space,
                output_layout=output_layout,
                mean=rgb_mean,
                std=rgb_std,
                output_dtype=flow.float,
            )
            if is_train
            else flow.nn.CropMirrorNormalize(
                color_space=color_space,
                output_layout=output_layout,
                crop_h=height,
                crop_w=width,
                crop_pos_y=0.5,
                crop_pos_x=0.5,
                mean=rgb_mean,
                std=rgb_std,
                output_dtype=flow.float,
            )
        )
        self.batch_size = batch_size
        self.dataset_size = dataset_size

    def __len__(self):
        """Number of whole batches per epoch (trailing remainder is dropped)."""
        return self.dataset_size // self.batch_size

    def get_batch(self):
        """Read, decode and augment one batch.

        Returns:
            (image, label) tensors for one batch.
        """
        train_record = self.train_record_reader()
        label = self.record_label_decoder(train_record)
        image_raw_buffer = self.record_image_decoder(train_record)
        image = self.resize(image_raw_buffer)[0]
        # BUG FIX: identity check `is not None` instead of `!= None`.
        rng = self.flip() if self.flip is not None else None
        image = self.crop_mirror_norm(image, rng)
        return image, label
| [
"oneflow.nn.OFRecordImageDecoder",
"oneflow.nn.OfrecordRawDecoder",
"oneflow.nn.OFRecordImageDecoderRandomCrop",
"oneflow.nn.CropMirrorNormalize",
"oneflow.nn.image.Resize",
"oneflow.nn.CoinFlip"
] | [((727, 796), 'oneflow.nn.OfrecordRawDecoder', 'flow.nn.OfrecordRawDecoder', (['"""class/label"""'], {'shape': '()', 'dtype': 'flow.int32'}), "('class/label', shape=(), dtype=flow.int32)\n", (753, 796), True, 'import oneflow as flow\n'), ((412, 445), 'os.path.join', 'os.path.join', (['ofrecord_root', 'mode'], {}), '(ofrecord_root, mode)\n', (424, 445), False, 'import os\n'), ((940, 1014), 'oneflow.nn.OFRecordImageDecoderRandomCrop', 'flow.nn.OFRecordImageDecoderRandomCrop', (['"""encoded"""'], {'color_space': 'color_space'}), "('encoded', color_space=color_space)\n", (978, 1014), True, 'import oneflow as flow\n'), ((1063, 1127), 'oneflow.nn.OFRecordImageDecoder', 'flow.nn.OFRecordImageDecoder', (['"""encoded"""'], {'color_space': 'color_space'}), "('encoded', color_space=color_space)\n", (1091, 1127), True, 'import oneflow as flow\n'), ((1175, 1224), 'oneflow.nn.image.Resize', 'flow.nn.image.Resize', ([], {'target_size': '[height, width]'}), '(target_size=[height, width])\n', (1195, 1224), True, 'import oneflow as flow\n'), ((1273, 1361), 'oneflow.nn.image.Resize', 'flow.nn.image.Resize', ([], {'resize_side': '"""shorter"""', 'keep_aspect_ratio': '(True)', 'target_size': '(299)'}), "(resize_side='shorter', keep_aspect_ratio=True,\n target_size=299)\n", (1293, 1361), True, 'import oneflow as flow\n'), ((1419, 1458), 'oneflow.nn.CoinFlip', 'flow.nn.CoinFlip', ([], {'batch_size': 'batch_size'}), '(batch_size=batch_size)\n', (1435, 1458), True, 'import oneflow as flow\n'), ((1623, 1762), 'oneflow.nn.CropMirrorNormalize', 'flow.nn.CropMirrorNormalize', ([], {'color_space': 'color_space', 'output_layout': 'output_layout', 'mean': 'rgb_mean', 'std': 'rgb_std', 'output_dtype': 'flow.float'}), '(color_space=color_space, output_layout=\n output_layout, mean=rgb_mean, std=rgb_std, output_dtype=flow.float)\n', (1650, 1762), True, 'import oneflow as flow\n'), ((1901, 2106), 'oneflow.nn.CropMirrorNormalize', 'flow.nn.CropMirrorNormalize', ([], {'color_space': 'color_space', 
'output_layout': 'output_layout', 'crop_h': 'height', 'crop_w': 'width', 'crop_pos_y': '(0.5)', 'crop_pos_x': '(0.5)', 'mean': 'rgb_mean', 'std': 'rgb_std', 'output_dtype': 'flow.float'}), '(color_space=color_space, output_layout=\n output_layout, crop_h=height, crop_w=width, crop_pos_y=0.5, crop_pos_x=\n 0.5, mean=rgb_mean, std=rgb_std, output_dtype=flow.float)\n', (1928, 2106), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import unittest
import numpy as np
import oneflow as flow
import oneflow.typing as tp
# Requires 2 GPU devices, hence skipped in CPU-only CI runs.
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
def test(test_case):
    """Round-trip variable values through the lazy-mode interface-blob API.

    Writes variables "x" and "y" via set_interface_blob_value, reads them
    back via get_interface_blob_value, and finally checks that the compiled
    job computes their element-wise sum with the injected values.
    """
    flow.config.gpu_device_num(2)
    @flow.global_function()
    def add() -> tp.Numpy:
        with flow.scope.placement("gpu", "0:0-1"):
            x = flow.get_variable(
                name="x", shape=(2, 3), initializer=flow.random_uniform_initializer(),
            )
            y = flow.get_variable(
                name="y", shape=(2, 3), initializer=flow.random_uniform_initializer(),
            )
            return flow.math.add_n([x, y])
    # CheckPoint.init() materializes the variables before we overwrite them.
    check_point = flow.train.CheckPoint()
    check_point.init()
    x_value = np.random.random((2, 3)).astype(np.float32)
    y_value = np.random.random((2, 3)).astype(np.float32)
    flow.experimental.set_interface_blob_value("x", x_value)
    flow.experimental.set_interface_blob_value("y", y_value)
    # Reading back must yield exactly what was written ...
    test_case.assertTrue(
        np.array_equal(x_value, flow.experimental.get_interface_blob_value("x"))
    )
    test_case.assertTrue(
        np.array_equal(y_value, flow.experimental.get_interface_blob_value("y"))
    )
    # ... and the job must compute with the injected values.
    test_case.assertTrue(np.array_equal(add(), x_value + y_value))
| [
"oneflow.global_function",
"oneflow.math.add_n",
"oneflow.experimental.set_interface_blob_value",
"oneflow.scope.placement",
"oneflow.config.gpu_device_num",
"oneflow.random_uniform_initializer",
"oneflow.experimental.get_interface_blob_value",
"oneflow.train.CheckPoint"
] | [((791, 820), 'oneflow.config.gpu_device_num', 'flow.config.gpu_device_num', (['(2)'], {}), '(2)\n', (817, 820), True, 'import oneflow as flow\n'), ((827, 849), 'oneflow.global_function', 'flow.global_function', ([], {}), '()\n', (847, 849), True, 'import oneflow as flow\n'), ((1262, 1285), 'oneflow.train.CheckPoint', 'flow.train.CheckPoint', ([], {}), '()\n', (1283, 1285), True, 'import oneflow as flow\n'), ((1429, 1485), 'oneflow.experimental.set_interface_blob_value', 'flow.experimental.set_interface_blob_value', (['"""x"""', 'x_value'], {}), "('x', x_value)\n", (1471, 1485), True, 'import oneflow as flow\n'), ((1490, 1546), 'oneflow.experimental.set_interface_blob_value', 'flow.experimental.set_interface_blob_value', (['"""y"""', 'y_value'], {}), "('y', y_value)\n", (1532, 1546), True, 'import oneflow as flow\n'), ((707, 741), 'os.getenv', 'os.getenv', (['"""ONEFLOW_TEST_CPU_ONLY"""'], {}), "('ONEFLOW_TEST_CPU_ONLY')\n", (716, 741), False, 'import os\n'), ((890, 926), 'oneflow.scope.placement', 'flow.scope.placement', (['"""gpu"""', '"""0:0-1"""'], {}), "('gpu', '0:0-1')\n", (910, 926), True, 'import oneflow as flow\n'), ((1219, 1242), 'oneflow.math.add_n', 'flow.math.add_n', (['[x, y]'], {}), '([x, y])\n', (1234, 1242), True, 'import oneflow as flow\n'), ((1323, 1347), 'numpy.random.random', 'np.random.random', (['(2, 3)'], {}), '((2, 3))\n', (1339, 1347), True, 'import numpy as np\n'), ((1381, 1405), 'numpy.random.random', 'np.random.random', (['(2, 3)'], {}), '((2, 3))\n', (1397, 1405), True, 'import numpy as np\n'), ((1605, 1652), 'oneflow.experimental.get_interface_blob_value', 'flow.experimental.get_interface_blob_value', (['"""x"""'], {}), "('x')\n", (1647, 1652), True, 'import oneflow as flow\n'), ((1718, 1765), 'oneflow.experimental.get_interface_blob_value', 'flow.experimental.get_interface_blob_value', (['"""y"""'], {}), "('y')\n", (1760, 1765), True, 'import oneflow as flow\n'), ((1015, 1048), 'oneflow.random_uniform_initializer', 
'flow.random_uniform_initializer', ([], {}), '()\n', (1046, 1048), True, 'import oneflow as flow\n'), ((1151, 1184), 'oneflow.random_uniform_initializer', 'flow.random_uniform_initializer', ([], {}), '()\n', (1182, 1184), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT license.
# const fold Optimizer.
# if op's inputs are all const then do op computation when building the graph to improve performance
# for example, input of transpose node is const then we can do transpose statically instead of at runtime
from oneflow.python.framework import id_util
from .. import util
from .optimizer_base import GraphOptimizerBase
# pylint: disable=logging-not-lazy,unused-argument,missing-docstring
# key is op_type, value is the function to compute outputs
# the schema of function is: inputs are(node, graph), output is a list of constant values.
_func_map = {}
def _register_func(op_type):
def _internal_fun(func):
_func_map[op_type] = func
return func
return _internal_fun
class ConstFoldOptimizer(GraphOptimizerBase):
    """Constant-folding pass: ops whose inputs are all Const are evaluated
    at graph-build time and replaced by precomputed Const nodes.

    Handlers for individual op types are registered in the module-level
    ``_func_map`` via the ``@_register_func`` decorator.
    """
    def __init__(self):  # pylint: disable=useless-super-delegation
        super(ConstFoldOptimizer, self).__init__()
    def _Optimize(self, graph):
        # Recurse into subgraphs via the base-class driver.
        return self._ApplyOptimization(graph, self._OptimizeAtCurrentGraphLevel)
    def _OptimizeAtCurrentGraphLevel(self, graph):
        # Keep sweeping until a full pass folds nothing: folding one node
        # can make its consumers foldable in the next pass.
        graph_changed = True
        while graph_changed:
            graph_changed = False
            ops = graph.get_nodes()
            for op in ops:
                if self._ShouldSkip(op):
                    continue
                if self._FoldNode(op, graph):
                    graph_changed = True
                    self.graph_been_opt = True
        return graph
    @staticmethod
    def _ShouldSkip(node):
        # only support onnx official op for now, op in other domain is not supported for now
        if not util.is_onnx_domain(node.domain):
            return True
        # Already-constant nodes and graph inputs have nothing to fold.
        if node.is_const() or node.is_graph_input():
            return True
        skip_type = ["Identity"]
        if node.op_type in skip_type:
            return True
        return False
    def _FoldNode(self, node, graph):
        """Fold ``node`` if all its inputs are const and it is not a graph output.

        Returns True when the node was folded (i.e. the graph changed).
        """
        if self._AllInputsAreConst(node.input_nodes) and not self._IsGraphOutput(
            node, graph
        ):
            process_func = _func_map.get(node.op_type, None)
            if process_func:
                const_outputs = process_func(node, graph)
                self._ReplaceNodeWithConst(node, graph, const_outputs)
                return True
            # Foldable in principle, but no handler registered for this op type.
            self.logger.debug(
                "need to add function to fold op %s whose op_type is %s",
                node.name,
                node.op_type,
            )
        return False
    @staticmethod
    def _AllInputsAreConst(nodes):
        # `if node` skips absent/optional inputs (None entries).
        return all(node.is_const() for node in nodes if node)
    @staticmethod
    def _IsGraphOutput(node, graph):
        # Truthy (non-empty set) when any node output is also a graph output.
        node_out_set = set(node.output_tensor_names)
        graph_out_set = set(graph.outputs)
        return node_out_set.intersection(graph_out_set)
    @staticmethod
    def _ReplaceNodeWithConst(node, graph, vals):
        util.MakeSure(
            len(node.output_tensor_names) == len(vals),
            "length of node outputs and const vals should be same",
        )
        # One new Const node per original output; rewire all consumers to it.
        for old_input, val in zip(node.output_tensor_names, vals):
            const_node = graph.MakeConst(id_util.UniqueStr("const_fold_opt"), val)
            graph.set_dtype(
                const_node.output_tensor_names[0], util.Numpy2OnnxDtype(val.dtype)
            )
            graph.set_shape(const_node.output_tensor_names[0], val.shape)
            graph.ReplaceAllInputs(
                graph.get_nodes(), old_input, const_node.output_tensor_names[0]
            )
        graph.RemoveNode(node.name)
    @staticmethod
    @_register_func("Cast")
    def _FoldCast(node, graph):
        # Statically apply the dtype conversion requested by the "to" attribute.
        const_val = node.input_nodes[0].get_tensor_value(as_list=False)
        np_dtype = util.ONNX_2_NUMPY_DTYPE[node.attrs["to"]]
        const_val_after_cast = const_val.astype(np_dtype)
        return [const_val_after_cast]
    @staticmethod
    @_register_func("Transpose")
    def _FoldTranspose(node, graph) -> list:
        # perm=None falls through to numpy's default (reverse all axes).
        const_val = node.input_nodes[0].get_tensor_value(as_list=False)
        perm = node.attrs.get("perm", None)
        const_val_after_trans = const_val.transpose(perm)
        return [const_val_after_trans]
    @staticmethod
    @_register_func("Unsqueeze")
    def _FoldUnsqueeze(node, graph):
        """
        numpy expand_dims only supports to unsqueeze one dim one time, so reshape is used to simplify the logic
        """
        const_val = node.input_nodes[0].get_tensor_value(as_list=False)
        axes = node.attrs["axes"]
        util.MakeSure(
            all(axis >= 0 for axis in axes),
            "onnx spec says it only supports positive axis",
        )
        shape_in = const_val.shape
        dims_out = len(shape_in) + len(axes)
        # calculate the shape of output according to onnx Unsqueeze's spec
        # https://github.com/onnx/onnx/blob/master/docs/Operators.md#Unsqueeze
        # Place a 1 at each unsqueezed axis, then fill the remaining slots
        # with the original dims in order.
        shape_in = iter(shape_in)
        shape_out = [None] * dims_out
        for ind in axes:
            shape_out[ind] = 1
        for ind, val in enumerate(shape_out):
            if val is None:
                shape_out[ind] = next(shape_in)
        const_val_after_unsqueeze = const_val.reshape(shape_out)
        return [const_val_after_unsqueeze]
| [
"oneflow.python.framework.id_util.UniqueStr"
] | [((3972, 4007), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""const_fold_opt"""'], {}), "('const_fold_opt')\n", (3989, 4007), False, 'from oneflow.python.framework import id_util\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import, division, print_function
import oneflow as flow
import oneflow.core.operator.op_conf_pb2 as op_conf_util
def _conv2d_layer(
    name,
    input,
    filters,
    kernel_size=3,
    strides=1,
    padding="SAME",
    data_format="NCHW",
    dilation_rate=1,
    activation=op_conf_util.kSigmoid,
    use_bias=True,
    trainable=True,
    weight_initializer=flow.random_uniform_initializer(),
    bias_initializer=flow.constant_initializer(),
):
    """Conv2d + optional bias + optional activation.

    Args:
        name: op/variable name prefix; variables are ``<name>-weight``
            and ``<name>-bias``.
        input: input blob; the weight shape is derived from ``input.shape[1]``,
            i.e. channels-first layout is assumed for weight construction.
        filters: number of output channels.
        kernel_size: int or (h, w) pair.
        strides, padding, data_format, dilation_rate: forwarded to
            ``flow.nn.conv2d``.
        activation: op_conf_util activation enum (kRelu / kSigmoid) or None.
        trainable: whether the created variables receive gradient updates.
        use_bias: add a per-channel bias when True.
        weight_initializer, bias_initializer: variable initializers.
    Returns:
        The output blob after conv (+ bias, + activation).
    Raises:
        NotImplementedError: for unsupported activation enums.
    """
    if isinstance(kernel_size, int):
        kernel_size = (kernel_size, kernel_size)
    else:
        kernel_size = tuple(kernel_size)
    weight_shape = (filters, input.shape[1]) + kernel_size
    weight = flow.get_variable(
        name + "-weight",
        shape=weight_shape,
        dtype=input.dtype,
        initializer=weight_initializer,
        trainable=trainable,  # BUG FIX: `trainable` was accepted but never forwarded
    )
    output = flow.nn.conv2d(
        input, weight, strides, padding, data_format, dilation_rate, name=name
    )
    if use_bias:
        bias = flow.get_variable(
            name + "-bias",
            shape=(filters,),
            dtype=input.dtype,
            initializer=bias_initializer,
            trainable=trainable,  # BUG FIX: see weight above
        )
        output = flow.nn.bias_add(output, bias, data_format)
    if activation is not None:
        if activation == op_conf_util.kRelu:
            output = flow.math.relu(output)
        elif activation == op_conf_util.kSigmoid:
            output = flow.math.sigmoid(output)
        else:
            raise NotImplementedError
    return output
def InceptionA(in_blob, index):
    """Inception-A block, emitted under the name scope ``mixed_<index>``.

    Four parallel branches (1x1, 5x5, double 3x3, avg-pool + 1x1) are
    concatenated along the channel axis.
    """
    with flow.scope.namespace("mixed_{}".format(index)):
        with flow.scope.namespace("branch1x1"):
            b1x1 = _conv2d_layer(
                "conv0", in_blob, filters=64, kernel_size=1, strides=1, padding="SAME"
            )
        with flow.scope.namespace("branch5x5"):
            b5x5 = _conv2d_layer(
                "conv0", in_blob, filters=48, kernel_size=1, strides=1, padding="SAME"
            )
            b5x5 = _conv2d_layer(
                "conv1",
                b5x5,
                filters=64,
                kernel_size=5,
                strides=1,
                padding="SAME",
            )
        with flow.scope.namespace("branch3x3dbl"):
            b3x3 = _conv2d_layer(
                "conv0", in_blob, filters=64, kernel_size=1, strides=1, padding="SAME"
            )
            b3x3 = _conv2d_layer(
                "conv1",
                b3x3,
                filters=96,
                kernel_size=3,
                strides=1,
                padding="SAME",
            )
            b3x3 = _conv2d_layer(
                "conv2",
                b3x3,
                filters=96,
                kernel_size=3,
                strides=1,
                padding="SAME",
            )
        with flow.scope.namespace("branch_pool"):
            bpool = flow.nn.avg_pool2d(
                in_blob,
                ksize=3,
                strides=1,
                padding="SAME",
                data_format="NCHW",
                name="pool",
            )
            # Only mixed_0 (index == 0) uses 32 pool-projection channels.
            bpool = _conv2d_layer(
                "conv",
                bpool,
                filters=32 if index == 0 else 64,
                kernel_size=1,
                strides=1,
                padding="SAME",
            )
        return flow.concat(
            values=[b1x1, b5x5, b3x3, bpool], axis=1, name="concat"
        )
def InceptionB(in_blob, index):
    """Inception-B reduction block (stride-2 ops), emitted as ``mixed_<index>``.

    Three parallel branches — stride-2 3x3 conv, double-3x3 ending in a
    stride-2 conv, and a stride-2 max pool — concatenated on channels.
    """
    with flow.scope.namespace("mixed_{}".format(index)):
        with flow.scope.namespace("branch3x3"):
            b3x3 = _conv2d_layer(
                "conv0", in_blob, filters=384, kernel_size=3, strides=2, padding="VALID"
            )
        with flow.scope.namespace("branch3x3dbl"):
            dbl = _conv2d_layer(
                "conv0", in_blob, filters=64, kernel_size=1, strides=1, padding="SAME"
            )
            dbl = _conv2d_layer(
                "conv1",
                dbl,
                filters=96,
                kernel_size=3,
                strides=1,
                padding="SAME",
            )
            dbl = _conv2d_layer(
                "conv2",
                dbl,
                filters=96,
                kernel_size=3,
                strides=2,
                padding="VALID",
            )
        with flow.scope.namespace("branch_pool"):
            bpool = flow.nn.max_pool2d(
                in_blob,
                ksize=3,
                strides=2,
                padding="VALID",
                data_format="NCHW",
                name="pool0",
            )
        return flow.concat(
            values=[b3x3, dbl, bpool], axis=1, name="concat"
        )
def InceptionC(in_blob, index, filters):
    """Inception-C block (factorized 7x7 convs), emitted as ``mixed_<index>``.

    Four parallel branches — 1x1, 7x7 (as 1x7 + 7x1), double 7x7, and
    avg-pool + 1x1 — concatenated on channels.  ``filters`` sets the
    intermediate channel count of the 7x7 branches.
    """
    with flow.scope.namespace("mixed_{}".format(index)):
        with flow.scope.namespace("branch1x1"):
            b1x1 = _conv2d_layer(
                "conv0", in_blob, filters=192, kernel_size=1, strides=1, padding="SAME"
            )
        with flow.scope.namespace("branch7x7"):
            b7x7 = _conv2d_layer(
                "conv0",
                in_blob,
                filters=filters,
                kernel_size=1,
                strides=1,
                padding="SAME",
            )
            b7x7 = _conv2d_layer(
                "conv1",
                b7x7,
                filters=filters,
                kernel_size=[1, 7],
                strides=1,
                padding="SAME",
            )
            b7x7 = _conv2d_layer(
                "conv2",
                b7x7,
                filters=192,
                kernel_size=[7, 1],
                strides=[1, 1],
                padding="SAME",
            )
        with flow.scope.namespace("branch7x7dbl"):
            b7x7dbl = _conv2d_layer(
                "conv0",
                in_blob,
                filters=filters,
                kernel_size=1,
                strides=1,
                padding="SAME",
            )
            b7x7dbl = _conv2d_layer(
                "conv1",
                b7x7dbl,
                filters=filters,
                kernel_size=[7, 1],
                strides=1,
                padding="SAME",
            )
            b7x7dbl = _conv2d_layer(
                "conv2",
                b7x7dbl,
                filters=filters,
                kernel_size=[1, 7],
                strides=1,
                padding="SAME",
            )
            b7x7dbl = _conv2d_layer(
                "conv3",
                b7x7dbl,
                filters=filters,
                kernel_size=[7, 1],
                strides=1,
                padding="SAME",
            )
            b7x7dbl = _conv2d_layer(
                "conv4",
                b7x7dbl,
                filters=192,
                kernel_size=[1, 7],
                strides=1,
                padding="SAME",
            )
        with flow.scope.namespace("branch_pool"):
            bpool = flow.nn.avg_pool2d(
                in_blob,
                ksize=3,
                strides=1,
                padding="SAME",
                data_format="NCHW",
                name="pool",
            )
            bpool = _conv2d_layer(
                "conv",
                bpool,
                filters=192,
                kernel_size=[1, 1],
                strides=1,
                padding="SAME",
            )
        return flow.concat(
            values=[b1x1, b7x7, b7x7dbl, bpool], axis=1, name="concat"
        )
def InceptionD(in_blob, index):
    """Inception-D reduction block (stride-2 ops), emitted as ``mixed_<index>``.

    Three parallel branches — 1x1 + stride-2 3x3, factorized 7x7 ending in
    a stride-2 3x3, and a stride-2 max pool — concatenated on channels.
    """
    with flow.scope.namespace("mixed_{}".format(index)):
        with flow.scope.namespace("branch3x3"):
            b3x3 = _conv2d_layer(
                "conv0", in_blob, filters=192, kernel_size=1, strides=1, padding="SAME"
            )
            b3x3 = _conv2d_layer(
                "conv1",
                b3x3,
                filters=320,
                kernel_size=3,
                strides=2,
                padding="VALID",
            )
        with flow.scope.namespace("branch7x7x3"):
            b7 = _conv2d_layer(
                "conv0", in_blob, filters=192, kernel_size=1, strides=1, padding="SAME"
            )
            b7 = _conv2d_layer(
                "conv1",
                b7,
                filters=192,
                kernel_size=[1, 7],
                strides=1,
                padding="SAME",
            )
            b7 = _conv2d_layer(
                "conv2",
                b7,
                filters=192,
                kernel_size=[7, 1],
                strides=1,
                padding="SAME",
            )
            b7 = _conv2d_layer(
                "conv3",
                b7,
                filters=192,
                kernel_size=3,
                strides=2,
                padding="VALID",
            )
        with flow.scope.namespace("branch_pool"):
            bpool = flow.nn.max_pool2d(
                in_blob,
                ksize=3,
                strides=2,
                padding="VALID",
                data_format="NCHW",
                name="pool",
            )
        return flow.concat(
            values=[b3x3, b7, bpool], axis=1, name="concat"
        )
def InceptionE(in_blob, index):
    """Inception-E block (expanded filter banks), emitted as ``mixed_<index>``.

    The 3x3 branches each split into parallel 1x3 and 3x1 convs whose
    outputs are concatenated inside the branch; the final output is the
    channel concatenation of all four branches.
    """
    with flow.scope.namespace("mixed_{}".format(index)):
        with flow.scope.namespace("branch1x1"):
            b1x1 = _conv2d_layer(
                "conv0", in_blob, filters=320, kernel_size=1, strides=1, padding="SAME"
            )
        with flow.scope.namespace("branch3x3"):
            stem = _conv2d_layer(
                "conv0", in_blob, filters=384, kernel_size=1, strides=1, padding="SAME"
            )
            # 1x3 and 3x1 convs both fan out from the same 1x1 stem.
            split_a = _conv2d_layer(
                "conv1",
                stem,
                filters=384,
                kernel_size=[1, 3],
                strides=1,
                padding="SAME",
            )
            split_b = _conv2d_layer(
                "conv2",
                stem,
                filters=384,
                kernel_size=[3, 1],
                strides=[1, 1],
                padding="SAME",
            )
            b3x3 = flow.concat(
                values=[split_a, split_b], axis=1, name="concat"
            )
        with flow.scope.namespace("branch3x3dbl"):
            dbl = _conv2d_layer(
                "conv0", in_blob, filters=448, kernel_size=1, strides=1, padding="SAME"
            )
            dbl = _conv2d_layer(
                "conv1",
                dbl,
                filters=384,
                kernel_size=3,
                strides=1,
                padding="SAME",
            )
            # Same 1x3 / 3x1 fan-out, here from the 3x3 conv output.
            dbl_a = _conv2d_layer(
                "conv2",
                dbl,
                filters=384,
                kernel_size=[1, 3],
                strides=1,
                padding="SAME",
            )
            dbl_b = _conv2d_layer(
                "conv3",
                dbl,
                filters=384,
                kernel_size=[3, 1],
                strides=1,
                padding="SAME",
            )
            b3x3dbl = flow.concat(
                values=[dbl_a, dbl_b], axis=1, name="concat"
            )
        with flow.scope.namespace("branch_pool"):
            bpool = flow.nn.avg_pool2d(
                in_blob,
                ksize=3,
                strides=1,
                padding="SAME",
                data_format="NCHW",
                name="pool",
            )
            bpool = _conv2d_layer(
                "conv",
                bpool,
                filters=192,
                kernel_size=[1, 1],
                strides=1,
                padding="SAME",
            )
        return flow.concat(
            values=[b1x1, b3x3, b3x3dbl, bpool], axis=1, name="concat"
        )
def inceptionv3(images, labels, trainable=True):
    """Build the Inception-v3 forward graph and return the 1001-way logits.

    NOTE(review): `labels` and `trainable` are accepted but never used in
    this body — presumably kept for signature parity with other model
    builders in the project; confirm before relying on them.
    """
    # Stem: three convs + max pool, then two more convs + max pool.
    conv0 = _conv2d_layer(
        "conv0", images, filters=32, kernel_size=3, strides=2, padding="VALID"
    )
    conv1 = _conv2d_layer(
        "conv1", conv0, filters=32, kernel_size=3, strides=1, padding="VALID"
    )
    conv2 = _conv2d_layer(
        "conv2", conv1, filters=64, kernel_size=3, strides=1, padding="SAME"
    )
    pool1 = flow.nn.max_pool2d(
        conv2, ksize=3, strides=2, padding="VALID", data_format="NCHW", name="pool1"
    )
    conv3 = _conv2d_layer(
        "conv3", pool1, filters=80, kernel_size=1, strides=1, padding="VALID"
    )
    conv4 = _conv2d_layer(
        "conv4", conv3, filters=192, kernel_size=3, strides=1, padding="VALID"
    )
    pool2 = flow.nn.max_pool2d(
        conv4, ksize=3, strides=2, padding="VALID", data_format="NCHW", name="pool2"
    )
    # mixed_0 ~ mixed_2
    mixed_0 = InceptionA(pool2, 0)
    mixed_1 = InceptionA(mixed_0, 1)
    mixed_2 = InceptionA(mixed_1, 2)
    # mixed_3
    mixed_3 = InceptionB(mixed_2, 3)
    # mixed_4 ~ mixed_7
    mixed_4 = InceptionC(mixed_3, 4, 128)
    mixed_5 = InceptionC(mixed_4, 5, 160)
    mixed_6 = InceptionC(mixed_5, 6, 160)
    mixed_7 = InceptionC(mixed_6, 7, 192)
    # mixed_8
    mixed_8 = InceptionD(mixed_7, 8)
    # mixed_9 ~ mixed_10
    mixed_9 = InceptionE(mixed_8, 9)
    mixed_10 = InceptionE(mixed_9, 10)
    # pool3: global average pooling before the classifier.
    pool3 = flow.nn.avg_pool2d(
        mixed_10, ksize=8, strides=1, padding="VALID", data_format="NCHW", name="pool3"
    )
    # Classifier head: flatten + explicit matmul/bias (see TODO below for
    # why layers.dense is not used here).
    with flow.scope.namespace("logits"):
        pool3 = flow.reshape(pool3, [pool3.shape[0], -1])
        # TODO: Need to transpose weight when converting model from TF to OF if
        # you want to use layers.dense interface.
        # fc1 = flow.layers.dense(
        #     pool3,
        #     1001,
        #     activation=None,
        #     use_bias=False,
        #     kernel_initializer=flow.truncated_normal(0.816496580927726),
        #     bias_initializer=flow.constant_initializer(),
        #     name="fc1",
        # )
        weight = flow.get_variable(
            "fc1-weight",
            shape=(pool3.shape[1], 1001),
            dtype=flow.float,
            initializer=flow.truncated_normal(0.816496580927726),
            model_name="weight",
        )
        bias = flow.get_variable(
            "fc1-bias",
            shape=(1001,),
            dtype=flow.float,
            initializer=flow.constant_initializer(),
            model_name="bias",
        )
        fc1 = flow.matmul(pool3, weight)
        fc1 = flow.nn.bias_add(fc1, bias)
    return fc1
| [
"oneflow.nn.conv2d",
"oneflow.scope.namespace",
"oneflow.matmul",
"oneflow.math.sigmoid",
"oneflow.truncated_normal",
"oneflow.concat",
"oneflow.constant_initializer",
"oneflow.get_variable",
"oneflow.reshape",
"oneflow.random_uniform_initializer",
"oneflow.nn.avg_pool2d",
"oneflow.nn.bias_add... | [((990, 1023), 'oneflow.random_uniform_initializer', 'flow.random_uniform_initializer', ([], {}), '()\n', (1021, 1023), True, 'import oneflow as flow\n'), ((1046, 1073), 'oneflow.constant_initializer', 'flow.constant_initializer', ([], {}), '()\n', (1071, 1073), True, 'import oneflow as flow\n'), ((1287, 1397), 'oneflow.get_variable', 'flow.get_variable', (["(name + '-weight')"], {'shape': 'weight_shape', 'dtype': 'input.dtype', 'initializer': 'weight_initializer'}), "(name + '-weight', shape=weight_shape, dtype=input.dtype,\n initializer=weight_initializer)\n", (1304, 1397), True, 'import oneflow as flow\n'), ((1446, 1536), 'oneflow.nn.conv2d', 'flow.nn.conv2d', (['input', 'weight', 'strides', 'padding', 'data_format', 'dilation_rate'], {'name': 'name'}), '(input, weight, strides, padding, data_format, dilation_rate,\n name=name)\n', (1460, 1536), True, 'import oneflow as flow\n'), ((14483, 14584), 'oneflow.nn.max_pool2d', 'flow.nn.max_pool2d', (['conv2'], {'ksize': '(3)', 'strides': '(2)', 'padding': '"""VALID"""', 'data_format': '"""NCHW"""', 'name': '"""pool1"""'}), "(conv2, ksize=3, strides=2, padding='VALID', data_format=\n 'NCHW', name='pool1')\n", (14501, 14584), True, 'import oneflow as flow\n'), ((14829, 14930), 'oneflow.nn.max_pool2d', 'flow.nn.max_pool2d', (['conv4'], {'ksize': '(3)', 'strides': '(2)', 'padding': '"""VALID"""', 'data_format': '"""NCHW"""', 'name': '"""pool2"""'}), "(conv4, ksize=3, strides=2, padding='VALID', data_format=\n 'NCHW', name='pool2')\n", (14847, 14930), True, 'import oneflow as flow\n'), ((15498, 15601), 'oneflow.nn.avg_pool2d', 'flow.nn.avg_pool2d', (['mixed_10'], {'ksize': '(8)', 'strides': '(1)', 'padding': '"""VALID"""', 'data_format': '"""NCHW"""', 'name': '"""pool3"""'}), "(mixed_10, ksize=8, strides=1, padding='VALID',\n data_format='NCHW', name='pool3')\n", (15516, 15601), True, 'import oneflow as flow\n'), ((1579, 1683), 'oneflow.get_variable', 'flow.get_variable', (["(name + '-bias')"], 
{'shape': '(filters,)', 'dtype': 'input.dtype', 'initializer': 'bias_initializer'}), "(name + '-bias', shape=(filters,), dtype=input.dtype,\n initializer=bias_initializer)\n", (1596, 1683), True, 'import oneflow as flow\n'), ((1756, 1799), 'oneflow.nn.bias_add', 'flow.nn.bias_add', (['output', 'bias', 'data_format'], {}), '(output, bias, data_format)\n', (1772, 1799), True, 'import oneflow as flow\n'), ((4208, 4264), 'oneflow.concat', 'flow.concat', ([], {'values': 'inceptionA_bn', 'axis': '(1)', 'name': '"""concat"""'}), "(values=inceptionA_bn, axis=1, name='concat')\n", (4219, 4264), True, 'import oneflow as flow\n'), ((5698, 5754), 'oneflow.concat', 'flow.concat', ([], {'values': 'inceptionB_bn', 'axis': '(1)', 'name': '"""concat"""'}), "(values=inceptionB_bn, axis=1, name='concat')\n", (5709, 5754), True, 'import oneflow as flow\n'), ((8814, 8870), 'oneflow.concat', 'flow.concat', ([], {'values': 'inceptionC_bn', 'axis': '(1)', 'name': '"""concat"""'}), "(values=inceptionC_bn, axis=1, name='concat')\n", (8825, 8870), True, 'import oneflow as flow\n'), ((10774, 10830), 'oneflow.concat', 'flow.concat', ([], {'values': 'inceptionD_bn', 'axis': '(1)', 'name': '"""concat"""'}), "(values=inceptionD_bn, axis=1, name='concat')\n", (10785, 10830), True, 'import oneflow as flow\n'), ((13999, 14061), 'oneflow.concat', 'flow.concat', ([], {'values': 'inceptionE_total_bn', 'axis': '(1)', 'name': '"""concat"""'}), "(values=inceptionE_total_bn, axis=1, name='concat')\n", (14010, 14061), True, 'import oneflow as flow\n'), ((15622, 15652), 'oneflow.scope.namespace', 'flow.scope.namespace', (['"""logits"""'], {}), "('logits')\n", (15642, 15652), True, 'import oneflow as flow\n'), ((15670, 15711), 'oneflow.reshape', 'flow.reshape', (['pool3', '[pool3.shape[0], -1]'], {}), '(pool3, [pool3.shape[0], -1])\n', (15682, 15711), True, 'import oneflow as flow\n'), ((16618, 16644), 'oneflow.matmul', 'flow.matmul', (['pool3', 'weight'], {}), '(pool3, weight)\n', (16629, 16644), True, 
'import oneflow as flow\n'), ((16659, 16686), 'oneflow.nn.bias_add', 'flow.nn.bias_add', (['fc1', 'bias'], {}), '(fc1, bias)\n', (16675, 16686), True, 'import oneflow as flow\n'), ((1898, 1920), 'oneflow.math.relu', 'flow.math.relu', (['output'], {}), '(output)\n', (1912, 1920), True, 'import oneflow as flow\n'), ((2193, 2226), 'oneflow.scope.namespace', 'flow.scope.namespace', (['"""branch1x1"""'], {}), "('branch1x1')\n", (2213, 2226), True, 'import oneflow as flow\n'), ((2381, 2414), 'oneflow.scope.namespace', 'flow.scope.namespace', (['"""branch5x5"""'], {}), "('branch5x5')\n", (2401, 2414), True, 'import oneflow as flow\n'), ((2798, 2834), 'oneflow.scope.namespace', 'flow.scope.namespace', (['"""branch3x3dbl"""'], {}), "('branch3x3dbl')\n", (2818, 2834), True, 'import oneflow as flow\n'), ((3460, 3495), 'oneflow.scope.namespace', 'flow.scope.namespace', (['"""branch_pool"""'], {}), "('branch_pool')\n", (3480, 3495), True, 'import oneflow as flow\n'), ((3525, 3626), 'oneflow.nn.avg_pool2d', 'flow.nn.avg_pool2d', (['in_blob'], {'ksize': '(3)', 'strides': '(1)', 'padding': '"""SAME"""', 'data_format': '"""NCHW"""', 'name': '"""pool"""'}), "(in_blob, ksize=3, strides=1, padding='SAME', data_format\n ='NCHW', name='pool')\n", (3543, 3626), True, 'import oneflow as flow\n'), ((4394, 4427), 'oneflow.scope.namespace', 'flow.scope.namespace', (['"""branch3x3"""'], {}), "('branch3x3')\n", (4414, 4427), True, 'import oneflow as flow\n'), ((4584, 4620), 'oneflow.scope.namespace', 'flow.scope.namespace', (['"""branch3x3dbl"""'], {}), "('branch3x3dbl')\n", (4604, 4620), True, 'import oneflow as flow\n'), ((5247, 5282), 'oneflow.scope.namespace', 'flow.scope.namespace', (['"""branch_pool"""'], {}), "('branch_pool')\n", (5267, 5282), True, 'import oneflow as flow\n'), ((5310, 5412), 'oneflow.nn.max_pool2d', 'flow.nn.max_pool2d', (['in_blob'], {'ksize': '(3)', 'strides': '(2)', 'padding': '"""VALID"""', 'data_format': '"""NCHW"""', 'name': '"""pool0"""'}), "(in_blob, ksize=3, 
strides=2, padding='VALID',\n data_format='NCHW', name='pool0')\n", (5328, 5412), True, 'import oneflow as flow\n'), ((5893, 5926), 'oneflow.scope.namespace', 'flow.scope.namespace', (['"""branch1x1"""'], {}), "('branch1x1')\n", (5913, 5926), True, 'import oneflow as flow\n'), ((6082, 6115), 'oneflow.scope.namespace', 'flow.scope.namespace', (['"""branch7x7"""'], {}), "('branch7x7')\n", (6102, 6115), True, 'import oneflow as flow\n'), ((6833, 6869), 'oneflow.scope.namespace', 'flow.scope.namespace', (['"""branch7x7dbl"""'], {}), "('branch7x7dbl')\n", (6853, 6869), True, 'import oneflow as flow\n'), ((8083, 8118), 'oneflow.scope.namespace', 'flow.scope.namespace', (['"""branch_pool"""'], {}), "('branch_pool')\n", (8103, 8118), True, 'import oneflow as flow\n'), ((8148, 8249), 'oneflow.nn.avg_pool2d', 'flow.nn.avg_pool2d', (['in_blob'], {'ksize': '(3)', 'strides': '(1)', 'padding': '"""SAME"""', 'data_format': '"""NCHW"""', 'name': '"""pool"""'}), "(in_blob, ksize=3, strides=1, padding='SAME', data_format\n ='NCHW', name='pool')\n", (8166, 8249), True, 'import oneflow as flow\n'), ((9000, 9033), 'oneflow.scope.namespace', 'flow.scope.namespace', (['"""branch3x3"""'], {}), "('branch3x3')\n", (9020, 9033), True, 'import oneflow as flow\n'), ((9420, 9455), 'oneflow.scope.namespace', 'flow.scope.namespace', (['"""branch7x7x3"""'], {}), "('branch7x7x3')\n", (9440, 9455), True, 'import oneflow as flow\n'), ((10322, 10357), 'oneflow.scope.namespace', 'flow.scope.namespace', (['"""branch_pool"""'], {}), "('branch_pool')\n", (10342, 10357), True, 'import oneflow as flow\n'), ((10385, 10486), 'oneflow.nn.max_pool2d', 'flow.nn.max_pool2d', (['in_blob'], {'ksize': '(3)', 'strides': '(2)', 'padding': '"""VALID"""', 'data_format': '"""NCHW"""', 'name': '"""pool"""'}), "(in_blob, ksize=3, strides=2, padding='VALID',\n data_format='NCHW', name='pool')\n", (10403, 10486), True, 'import oneflow as flow\n'), ((10960, 10993), 'oneflow.scope.namespace', 'flow.scope.namespace', 
(['"""branch1x1"""'], {}), "('branch1x1')\n", (10980, 10993), True, 'import oneflow as flow\n'), ((11149, 11182), 'oneflow.scope.namespace', 'flow.scope.namespace', (['"""branch3x3"""'], {}), "('branch3x3')\n", (11169, 11182), True, 'import oneflow as flow\n'), ((11958, 12016), 'oneflow.concat', 'flow.concat', ([], {'values': 'inceptionE_1_bn', 'axis': '(1)', 'name': '"""concat"""'}), "(values=inceptionE_1_bn, axis=1, name='concat')\n", (11969, 12016), True, 'import oneflow as flow\n'), ((12060, 12096), 'oneflow.scope.namespace', 'flow.scope.namespace', (['"""branch3x3dbl"""'], {}), "('branch3x3dbl')\n", (12080, 12096), True, 'import oneflow as flow\n'), ((13125, 13183), 'oneflow.concat', 'flow.concat', ([], {'values': 'inceptionE_2_bn', 'axis': '(1)', 'name': '"""concat"""'}), "(values=inceptionE_2_bn, axis=1, name='concat')\n", (13136, 13183), True, 'import oneflow as flow\n'), ((13227, 13262), 'oneflow.scope.namespace', 'flow.scope.namespace', (['"""branch_pool"""'], {}), "('branch_pool')\n", (13247, 13262), True, 'import oneflow as flow\n'), ((13292, 13393), 'oneflow.nn.avg_pool2d', 'flow.nn.avg_pool2d', (['in_blob'], {'ksize': '(3)', 'strides': '(1)', 'padding': '"""SAME"""', 'data_format': '"""NCHW"""', 'name': '"""pool"""'}), "(in_blob, ksize=3, strides=1, padding='SAME', data_format\n ='NCHW', name='pool')\n", (13310, 13393), True, 'import oneflow as flow\n'), ((1992, 2017), 'oneflow.math.sigmoid', 'flow.math.sigmoid', (['output'], {}), '(output)\n', (2009, 2017), True, 'import oneflow as flow\n'), ((16310, 16350), 'oneflow.truncated_normal', 'flow.truncated_normal', (['(0.816496580927726)'], {}), '(0.816496580927726)\n', (16331, 16350), True, 'import oneflow as flow\n'), ((16534, 16561), 'oneflow.constant_initializer', 'flow.constant_initializer', ([], {}), '()\n', (16559, 16561), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow as flow
def instance_norm_cambricon(input, name_prefix, trainable=True):
    """Instance normalization via the fused ``flow.nn.InstanceNorm2d`` op
    (cambricon backend). The op returns (output, mean, variance); only the
    normalized output is kept. ``trainable`` is accepted for interface parity
    with :func:`instance_norm_gpu` but is not used here.
    """
    normalized, _mean, _variance = flow.nn.InstanceNorm2d(input, name=name_prefix)
    return normalized
def instance_norm_gpu(input, name_prefix, trainable=True):
    """Instance normalization over the spatial axes of an NHWC tensor with a
    learned per-channel affine transform.

    Statistics are computed per sample and per channel (axes 1 and 2, i.e.
    H and W), then the input is standardized and scaled/shifted by the
    ``gamma``/``beta`` variables named after ``name_prefix``.
    """
    eps = 1e-5
    # Per-sample, per-channel mean/variance over the spatial dimensions.
    mean, variance = flow.nn.moments(input, [1, 2], keepdims=True)
    affine_shape = (1, 1, 1, input.shape[3])
    gamma = flow.get_variable(
        name_prefix + "_gamma",
        shape=affine_shape,
        dtype=input.dtype,
        initializer=flow.ones_initializer(),
        trainable=trainable,
    )
    beta = flow.get_variable(
        name_prefix + "_beta",
        shape=affine_shape,
        dtype=input.dtype,
        initializer=flow.zeros_initializer(),
        trainable=trainable,
    )
    standardized = (input - mean) / flow.math.sqrt(variance + eps)
    return gamma * standardized + beta
def instance_norm(input, name_prefix, trainable=True, backend="gpu"):
    """Dispatch instance normalization to a backend-specific implementation.

    ``backend`` may be ``"gpu"`` or ``"cambricon"``; any other value yields
    ``None`` (preserving the original silent-fallback behavior).
    """
    if backend == "cambricon":
        return instance_norm_cambricon(input, name_prefix, trainable)
    if backend == "gpu":
        return instance_norm_gpu(input, name_prefix, trainable)
    return None
def conv2d_layer(
    name,
    input,
    out_channel,
    kernel_size=3,
    strides=1,
    padding="SAME",
    data_format="NHWC",
    dilation_rate=1,
    use_bias=True,
    weight_initializer=flow.variance_scaling_initializer(
        2, "fan_out", "random_normal", data_format="NHWC"
    ),
    bias_initializer=flow.zeros_initializer(),
    trainable=True,
):
    """2-D convolution with an optional bias term.

    Creates a kernel variable named ``<name>_weight`` of shape
    ``(out_channel, kernel_size, kernel_size, in_channels)`` — the input
    channel count is taken from ``input.shape[3]`` — and, when ``use_bias``
    is set, a ``<name>_bias`` variable added after the convolution.
    """
    kernel = flow.get_variable(
        name + "_weight",
        shape=(out_channel, kernel_size, kernel_size, input.shape[3]),
        dtype=input.dtype,
        initializer=weight_initializer,
        trainable=trainable,
    )
    conv_out = flow.nn.conv2d(
        input, kernel, strides, padding, data_format, dilation_rate, name=name
    )
    if not use_bias:
        return conv_out
    bias = flow.get_variable(
        name + "_bias",
        shape=(out_channel,),
        dtype=input.dtype,
        initializer=bias_initializer,
        trainable=trainable,
    )
    return flow.nn.bias_add(conv_out, bias, data_format)
def upsampleConvLayer(
    input,
    name_prefix,
    channel,
    kernel_size,
    hw_scale=(2, 2),
    data_format="NHWC",
    # interpolation = "bilinear",
    interpolation="nearest",
    trainable=True,
):
    """Upsample-then-convolve block (a common alternative to transposed
    convolution that avoids checkerboard artifacts).

    The input is first resized by ``hw_scale`` using ``interpolation``, then
    passed through a stride-1 conv named ``<name_prefix>_conv``.
    """
    upsampled = flow.layers.upsample_2d(
        input,
        size=hw_scale,
        data_format=data_format,
        interpolation=interpolation,
        name=name_prefix + "_%s" % interpolation,
    )
    conv = conv2d_layer(
        name_prefix + "_conv",
        upsampled,
        channel,
        kernel_size=kernel_size,
        strides=1,
        trainable=trainable,
    )
    return conv
def resBlock(input, channel, name_prefix, trainable=True, backend="gpu"):
    """Residual block: two (conv3x3 -> instance norm) stages, ReLU after the
    first stage only, plus an identity shortcut from the block input.
    """
    shortcut = input
    y = conv2d_layer(
        name_prefix + "_conv1",
        input,
        channel,
        kernel_size=3,
        strides=1,
        trainable=trainable,
    )
    y = instance_norm(y, name_prefix + "_in1", trainable=trainable, backend=backend)
    y = flow.nn.relu(y)
    y = conv2d_layer(
        name_prefix + "_conv2",
        y,
        channel,
        kernel_size=3,
        strides=1,
        trainable=trainable,
    )
    y = instance_norm(y, name_prefix + "_in2", trainable=trainable, backend=backend)
    return y + shortcut
def styleNet(input, trainable=True, backend="gpu"):
    """Fast style-transfer network (Johnson et al. style): a downsampling
    encoder, five residual blocks, and an upsampling decoder.

    Args:
        input: NHWC image tensor.
        trainable: whether the network's variables are trainable.
        backend: instance-norm backend, forwarded to :func:`instance_norm`.

    Returns:
        The stylized output tensor from the final 9x9 convolution.

    Fix: the first two ``instance_norm`` calls previously dropped the
    ``trainable`` flag (unlike every later call), so with ``trainable=False``
    their gamma/beta variables were still created trainable. They now forward
    ``trainable=trainable`` consistently.
    """
    with flow.scope.namespace("style_transfer"):
        # Initial convolution layers (encoder).
        conv1 = conv2d_layer(
            "first_conv", input, 32, kernel_size=9, strides=1, trainable=trainable
        )
        in1 = instance_norm(conv1, "first_conv_in", trainable=trainable, backend=backend)
        in1 = flow.nn.relu(in1)
        conv2 = conv2d_layer(
            "second_conv", in1, 64, kernel_size=3, strides=2, trainable=trainable
        )
        in2 = instance_norm(conv2, "second_conv_in", trainable=trainable, backend=backend)
        in2 = flow.nn.relu(in2)
        conv3 = conv2d_layer(
            "third_conv", in2, 128, kernel_size=3, strides=2, trainable=trainable
        )
        in3 = instance_norm(conv3, "third_conv_in", trainable=trainable, backend=backend)
        in3 = flow.nn.relu(in3)
        # Residual layers at the bottleneck resolution.
        res1 = resBlock(in3, 128, "res1", trainable=trainable, backend=backend)
        res2 = resBlock(res1, 128, "res2", trainable=trainable, backend=backend)
        res3 = resBlock(res2, 128, "res3", trainable=trainable, backend=backend)
        res4 = resBlock(res3, 128, "res4", trainable=trainable, backend=backend)
        res5 = resBlock(res4, 128, "res5", trainable=trainable, backend=backend)
        # Upsampling layers (decoder); upsample+conv instead of deconv.
        upsample1 = upsampleConvLayer(res5, "upsample1", 64, 3, trainable=trainable)
        # upsample1 = deconv(res5, 64, "upsample1", kernel_size = 4, strides = [2, 2], trainable = True)
        in4 = instance_norm(upsample1, "upsample1_in", trainable=trainable, backend=backend)
        in4 = flow.nn.relu(in4)
        upsample2 = upsampleConvLayer(in4, "upsample2", 32, 3, trainable=trainable)
        # upsample2 = deconv(in4, 32, "upsample2", kernel_size = 4, strides = [2, 2], trainable = True)
        in5 = instance_norm(upsample2, "upsample2_in", trainable=trainable, backend=backend)
        in5 = flow.nn.relu(in5)
        out = conv2d_layer(
            "last_conv", in5, 3, kernel_size=9, strides=1, trainable=trainable
        )
        # out = flow.clamp(conv1, 0, 255)
        return out
def mse_loss(input):
    """Mean of the elementwise squares of ``input`` (MSE against zero)."""
    squared = flow.math.square(input)
    return flow.math.reduce_mean(squared)
| [
"oneflow.variance_scaling_initializer",
"oneflow.nn.InstanceNorm2d",
"oneflow.nn.conv2d",
"oneflow.scope.namespace",
"oneflow.nn.relu",
"oneflow.ones_initializer",
"oneflow.zeros_initializer",
"oneflow.get_variable",
"oneflow.nn.moments",
"oneflow.math.square",
"oneflow.nn.bias_add",
"oneflow.... | [((701, 748), 'oneflow.nn.InstanceNorm2d', 'flow.nn.InstanceNorm2d', (['input'], {'name': 'name_prefix'}), '(input, name=name_prefix)\n', (723, 748), True, 'import oneflow as flow\n'), ((849, 894), 'oneflow.nn.moments', 'flow.nn.moments', (['input', '[1, 2]'], {'keepdims': '(True)'}), '(input, [1, 2], keepdims=True)\n', (864, 894), True, 'import oneflow as flow\n'), ((1944, 2032), 'oneflow.variance_scaling_initializer', 'flow.variance_scaling_initializer', (['(2)', '"""fan_out"""', '"""random_normal"""'], {'data_format': '"""NHWC"""'}), "(2, 'fan_out', 'random_normal',\n data_format='NHWC')\n", (1977, 2032), True, 'import oneflow as flow\n'), ((2065, 2089), 'oneflow.zeros_initializer', 'flow.zeros_initializer', ([], {}), '()\n', (2087, 2089), True, 'import oneflow as flow\n'), ((2202, 2333), 'oneflow.get_variable', 'flow.get_variable', (["(name + '_weight')"], {'shape': 'weight_shape', 'dtype': 'input.dtype', 'initializer': 'weight_initializer', 'trainable': 'trainable'}), "(name + '_weight', shape=weight_shape, dtype=input.dtype,\n initializer=weight_initializer, trainable=trainable)\n", (2219, 2333), True, 'import oneflow as flow\n'), ((2390, 2480), 'oneflow.nn.conv2d', 'flow.nn.conv2d', (['input', 'weight', 'strides', 'padding', 'data_format', 'dilation_rate'], {'name': 'name'}), '(input, weight, strides, padding, data_format, dilation_rate,\n name=name)\n', (2404, 2480), True, 'import oneflow as flow\n'), ((3028, 3173), 'oneflow.layers.upsample_2d', 'flow.layers.upsample_2d', (['input'], {'size': 'hw_scale', 'data_format': 'data_format', 'interpolation': 'interpolation', 'name': "(name_prefix + '_%s' % interpolation)"}), "(input, size=hw_scale, data_format=data_format,\n interpolation=interpolation, name=name_prefix + '_%s' % interpolation)\n", (3051, 3173), True, 'import oneflow as flow\n'), ((3735, 3752), 'oneflow.nn.relu', 'flow.nn.relu', (['out'], {}), '(out)\n', (3747, 3752), True, 'import oneflow as flow\n'), ((1381, 1415), 
'oneflow.math.sqrt', 'flow.math.sqrt', (['(variance + epsilon)'], {}), '(variance + epsilon)\n', (1395, 1415), True, 'import oneflow as flow\n'), ((2523, 2652), 'oneflow.get_variable', 'flow.get_variable', (["(name + '_bias')"], {'shape': '(out_channel,)', 'dtype': 'input.dtype', 'initializer': 'bias_initializer', 'trainable': 'trainable'}), "(name + '_bias', shape=(out_channel,), dtype=input.dtype,\n initializer=bias_initializer, trainable=trainable)\n", (2540, 2652), True, 'import oneflow as flow\n'), ((2737, 2780), 'oneflow.nn.bias_add', 'flow.nn.bias_add', (['output', 'bias', 'data_format'], {}), '(output, bias, data_format)\n', (2753, 2780), True, 'import oneflow as flow\n'), ((4091, 4129), 'oneflow.scope.namespace', 'flow.scope.namespace', (['"""style_transfer"""'], {}), "('style_transfer')\n", (4111, 4129), True, 'import oneflow as flow\n'), ((4374, 4391), 'oneflow.nn.relu', 'flow.nn.relu', (['in1'], {}), '(in1)\n', (4386, 4391), True, 'import oneflow as flow\n'), ((4598, 4615), 'oneflow.nn.relu', 'flow.nn.relu', (['in2'], {}), '(in2)\n', (4610, 4615), True, 'import oneflow as flow\n'), ((4842, 4859), 'oneflow.nn.relu', 'flow.nn.relu', (['in3'], {}), '(in3)\n', (4854, 4859), True, 'import oneflow as flow\n'), ((5615, 5632), 'oneflow.nn.relu', 'flow.nn.relu', (['in4'], {}), '(in4)\n', (5627, 5632), True, 'import oneflow as flow\n'), ((5928, 5945), 'oneflow.nn.relu', 'flow.nn.relu', (['in5'], {}), '(in5)\n', (5940, 5945), True, 'import oneflow as flow\n'), ((6216, 6239), 'oneflow.math.square', 'flow.math.square', (['input'], {}), '(input)\n', (6232, 6239), True, 'import oneflow as flow\n'), ((1052, 1075), 'oneflow.ones_initializer', 'flow.ones_initializer', ([], {}), '()\n', (1073, 1075), True, 'import oneflow as flow\n'), ((1266, 1290), 'oneflow.zeros_initializer', 'flow.zeros_initializer', ([], {}), '()\n', (1288, 1290), True, 'import oneflow as flow\n')] |
#!/usr/bin/python3
import argparse
import os
import time
from functools import partial
import numpy as np
import oneflow as flow
from oneflow import nn
from modeling import BertForPreTraining
from utils.ofrecord_data_utils import OfRecordDataLoader
def save_model(module: nn.Module, checkpoint_path: str, epoch: int, acc: float):
    """Persist the module's state dict under ``checkpoint_path`` in a
    directory named after the epoch and validation accuracy."""
    snapshot_dir = os.path.join(
        checkpoint_path, "epoch_%d_val_acc_%f" % (epoch, acc)
    )
    flow.save(module.state_dict(), snapshot_dir)
def train(epoch, iter_per_epoch, graph, print_interval):
    """Run one training epoch over ``graph`` (a compiled training graph that
    returns NSP logits, NSP labels, and the total loss per call), printing
    running loss every ``print_interval`` iterations and a final summary with
    the next-sentence-prediction accuracy."""
    total_loss = 0
    total_correct = 0
    total_element = 0
    for step in range(iter_per_epoch):
        start_t = time.time()
        next_sent_output, next_sent_labels, loss = graph()
        # Pulling the scalar to host also waits for the graph to finish.
        loss = loss.numpy().item()
        end_t = time.time()
        # Next-sentence-prediction accuracy for this batch.
        predictions = next_sent_output.argmax(dim=-1)
        batch_correct = (
            predictions.eq(next_sent_labels.squeeze(1)).sum().numpy().item()
        )
        total_loss += loss
        total_correct += batch_correct
        total_element += next_sent_labels.nelement()
        if (step + 1) % print_interval == 0:
            print(
                "Epoch {}, train iter {}, loss {:.3f}, iter time: {:.3f}s".format(
                    epoch, (step + 1), total_loss / (step + 1), end_t - start_t
                )
            )
    print(
        "Epoch {}, train iter {}, loss {:.3f}, total accuracy {:.2f}".format(
            epoch, (step + 1), total_loss / (step + 1), total_correct * 100.0 / total_element
        )
    )
def validation(
    epoch: int, iter_per_epoch: int, graph: nn.Graph, print_interval: int
) -> float:
    """Evaluate next-sentence-prediction accuracy over ``iter_per_epoch``
    batches from ``graph`` (a compiled eval graph returning NSP logits and
    labels), printing timing every ``print_interval`` iterations.

    Returns the fraction of correctly classified next-sentence labels.
    """
    total_correct = 0
    total_element = 0
    for step in range(iter_per_epoch):
        start_t = time.time()
        outputs, labels = graph()
        outputs = outputs.numpy()
        labels = labels.numpy()
        end_t = time.time()
        # Next-sentence-prediction accuracy for this batch.
        total_correct += (outputs.argmax(axis=-1) == labels.squeeze(1)).sum()
        total_element += labels.size
        if (step + 1) % print_interval == 0:
            print(
                "Epoch {}, val iter {}, val time: {:.3f}s".format(
                    epoch, (step + 1), end_t - start_t
                )
            )
    print(
        "Epoch {}, val iter {}, total accuracy {:.2f}".format(
            epoch, (step + 1), total_correct * 100.0 / total_element
        )
    )
    return total_correct / total_element
def main():
    """CLI entry point for BERT pre-training.

    Parses command-line arguments, builds OFRecord train/val dataloaders and
    the BertForPreTraining model, sets up Adam with a cosine-decay LR
    schedule, compiles static train/eval ``nn.Graph``s, then runs the
    epoch loop: train -> validate -> save checkpoint.
    """
    # ---- CLI arguments -------------------------------------------------
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--ofrecord_path",
        type=str,
        default="wiki_ofrecord_seq_len_128_example",
        help="Path to ofrecord dataset",
    )
    parser.add_argument(
        "--train_batch_size", type=int, default=32, help="Training batch size"
    )
    parser.add_argument(
        "--val_batch_size", type=int, default=32, help="Validation batch size"
    )
    parser.add_argument(
        "--hidden_size", type=int, default=768, help="Hidden size of transformer model",
    )
    parser.add_argument(
        "--num_hidden_layers", type=int, default=12, help="Number of layers"
    )
    parser.add_argument(
        "-a",
        "--num_attention_heads",
        type=int,
        default=12,
        help="Number of attention heads",
    )
    parser.add_argument(
        "--intermediate_size",
        type=int,
        default=3072,
        help="intermediate size of bert encoder",
    )
    parser.add_argument("--max_position_embeddings", type=int, default=512)
    parser.add_argument(
        "-s", "--seq_length", type=int, default=128, help="Maximum sequence len"
    )
    parser.add_argument(
        "--vocab_size", type=int, default=30522, help="Total number of vocab"
    )
    parser.add_argument("--type_vocab_size", type=int, default=2)
    parser.add_argument("--attention_probs_dropout_prob", type=float, default=0.1)
    parser.add_argument("--hidden_dropout_prob", type=float, default=0.1)
    # NOTE(review): --hidden_size_per_head, --cuda_devices and
    # --adam_weight_decay are parsed but never read anywhere below.
    parser.add_argument("--hidden_size_per_head", type=int, default=64)
    parser.add_argument("--max_predictions_per_seq", type=int, default=20)
    parser.add_argument("-e", "--epochs", type=int, default=10, help="Number of epochs")
    # NOTE(review): argparse `type=bool` converts ANY non-empty string to
    # True (e.g. "--with-cuda false" still enables CUDA); only the default
    # behaves as documented.
    parser.add_argument(
        "--with-cuda",
        type=bool,
        default=True,
        help="Training with CUDA: true, or false",
    )
    parser.add_argument(
        "--cuda_devices", type=int, nargs="+", default=None, help="CUDA device ids"
    )
    parser.add_argument("--lr", type=float, default=1e-4, help="Learning rate of adam")
    parser.add_argument(
        "--adam_weight_decay", type=float, default=0.01, help="Weight_decay of adam"
    )
    parser.add_argument(
        "--adam_beta1", type=float, default=0.9, help="Adam first beta value"
    )
    parser.add_argument(
        "--adam_beta2", type=float, default=0.999, help="Adam first beta value"
    )
    parser.add_argument(
        "--print_interval", type=int, default=10, help="Interval of printing"
    )
    parser.add_argument(
        "--checkpoint_path",
        type=str,
        default="checkpoints",
        help="Path to model saving",
    )
    args = parser.parse_args()
    # ---- Device selection ----------------------------------------------
    if args.with_cuda:
        device = flow.device("cuda")
    else:
        device = flow.device("cpu")
    print("Device is: ", device)
    print("Creating Dataloader")
    # ---- Data: OFRecord readers for the train/test splits ---------------
    train_data_loader = OfRecordDataLoader(
        ofrecord_dir=args.ofrecord_path,
        mode="train",
        dataset_size=1024,
        batch_size=args.train_batch_size,
        data_part_num=1,
        seq_length=args.seq_length,
        max_predictions_per_seq=args.max_predictions_per_seq,
    )
    test_data_loader = OfRecordDataLoader(
        ofrecord_dir=args.ofrecord_path,
        mode="test",
        dataset_size=1024,
        batch_size=args.val_batch_size,
        data_part_num=1,
        seq_length=args.seq_length,
        max_predictions_per_seq=args.max_predictions_per_seq,
    )
    print("Building BERT Model")
    bert_model = BertForPreTraining(
        args.vocab_size,
        args.seq_length,
        args.hidden_size,
        args.num_hidden_layers,
        args.num_attention_heads,
        args.intermediate_size,
        nn.GELU(),
        args.hidden_dropout_prob,
        args.attention_probs_dropout_prob,
        args.max_position_embeddings,
        args.type_vocab_size,
    )
    # print(bert_model)
    bert_model.to(device)
    # ---- Optimizer + LR schedule ----------------------------------------
    # NOTE(review): args.adam_weight_decay is not passed to the optimizer.
    optimizer = flow.optim.Adam(
        bert_model.parameters(), lr=args.lr, betas=(args.adam_beta1, args.adam_beta2),
    )
    steps = args.epochs * len(train_data_loader)
    cosine_annealing_lr = flow.optim.lr_scheduler.CosineDecayLR(
        optimizer, decay_steps=steps
    )
    # NSP loss averages over the batch; the masked-LM loss stays per-element
    # ("none") so it can be weighted by label_weights in get_masked_lm_loss.
    ns_criterion = nn.CrossEntropyLoss(reduction="mean")
    mlm_criterion = nn.CrossEntropyLoss(reduction="none")
    def get_masked_lm_loss(
        logit_blob,
        masked_lm_positions,
        masked_lm_labels,
        label_weights,
        max_prediction_per_seq,
    ):
        """Weighted masked-LM cross-entropy: gather the logits at the masked
        positions and average the per-prediction losses by label_weights."""
        # gather valid position indices
        logit_blob = flow.gather(
            logit_blob,
            index=masked_lm_positions.unsqueeze(2).repeat(1, 1, args.vocab_size),
            dim=1,
        )
        logit_blob = flow.reshape(logit_blob, [-1, args.vocab_size])
        label_id_blob = flow.reshape(masked_lm_labels, [-1])
        # The `positions` tensor might be zero-padded (if the sequence is too
        # short to have the maximum number of predictions). The `label_weights`
        # tensor has a value of 1.0 for every real prediction and 0.0 for the
        # padding predictions.
        pre_example_loss = mlm_criterion(logit_blob, label_id_blob)
        pre_example_loss = flow.reshape(pre_example_loss, [-1, max_prediction_per_seq])
        # NOTE(review): sum_label_weight is computed but never used below.
        sum_label_weight = flow.sum(label_weights, dim=-1)
        sum_label_weight = sum_label_weight / label_weights.shape[0]
        numerator = flow.sum(pre_example_loss * label_weights)
        # 1e-5 guards against division by zero when no positions are masked.
        denominator = flow.sum(label_weights) + 1e-5
        loss = numerator / denominator
        return loss
    class BertGraph(nn.Graph):
        """Static training graph: data loading, forward, both losses, backward."""
        def __init__(self):
            super().__init__()
            self.bert = bert_model
            self.ns_criterion = ns_criterion
            self.masked_lm_criterion = partial(
                get_masked_lm_loss, max_prediction_per_seq=args.max_predictions_per_seq
            )
            self.add_optimizer(optimizer, lr_sch=cosine_annealing_lr)
            self._train_data_loader = train_data_loader
        def build(self):
            (
                input_ids,
                next_sentence_labels,
                input_mask,
                segment_ids,
                masked_lm_ids,
                masked_lm_positions,
                masked_lm_weights,
            ) = self._train_data_loader()
            input_ids = input_ids.to(device=device)
            input_mask = input_mask.to(device=device)
            segment_ids = segment_ids.to(device=device)
            next_sentence_labels = next_sentence_labels.to(device=device)
            masked_lm_ids = masked_lm_ids.to(device=device)
            masked_lm_positions = masked_lm_positions.to(device=device)
            masked_lm_weights = masked_lm_weights.to(device=device)
            # 1. forward the next_sentence_prediction and masked_lm model
            prediction_scores, seq_relationship_scores = self.bert(
                input_ids, segment_ids, input_mask
            )
            # 2-1. loss of is_next classification result
            next_sentence_loss = self.ns_criterion(
                seq_relationship_scores.view(-1, 2), next_sentence_labels.view(-1)
            )
            # 2-2. weighted masked-LM loss over the masked positions
            masked_lm_loss = self.masked_lm_criterion(
                prediction_scores, masked_lm_positions, masked_lm_ids, masked_lm_weights
            )
            total_loss = next_sentence_loss + masked_lm_loss
            total_loss.backward()
            return seq_relationship_scores, next_sentence_labels, total_loss
    bert_graph = BertGraph()
    class BertEvalGraph(nn.Graph):
        """Static eval graph: forward only (no_grad), returns NSP scores/labels."""
        def __init__(self):
            super().__init__()
            self.bert = bert_model
            self._test_data_loader = test_data_loader
        def build(self):
            (
                input_ids,
                next_sent_labels,
                input_masks,
                segment_ids,
                masked_lm_ids,
                masked_lm_positions,
                masked_lm_weights,
            ) = self._test_data_loader()
            input_ids = input_ids.to(device=device)
            input_masks = input_masks.to(device=device)
            segment_ids = segment_ids.to(device=device)
            next_sent_labels = next_sent_labels.to(device=device)
            masked_lm_ids = masked_lm_ids.to(device=device)
            masked_lm_positions = masked_lm_positions.to(device)
            with flow.no_grad():
                # 1. forward the next_sentence_prediction and masked_lm model
                # NOTE(review): argument order here is (input_ids, input_masks,
                # segment_ids) while the training graph calls
                # self.bert(input_ids, segment_ids, input_mask) — one of the
                # two is likely wrong; confirm against BertForPreTraining.
                _, seq_relationship_scores = self.bert(
                    input_ids, input_masks, segment_ids
                )
            return seq_relationship_scores, next_sent_labels
    bert_eval_graph = BertEvalGraph()
    # ---- Epoch loop: train -> eval -> checkpoint ------------------------
    for epoch in range(args.epochs):
        # Train
        bert_model.train()
        train(epoch, len(train_data_loader), bert_graph, args.print_interval)
        # Eval
        bert_model.eval()
        val_acc = validation(
            epoch, len(test_data_loader), bert_eval_graph, args.print_interval * 10
        )
        print("Saveing model ...")
        save_model(bert_model, args.checkpoint_path, epoch, val_acc)
if __name__ == "__main__":
main()
| [
"oneflow.optim.lr_scheduler.CosineDecayLR",
"oneflow.no_grad",
"oneflow.nn.CrossEntropyLoss",
"oneflow.nn.GELU",
"oneflow.reshape",
"oneflow.sum",
"oneflow.device"
] | [((2661, 2686), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2684, 2686), False, 'import argparse\n'), ((5546, 5776), 'utils.ofrecord_data_utils.OfRecordDataLoader', 'OfRecordDataLoader', ([], {'ofrecord_dir': 'args.ofrecord_path', 'mode': '"""train"""', 'dataset_size': '(1024)', 'batch_size': 'args.train_batch_size', 'data_part_num': '(1)', 'seq_length': 'args.seq_length', 'max_predictions_per_seq': 'args.max_predictions_per_seq'}), "(ofrecord_dir=args.ofrecord_path, mode='train',\n dataset_size=1024, batch_size=args.train_batch_size, data_part_num=1,\n seq_length=args.seq_length, max_predictions_per_seq=args.\n max_predictions_per_seq)\n", (5564, 5776), False, 'from utils.ofrecord_data_utils import OfRecordDataLoader\n'), ((5851, 6078), 'utils.ofrecord_data_utils.OfRecordDataLoader', 'OfRecordDataLoader', ([], {'ofrecord_dir': 'args.ofrecord_path', 'mode': '"""test"""', 'dataset_size': '(1024)', 'batch_size': 'args.val_batch_size', 'data_part_num': '(1)', 'seq_length': 'args.seq_length', 'max_predictions_per_seq': 'args.max_predictions_per_seq'}), "(ofrecord_dir=args.ofrecord_path, mode='test',\n dataset_size=1024, batch_size=args.val_batch_size, data_part_num=1,\n seq_length=args.seq_length, max_predictions_per_seq=args.\n max_predictions_per_seq)\n", (5869, 6078), False, 'from utils.ofrecord_data_utils import OfRecordDataLoader\n'), ((6798, 6865), 'oneflow.optim.lr_scheduler.CosineDecayLR', 'flow.optim.lr_scheduler.CosineDecayLR', (['optimizer'], {'decay_steps': 'steps'}), '(optimizer, decay_steps=steps)\n', (6835, 6865), True, 'import oneflow as flow\n'), ((6900, 6937), 'oneflow.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {'reduction': '"""mean"""'}), "(reduction='mean')\n", (6919, 6937), False, 'from oneflow import nn\n'), ((6958, 6995), 'oneflow.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {'reduction': '"""none"""'}), "(reduction='none')\n", (6977, 6995), False, 'from oneflow import nn\n'), ((386, 453), 
'os.path.join', 'os.path.join', (['checkpoint_path', "('epoch_%d_val_acc_%f' % (epoch, acc))"], {}), "(checkpoint_path, 'epoch_%d_val_acc_%f' % (epoch, acc))\n", (398, 453), False, 'import os\n'), ((638, 649), 'time.time', 'time.time', ([], {}), '()\n', (647, 649), False, 'import time\n'), ((789, 800), 'time.time', 'time.time', ([], {}), '()\n', (798, 800), False, 'import time\n'), ((1786, 1797), 'time.time', 'time.time', ([], {}), '()\n', (1795, 1797), False, 'import time\n'), ((1973, 1984), 'time.time', 'time.time', ([], {}), '()\n', (1982, 1984), False, 'import time\n'), ((5388, 5407), 'oneflow.device', 'flow.device', (['"""cuda"""'], {}), "('cuda')\n", (5399, 5407), True, 'import oneflow as flow\n'), ((5435, 5453), 'oneflow.device', 'flow.device', (['"""cpu"""'], {}), "('cpu')\n", (5446, 5453), True, 'import oneflow as flow\n'), ((6382, 6391), 'oneflow.nn.GELU', 'nn.GELU', ([], {}), '()\n', (6389, 6391), False, 'from oneflow import nn\n'), ((7392, 7439), 'oneflow.reshape', 'flow.reshape', (['logit_blob', '[-1, args.vocab_size]'], {}), '(logit_blob, [-1, args.vocab_size])\n', (7404, 7439), True, 'import oneflow as flow\n'), ((7464, 7500), 'oneflow.reshape', 'flow.reshape', (['masked_lm_labels', '[-1]'], {}), '(masked_lm_labels, [-1])\n', (7476, 7500), True, 'import oneflow as flow\n'), ((7864, 7924), 'oneflow.reshape', 'flow.reshape', (['pre_example_loss', '[-1, max_prediction_per_seq]'], {}), '(pre_example_loss, [-1, max_prediction_per_seq])\n', (7876, 7924), True, 'import oneflow as flow\n'), ((7952, 7983), 'oneflow.sum', 'flow.sum', (['label_weights'], {'dim': '(-1)'}), '(label_weights, dim=-1)\n', (7960, 7983), True, 'import oneflow as flow\n'), ((8073, 8115), 'oneflow.sum', 'flow.sum', (['(pre_example_loss * label_weights)'], {}), '(pre_example_loss * label_weights)\n', (8081, 8115), True, 'import oneflow as flow\n'), ((8138, 8161), 'oneflow.sum', 'flow.sum', (['label_weights'], {}), '(label_weights)\n', (8146, 8161), True, 'import oneflow as flow\n'), 
((8438, 8523), 'functools.partial', 'partial', (['get_masked_lm_loss'], {'max_prediction_per_seq': 'args.max_predictions_per_seq'}), '(get_masked_lm_loss, max_prediction_per_seq=args.max_predictions_per_seq\n )\n', (8445, 8523), False, 'from functools import partial\n'), ((11057, 11071), 'oneflow.no_grad', 'flow.no_grad', ([], {}), '()\n', (11069, 11071), True, 'import oneflow as flow\n')] |
import math
import numpy as np
import oneflow as flow
import oneflow.nn as nn
def gelu(x):
    """Gaussian Error Linear Unit activation (tanh approximation).

    Matches the implementation in the Google BERT repo (identical to OpenAI
    GPT); see https://arxiv.org/abs/1606.08415.
    """
    inner = math.sqrt(2.0 / math.pi) * (x + 0.044715 * flow.pow(x, 3.0))
    return 0.5 * x * (1.0 + flow.tanh(inner))
class LayerNorm(nn.Module):
    """Layer normalization over the last dimension with a learnable affine."""

    def __init__(self, hidden_size, eps=1e-6):
        super(LayerNorm, self).__init__()
        self.eps = eps
        # Learnable scale/shift, initialized to the identity transform.
        self.weight = nn.Parameter(flow.ones(hidden_size, dtype=flow.float32))
        self.bias = nn.Parameter(flow.zeros(hidden_size, dtype=flow.float32))

    def forward(self, x):
        mu = x.mean(-1, keepdim=True)
        var = (x - mu).pow(2).mean(-1, keepdim=True)
        normalized = (x - mu) / flow.sqrt(var + self.eps)
        return self.weight * normalized + self.bias
class Conv1D(nn.Module):
    """
    1D-convolutional layer as defined by Radford et al. for OpenAI GPT (and also used in GPT-2).

    Behaves like a linear layer, but the weight matrix is stored transposed:
    its shape is (nx, nf) rather than (nf, nx).

    Args:
        nf (:obj:`int`): The number of output features.
        nx (:obj:`int`): The number of input features.
    """

    def __init__(self, nf, nx):
        super(Conv1D, self).__init__()
        self.nf = nf
        weight = flow.Tensor(nx, nf)
        nn.init.normal_(weight, mean=0, std=0.02)
        self.weight = nn.Parameter(weight)
        self.bias = nn.Parameter(flow.zeros(nf))

    def forward(self, x):
        batch, length, in_features = x.size()
        # Collapse (batch, seq) so addmm performs one 2-D matmul, then restore.
        out = flow.addmm(self.bias, x.view(-1, in_features), self.weight)
        return out.view(batch, length, self.nf)
class GPT2Attention(nn.Module):
    """Multi-head causal self-attention with optional key/value caching."""

    def __init__(self, config):
        super(GPT2Attention, self).__init__()
        max_positions = config.max_position_embeddings
        # Lower-triangular causal mask, shaped (1, 1, max_pos, max_pos) so it
        # broadcasts over (batch, head) in _attn.
        self.register_buffer(
            "bias",
            flow.tril(flow.ones((max_positions, max_positions), dtype=flow.int8)).view(
                1, 1, max_positions, max_positions
            ),
        )
        # Large negative value written into masked positions before softmax.
        self.register_buffer("masked_bias", flow.tensor(-1e4))
        self.embed_dim = config.hidden_size
        self.num_heads = config.num_attention_heads
        assert self.embed_dim % self.num_heads == 0
        self.head_dim = self.embed_dim // self.num_heads
        self.scale_attn_weights = config.scale_attn_weights
        # c_attn projects to 3x embed_dim: query, key and value in one matmul.
        self.c_attn = Conv1D(self.embed_dim * 3, self.embed_dim)
        self.c_proj = Conv1D(self.embed_dim, self.embed_dim)
        self.attn_dropout = nn.Dropout(config.attn_pdrop)
        self.resid_dropout = nn.Dropout(config.resid_pdrop)

    def _attn(self, query, key, value):
        """Scaled dot-product attention with the causal mask applied.

        Returns (attn_output, attn_weights)."""
        attn_weights = flow.matmul(query, key.transpose(-2, -1))
        if self.scale_attn_weights:
            attn_weights = attn_weights / (float(value.size(-1)) ** 0.5)
        # When a cache is used, the query covers only the new positions; slice
        # the mask so each query row attends to all keys up to its position.
        query_length, key_length = query.size(-2), key.size(-2)
        causal_mask = self.bias[:, :, key_length - query_length : key_length, :key_length]
        attn_weights = flow.where(causal_mask, attn_weights, self.masked_bias.to(attn_weights.dtype))
        # NOTE(review): a fresh Softmax module is constructed per call.
        attn_weights = nn.Softmax(dim=-1)(attn_weights)
        attn_weights = self.attn_dropout(attn_weights)
        attn_output = flow.matmul(attn_weights, value)
        return attn_output, attn_weights

    def _split_heads(self, tensor, num_heads, attn_head_size):
        """
        Splits hidden_size dim into attn_head_size and num_heads
        """
        bsz, seq_len = tensor.size()[:-1]
        new_shape = (bsz, seq_len, num_heads, attn_head_size)
        # new_shape = tensor.size()[:-1] + (num_heads, attn_head_size)
        tensor = tensor.view(*new_shape)
        return tensor.permute(0, 2, 1, 3)  # (batch, head, seq_length, head_features)

    def _merge_heads(self, tensor, num_heads, attn_head_size):
        """
        Merges attn_head_size dim and num_attn_heads dim into hidden_size
        """
        tensor = tensor.permute(0, 2, 1, 3)
        bsz, seq_len = tensor.size()[:-2]
        # new_shape = tensor.size()[:-2] + (num_heads * attn_head_size,)
        new_shape = (bsz, seq_len, num_heads * attn_head_size)
        return tensor.view(*new_shape)

    def forward(self, hidden_states, layer_past=None, use_cache=False):
        """Run attention; optionally prepend cached (key, value) from layer_past.

        Returns (attn_output, present, attn_weights); present is None unless
        use_cache is True."""
        hidden_states = self.c_attn(hidden_states)
        query, key, value = flow.chunk(hidden_states, chunks=3, dim=2)
        query = self._split_heads(query, self.num_heads, self.head_dim)
        key = self._split_heads(key, self.num_heads, self.head_dim)
        value = self._split_heads(value, self.num_heads, self.head_dim)
        if layer_past is not None:
            # Concatenate cached keys/values along the sequence dimension.
            past_key, past_value = layer_past
            key = flow.cat((past_key, key), dim=-2)
            value = flow.cat((past_value, value), dim=-2)
        if use_cache is True:
            present = (key, value)
        else:
            present = None
        attn_output, attn_weights = self._attn(query, key, value)
        attn_output = self._merge_heads(attn_output, self.num_heads, self.head_dim)
        attn_output = self.c_proj(attn_output)
        attn_output = self.resid_dropout(attn_output)
        outputs = (attn_output, present, attn_weights)
        return outputs
class GPT2MLP(nn.Module):
    """Position-wise feed-forward block: Conv1D -> gelu -> Conv1D -> dropout."""

    def __init__(self, intermediate_size, config):
        super().__init__()
        embed_dim = config.hidden_size
        self.c_fc = Conv1D(intermediate_size, embed_dim)
        self.c_proj = Conv1D(embed_dim, intermediate_size)
        self.act = gelu
        self.dropout = nn.Dropout(config.resid_pdrop)

    def forward(self, hidden_states):
        out = self.c_fc(hidden_states)
        out = self.act(out)
        out = self.c_proj(out)
        return self.dropout(out)
class GPT2Block(nn.Module):
    """One pre-LayerNorm transformer block: attention + MLP, each with a residual."""

    def __init__(self, config):
        super().__init__()
        hidden_size = config.hidden_size
        inner_dim = 4 * hidden_size if config.n_inner is None else config.n_inner
        self.ln_1 = LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
        self.attn = GPT2Attention(config)
        self.ln_2 = LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
        self.mlp = GPT2MLP(inner_dim, config)

    def forward(self, hidden_states, layer_past=None, use_cache=False):
        # Attention sub-layer with residual connection.
        attn_outputs = self.attn(self.ln_1(hidden_states), layer_past, use_cache)
        extras = attn_outputs[1:]  # (present, attn_weights)
        hidden_states = attn_outputs[0] + hidden_states
        # Feed-forward sub-layer with residual connection.
        hidden_states = hidden_states + self.mlp(self.ln_2(hidden_states))
        if use_cache:
            return (hidden_states,) + extras  # hidden_states, present, attn_weights
        return (hidden_states,) + extras[1:]  # hidden_states, attn_weights
class GPT2Model(nn.Module):
    """GPT-2 transformer body: token + position embeddings, a stack of
    GPT2Block layers, and a final LayerNorm."""

    def __init__(self, config):
        super(GPT2Model, self).__init__()
        self.embed_dim = config.hidden_size
        # Token and learned absolute-position embeddings.
        self.wte = nn.Embedding(config.vocab_size, self.embed_dim)
        self.wpe = nn.Embedding(config.max_position_embeddings, self.embed_dim)
        self.drop = nn.Dropout(config.embd_pdrop)
        self.h = nn.ModuleList([GPT2Block(config) for _ in range(config.num_hidden_layers)])
        self.ln_f = LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon)

    def forward(self, input_ids, position_ids=None, token_type_ids=None, past_key_values=None, use_cache=False, output_attentions=False, output_hidden_states=False):
        """Returns a tuple of (hidden_states[, presents][, all_hidden_states][, all_attentions]),
        keeping only the entries whose flags are enabled."""
        input_shape = input_ids.size()
        input_ids = input_ids.view(-1, input_ids.size(-1))
        batch_size = input_ids.shape[0]
        if position_ids is not None:
            position_ids = position_ids.view(-1, input_shape[-1])
        if token_type_ids is not None:
            token_type_ids = token_type_ids.view(-1, input_shape[-1])
        if past_key_values is None:
            past_length = 0
            past_key_values = [None] * len(self.h)
        else:
            # past_key_values[layer] is (key, value); key's -2 dim is the
            # number of positions already processed.
            past_length = past_key_values[0][0].size(-2)
        if position_ids is None:
            # Continue position numbering after the cached prefix.
            position_ids = flow.arange(past_length, input_shape[-1] + past_length, dtype=flow.long, device=input_ids.device)
            position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1])
        inputs_embeds = self.wte(input_ids)
        position_embeds = self.wpe(position_ids)
        hidden_states = inputs_embeds + position_embeds
        if token_type_ids is not None:
            # Token-type embeddings share the token embedding table.
            token_type_embeds = self.wte(token_type_ids)
            hidden_states = hidden_states + token_type_embeds
        hidden_states = self.drop(hidden_states)
        # Accumulators are tuples when requested, None otherwise.
        presents = () if use_cache else None
        all_attentions = () if output_attentions else None
        all_hidden_states = () if output_hidden_states else None
        for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):
            if output_hidden_states:
                # Record the hidden state *entering* each block.
                all_hidden_states = all_hidden_states + (hidden_states,)
            outputs = block(hidden_states, layer_past, use_cache)
            hidden_states = outputs[0]
            if use_cache is True:
                presents = presents + (outputs[1],)
            if output_attentions:
                # Block output layout shifts by one when no present is emitted.
                all_attentions = all_attentions + (outputs[2 if use_cache else 1],)
        hidden_states = self.ln_f(hidden_states)
        output_shape = (input_shape[0], input_shape[1], hidden_states.size(-1))
        hidden_states = hidden_states.view(*output_shape)
        if output_hidden_states:
            # Also record the final (post-LayerNorm) hidden state.
            all_hidden_states = all_hidden_states + (hidden_states,)
        return tuple(v for v in [hidden_states, presents, all_hidden_states, all_attentions] if v is not None)
class LMHead(nn.Module):
    """Language-model head whose projection is tied to the token embedding."""

    def __init__(self, model, config):
        super(LMHead, self).__init__()
        self.n_embd = config.n_embd
        vocab_size, embd_dim = model.wte.weight.shape
        self.decoder = nn.Linear(embd_dim, vocab_size, bias=False)
        self.decoder.weight = model.wte.weight  # Tied weights

    def forward(self, h):
        # Drop the last position (it has no next-token target) and flatten.
        flat = h[:, :-1].view(-1, self.n_embd)
        return self.decoder(flat)
class GPT2LMHeadModel(nn.Module):
    """GPT-2 with a linear language-modeling head on top of GPT2Model.

    NOTE(review): tie_weights() is defined but the call in __init__ is
    commented out, so the head is NOT weight-tied to the embedding by
    default — confirm this is intentional.
    """

    def __init__(self, config):
        super(GPT2LMHeadModel, self).__init__()
        self.transformer = GPT2Model(config)
        self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
        # self.tie_weights()
        # self.lm_head = LMHead(self.transformer, config)

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def tie_weights(self):
        # Share the embedding matrix with the output projection.
        self.lm_head.weight = self.transformer.wte.weight

    def forward(self, input_ids, position_ids=None, token_type_ids=None, labels=None, past_key_values=None, use_cache=False, output_attentions=False, output_hidden_states=False):
        """Returns (loss, logits, *extras) when labels are given, else
        (logits, *extras), where extras are GPT2Model's optional outputs."""
        transformer_outputs = self.transformer(input_ids, position_ids, token_type_ids, past_key_values, use_cache, output_attentions, output_hidden_states)
        hidden_states = transformer_outputs[0]
        lm_logits = self.lm_head(hidden_states)
        loss = None
        if labels is not None:
            # Shift so that tokens < n predict n
            seq_len = lm_logits.size(1)
            shift_logits = lm_logits[..., :seq_len - 1, :]
            shift_labels = labels[..., 1:]
            # Flatten the tokens
            loss_fct = nn.CrossEntropyLoss()
            shift_logits = shift_logits.view(-1, shift_logits.size(-1))
            shift_labels = shift_labels.view(-1)
            loss = loss_fct(shift_logits, shift_labels)
        output = (lm_logits,) + transformer_outputs[1:]
        if loss is not None:
            return (loss,) + output
        else:
            return output
| [
"oneflow.Tensor",
"oneflow.arange",
"oneflow.nn.init.normal_",
"oneflow.cat",
"oneflow.pow",
"oneflow.matmul",
"oneflow.sqrt",
"oneflow.zeros",
"oneflow.nn.CrossEntropyLoss",
"oneflow.ones",
"oneflow.nn.Dropout",
"oneflow.chunk",
"oneflow.nn.Embedding",
"oneflow.tensor",
"oneflow.nn.Soft... | [((1444, 1490), 'oneflow.nn.init.normal_', 'nn.init.normal_', (['self.weight'], {'mean': '(0)', 'std': '(0.02)'}), '(self.weight, mean=0, std=0.02)\n', (1459, 1490), True, 'import oneflow.nn as nn\n'), ((2667, 2696), 'oneflow.nn.Dropout', 'nn.Dropout', (['config.attn_pdrop'], {}), '(config.attn_pdrop)\n', (2677, 2696), True, 'import oneflow.nn as nn\n'), ((2726, 2756), 'oneflow.nn.Dropout', 'nn.Dropout', (['config.resid_pdrop'], {}), '(config.resid_pdrop)\n', (2736, 2756), True, 'import oneflow.nn as nn\n'), ((3366, 3398), 'oneflow.matmul', 'flow.matmul', (['attn_weights', 'value'], {}), '(attn_weights, value)\n', (3377, 3398), True, 'import oneflow as flow\n'), ((4470, 4512), 'oneflow.chunk', 'flow.chunk', (['hidden_states'], {'chunks': '(3)', 'dim': '(2)'}), '(hidden_states, chunks=3, dim=2)\n', (4480, 4512), True, 'import oneflow as flow\n'), ((5664, 5694), 'oneflow.nn.Dropout', 'nn.Dropout', (['config.resid_pdrop'], {}), '(config.resid_pdrop)\n', (5674, 5694), True, 'import oneflow.nn as nn\n'), ((7381, 7428), 'oneflow.nn.Embedding', 'nn.Embedding', (['config.vocab_size', 'self.embed_dim'], {}), '(config.vocab_size, self.embed_dim)\n', (7393, 7428), True, 'import oneflow.nn as nn\n'), ((7448, 7508), 'oneflow.nn.Embedding', 'nn.Embedding', (['config.max_position_embeddings', 'self.embed_dim'], {}), '(config.max_position_embeddings, self.embed_dim)\n', (7460, 7508), True, 'import oneflow.nn as nn\n'), ((7538, 7567), 'oneflow.nn.Dropout', 'nn.Dropout', (['config.embd_pdrop'], {}), '(config.embd_pdrop)\n', (7548, 7567), True, 'import oneflow.nn as nn\n'), ((10428, 10481), 'oneflow.nn.Linear', 'nn.Linear', (['embed_shape[1]', 'embed_shape[0]'], {'bias': '(False)'}), '(embed_shape[1], embed_shape[0], bias=False)\n', (10437, 10481), True, 'import oneflow.nn as nn\n'), ((10951, 11006), 'oneflow.nn.Linear', 'nn.Linear', (['config.n_embd', 'config.vocab_size'], {'bias': '(False)'}), '(config.n_embd, config.vocab_size, bias=False)\n', (10960, 11006), 
True, 'import oneflow.nn as nn\n'), ((616, 658), 'oneflow.ones', 'flow.ones', (['hidden_size'], {'dtype': 'flow.float32'}), '(hidden_size, dtype=flow.float32)\n', (625, 658), True, 'import oneflow as flow\n'), ((693, 736), 'oneflow.zeros', 'flow.zeros', (['hidden_size'], {'dtype': 'flow.float32'}), '(hidden_size, dtype=flow.float32)\n', (703, 736), True, 'import oneflow as flow\n'), ((885, 910), 'oneflow.sqrt', 'flow.sqrt', (['(std + self.eps)'], {}), '(std + self.eps)\n', (894, 910), True, 'import oneflow as flow\n'), ((1415, 1434), 'oneflow.Tensor', 'flow.Tensor', (['nx', 'nf'], {}), '(nx, nf)\n', (1426, 1434), True, 'import oneflow as flow\n'), ((1524, 1538), 'oneflow.zeros', 'flow.zeros', (['nf'], {}), '(nf)\n', (1534, 1538), True, 'import oneflow as flow\n'), ((2218, 2239), 'oneflow.tensor', 'flow.tensor', (['(-10000.0)'], {}), '(-10000.0)\n', (2229, 2239), True, 'import oneflow as flow\n'), ((3255, 3273), 'oneflow.nn.Softmax', 'nn.Softmax', ([], {'dim': '(-1)'}), '(dim=-1)\n', (3265, 3273), True, 'import oneflow.nn as nn\n'), ((4826, 4859), 'oneflow.cat', 'flow.cat', (['(past_key, key)'], {'dim': '(-2)'}), '((past_key, key), dim=-2)\n', (4834, 4859), True, 'import oneflow as flow\n'), ((4880, 4917), 'oneflow.cat', 'flow.cat', (['(past_value, value)'], {'dim': '(-2)'}), '((past_value, value), dim=-2)\n', (4888, 4917), True, 'import oneflow as flow\n'), ((8516, 8617), 'oneflow.arange', 'flow.arange', (['past_length', '(input_shape[-1] + past_length)'], {'dtype': 'flow.long', 'device': 'input_ids.device'}), '(past_length, input_shape[-1] + past_length, dtype=flow.long,\n device=input_ids.device)\n', (8527, 8617), True, 'import oneflow as flow\n'), ((12086, 12107), 'oneflow.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (12105, 12107), True, 'import oneflow.nn as nn\n'), ((340, 364), 'math.sqrt', 'math.sqrt', (['(2.0 / math.pi)'], {}), '(2.0 / math.pi)\n', (349, 364), False, 'import math\n'), ((2032, 2090), 'oneflow.ones', 'flow.ones', 
(['(max_positions, max_positions)'], {'dtype': 'flow.int8'}), '((max_positions, max_positions), dtype=flow.int8)\n', (2041, 2090), True, 'import oneflow as flow\n'), ((383, 399), 'oneflow.pow', 'flow.pow', (['x', '(3.0)'], {}), '(x, 3.0)\n', (391, 399), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow as flow
from oneflow.framework.tensor import register_tensor_op
def masked_select_op(input, mask):
    """
    Returns a new 1-D tensor which indexes the input tensor according to the boolean mask mask which is a BoolTensor(In oneFlow BoolTensor is replaced by Int8Tensor).
    The shapes of the mask tensor and the input tensor don’t need to match, but they must be broadcastable.
    Args:
        input (Tensor): the input tensor.
        mask (Tensor): the tensor containing the binary mask to index with
    For example:
    .. code-block:: python
        >>> import oneflow as flow
        >>> import numpy as np
        >>> input = flow.tensor(np.array([[-0.4620, 0.3139], [0.3898, -0.7197], [0.0478, -0.1657]]), dtype=flow.float32)
        >>> mask = input.gt(0.05)
        >>> out = flow.masked_select(input, mask)
        >>> out
        tensor([0.3139, 0.3898], dtype=oneflow.float32)
    """
    assert len(input.shape) == len(
        mask.shape
    ), f"The dim of masked_select module's inputs can not match, please check!"
    # Per-axis broadcast plan: the target shape is the element-wise max, and
    # each tensor records the axes it must be expanded along.
    target_shape = []
    input_bcast_axes = []
    mask_bcast_axes = []
    for axis, (in_dim, mask_dim) in enumerate(zip(input.shape, mask.shape)):
        dim = max(in_dim, mask_dim)
        target_shape.append(dim)
        if in_dim != dim:
            input_bcast_axes.append(axis)
        if mask_dim != dim:
            mask_bcast_axes.append(axis)
    like = flow.zeros(tuple(target_shape), dtype=flow.float32, device=input.device)
    like.requires_grad = input.requires_grad or mask.requires_grad
    if input_bcast_axes:
        input = flow.broadcast_like(
            input, like, broadcast_axes=tuple(input_bcast_axes)
        )
    if mask_bcast_axes:
        mask = flow.broadcast_like(
            mask, like, broadcast_axes=tuple(mask_bcast_axes)
        )
    # Zero out unselected entries, then gather the surviving coordinates.
    masked = flow._C.mul(input, mask.to(dtype=input.dtype))
    selected = flow._C.gather_nd(masked, flow.argwhere(masked))
    return selected.flatten()
@register_tensor_op("masked_select")
def tensor_masked_select_op(input, mask):
    """
    Tensor-method form, registered as ``Tensor.masked_select``.

    See :func:`oneflow.masked_select`
    """
    return masked_select_op(input, mask)
if __name__ == "__main__":
    import doctest

    # Run the doctest embedded in masked_select_op's docstring; abort on failure.
    doctest.testmod(raise_on_error=True)
| [
"oneflow._C.gather_nd",
"oneflow._C.mul",
"oneflow.framework.tensor.register_tensor_op",
"oneflow.argwhere"
] | [((2766, 2801), 'oneflow.framework.tensor.register_tensor_op', 'register_tensor_op', (['"""masked_select"""'], {}), "('masked_select')\n", (2784, 2801), False, 'from oneflow.framework.tensor import register_tensor_op\n'), ((2624, 2648), 'oneflow._C.mul', 'flow._C.mul', (['input', 'mask'], {}), '(input, mask)\n', (2635, 2648), True, 'import oneflow as flow\n'), ((2663, 2681), 'oneflow.argwhere', 'flow.argwhere', (['res'], {}), '(res)\n', (2676, 2681), True, 'import oneflow as flow\n'), ((2699, 2730), 'oneflow._C.gather_nd', 'flow._C.gather_nd', (['res', 'indices'], {}), '(res, indices)\n', (2716, 2730), True, 'import oneflow as flow\n'), ((2994, 3030), 'doctest.testmod', 'doctest.testmod', ([], {'raise_on_error': '(True)'}), '(raise_on_error=True)\n', (3009, 3030), False, 'import doctest\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import unittest
from collections import OrderedDict
import numpy as np
from oneflow.test_utils.test_util import GenArgList
import oneflow as flow
import oneflow.unittest
@flow.unittest.skip_unless_1n2d()
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
class TestModuleToCosistent(flow.unittest.TestCase):
    def test_module_to_global(test_case):
        """to_global must preserve parameter sharing and distribute every parameter."""
        rank = flow.env.get_rank()
        placement = flow.placement("cuda", ranks=[0, 1])
        broadcast = flow.sbp.broadcast

        class ReuseVarModule(flow.nn.Module):
            def __init__(self):
                super().__init__()
                self.linear1 = flow.nn.Linear(3, 4)
                self.linear2 = flow.nn.Linear(3, 4)
                # Deliberately share one parameter between the two layers.
                self.linear2.weight = self.linear1.weight

        model = ReuseVarModule()
        # Before the move: shared weight, distinct biases, all on the local CPU.
        test_case.assertTrue(model.linear1.weight is model.linear2.weight)
        test_case.assertEqual(model.linear1.weight.device, flow.device("cpu", rank))
        test_case.assertTrue(model.linear1.bias is not model.linear2.bias)
        test_case.assertEqual(model.linear1.bias.device, flow.device("cpu", rank))

        model.to_global(placement=placement, sbp=broadcast)
        # After the move: sharing is preserved and every parameter is global.
        test_case.assertTrue(model.linear1.weight is model.linear2.weight)
        test_case.assertEqual(model.linear1.weight.placement, placement)
        test_case.assertEqual(model.linear1.weight.sbp[0], broadcast)
        test_case.assertTrue(model.linear1.bias is not model.linear2.bias)
        test_case.assertEqual(model.linear1.bias.placement, placement)
        test_case.assertEqual(model.linear1.bias.sbp[0], broadcast)
if __name__ == "__main__":
    # Discover and run the test case above.
    unittest.main()
| [
"oneflow.nn.Linear",
"oneflow.unittest.skip_unless_1n2d",
"oneflow.env.get_rank",
"oneflow.device",
"oneflow.placement"
] | [((775, 807), 'oneflow.unittest.skip_unless_1n2d', 'flow.unittest.skip_unless_1n2d', ([], {}), '()\n', (805, 807), True, 'import oneflow as flow\n'), ((825, 859), 'os.getenv', 'os.getenv', (['"""ONEFLOW_TEST_CPU_ONLY"""'], {}), "('ONEFLOW_TEST_CPU_ONLY')\n", (834, 859), False, 'import os\n'), ((2322, 2337), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2335, 2337), False, 'import unittest\n'), ((994, 1013), 'oneflow.env.get_rank', 'flow.env.get_rank', ([], {}), '()\n', (1011, 1013), True, 'import oneflow as flow\n'), ((1026, 1062), 'oneflow.placement', 'flow.placement', (['"""cuda"""'], {'ranks': '[0, 1]'}), "('cuda', ranks=[0, 1])\n", (1040, 1062), True, 'import oneflow as flow\n'), ((1576, 1600), 'oneflow.device', 'flow.device', (['"""cpu"""', 'rank'], {}), "('cpu', rank)\n", (1587, 1600), True, 'import oneflow as flow\n'), ((1762, 1786), 'oneflow.device', 'flow.device', (['"""cpu"""', 'rank'], {}), "('cpu', rank)\n", (1773, 1786), True, 'import oneflow as flow\n'), ((1239, 1259), 'oneflow.nn.Linear', 'flow.nn.Linear', (['(3)', '(4)'], {}), '(3, 4)\n', (1253, 1259), True, 'import oneflow as flow\n'), ((1291, 1311), 'oneflow.nn.Linear', 'flow.nn.Linear', (['(3)', '(4)'], {}), '(3, 4)\n', (1305, 1311), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import oneflow as flow
import oneflow.typing as oft
from typing import Tuple
def test_FixedTensorDef(test_case):
    """A fixed-shape placeholder should pass a numpy array through unchanged."""

    @flow.global_function()
    def Foo(x: oft.Numpy.Placeholder((2, 5))):
        return x

    data = np.ones((2, 5), dtype=np.float32)
    result = Foo(data).get().numpy()
    test_case.assertEqual(result.max(), 1)
    test_case.assertEqual(result.min(), 1)
    test_case.assertTrue(np.allclose(result, data))
def test_FixedTensorDef_batch_axis(test_case):
    """batch_axis given to a placeholder must be visible inside the job function."""

    @flow.global_function()
    def Foo(x: oft.Numpy.Placeholder((2, 5), batch_axis=1)):
        test_case.assertEqual(x.batch_axis, 1)
        return x

    data = np.ones((2, 5), dtype=np.float32)
    # Fix: pass the prepared array instead of building a second, unused one.
    Foo(data).get()
def test_FixedTensorDef_no_batch_axis(test_case):
    """batch_axis=None on a placeholder must be visible inside the job function."""

    @flow.global_function()
    def Foo(x: oft.Numpy.Placeholder((2, 5), batch_axis=None)):
        test_case.assertTrue(x.batch_axis is None)
        return x

    data = np.ones((2, 5), dtype=np.float32)
    # Fix: pass the prepared array instead of building a second, unused one.
    Foo(data).get()
def test_FixedTensorDef_2_device(test_case):
    """Fixed placeholder round-trip with two GPU devices configured."""
    flow.config.gpu_device_num(2)

    @flow.global_function()
    def Foo(x: oft.Numpy.Placeholder((2, 5))):
        return x

    data = np.ones((2, 5), dtype=np.float32)
    result = Foo(data).get().numpy()
    test_case.assertEqual(result.max(), 1)
    test_case.assertEqual(result.min(), 1)
    test_case.assertTrue(np.allclose(result, data))
def test_MirroredTensorDef(test_case):
    """A mirrored placeholder accepts a list of per-device arrays and returns them."""
    conf = flow.FunctionConfig()
    conf.default_logical_view(flow.scope.mirrored_view())

    @flow.global_function(conf)
    def Foo(x: oft.ListNumpy.Placeholder((2, 5))):
        return x

    sample = np.ones((1, 5), dtype=np.float32)
    results = Foo([sample]).get().numpy_list()
    test_case.assertEqual(len(results), 1)
    test_case.assertTrue(np.allclose(results[0], sample))
def test_MirroredTensorListDef(test_case):
    """A nested mirrored placeholder accepts a list of lists of arrays."""
    conf = flow.FunctionConfig()
    conf.default_logical_view(flow.scope.mirrored_view())

    @flow.global_function(conf)
    def Foo(x: oft.ListListNumpy.Placeholder((2, 5))):
        return x

    sample = np.ones((1, 5), dtype=np.float32)
    results = Foo([[sample]]).get().numpy_lists()
    test_case.assertEqual(len(results), 1)
    test_case.assertEqual(len(results[0]), 1)
    test_case.assertTrue(np.allclose(results[0][0], sample))
def test_MirroredTensorDef_4_device(test_case):
    """A tuple of mirrored placeholders must round-trip per-device arrays intact."""
    num_gpus = 4
    flow.config.gpu_device_num(num_gpus)
    func_config = flow.FunctionConfig()
    func_config.default_logical_view(flow.scope.mirrored_view())
    image_shape = (64, 3, 224, 224)
    label_shape = (64, 1)

    @flow.global_function(func_config)
    def Foo(
        image_label: Tuple[
            oft.ListNumpy.Placeholder(image_shape),
            oft.ListNumpy.Placeholder(label_shape),
        ]
    ):
        return image_label

    # Fix: named def instead of a lambda bound to a name (PEP 8 E731).
    def ndarray_lst(shape):
        # One random float32 array per device.
        return [np.random.rand(*shape).astype(np.float32) for _ in range(num_gpus)]

    images = ndarray_lst(image_shape)
    labels = ndarray_lst(label_shape)
    inputs = (images, labels)
    outputs = [output.numpy_list() for output in Foo(inputs).get()]
    test_case.assertEqual(len(outputs), len(inputs))
    for o, i in zip(outputs, inputs):
        test_case.assertEqual(len(o), len(i))
        for o_nda, i_nda in zip(o, i):
            assert type(o_nda) is np.ndarray
            assert type(i_nda) is np.ndarray
            # The data makes no dtype conversion, so require bit-exact equality.
            test_case.assertTrue(np.array_equal(o_nda, i_nda))
| [
"oneflow.global_function",
"oneflow.typing.Numpy.Placeholder",
"oneflow.FunctionConfig",
"oneflow.typing.ListListNumpy.Placeholder",
"oneflow.config.gpu_device_num",
"oneflow.typing.ListNumpy.Placeholder",
"oneflow.scope.mirrored_view"
] | [((729, 751), 'oneflow.global_function', 'flow.global_function', ([], {}), '()\n', (749, 751), True, 'import oneflow as flow\n'), ((828, 861), 'numpy.ones', 'np.ones', (['(2, 5)'], {'dtype': 'np.float32'}), '((2, 5), dtype=np.float32)\n', (835, 861), True, 'import numpy as np\n'), ((1107, 1129), 'oneflow.global_function', 'flow.global_function', ([], {}), '()\n', (1127, 1129), True, 'import oneflow as flow\n'), ((1267, 1300), 'numpy.ones', 'np.ones', (['(2, 5)'], {'dtype': 'np.float32'}), '((2, 5), dtype=np.float32)\n', (1274, 1300), True, 'import numpy as np\n'), ((1407, 1429), 'oneflow.global_function', 'flow.global_function', ([], {}), '()\n', (1427, 1429), True, 'import oneflow as flow\n'), ((1574, 1607), 'numpy.ones', 'np.ones', (['(2, 5)'], {'dtype': 'np.float32'}), '((2, 5), dtype=np.float32)\n', (1581, 1607), True, 'import numpy as np\n'), ((1708, 1737), 'oneflow.config.gpu_device_num', 'flow.config.gpu_device_num', (['(2)'], {}), '(2)\n', (1734, 1737), True, 'import oneflow as flow\n'), ((1744, 1766), 'oneflow.global_function', 'flow.global_function', ([], {}), '()\n', (1764, 1766), True, 'import oneflow as flow\n'), ((1843, 1876), 'numpy.ones', 'np.ones', (['(2, 5)'], {'dtype': 'np.float32'}), '((2, 5), dtype=np.float32)\n', (1850, 1876), True, 'import numpy as np\n'), ((2127, 2148), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (2146, 2148), True, 'import oneflow as flow\n'), ((2220, 2253), 'oneflow.global_function', 'flow.global_function', (['func_config'], {}), '(func_config)\n', (2240, 2253), True, 'import oneflow as flow\n'), ((2334, 2367), 'numpy.ones', 'np.ones', (['(1, 5)'], {'dtype': 'np.float32'}), '((1, 5), dtype=np.float32)\n', (2341, 2367), True, 'import numpy as np\n'), ((2590, 2611), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (2609, 2611), True, 'import oneflow as flow\n'), ((2683, 2716), 'oneflow.global_function', 'flow.global_function', (['func_config'], {}), '(func_config)\n', (2703, 
2716), True, 'import oneflow as flow\n'), ((2801, 2834), 'numpy.ones', 'np.ones', (['(1, 5)'], {'dtype': 'np.float32'}), '((1, 5), dtype=np.float32)\n', (2808, 2834), True, 'import numpy as np\n'), ((3122, 3158), 'oneflow.config.gpu_device_num', 'flow.config.gpu_device_num', (['num_gpus'], {}), '(num_gpus)\n', (3148, 3158), True, 'import oneflow as flow\n'), ((3177, 3198), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (3196, 3198), True, 'import oneflow as flow\n'), ((3333, 3366), 'oneflow.global_function', 'flow.global_function', (['func_config'], {}), '(func_config)\n', (3353, 3366), True, 'import oneflow as flow\n'), ((2186, 2212), 'oneflow.scope.mirrored_view', 'flow.scope.mirrored_view', ([], {}), '()\n', (2210, 2212), True, 'import oneflow as flow\n'), ((2491, 2525), 'numpy.allclose', 'np.allclose', (['ndarray_list[0]', 'data'], {}), '(ndarray_list[0], data)\n', (2502, 2525), True, 'import numpy as np\n'), ((2649, 2675), 'oneflow.scope.mirrored_view', 'flow.scope.mirrored_view', ([], {}), '()\n', (2673, 2675), True, 'import oneflow as flow\n'), ((3012, 3049), 'numpy.allclose', 'np.allclose', (['ndarray_list[0][0]', 'data'], {}), '(ndarray_list[0][0], data)\n', (3023, 3049), True, 'import numpy as np\n'), ((3236, 3262), 'oneflow.scope.mirrored_view', 'flow.scope.mirrored_view', ([], {}), '()\n', (3260, 3262), True, 'import oneflow as flow\n'), ((767, 796), 'oneflow.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['(2, 5)'], {}), '((2, 5))\n', (788, 796), True, 'import oneflow.typing as oft\n'), ((1145, 1188), 'oneflow.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['(2, 5)'], {'batch_axis': '(1)'}), '((2, 5), batch_axis=1)\n', (1166, 1188), True, 'import oneflow.typing as oft\n'), ((1445, 1491), 'oneflow.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['(2, 5)'], {'batch_axis': 'None'}), '((2, 5), batch_axis=None)\n', (1466, 1491), True, 'import oneflow.typing as oft\n'), ((1782, 1811), 'oneflow.typing.Numpy.Placeholder', 
'oft.Numpy.Placeholder', (['(2, 5)'], {}), '((2, 5))\n', (1803, 1811), True, 'import oneflow.typing as oft\n'), ((2269, 2302), 'oneflow.typing.ListNumpy.Placeholder', 'oft.ListNumpy.Placeholder', (['(2, 5)'], {}), '((2, 5))\n', (2294, 2302), True, 'import oneflow.typing as oft\n'), ((2732, 2769), 'oneflow.typing.ListListNumpy.Placeholder', 'oft.ListListNumpy.Placeholder', (['(2, 5)'], {}), '((2, 5))\n', (2761, 2769), True, 'import oneflow.typing as oft\n'), ((1309, 1342), 'numpy.ones', 'np.ones', (['(2, 5)'], {'dtype': 'np.float32'}), '((2, 5), dtype=np.float32)\n', (1316, 1342), True, 'import numpy as np\n'), ((1616, 1649), 'numpy.ones', 'np.ones', (['(2, 5)'], {'dtype': 'np.float32'}), '((2, 5), dtype=np.float32)\n', (1623, 1649), True, 'import numpy as np\n'), ((4208, 4236), 'numpy.array_equal', 'np.array_equal', (['o_nda', 'i_nda'], {}), '(o_nda, i_nda)\n', (4222, 4236), True, 'import numpy as np\n'), ((3420, 3458), 'oneflow.typing.ListNumpy.Placeholder', 'oft.ListNumpy.Placeholder', (['image_shape'], {}), '(image_shape)\n', (3445, 3458), True, 'import oneflow.typing as oft\n'), ((3472, 3510), 'oneflow.typing.ListNumpy.Placeholder', 'oft.ListNumpy.Placeholder', (['label_shape'], {}), '(label_shape)\n', (3497, 3510), True, 'import oneflow.typing as oft\n'), ((3599, 3621), 'numpy.random.rand', 'np.random.rand', (['*shape'], {}), '(*shape)\n', (3613, 3621), True, 'import numpy as np\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import collections.abc
import oneflow as flow
import oneflow.python.framework.c_api_util as c_api_util
import oneflow.python.framework.hob as hob
import oneflow.python.eager.gradient_util as gradient_util
import oneflow.python.lib.core.enable_if as enable_if
from oneflow.python.oneflow_export import oneflow_export
import oneflow.python.framework.remote_blob as remote_blob_util
import oneflow.core.operator.op_conf_pb2 as op_conf_pb
import oneflow.core.job.job_conf_pb2 as job_conf_pb
from typing import Tuple, Optional, Union, Sequence, Text
class ClipGradientConf:
    """Abstract base for gradient-clipping configurations."""

    @property
    def clip_conf(self) -> op_conf_pb.ClipConf:
        # Subclasses must build and return their ClipConf protobuf here.
        raise NotImplementedError()
@oneflow_export("optimizer.grad_clipping.by_global_norm")
class ClipByGlobalNorm(ClipGradientConf):
    """Gradient clipping configured by the global norm of all gradients.

    Args:
        clip_norm: maximum allowed global norm.
    """

    def __init__(self, clip_norm):
        self.clip_norm = clip_norm

    @property
    def clip_conf(self):
        """Build the ClipConf protobuf for global-norm clipping."""
        conf = op_conf_pb.ClipConf()
        conf.clip_by_global_norm.clip_norm = self.clip_norm
        return conf
class WarmupConf:
    """Abstract base for learning-rate warmup configurations."""

    @property
    def warmup_conf(self) -> op_conf_pb.WarmupConf:
        # Subclasses must build and return their WarmupConf protobuf here.
        raise NotImplementedError()
@oneflow_export("optimizer.warmup.constant")
class ConstantWarmup(WarmupConf):
    """Warmup that applies a constant multiplier for the first ``steps`` batches."""

    def __init__(self, steps, multiplier):
        self.steps = steps
        self.multiplier = multiplier

    @property
    def warmup_conf(self) -> op_conf_pb.WarmupConf:
        """Build the WarmupConf protobuf for constant warmup."""
        conf = op_conf_pb.WarmupConf()
        conf.constant_conf.warmup_batches = self.steps
        conf.constant_conf.multiplier = self.multiplier
        return conf
@oneflow_export("optimizer.warmup.linear")
class LinearWarmup(WarmupConf):
    """Warmup that ramps the learning rate linearly over the first ``steps`` batches."""

    def __init__(self, steps, start_multiplier):
        self.steps = steps
        self.start_multiplier = start_multiplier

    @property
    def warmup_conf(self) -> op_conf_pb.WarmupConf:
        """Build the WarmupConf protobuf for linear warmup."""
        conf = op_conf_pb.WarmupConf()
        conf.linear_conf.warmup_batches = self.steps
        conf.linear_conf.start_multiplier = self.start_multiplier
        return conf
class LrScheduler:
    """Base class for learning-rate schedules.

    A schedule is driven either by ``base_lr`` plus a decay conf supplied by
    the subclass, or by ``lr_lbn`` (a blob providing the learning rate
    directly) — the two are mutually exclusive.
    """

    def __init__(
        self,
        base_lr: Optional[float] = None,
        lr_lbn: Optional[Text] = None,
        warmup: Optional[WarmupConf] = None,
    ):
        self.base_lr = base_lr
        self.lr_lbn = lr_lbn
        self.warmup = warmup

    @property
    def learning_rate_decay_conf(self) -> Optional[op_conf_pb.LearningRateDecayConf]:
        # Subclasses return their LearningRateDecayConf protobuf here.
        raise NotImplementedError()

    def SetLrFieldsInTrainConf(self, train_conf) -> None:
        """Copy warmup and learning-rate settings into ``train_conf``."""
        warmup_conf = self.warmup_conf
        if warmup_conf is not None:
            train_conf.model_update_conf.warmup_conf.CopyFrom(warmup_conf)
        if self.lr_lbn is None:
            assert self.learning_rate_decay_conf is not None
            train_conf.model_update_conf.learning_rate_decay.CopyFrom(
                self.learning_rate_decay_conf
            )
            train_conf.primary_lr = self.base_lr
        else:
            assert self.learning_rate_decay_conf is None
            assert self.base_lr is None
            train_conf.primary_lr_lbn = self.lr_lbn
            # primary_lr is a required proto field, so fill a placeholder.
            train_conf.primary_lr = 0

    @property
    def warmup_conf(self) -> op_conf_pb.WarmupConf:
        if self.warmup is None:
            return None
        return self.warmup.warmup_conf
@oneflow_export("optimizer.CosineScheduler")
class CosineScheduler(LrScheduler):
    """Cosine learning-rate decay over ``steps`` batches with floor factor ``alpha``."""

    def __init__(
        self,
        base_lr: float,
        steps: int,
        alpha: float = 0.0,
        warmup: Optional[WarmupConf] = None,
    ):
        super().__init__(base_lr=base_lr, warmup=warmup)
        self.steps = steps
        self.alpha = alpha

    @property
    def learning_rate_decay_conf(self) -> Optional[op_conf_pb.LearningRateDecayConf]:
        conf = op_conf_pb.LearningRateDecayConf()
        conf.cosine_conf.decay_batches = self.steps
        conf.cosine_conf.alpha = self.alpha
        return conf
@oneflow_export("optimizer.CustomScheduler")
class CustomScheduler(LrScheduler):
    """Scheduler whose learning rate comes from a user-supplied blob (``lbn``)."""
    def __init__(self, lbn: Text):
        super().__init__(lr_lbn=lbn)
    @property
    def learning_rate_decay_conf(self) -> op_conf_pb.LearningRateDecayConf:
        # None: the lr blob drives the schedule, so no decay conf is built.
        return None
@oneflow_export("optimizer.PiecewiseConstantScheduler")
class PiecewiseConstantScheduler(LrScheduler):
    """Piecewise-constant schedule: ``values[i]`` applies until ``boundaries[i]``."""

    def __init__(
        self,
        boundaries: Sequence[int],
        values: Sequence[float],
        warmup: Optional[WarmupConf] = None,
    ):
        # One more value than boundaries: the last value applies after the end.
        assert len(boundaries) + 1 == len(values)
        super().__init__(base_lr=values[0], warmup=warmup)
        self.boundaries = boundaries
        self.values = values

    @property
    def learning_rate_decay_conf(self) -> Optional[op_conf_pb.LearningRateDecayConf]:
        conf = op_conf_pb.LearningRateDecayConf()
        conf.piecewise_constant_conf.boundaries.extend(self.boundaries)
        conf.piecewise_constant_conf.values.extend(self.values)
        return conf
@oneflow_export("optimizer.PiecewiseScalingScheduler")
class PiecewiseScalingScheduler(LrScheduler):
    """Scale ``base_lr`` by a per-interval factor; interval ``i`` ends at ``boundaries[i]``."""

    def __init__(
        self,
        base_lr: float,
        boundaries: Sequence[int],
        scale: Union[float, Sequence[float]],
        warmup: Optional[WarmupConf] = None,
    ):
        super().__init__(base_lr=base_lr, warmup=warmup)
        self.boundaries = boundaries
        # A scalar scale is broadcast to every boundary.
        if not isinstance(scale, collections.abc.Sequence):
            scale = [scale] * len(boundaries)
        assert len(scale) == len(boundaries)
        # The first interval keeps the base learning rate (factor 1).
        self.scale = [1, *scale]

    @property
    def learning_rate_decay_conf(self) -> Optional[op_conf_pb.LearningRateDecayConf]:
        conf = op_conf_pb.LearningRateDecayConf()
        conf.piecewise_scaling_conf.boundaries.extend(self.boundaries)
        conf.piecewise_scaling_conf.scales.extend(self.scale)
        return conf
@oneflow_export("optimizer.PolynomialSchduler")
class PolynomialSchduler(LrScheduler):
    """Polynomial learning-rate decay.

    NOTE(review): the exported name "PolynomialSchduler" looks like a typo of
    "PolynomialScheduler", but it is public API and is kept unchanged.
    """

    def __init__(
        self,
        base_lr: float,
        steps: int,
        end_learning_rate: float = 0.0001,
        power: float = 1.0,
        cycle: bool = False,
        warmup: Optional[WarmupConf] = None,
    ):
        super().__init__(base_lr=base_lr, warmup=warmup)
        self.steps = steps
        self.end_learning_rate = end_learning_rate
        self.power = power
        self.cycle = cycle

    @property
    def learning_rate_decay_conf(self) -> Optional[op_conf_pb.LearningRateDecayConf]:
        conf = op_conf_pb.LearningRateDecayConf()
        poly = conf.polynomial_conf
        poly.decay_batches = self.steps
        poly.end_learning_rate = self.end_learning_rate
        poly.power = self.power
        poly.cycle = self.cycle
        return conf
@oneflow_export("optimizer.LinearCosineScheduler")
class LinearCosineScheduler(LrScheduler):
    """Linear-cosine learning-rate decay over ``steps`` batches.

    Args:
        base_lr: initial learning rate.
        steps: number of decay batches.
        num_periods: number of cosine periods over the decay window.
        alpha: additive term of the linear-cosine decay formula.
        beta: additive term of the linear-cosine decay formula.
        warmup: optional warmup configuration.
    """

    def __init__(
        self,
        base_lr: float,
        steps: int,
        num_periods: float = 0.5,
        alpha: float = 0.0,
        beta: float = 0.001,
        warmup: Optional[WarmupConf] = None,
    ):
        super().__init__(base_lr=base_lr, warmup=warmup)
        self.steps = steps
        self.num_periods = num_periods
        self.alpha = alpha
        self.beta = beta

    @property
    def learning_rate_decay_conf(self) -> Optional[op_conf_pb.LearningRateDecayConf]:
        learning_rate_decay_conf = op_conf_pb.LearningRateDecayConf()
        learning_rate_decay_conf.linear_cosine_conf.decay_batches = self.steps
        learning_rate_decay_conf.linear_cosine_conf.num_periods = self.num_periods
        # BUGFIX: alpha and beta were previously written to polynomial_conf
        # instead of linear_cosine_conf, so the configured values never reached
        # the linear-cosine decay (every sibling scheduler writes only to its
        # own sub-conf).
        learning_rate_decay_conf.linear_cosine_conf.alpha = self.alpha
        learning_rate_decay_conf.linear_cosine_conf.beta = self.beta
        return learning_rate_decay_conf
@oneflow_export("optimizer.ExponentialScheduler")
class ExponentialScheduler(LrScheduler):
    """Exponential learning-rate decay configuration."""

    def __init__(
        self,
        base_lr: float,
        steps: int,
        decay_rate: float,
        staircase=False,
        warmup: Optional[WarmupConf] = None,
    ):
        super().__init__(base_lr=base_lr, warmup=warmup)
        self.steps = steps
        self.decay_rate = decay_rate
        self.staircase = staircase

    @property
    def learning_rate_decay_conf(self) -> Optional[op_conf_pb.LearningRateDecayConf]:
        conf = op_conf_pb.LearningRateDecayConf()
        exp = conf.exponential_conf
        exp.decay_batches = self.steps
        exp.decay_rate = self.decay_rate
        exp.staircase = self.staircase
        return conf
@oneflow_export("optimizer.InverseTimeScheduler")
class InverseTimeScheduler(LrScheduler):
    """Inverse-time learning-rate decay configuration."""

    def __init__(
        self,
        base_lr: float,
        steps: int,
        decay_rate: float,
        staircase: bool = False,
        warmup: Optional[WarmupConf] = None,
    ):
        super().__init__(base_lr=base_lr, warmup=warmup)
        self.steps = steps
        self.decay_rate = decay_rate
        self.staircase = staircase

    @property
    def learning_rate_decay_conf(self) -> Optional[op_conf_pb.LearningRateDecayConf]:
        conf = op_conf_pb.LearningRateDecayConf()
        inv = conf.inverse_time_conf
        inv.decay_batches = self.steps
        inv.decay_rate = self.decay_rate
        inv.staircase = self.staircase
        return conf
@oneflow_export("optimizer.NaturalExpScheduler")
class NaturalExpScheduler(LrScheduler):
    """Natural-exponential learning-rate decay configuration."""

    def __init__(
        self,
        base_lr: float,
        steps: int,
        decay_rate: float,
        staircase: bool = False,
        warmup: Optional[WarmupConf] = None,
    ):
        super().__init__(base_lr=base_lr, warmup=warmup)
        self.steps = steps
        self.decay_rate = decay_rate
        self.staircase = staircase

    @property
    def learning_rate_decay_conf(self) -> Optional[op_conf_pb.LearningRateDecayConf]:
        conf = op_conf_pb.LearningRateDecayConf()
        nat = conf.natural_exp_conf
        nat.decay_batches = self.steps
        nat.decay_rate = self.decay_rate
        nat.staircase = self.staircase
        return conf
class Optimizer:
    """Base class for optimizers; assembles the job's TrainConf protobuf.

    Args:
        lr_scheduler: learning-rate schedule.
        loss_scale_factor: optional static loss-scaling factor.
        grad_clipping: optional gradient-clipping configuration.
        train_step_lbn: optional blob name providing the train step.
    """

    def __init__(
        self,
        lr_scheduler: LrScheduler,
        # CONSISTENCY FIX: annotation changed from Optional[int] to
        # Optional[float] to match every subclass, which declares
        # Optional[float] for the same parameter.
        loss_scale_factor: Optional[float] = None,
        grad_clipping: Optional[ClipGradientConf] = None,
        train_step_lbn: Optional[Text] = None,
    ):
        self.lr_scheduler = lr_scheduler
        self.loss_scale_factor = loss_scale_factor
        self.grad_clipping = grad_clipping
        self.train_step_lbn = train_step_lbn

    def _SetSpecificFieldsInTrainConf(self, train_conf):
        # Subclasses fill their optimizer-specific model_update_conf fields.
        raise NotImplementedError()

    @property
    def train_conf(self) -> job_conf_pb.TrainConf:
        """Build the TrainConf protobuf from the configured options."""
        train_conf = job_conf_pb.TrainConf()
        self.lr_scheduler.SetLrFieldsInTrainConf(train_conf)
        update_conf = train_conf.model_update_conf
        if self.grad_clipping is not None:
            update_conf.clip_conf.CopyFrom(self.grad_clipping.clip_conf)
        if self.train_step_lbn is not None:
            train_conf.train_step_lbn = self.train_step_lbn
        if self.loss_scale_factor is not None:
            update_conf.loss_scale_factor = self.loss_scale_factor
        self._SetSpecificFieldsInTrainConf(train_conf)
        return train_conf

    def minimize(
        self, loss: Union[Sequence[remote_blob_util.BlobDef], remote_blob_util.BlobDef]
    ) -> None:
        """Register ``loss`` (a blob or a sequence of blobs) as training targets."""
        if not isinstance(loss, collections.abc.Sequence):
            loss = [loss]
        c_api_util.CurJobBuildAndInferCtx_SetTrainConf(self.train_conf)
        for x in loss:
            flow.losses.add_loss(x)
@oneflow_export("optimizer.SGD")
class SGD(Optimizer):
    """SGD optimizer, optionally with momentum.

    Args:
        lr_scheduler: learning-rate schedule.
        loss_scale_factor: optional static loss-scaling factor.
        momentum: momentum coefficient; 0 selects plain (naive) SGD.
        grad_clipping: optional gradient-clipping configuration.
        train_step_lbn: optional blob name providing the train step.
    """

    def __init__(
        self,
        lr_scheduler: LrScheduler,
        loss_scale_factor: Optional[float] = None,
        # BUGFIX: annotation was `int`, although the default (0.9) and the
        # value written to momentum_conf.beta are floating point.
        momentum: float = 0.9,
        grad_clipping: Optional[ClipGradientConf] = None,
        train_step_lbn: Optional[Text] = None,
    ):
        super().__init__(
            lr_scheduler, loss_scale_factor, grad_clipping, train_step_lbn,
        )
        self.momentum = momentum

    def _SetSpecificFieldsInTrainConf(self, train_conf):
        if self.momentum == 0:
            # Momentum of exactly 0 selects the plain SGD update rule.
            train_conf.model_update_conf.naive_conf.SetInParent()
        else:
            train_conf.model_update_conf.momentum_conf.beta = self.momentum
@oneflow_export("optimizer.Adam")
class Adam(Optimizer):
    """Adam optimizer configuration."""

    def __init__(
        self,
        lr_scheduler: LrScheduler,
        beta1=0.9,
        beta2=0.999,
        epsilon=1e-8,
        do_bias_correction=False,
        loss_scale_factor: Optional[float] = None,
        grad_clipping: Optional[ClipGradientConf] = None,
        train_step_lbn: Optional[Text] = None,
    ):
        super().__init__(
            lr_scheduler, loss_scale_factor, grad_clipping, train_step_lbn,
        )
        self.beta1 = beta1
        self.beta2 = beta2
        self.epsilon = epsilon
        self.do_bias_correction = do_bias_correction

    def _SetSpecificFieldsInTrainConf(self, train_conf):
        """Fill model_update_conf.adam_conf from the configured hyperparameters."""
        adam_conf = train_conf.model_update_conf.adam_conf
        adam_conf.beta1 = self.beta1
        adam_conf.beta2 = self.beta2
        adam_conf.epsilon = self.epsilon
        adam_conf.do_bias_correction = self.do_bias_correction
@oneflow_export("optimizer.AdamW")
class AdamW(Optimizer):
    """Adam optimizer with weight decay.

    ``weight_decay_includes`` and ``weight_decay_excludes`` are mutually
    exclusive variable-name patterns selecting which variables decay.
    """

    def __init__(
        self,
        lr_scheduler: LrScheduler,
        beta1=0.9,
        beta2=0.999,
        epsilon=1e-8,
        do_bias_correction=False,
        loss_scale_factor: Optional[float] = None,
        weight_decay: Optional[float] = None,
        weight_decay_includes: Optional[Union[Sequence[Text], Text]] = None,
        weight_decay_excludes: Optional[Union[Sequence[Text], Text]] = None,
        grad_clipping: Optional[ClipGradientConf] = None,
        train_step_lbn: Optional[Text] = None,
    ):
        super().__init__(
            lr_scheduler, loss_scale_factor, grad_clipping, train_step_lbn,
        )
        self.beta1 = beta1
        self.beta2 = beta2
        self.epsilon = epsilon
        self.do_bias_correction = do_bias_correction
        self.weight_decay = weight_decay
        # Normalize bare strings to single-element lists.
        if isinstance(weight_decay_includes, str):
            weight_decay_includes = [weight_decay_includes]
        if isinstance(weight_decay_excludes, str):
            weight_decay_excludes = [weight_decay_excludes]
        self.weight_decay_includes = weight_decay_includes
        self.weight_decay_excludes = weight_decay_excludes

    def _SetSpecificFieldsInTrainConf(self, train_conf):
        adam_conf = train_conf.model_update_conf.adam_conf
        adam_conf.beta1 = self.beta1
        adam_conf.beta2 = self.beta2
        adam_conf.epsilon = self.epsilon
        adam_conf.do_bias_correction = self.do_bias_correction
        if self.weight_decay is None:
            return
        decay_conf = train_conf.model_update_conf.weight_decay_conf
        decay_conf.weight_decay_rate = self.weight_decay
        # includes and excludes are mutually exclusive.
        assert not (
            self.weight_decay_excludes is not None
            and self.weight_decay_includes is not None
        )
        if self.weight_decay_includes is not None:
            decay_conf.includes.pattern.extend(self.weight_decay_includes)
        elif self.weight_decay_excludes is not None:
            decay_conf.excludes.pattern.extend(self.weight_decay_excludes)
@oneflow_export("optimizer.RMSProp")
class RMSProp(Optimizer):
    """RMSProp optimizer configuration."""

    def __init__(
        self,
        lr_scheduler: LrScheduler,
        decay_rate: float = 0.99,
        epsilon: float = 1e-8,
        loss_scale_factor: Optional[float] = None,
        grad_clipping: Optional[ClipGradientConf] = None,
        train_step_lbn: Optional[Text] = None,
    ):
        super().__init__(
            lr_scheduler, loss_scale_factor, grad_clipping, train_step_lbn,
        )
        self.decay_rate = decay_rate
        self.epsilon = epsilon

    def _SetSpecificFieldsInTrainConf(self, train_conf):
        rmsprop_conf = train_conf.model_update_conf.rmsprop_conf
        rmsprop_conf.decay_rate = self.decay_rate
        rmsprop_conf.epsilon = self.epsilon
@oneflow_export("optimizer.LARS")
class LARS(Optimizer):
    """LARS (layer-wise adaptive rate scaling) optimizer configuration."""

    def __init__(
        self,
        lr_scheduler: LrScheduler,
        momentum_beta: float = 0.9,
        epsilon: float = 1e-9,
        lars_coefficient: float = 0.0001,
        loss_scale_factor: Optional[float] = None,
        grad_clipping: Optional[ClipGradientConf] = None,
        train_step_lbn: Optional[Text] = None,
    ):
        super().__init__(
            lr_scheduler, loss_scale_factor, grad_clipping, train_step_lbn,
        )
        self.momentum_beta = momentum_beta
        self.epsilon = epsilon
        self.lars_coefficient = lars_coefficient

    def _SetSpecificFieldsInTrainConf(self, train_conf):
        lars_conf = train_conf.model_update_conf.lars_conf
        lars_conf.momentum_beta = self.momentum_beta
        lars_conf.epsilon = self.epsilon
        lars_conf.lars_coefficient = self.lars_coefficient
@oneflow_export("optimizer.LazyAdam")
class LazyAdam(Optimizer):
    """LazyAdam optimizer configuration."""

    def __init__(
        self,
        lr_scheduler: LrScheduler,
        beta1: float = 0.9,
        beta2: float = 0.999,
        epsilon: float = 1e-8,
        loss_scale_factor: Optional[float] = None,
        grad_clipping: Optional[ClipGradientConf] = None,
        train_step_lbn: Optional[Text] = None,
    ):
        super().__init__(
            lr_scheduler, loss_scale_factor, grad_clipping, train_step_lbn,
        )
        self.beta1 = beta1
        self.beta2 = beta2
        self.epsilon = epsilon

    def _SetSpecificFieldsInTrainConf(self, train_conf):
        lazy_adam_conf = train_conf.model_update_conf.lazy_adam_conf
        lazy_adam_conf.beta1 = self.beta1
        lazy_adam_conf.beta2 = self.beta2
        lazy_adam_conf.epsilon = self.epsilon
| [
"oneflow.losses.add_loss",
"oneflow.python.framework.c_api_util.CurJobBuildAndInferCtx_SetTrainConf",
"oneflow.core.operator.op_conf_pb2.WarmupConf",
"oneflow.core.operator.op_conf_pb2.LearningRateDecayConf",
"oneflow.core.operator.op_conf_pb2.ClipConf",
"oneflow.core.job.job_conf_pb2.TrainConf",
"onefl... | [((1303, 1359), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""optimizer.grad_clipping.by_global_norm"""'], {}), "('optimizer.grad_clipping.by_global_norm')\n", (1317, 1359), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((1769, 1812), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""optimizer.warmup.constant"""'], {}), "('optimizer.warmup.constant')\n", (1783, 1812), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((2222, 2263), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""optimizer.warmup.linear"""'], {}), "('optimizer.warmup.linear')\n", (2236, 2263), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((3973, 4016), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""optimizer.CosineScheduler"""'], {}), "('optimizer.CosineScheduler')\n", (3987, 4016), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((4670, 4713), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""optimizer.CustomScheduler"""'], {}), "('optimizer.CustomScheduler')\n", (4684, 4713), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((4936, 4990), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""optimizer.PiecewiseConstantScheduler"""'], {}), "('optimizer.PiecewiseConstantScheduler')\n", (4950, 4990), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((5777, 5830), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""optimizer.PiecewiseScalingScheduler"""'], {}), "('optimizer.PiecewiseScalingScheduler')\n", (5791, 5830), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((6759, 6805), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""optimizer.PolynomialSchduler"""'], {}), "('optimizer.PolynomialSchduler')\n", (6773, 6805), False, 'from oneflow.python.oneflow_export 
import oneflow_export\n'), ((7804, 7853), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""optimizer.LinearCosineScheduler"""'], {}), "('optimizer.LinearCosineScheduler')\n", (7818, 7853), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((8800, 8848), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""optimizer.ExponentialScheduler"""'], {}), "('optimizer.ExponentialScheduler')\n", (8814, 8848), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((9673, 9721), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""optimizer.InverseTimeScheduler"""'], {}), "('optimizer.InverseTimeScheduler')\n", (9687, 9721), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((10557, 10604), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""optimizer.NaturalExpScheduler"""'], {}), "('optimizer.NaturalExpScheduler')\n", (10571, 10604), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((12933, 12964), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""optimizer.SGD"""'], {}), "('optimizer.SGD')\n", (12947, 12964), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((13639, 13671), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""optimizer.Adam"""'], {}), "('optimizer.Adam')\n", (13653, 13671), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((14650, 14683), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""optimizer.AdamW"""'], {}), "('optimizer.AdamW')\n", (14664, 14683), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((16986, 17021), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""optimizer.RMSProp"""'], {}), "('optimizer.RMSProp')\n", (17000, 17021), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((17736, 17768), 
'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""optimizer.LARS"""'], {}), "('optimizer.LARS')\n", (17750, 17768), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((18667, 18703), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""optimizer.LazyAdam"""'], {}), "('optimizer.LazyAdam')\n", (18681, 18703), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((1532, 1553), 'oneflow.core.operator.op_conf_pb2.ClipConf', 'op_conf_pb.ClipConf', ([], {}), '()\n', (1551, 1553), True, 'import oneflow.core.operator.op_conf_pb2 as op_conf_pb\n'), ((2043, 2066), 'oneflow.core.operator.op_conf_pb2.WarmupConf', 'op_conf_pb.WarmupConf', ([], {}), '()\n', (2064, 2066), True, 'import oneflow.core.operator.op_conf_pb2 as op_conf_pb\n'), ((2510, 2533), 'oneflow.core.operator.op_conf_pb2.WarmupConf', 'op_conf_pb.WarmupConf', ([], {}), '()\n', (2531, 2533), True, 'import oneflow.core.operator.op_conf_pb2 as op_conf_pb\n'), ((4456, 4490), 'oneflow.core.operator.op_conf_pb2.LearningRateDecayConf', 'op_conf_pb.LearningRateDecayConf', ([], {}), '()\n', (4488, 4490), True, 'import oneflow.core.operator.op_conf_pb2 as op_conf_pb\n'), ((5501, 5535), 'oneflow.core.operator.op_conf_pb2.LearningRateDecayConf', 'op_conf_pb.LearningRateDecayConf', ([], {}), '()\n', (5533, 5535), True, 'import oneflow.core.operator.op_conf_pb2 as op_conf_pb\n'), ((6486, 6520), 'oneflow.core.operator.op_conf_pb2.LearningRateDecayConf', 'op_conf_pb.LearningRateDecayConf', ([], {}), '()\n', (6518, 6520), True, 'import oneflow.core.operator.op_conf_pb2 as op_conf_pb\n'), ((7398, 7432), 'oneflow.core.operator.op_conf_pb2.LearningRateDecayConf', 'op_conf_pb.LearningRateDecayConf', ([], {}), '()\n', (7430, 7432), True, 'import oneflow.core.operator.op_conf_pb2 as op_conf_pb\n'), ((8426, 8460), 'oneflow.core.operator.op_conf_pb2.LearningRateDecayConf', 'op_conf_pb.LearningRateDecayConf', ([], {}), '()\n', (8458, 8460), True, 'import 
oneflow.core.operator.op_conf_pb2 as op_conf_pb\n'), ((9362, 9396), 'oneflow.core.operator.op_conf_pb2.LearningRateDecayConf', 'op_conf_pb.LearningRateDecayConf', ([], {}), '()\n', (9394, 9396), True, 'import oneflow.core.operator.op_conf_pb2 as op_conf_pb\n'), ((10243, 10277), 'oneflow.core.operator.op_conf_pb2.LearningRateDecayConf', 'op_conf_pb.LearningRateDecayConf', ([], {}), '()\n', (10275, 10277), True, 'import oneflow.core.operator.op_conf_pb2 as op_conf_pb\n'), ((11125, 11159), 'oneflow.core.operator.op_conf_pb2.LearningRateDecayConf', 'op_conf_pb.LearningRateDecayConf', ([], {}), '()\n', (11157, 11159), True, 'import oneflow.core.operator.op_conf_pb2 as op_conf_pb\n'), ((12041, 12064), 'oneflow.core.job.job_conf_pb2.TrainConf', 'job_conf_pb.TrainConf', ([], {}), '()\n', (12062, 12064), True, 'import oneflow.core.job.job_conf_pb2 as job_conf_pb\n'), ((12807, 12870), 'oneflow.python.framework.c_api_util.CurJobBuildAndInferCtx_SetTrainConf', 'c_api_util.CurJobBuildAndInferCtx_SetTrainConf', (['self.train_conf'], {}), '(self.train_conf)\n', (12853, 12870), True, 'import oneflow.python.framework.c_api_util as c_api_util\n'), ((12906, 12929), 'oneflow.losses.add_loss', 'flow.losses.add_loss', (['x'], {}), '(x)\n', (12926, 12929), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# RUN: python3 %s | FileCheck %s
# CHECK: jit
import unittest
import numpy as np
import os
os.environ["ONEFLOW_MLIR_ENABLE_ROUND_TRIP"] = "1"
os.environ["ONEFLOW_MLIR_ENABLE_CODEGEN_FUSERS"] = "1"
import oneflow as flow
import oneflow.unittest
class CastModule(flow.nn.Module):
    """Module that casts its input to float32 and multiplies by ``scale``."""

    def __init__(self):
        super().__init__()

    def forward(self, x, scale):
        # TODO: also support scale as a scalar, for instance: scale = 7.7
        casted = x.to(dtype=flow.float32)
        return casted * scale
def do_relu_graph(test_case, data, with_cuda):
    """Run CastModule both eagerly and via nn.Graph and assert identical output."""
    inp = flow.tensor(data, dtype=flow.int64)
    scale = flow.tensor([7.7], dtype=flow.float32)
    if with_cuda:
        inp = inp.cuda()
        scale = scale.cuda()
    module_to_run = CastModule()
    y_eager = module_to_run(inp, scale)

    class GraphToRun(flow.nn.Graph):
        def __init__(self):
            super().__init__()
            self.fw = module_to_run

        def build(self, x, scale):
            return self.fw(x, scale)

    y_lazy = GraphToRun()(inp, scale)
    test_case.assertTrue(np.array_equal(y_eager.numpy(), y_lazy.numpy()))
@flow.unittest.skip_unless_1n1d()
class TestFuseCastScale(oneflow.unittest.TestCase):
    """Checks eager/lazy agreement for the cast + scale pattern."""

    def test_relu_graph(test_case):
        cases = [
            (np.array([2.0, 1.0, 0.0, -1.0, -2.0]), True),
            (np.array([[2.0, 1.0, 0.0, -1.0, -2.0], [2.0, 1.0, 0.0, -1.0, -2.0]]), False),
        ]
        for data, with_cuda in cases:
            do_relu_graph(test_case, data, with_cuda)
if __name__ == "__main__":
    # Run the test cases via the standard unittest CLI.
    unittest.main()
| [
"oneflow.unittest.skip_unless_1n1d",
"oneflow.tensor"
] | [((1716, 1748), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (1746, 1748), True, 'import oneflow as flow\n'), ((1138, 1173), 'oneflow.tensor', 'flow.tensor', (['data'], {'dtype': 'flow.int64'}), '(data, dtype=flow.int64)\n', (1149, 1173), True, 'import oneflow as flow\n'), ((1186, 1224), 'oneflow.tensor', 'flow.tensor', (['[7.7]'], {'dtype': 'flow.float32'}), '([7.7], dtype=flow.float32)\n', (1197, 1224), True, 'import oneflow as flow\n'), ((2105, 2120), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2118, 2120), False, 'import unittest\n'), ((1870, 1907), 'numpy.array', 'np.array', (['[2.0, 1.0, 0.0, -1.0, -2.0]'], {}), '([2.0, 1.0, 0.0, -1.0, -2.0])\n', (1878, 1907), True, 'import numpy as np\n'), ((1973, 2041), 'numpy.array', 'np.array', (['[[2.0, 1.0, 0.0, -1.0, -2.0], [2.0, 1.0, 0.0, -1.0, -2.0]]'], {}), '([[2.0, 1.0, 0.0, -1.0, -2.0], [2.0, 1.0, 0.0, -1.0, -2.0]])\n', (1981, 2041), True, 'import numpy as np\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow
from oneflow.framework.docstr.utils import add_docstr
add_docstr(
oneflow._C.swapdims,
"""
swapdims(input, dim0, dim1) -> Tensor
This function is equivalent to torch’s swapdims function.
For example:
.. code-block:: python
>>> import oneflow as flow
>>> x = flow.tensor([[[0,1],[2,3]],[[4,5],[6,7]]])
>>> x
tensor([[[0, 1],
[2, 3]],
<BLANKLINE>
[[4, 5],
[6, 7]]], dtype=oneflow.int64)
>>> flow.swapdims(x, 0, 1)
tensor([[[0, 1],
[4, 5]],
<BLANKLINE>
[[2, 3],
[6, 7]]], dtype=oneflow.int64)
>>> flow.swapdims(x, 0, 2)
tensor([[[0, 4],
[2, 6]],
<BLANKLINE>
[[1, 5],
[3, 7]]], dtype=oneflow.int64)
""",
)
| [
"oneflow.framework.docstr.utils.add_docstr"
] | [((660, 1481), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow._C.swapdims', '"""\n swapdims(input, dim0, dim1) -> Tensor\n\n This function is equivalent to torch’s swapdims function.\n\n For example:\n\n .. code-block:: python\n \n >>> import oneflow as flow\n\n >>> x = flow.tensor([[[0,1],[2,3]],[[4,5],[6,7]]])\n >>> x\n tensor([[[0, 1],\n [2, 3]],\n <BLANKLINE>\n [[4, 5],\n [6, 7]]], dtype=oneflow.int64)\n >>> flow.swapdims(x, 0, 1)\n tensor([[[0, 1],\n [4, 5]],\n <BLANKLINE>\n [[2, 3],\n [6, 7]]], dtype=oneflow.int64)\n >>> flow.swapdims(x, 0, 2)\n tensor([[[0, 4],\n [2, 6]],\n <BLANKLINE>\n [[1, 5],\n [3, 7]]], dtype=oneflow.int64)\n\n """'], {}), '(oneflow._C.swapdims,\n """\n swapdims(input, dim0, dim1) -> Tensor\n\n This function is equivalent to torch’s swapdims function.\n\n For example:\n\n .. code-block:: python\n \n >>> import oneflow as flow\n\n >>> x = flow.tensor([[[0,1],[2,3]],[[4,5],[6,7]]])\n >>> x\n tensor([[[0, 1],\n [2, 3]],\n <BLANKLINE>\n [[4, 5],\n [6, 7]]], dtype=oneflow.int64)\n >>> flow.swapdims(x, 0, 1)\n tensor([[[0, 1],\n [4, 5]],\n <BLANKLINE>\n [[2, 3],\n [6, 7]]], dtype=oneflow.int64)\n >>> flow.swapdims(x, 0, 2)\n tensor([[[0, 4],\n [2, 6]],\n <BLANKLINE>\n [[1, 5],\n [3, 7]]], dtype=oneflow.int64)\n\n """\n )\n', (670, 1481), False, 'from oneflow.framework.docstr.utils import add_docstr\n')] |
import oneflow as flow
import oneflow.typing as tp
# Training job: run LeNet forward on GPU 0, compute sparse softmax
# cross-entropy loss, and minimize it with Adam at a constant lr of 0.001.
# NOTE(review): BATCH_SIZE and lenet are assumed to be defined earlier in
# this module — confirm against the surrounding file.
@flow.global_function(type="train")
def train_job(
    images: tp.Numpy.Placeholder((BATCH_SIZE, 1, 28, 28), dtype=flow.float),
    labels: tp.Numpy.Placeholder((BATCH_SIZE,), dtype=flow.int32),
) -> tp.Numpy:
    with flow.scope.placement("gpu", "0:0"):
        logits = lenet(images, train=True)
        loss = flow.nn.sparse_softmax_cross_entropy_with_logits(
            labels, logits, name="softmax_loss"
        )
    # Set learning rate as 0.001
    lr_scheduler = flow.optimizer.PiecewiseConstantScheduler([], [0.001])
    # Set Adam optimizer
    flow.optimizer.Adam(lr_scheduler, do_bias_correction=False).minimize(loss)
return loss | [
"oneflow.global_function",
"oneflow.optimizer.PiecewiseConstantScheduler",
"oneflow.typing.Numpy.Placeholder",
"oneflow.optimizer.Adam",
"oneflow.scope.placement",
"oneflow.nn.sparse_softmax_cross_entropy_with_logits"
] | [((53, 87), 'oneflow.global_function', 'flow.global_function', ([], {'type': '"""train"""'}), "(type='train')\n", (73, 87), True, 'import oneflow as flow\n'), ((526, 580), 'oneflow.optimizer.PiecewiseConstantScheduler', 'flow.optimizer.PiecewiseConstantScheduler', (['[]', '[0.001]'], {}), '([], [0.001])\n', (567, 580), True, 'import oneflow as flow\n'), ((115, 178), 'oneflow.typing.Numpy.Placeholder', 'tp.Numpy.Placeholder', (['(BATCH_SIZE, 1, 28, 28)'], {'dtype': 'flow.float'}), '((BATCH_SIZE, 1, 28, 28), dtype=flow.float)\n', (135, 178), True, 'import oneflow.typing as tp\n'), ((192, 245), 'oneflow.typing.Numpy.Placeholder', 'tp.Numpy.Placeholder', (['(BATCH_SIZE,)'], {'dtype': 'flow.int32'}), '((BATCH_SIZE,), dtype=flow.int32)\n', (212, 245), True, 'import oneflow.typing as tp\n'), ((271, 305), 'oneflow.scope.placement', 'flow.scope.placement', (['"""gpu"""', '"""0:0"""'], {}), "('gpu', '0:0')\n", (291, 305), True, 'import oneflow as flow\n'), ((365, 455), 'oneflow.nn.sparse_softmax_cross_entropy_with_logits', 'flow.nn.sparse_softmax_cross_entropy_with_logits', (['labels', 'logits'], {'name': '"""softmax_loss"""'}), "(labels, logits, name=\n 'softmax_loss')\n", (413, 455), True, 'import oneflow as flow\n'), ((610, 669), 'oneflow.optimizer.Adam', 'flow.optimizer.Adam', (['lr_scheduler'], {'do_bias_correction': '(False)'}), '(lr_scheduler, do_bias_correction=False)\n', (629, 669), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import math
import os
import unittest
import cv2
import numpy as np
import oneflow as flow
import oneflow.unittest
@flow.unittest.skip_unless_1n1d()
class TestOFRecordModule(flow.unittest.TestCase):
    # Regression test for the OFRecord data pipeline: decode -> resize ->
    # crop/mirror/normalize, comparing each stage against golden images stored
    # on disk next to the dataset.
    def test_record(test_case):
        batch_size = 1
        color_space = "RGB"
        height = 224
        width = 224
        output_layout = "NCHW"
        # Channel statistics used by CropMirrorNormalize for normalization.
        rgb_mean = [123.68, 116.779, 103.939]
        rgb_std = [58.393, 57.12, 57.375]
        record_reader = flow.nn.OfrecordReader(
            "/dataset/imagenette/ofrecord",
            batch_size=batch_size,
            data_part_num=1,
            part_name_suffix_length=5,
            shuffle_after_epoch=False,
        )
        record_image_decoder = flow.nn.OFRecordImageDecoder(
            "encoded", color_space=color_space
        )
        record_label_decoder = flow.nn.OfrecordRawDecoder(
            "class/label", shape=(), dtype=flow.int32
        )
        resize = flow.nn.image.Resize(
            resize_side="shorter", keep_aspect_ratio=True, target_size=256
        )
        crop_mirror_normal = flow.nn.CropMirrorNormalize(
            color_space=color_space,
            output_layout=output_layout,
            crop_h=height,
            crop_w=width,
            crop_pos_y=0.5,
            crop_pos_x=0.5,
            mean=rgb_mean,
            std=rgb_std,
            output_dtype=flow.float,
        )
        val_record = record_reader()
        label = record_label_decoder(val_record)
        image_raw_buffer = record_image_decoder(val_record)
        # Stage 1: the raw decoded image must match the stored golden image.
        image_raw_buffer_nd = image_raw_buffer.numpy()
        gt_np = cv2.imread("/dataset/imagenette/ofrecord/gt_tensor_buffer_image.png")
        test_case.assertTrue(np.array_equal(image_raw_buffer_nd[0], gt_np))
        # Stage 2: the resized image must match its golden counterpart.
        image = resize(image_raw_buffer)[0]
        resized_image_raw_buffer_nd = image.numpy()
        gt_np = cv2.imread(
            "/dataset/imagenette/ofrecord/gt_tensor_buffer_resized_image.png"
        )
        test_case.assertTrue(np.array_equal(resized_image_raw_buffer_nd[0], gt_np))
        # Stage 3: undo the normalization of the CMN output, convert back to a
        # BGR uint8 image, and compare against the golden validation image.
        image = crop_mirror_normal(image)
        image_np = image.numpy()
        image_np = np.squeeze(image_np)
        # CHW -> HWC so the array can be treated as a regular image.
        image_np = np.transpose(image_np, (1, 2, 0))
        image_np = image_np * rgb_std + rgb_mean
        image_np = cv2.cvtColor(np.float32(image_np), cv2.COLOR_RGB2BGR)
        image_np = image_np.astype(np.uint8)
        gt_np = cv2.imread("/dataset/imagenette/ofrecord/gt_val_image.png")
        test_case.assertEqual(label.numpy(), 5)
        test_case.assertTrue(np.array_equal(image_np, gt_np))
coco_dict = dict()
def _coco(anno_file):
global coco_dict
if anno_file not in coco_dict:
from pycocotools.coco import COCO
coco_dict[anno_file] = COCO(anno_file)
return coco_dict[anno_file]
def _get_coco_image_samples(anno_file, image_dir, image_ids):
coco = _coco(anno_file)
category_id_to_contiguous_id_map = _get_category_id_to_contiguous_id_map(coco)
(image, image_size) = _read_images_with_cv(coco, image_dir, image_ids)
bbox = _read_bbox(coco, image_ids)
label = _read_label(coco, image_ids, category_id_to_contiguous_id_map)
img_segm_poly_list = _read_segm_poly(coco, image_ids)
(poly, poly_index) = _segm_poly_list_to_tensor(img_segm_poly_list)
samples = []
for (im, ims, b, l, p, pi) in zip(image, image_size, bbox, label, poly, poly_index):
samples.append(
dict(image=im, image_size=ims, bbox=b, label=l, poly=p, poly_index=pi)
)
return samples
def _get_category_id_to_contiguous_id_map(coco):
return {v: i + 1 for (i, v) in enumerate(coco.getCatIds())}
def _read_images_with_cv(coco, image_dir, image_ids):
image_files = [
os.path.join(image_dir, coco.imgs[img_id]["file_name"]) for img_id in image_ids
]
image_size = [
(coco.imgs[img_id]["height"], coco.imgs[img_id]["width"])
for img_id in image_ids
]
return (
[cv2.imread(image_file).astype(np.single) for image_file in image_files],
image_size,
)
def _bbox_convert_from_xywh_to_xyxy(bbox, image_h, image_w):
(x, y, w, h) = bbox
(x1, y1) = (x, y)
x2 = x1 + max(w - 1, 0)
y2 = y1 + max(h - 1, 0)
x1 = min(max(x1, 0), image_w - 1)
y1 = min(max(y1, 0), image_h - 1)
x2 = min(max(x2, 0), image_w - 1)
y2 = min(max(y2, 0), image_h - 1)
if x1 >= x2 or y1 >= y2:
return None
return [x1, y1, x2, y2]
def _read_bbox(coco, image_ids):
img_bbox_list = []
for img_id in image_ids:
anno_ids = coco.getAnnIds(imgIds=[img_id])
assert len(anno_ids) > 0, "image with id {} has no anno".format(img_id)
image_h = coco.imgs[img_id]["height"]
image_w = coco.imgs[img_id]["width"]
bbox_list = []
for anno_id in anno_ids:
anno = coco.anns[anno_id]
if anno["iscrowd"] != 0:
continue
bbox = anno["bbox"]
assert isinstance(bbox, list)
bbox_ = _bbox_convert_from_xywh_to_xyxy(bbox, image_h, image_w)
if bbox_ is not None:
bbox_list.append(bbox_)
bbox_array = np.array(bbox_list, dtype=np.single)
img_bbox_list.append(bbox_array)
return img_bbox_list
def _read_label(coco, image_ids, category_id_to_contiguous_id_map):
img_label_list = []
for img_id in image_ids:
anno_ids = coco.getAnnIds(imgIds=[img_id])
assert len(anno_ids) > 0, "image with id {} has no anno".format(img_id)
label_list = []
for anno_id in anno_ids:
anno = coco.anns[anno_id]
if anno["iscrowd"] != 0:
continue
cate_id = anno["category_id"]
isinstance(cate_id, int)
label_list.append(category_id_to_contiguous_id_map[cate_id])
label_array = np.array(label_list, dtype=np.int32)
img_label_list.append(label_array)
return img_label_list
def _read_segm_poly(coco, image_ids):
img_segm_poly_list = []
for img_id in image_ids:
anno_ids = coco.getAnnIds(imgIds=[img_id])
assert len(anno_ids) > 0, "img {} has no anno".format(img_id)
segm_poly_list = []
for anno_id in anno_ids:
anno = coco.anns[anno_id]
if anno["iscrowd"] != 0:
continue
segm = anno["segmentation"]
assert isinstance(segm, list)
assert len(segm) > 0, str(len(segm))
assert all([len(poly) > 0 for poly in segm]), str(
[len(poly) for poly in segm]
)
segm_poly_list.append(segm)
img_segm_poly_list.append(segm_poly_list)
return img_segm_poly_list
def _segm_poly_list_to_tensor(img_segm_poly_list):
poly_array_list = []
poly_index_array_list = []
for (img_idx, segm_poly_list) in enumerate(img_segm_poly_list):
img_poly_elem_list = []
img_poly_index_list = []
for (obj_idx, poly_list) in enumerate(segm_poly_list):
for (poly_idx, poly) in enumerate(poly_list):
img_poly_elem_list.extend(poly)
for (pt_idx, pt) in enumerate(poly):
if pt_idx % 2 == 0:
img_poly_index_list.append([pt_idx / 2, poly_idx, obj_idx])
img_poly_array = np.array(img_poly_elem_list, dtype=np.single).reshape(-1, 2)
assert img_poly_array.size > 0, segm_poly_list
poly_array_list.append(img_poly_array)
img_poly_index_array = np.array(img_poly_index_list, dtype=np.int32)
assert img_poly_index_array.size > 0, segm_poly_list
poly_index_array_list.append(img_poly_index_array)
return (poly_array_list, poly_index_array_list)
@flow.unittest.skip_unless_1n1d()
class TestCocoReader(flow.unittest.TestCase):
def test_coco_reader(test_case):
anno_file = "/dataset/mscoco_2017/annotations/instances_val2017.json"
image_dir = "/dataset/mscoco_2017/val2017"
num_iterations = 100
coco_reader = flow.nn.COCOReader(
annotation_file=anno_file,
image_dir=image_dir,
batch_size=2,
shuffle=True,
stride_partition=True,
)
image_decoder = flow.nn.image.decode(dtype=flow.float)
for i in range(num_iterations):
(
image,
image_id,
image_size,
gt_bbox,
gt_label,
gt_segm,
gt_segm_index,
) = coco_reader()
decoded_image = image_decoder(image)
image_list = decoded_image.numpy()
image_id = image_id.numpy()
image_size = image_size.numpy()
bbox_list = gt_bbox.numpy()
label_list = gt_label.numpy()
segm_list = gt_segm.numpy()
segm_index_list = gt_segm_index.numpy()
samples = _get_coco_image_samples(anno_file, image_dir, image_id)
for (i, sample) in enumerate(samples):
test_case.assertTrue(np.array_equal(image_list[i], sample["image"]))
test_case.assertTrue(
np.array_equal(image_size[i], sample["image_size"])
)
test_case.assertTrue(np.allclose(bbox_list[i], sample["bbox"]))
cur_label = label_list[i]
if len(cur_label.shape) == 0:
cur_label = np.array([cur_label])
test_case.assertTrue(np.array_equal(cur_label, sample["label"]))
test_case.assertTrue(np.allclose(segm_list[i], sample["poly"]))
test_case.assertTrue(
np.array_equal(segm_index_list[i], sample["poly_index"])
)
@flow.unittest.skip_unless_1n1d()
class TestOFRecordBytesDecoder(flow.unittest.TestCase):
def test_OFRecordBytesDecoder(test_case):
batch_size = 16
record_reader = flow.nn.OfrecordReader(
"/dataset/imagenette/ofrecord",
batch_size=batch_size,
part_name_suffix_length=5,
)
val_record = record_reader()
bytesdecoder_img = flow.nn.OFRecordBytesDecoder("encoded")
image_raw_buffer = bytesdecoder_img(val_record)
image_raw_buffer_nd = image_raw_buffer.numpy()[0]
gt_np = cv2.imread("/dataset/imagenette/ofrecord/gt_tensor_buffer_image.png")
img = cv2.imdecode(image_raw_buffer_nd, cv2.IMREAD_COLOR)
test_case.assertTrue(np.array_equal(img, gt_np))
if __name__ == "__main__":
unittest.main()
| [
"oneflow.nn.OFRecordImageDecoder",
"oneflow.nn.COCOReader",
"oneflow.nn.OFRecordBytesDecoder",
"oneflow.unittest.skip_unless_1n1d",
"oneflow.nn.OfrecordRawDecoder",
"oneflow.nn.CropMirrorNormalize",
"oneflow.nn.OfrecordReader",
"oneflow.nn.image.decode",
"oneflow.nn.image.Resize"
] | [((711, 743), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (741, 743), True, 'import oneflow as flow\n'), ((8335, 8367), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (8365, 8367), True, 'import oneflow as flow\n'), ((10366, 10398), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (10396, 10398), True, 'import oneflow as flow\n'), ((11164, 11179), 'unittest.main', 'unittest.main', ([], {}), '()\n', (11177, 11179), False, 'import unittest\n'), ((1061, 1218), 'oneflow.nn.OfrecordReader', 'flow.nn.OfrecordReader', (['"""/dataset/imagenette/ofrecord"""'], {'batch_size': 'batch_size', 'data_part_num': '(1)', 'part_name_suffix_length': '(5)', 'shuffle_after_epoch': '(False)'}), "('/dataset/imagenette/ofrecord', batch_size=\n batch_size, data_part_num=1, part_name_suffix_length=5,\n shuffle_after_epoch=False)\n", (1083, 1218), True, 'import oneflow as flow\n'), ((1312, 1376), 'oneflow.nn.OFRecordImageDecoder', 'flow.nn.OFRecordImageDecoder', (['"""encoded"""'], {'color_space': 'color_space'}), "('encoded', color_space=color_space)\n", (1340, 1376), True, 'import oneflow as flow\n'), ((1430, 1499), 'oneflow.nn.OfrecordRawDecoder', 'flow.nn.OfrecordRawDecoder', (['"""class/label"""'], {'shape': '()', 'dtype': 'flow.int32'}), "('class/label', shape=(), dtype=flow.int32)\n", (1456, 1499), True, 'import oneflow as flow\n'), ((1539, 1627), 'oneflow.nn.image.Resize', 'flow.nn.image.Resize', ([], {'resize_side': '"""shorter"""', 'keep_aspect_ratio': '(True)', 'target_size': '(256)'}), "(resize_side='shorter', keep_aspect_ratio=True,\n target_size=256)\n", (1559, 1627), True, 'import oneflow as flow\n'), ((1675, 1880), 'oneflow.nn.CropMirrorNormalize', 'flow.nn.CropMirrorNormalize', ([], {'color_space': 'color_space', 'output_layout': 'output_layout', 'crop_h': 'height', 'crop_w': 'width', 'crop_pos_y': '(0.5)', 'crop_pos_x': '(0.5)', 'mean': 
'rgb_mean', 'std': 'rgb_std', 'output_dtype': 'flow.float'}), '(color_space=color_space, output_layout=\n output_layout, crop_h=height, crop_w=width, crop_pos_y=0.5, crop_pos_x=\n 0.5, mean=rgb_mean, std=rgb_std, output_dtype=flow.float)\n', (1702, 1880), True, 'import oneflow as flow\n'), ((2207, 2276), 'cv2.imread', 'cv2.imread', (['"""/dataset/imagenette/ofrecord/gt_tensor_buffer_image.png"""'], {}), "('/dataset/imagenette/ofrecord/gt_tensor_buffer_image.png')\n", (2217, 2276), False, 'import cv2\n'), ((2465, 2542), 'cv2.imread', 'cv2.imread', (['"""/dataset/imagenette/ofrecord/gt_tensor_buffer_resized_image.png"""'], {}), "('/dataset/imagenette/ofrecord/gt_tensor_buffer_resized_image.png')\n", (2475, 2542), False, 'import cv2\n'), ((2743, 2763), 'numpy.squeeze', 'np.squeeze', (['image_np'], {}), '(image_np)\n', (2753, 2763), True, 'import numpy as np\n'), ((2783, 2816), 'numpy.transpose', 'np.transpose', (['image_np', '(1, 2, 0)'], {}), '(image_np, (1, 2, 0))\n', (2795, 2816), True, 'import numpy as np\n'), ((3000, 3059), 'cv2.imread', 'cv2.imread', (['"""/dataset/imagenette/ofrecord/gt_val_image.png"""'], {}), "('/dataset/imagenette/ofrecord/gt_val_image.png')\n", (3010, 3059), False, 'import cv2\n'), ((3345, 3360), 'pycocotools.coco.COCO', 'COCO', (['anno_file'], {}), '(anno_file)\n', (3349, 3360), False, 'from pycocotools.coco import COCO\n'), ((4327, 4382), 'os.path.join', 'os.path.join', (['image_dir', "coco.imgs[img_id]['file_name']"], {}), "(image_dir, coco.imgs[img_id]['file_name'])\n", (4339, 4382), False, 'import os\n'), ((5761, 5797), 'numpy.array', 'np.array', (['bbox_list'], {'dtype': 'np.single'}), '(bbox_list, dtype=np.single)\n', (5769, 5797), True, 'import numpy as np\n'), ((6449, 6485), 'numpy.array', 'np.array', (['label_list'], {'dtype': 'np.int32'}), '(label_list, dtype=np.int32)\n', (6457, 6485), True, 'import numpy as np\n'), ((8114, 8159), 'numpy.array', 'np.array', (['img_poly_index_list'], {'dtype': 'np.int32'}), '(img_poly_index_list, 
dtype=np.int32)\n', (8122, 8159), True, 'import numpy as np\n'), ((8631, 8752), 'oneflow.nn.COCOReader', 'flow.nn.COCOReader', ([], {'annotation_file': 'anno_file', 'image_dir': 'image_dir', 'batch_size': '(2)', 'shuffle': '(True)', 'stride_partition': '(True)'}), '(annotation_file=anno_file, image_dir=image_dir,\n batch_size=2, shuffle=True, stride_partition=True)\n', (8649, 8752), True, 'import oneflow as flow\n'), ((8844, 8882), 'oneflow.nn.image.decode', 'flow.nn.image.decode', ([], {'dtype': 'flow.float'}), '(dtype=flow.float)\n', (8864, 8882), True, 'import oneflow as flow\n'), ((10549, 10658), 'oneflow.nn.OfrecordReader', 'flow.nn.OfrecordReader', (['"""/dataset/imagenette/ofrecord"""'], {'batch_size': 'batch_size', 'part_name_suffix_length': '(5)'}), "('/dataset/imagenette/ofrecord', batch_size=\n batch_size, part_name_suffix_length=5)\n", (10571, 10658), True, 'import oneflow as flow\n'), ((10766, 10805), 'oneflow.nn.OFRecordBytesDecoder', 'flow.nn.OFRecordBytesDecoder', (['"""encoded"""'], {}), "('encoded')\n", (10794, 10805), True, 'import oneflow as flow\n'), ((10938, 11007), 'cv2.imread', 'cv2.imread', (['"""/dataset/imagenette/ofrecord/gt_tensor_buffer_image.png"""'], {}), "('/dataset/imagenette/ofrecord/gt_tensor_buffer_image.png')\n", (10948, 11007), False, 'import cv2\n'), ((11022, 11073), 'cv2.imdecode', 'cv2.imdecode', (['image_raw_buffer_nd', 'cv2.IMREAD_COLOR'], {}), '(image_raw_buffer_nd, cv2.IMREAD_COLOR)\n', (11034, 11073), False, 'import cv2\n'), ((2306, 2351), 'numpy.array_equal', 'np.array_equal', (['image_raw_buffer_nd[0]', 'gt_np'], {}), '(image_raw_buffer_nd[0], gt_np)\n', (2320, 2351), True, 'import numpy as np\n'), ((2594, 2647), 'numpy.array_equal', 'np.array_equal', (['resized_image_raw_buffer_nd[0]', 'gt_np'], {}), '(resized_image_raw_buffer_nd[0], gt_np)\n', (2608, 2647), True, 'import numpy as np\n'), ((2898, 2918), 'numpy.float32', 'np.float32', (['image_np'], {}), '(image_np)\n', (2908, 2918), True, 'import numpy as np\n'), 
((3137, 3168), 'numpy.array_equal', 'np.array_equal', (['image_np', 'gt_np'], {}), '(image_np, gt_np)\n', (3151, 3168), True, 'import numpy as np\n'), ((11103, 11129), 'numpy.array_equal', 'np.array_equal', (['img', 'gt_np'], {}), '(img, gt_np)\n', (11117, 11129), True, 'import numpy as np\n'), ((7920, 7965), 'numpy.array', 'np.array', (['img_poly_elem_list'], {'dtype': 'np.single'}), '(img_poly_elem_list, dtype=np.single)\n', (7928, 7965), True, 'import numpy as np\n'), ((4558, 4580), 'cv2.imread', 'cv2.imread', (['image_file'], {}), '(image_file)\n', (4568, 4580), False, 'import cv2\n'), ((9671, 9717), 'numpy.array_equal', 'np.array_equal', (['image_list[i]', "sample['image']"], {}), "(image_list[i], sample['image'])\n", (9685, 9717), True, 'import numpy as np\n'), ((9777, 9828), 'numpy.array_equal', 'np.array_equal', (['image_size[i]', "sample['image_size']"], {}), "(image_size[i], sample['image_size'])\n", (9791, 9828), True, 'import numpy as np\n'), ((9884, 9925), 'numpy.allclose', 'np.allclose', (['bbox_list[i]', "sample['bbox']"], {}), "(bbox_list[i], sample['bbox'])\n", (9895, 9925), True, 'import numpy as np\n'), ((10047, 10068), 'numpy.array', 'np.array', (['[cur_label]'], {}), '([cur_label])\n', (10055, 10068), True, 'import numpy as np\n'), ((10106, 10148), 'numpy.array_equal', 'np.array_equal', (['cur_label', "sample['label']"], {}), "(cur_label, sample['label'])\n", (10120, 10148), True, 'import numpy as np\n'), ((10187, 10228), 'numpy.allclose', 'np.allclose', (['segm_list[i]', "sample['poly']"], {}), "(segm_list[i], sample['poly'])\n", (10198, 10228), True, 'import numpy as np\n'), ((10288, 10344), 'numpy.array_equal', 'np.array_equal', (['segm_index_list[i]', "sample['poly_index']"], {}), "(segm_index_list[i], sample['poly_index'])\n", (10302, 10344), True, 'import numpy as np\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from google.protobuf import text_format
import oneflow._oneflow_internal
import oneflow.core.job.placement_pb2 as placement_pb
import oneflow.core.job.scope_pb2 as scope_pb
import oneflow.core.operator.op_attribute_pb2 as op_attribute_pb
import oneflow.eager.gradient_util as gradient_util
import oneflow.eager.op_executor as op_executor
import oneflow.eager.symbol_storage as symbol_storage
import oneflow.framework.scope_util as scope_util
def MakeScopeSymbol(job_conf, parallel_conf, is_mirrored):
parallel_hierarchy = None
if parallel_conf.has_hierarchy():
parallel_hierarchy = oneflow._oneflow_internal.Size(
tuple(parallel_conf.hierarchy().dim())
)
return scope_util.MakeInitialScope(
job_conf,
parallel_conf.device_tag(),
list(parallel_conf.device_name()),
parallel_hierarchy,
is_mirrored,
).symbol_id
def MakeParallelDescSymbol(parallel_conf):
symbol_id = None
def BuildInstruction(builder):
nonlocal symbol_id
symbol_id = builder.GetParallelDescSymbol(parallel_conf).symbol_id
oneflow._oneflow_internal.deprecated.LogicalRun(BuildInstruction)
return symbol_id
def MirroredCast(op_attribute_str, parallel_conf):
op_attribute = text_format.Parse(op_attribute_str, op_attribute_pb.OpAttribute())
blob_register = oneflow._oneflow_internal.GetDefaultBlobRegister()
is_cast_to_mirrored = op_attribute.op_conf.HasField("cast_to_mirrored_conf")
is_cast_from_mirrored = op_attribute.op_conf.HasField("cast_from_mirrored_conf")
assert is_cast_to_mirrored or is_cast_from_mirrored
_MirroredCastAndAddOutputBlobReleaser(op_attribute, blob_register)
bw_blob_register = gradient_util.GetDefaultBackwardBlobRegister()
gradient_util.TrySetBackwardUsedBlobObject(
op_attribute, blob_register, bw_blob_register
)
def InterpretCompletedOp(op_attribute_str, parallel_conf):
op_attribute = text_format.Parse(op_attribute_str, op_attribute_pb.OpAttribute())
blob_register = gradient_util.GetDefaultBackwardBlobRegister()
_InterpretCompletedOp(op_attribute, parallel_conf, blob_register)
gradient_util.ReleaseUnusedBlobObject(op_attribute, blob_register)
def _InterpretCompletedOp(op_attribute, parallel_conf, blob_register):
return op_executor.Interpret(op_attribute, parallel_conf, blob_register)
def _MirroredCastAndAddOutputBlobReleaser(op_attribute, blob_register):
op_executor.MirroredCast(op_attribute, blob_register)
_AddOutputBlobObjectReleaser4InputBlobObject(op_attribute, blob_register)
def _AddOutputBlobObjectReleaser4InputBlobObject(op_attribute, blob_register):
in_lbi = op_attribute.arg_signature.bn_in_op2lbi["in"]
in_lbn = "%s/%s" % (in_lbi.op_name, in_lbi.blob_name)
in_blob_object = blob_register.GetObject4BlobName(in_lbn)
release = _MakeReleaser4MirroredCastBlobObject(op_attribute, blob_register)
in_blob_object.add_releaser(release)
def _MakeReleaser4MirroredCastBlobObject(op_attribute, blob_register):
def ReleaseMirroredBlobObject(obj):
for obn in op_attribute.output_bns:
lbi = op_attribute.arg_signature.bn_in_op2lbi[obn]
lbn = "%s/%s" % (lbi.op_name, lbi.blob_name)
blob_object = blob_register.GetObject4BlobName(lbn)
blob_register.ClearObject4BlobName(lbn)
return ReleaseMirroredBlobObject
| [
"oneflow.eager.op_executor.Interpret",
"oneflow.eager.gradient_util.GetDefaultBackwardBlobRegister",
"oneflow.core.operator.op_attribute_pb2.OpAttribute",
"oneflow.eager.op_executor.MirroredCast",
"oneflow.eager.gradient_util.ReleaseUnusedBlobObject",
"oneflow.eager.gradient_util.TrySetBackwardUsedBlobObj... | [((2308, 2354), 'oneflow.eager.gradient_util.GetDefaultBackwardBlobRegister', 'gradient_util.GetDefaultBackwardBlobRegister', ([], {}), '()\n', (2352, 2354), True, 'import oneflow.eager.gradient_util as gradient_util\n'), ((2359, 2452), 'oneflow.eager.gradient_util.TrySetBackwardUsedBlobObject', 'gradient_util.TrySetBackwardUsedBlobObject', (['op_attribute', 'blob_register', 'bw_blob_register'], {}), '(op_attribute, blob_register,\n bw_blob_register)\n', (2401, 2452), True, 'import oneflow.eager.gradient_util as gradient_util\n'), ((2630, 2676), 'oneflow.eager.gradient_util.GetDefaultBackwardBlobRegister', 'gradient_util.GetDefaultBackwardBlobRegister', ([], {}), '()\n', (2674, 2676), True, 'import oneflow.eager.gradient_util as gradient_util\n'), ((2751, 2817), 'oneflow.eager.gradient_util.ReleaseUnusedBlobObject', 'gradient_util.ReleaseUnusedBlobObject', (['op_attribute', 'blob_register'], {}), '(op_attribute, blob_register)\n', (2788, 2817), True, 'import oneflow.eager.gradient_util as gradient_util\n'), ((2902, 2967), 'oneflow.eager.op_executor.Interpret', 'op_executor.Interpret', (['op_attribute', 'parallel_conf', 'blob_register'], {}), '(op_attribute, parallel_conf, blob_register)\n', (2923, 2967), True, 'import oneflow.eager.op_executor as op_executor\n'), ((3046, 3099), 'oneflow.eager.op_executor.MirroredCast', 'op_executor.MirroredCast', (['op_attribute', 'blob_register'], {}), '(op_attribute, blob_register)\n', (3070, 3099), True, 'import oneflow.eager.op_executor as op_executor\n'), ((1890, 1919), 'oneflow.core.operator.op_attribute_pb2.OpAttribute', 'op_attribute_pb.OpAttribute', ([], {}), '()\n', (1917, 1919), True, 'import oneflow.core.operator.op_attribute_pb2 as op_attribute_pb\n'), ((2579, 2608), 'oneflow.core.operator.op_attribute_pb2.OpAttribute', 'op_attribute_pb.OpAttribute', ([], {}), '()\n', (2606, 2608), True, 'import oneflow.core.operator.op_attribute_pb2 as op_attribute_pb\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import oneflow.experimental as flow
def log_mean_exp(x, dim=None, keepdims=False):
"""
Oneflow numerically stable log mean of exps across the `dim`.
:param x: A Tensor.
:param dim: An int or list or tuple. The dimensions to reduce.
If `None` (the default), reduces all dimensions.
:param keepdims: Bool. If true, retains reduced dimensions with length 1.
Default to be False.
:return: A Tensor after the computation of log mean exp along given axes of
x.
"""
x_max = flow.max(x, dim=dim, keepdim=True)
ret = flow.log(flow.mean(flow.exp(x - x_max), dim=dim,
keepdim=True)) + x_max
if not keepdims:
ret = flow.mean(ret, dim=dim)
return ret
# TODO(<NAME>): delete the following test codes.
# flow.enable_eager_execution()
# import zhusuan # import log_mean_exp
# import paddle
# x = paddle.randn([4, 4], 'float32')
# x_nd = x.numpy()
# print(x_nd.shape)
# paddle_result = zhusuan.log_mean_exp(x, dim=1)
# print(paddle_result.numpy())
# of_x = flow.Tensor(x_nd)
# oneflow_result = log_mean_exp(of_x, dim=1)
# print(oneflow_result.numpy())
| [
"oneflow.experimental.mean",
"oneflow.experimental.max",
"oneflow.experimental.exp"
] | [((571, 605), 'oneflow.experimental.max', 'flow.max', (['x'], {'dim': 'dim', 'keepdim': '(True)'}), '(x, dim=dim, keepdim=True)\n', (579, 605), True, 'import oneflow.experimental as flow\n'), ((755, 778), 'oneflow.experimental.mean', 'flow.mean', (['ret'], {'dim': 'dim'}), '(ret, dim=dim)\n', (764, 778), True, 'import oneflow.experimental as flow\n'), ((635, 654), 'oneflow.experimental.exp', 'flow.exp', (['(x - x_max)'], {}), '(x - x_max)\n', (643, 654), True, 'import oneflow.experimental as flow\n')] |
import numpy as np
import oneflow as flow
import oneflow.nn as nn
class GLU(nn.Module):
def __init__(self):
super(GLU, self).__init__()
def forward(self, x):
return x * flow.sigmoid(x)
class PixelShuffle(nn.Module):
"""Custom implementation pf Pixel Shuffle since PyTorch's PixelShuffle
requires a 4D input (we have 3D inputs).
"""
def __init__(self, upscale_factor):
super(PixelShuffle, self).__init__()
self.upscale_factor = upscale_factor
def forward(self, x):
n = x.shape[0]
c_out = x.shape[1] // 2
w_new = x.shape[2] * 2
return x.view(n, c_out, w_new)
class ResidualLayer(nn.Module):
"""ResBlock.
"""
def __init__(self, in_channels, out_channels, kernel_size, stride, padding):
super(ResidualLayer, self).__init__()
self.conv1d_layer = nn.Sequential(
nn.Conv1d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=1,
padding=padding,
),
nn.InstanceNorm1d(num_features=out_channels, affine=True),
)
self.conv_layer_gates = nn.Sequential(
nn.Conv1d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=1,
padding=padding,
),
nn.InstanceNorm1d(num_features=out_channels, affine=True),
)
self.conv1d_out_layer = nn.Sequential(
nn.Conv1d(
in_channels=out_channels,
out_channels=in_channels,
kernel_size=kernel_size,
stride=1,
padding=padding,
),
nn.InstanceNorm1d(num_features=in_channels, affine=True),
)
def forward(self, x):
h1_norm = self.conv1d_layer(x)
h1_gates_norm = self.conv_layer_gates(x)
h1_glu = h1_norm * flow.sigmoid(h1_gates_norm) # GLU
h2_norm = self.conv1d_out_layer(h1_glu)
return x + h2_norm
class DownSampleGenerator(nn.Module):
"""Downsampling blocks of the Generator.
"""
def __init__(self, in_channels, out_channels, kernel_size, stride, padding):
super(DownSampleGenerator, self).__init__()
self.convLayer = nn.Sequential(
nn.Conv2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
),
nn.InstanceNorm2d(num_features=out_channels, affine=True),
)
self.convLayer_gates = nn.Sequential(
nn.Conv2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
),
nn.InstanceNorm2d(num_features=out_channels, affine=True),
)
def forward(self, x):
return self.convLayer(x) * flow.sigmoid(self.convLayer_gates(x))
class Generator(nn.Module):
"""Generator of MaskCycleGAN-VC
"""
def __init__(self, input_shape=(80, 64), residual_in_channels=256):
super(Generator, self).__init__()
Cx, Tx = input_shape
self.flattened_channels = (Cx // 4) * residual_in_channels
# 2D Conv Layer
self.conv1 = nn.Conv2d(
in_channels=2,
out_channels=residual_in_channels // 2,
kernel_size=(5, 15),
stride=(1, 1),
padding=(2, 7),
)
self.conv1_gates = nn.Conv2d(
in_channels=2,
out_channels=residual_in_channels // 2,
kernel_size=(5, 15),
stride=1,
padding=(2, 7),
)
# 2D Downsampling Layers
self.downSample1 = DownSampleGenerator(
in_channels=residual_in_channels // 2,
out_channels=residual_in_channels,
kernel_size=5,
stride=2,
padding=2,
)
self.downSample2 = DownSampleGenerator(
in_channels=residual_in_channels,
out_channels=residual_in_channels,
kernel_size=5,
stride=2,
padding=2,
)
# 2D -> 1D Conv
self.conv2dto1dLayer = nn.Conv1d(
in_channels=self.flattened_channels,
out_channels=residual_in_channels,
kernel_size=1,
stride=1,
padding=0,
)
self.conv2dto1dLayer_tfan = nn.InstanceNorm1d(
num_features=residual_in_channels, affine=True
)
# Residual Blocks
self.residualLayer1 = ResidualLayer(
in_channels=residual_in_channels,
out_channels=residual_in_channels * 2,
kernel_size=3,
stride=1,
padding=1,
)
self.residualLayer2 = ResidualLayer(
in_channels=residual_in_channels,
out_channels=residual_in_channels * 2,
kernel_size=3,
stride=1,
padding=1,
)
self.residualLayer3 = ResidualLayer(
in_channels=residual_in_channels,
out_channels=residual_in_channels * 2,
kernel_size=3,
stride=1,
padding=1,
)
self.residualLayer4 = ResidualLayer(
in_channels=residual_in_channels,
out_channels=residual_in_channels * 2,
kernel_size=3,
stride=1,
padding=1,
)
self.residualLayer5 = ResidualLayer(
in_channels=residual_in_channels,
out_channels=residual_in_channels * 2,
kernel_size=3,
stride=1,
padding=1,
)
self.residualLayer6 = ResidualLayer(
in_channels=residual_in_channels,
out_channels=residual_in_channels * 2,
kernel_size=3,
stride=1,
padding=1,
)
# 1D -> 2D Conv
self.conv1dto2dLayer = nn.Conv1d(
in_channels=residual_in_channels,
out_channels=self.flattened_channels,
kernel_size=1,
stride=1,
padding=0,
)
self.conv1dto2dLayer_tfan = nn.InstanceNorm1d(
num_features=self.flattened_channels, affine=True
)
# UpSampling Layers
self.upSample1 = self.upsample(
in_channels=residual_in_channels,
out_channels=residual_in_channels * 4,
kernel_size=5,
stride=1,
padding=2,
)
self.glu = GLU()
self.upSample2 = self.upsample(
in_channels=residual_in_channels,
out_channels=residual_in_channels * 2,
kernel_size=5,
stride=1,
padding=2,
)
# 2D Conv Layer
self.lastConvLayer = nn.Conv2d(
in_channels=residual_in_channels // 2,
out_channels=1,
kernel_size=(5, 15),
stride=(1, 1),
padding=(2, 7),
)
def downsample(self, in_channels, out_channels, kernel_size, stride, padding):
self.ConvLayer = nn.Sequential(
nn.Conv1d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
),
nn.InstanceNorm1d(num_features=out_channels, affine=True),
GLU(),
)
return self.ConvLayer
def upsample(self, in_channels, out_channels, kernel_size, stride, padding):
self.convLayer = nn.Sequential(
nn.Conv2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
),
nn.PixelShuffle(upscale_factor=2),
nn.InstanceNorm2d(num_features=out_channels // 4, affine=True),
GLU(),
)
return self.convLayer
def forward(self, x, mask):
# Conv2d
x = flow.stack((x * mask, mask), dim=1)
conv1 = self.conv1(x) * flow.sigmoid(self.conv1_gates(x)) # GLU
# Downsampling
downsample1 = self.downSample1(conv1)
downsample2 = self.downSample2(downsample1)
# Reshape
reshape2dto1d = downsample2.view(
downsample2.size(0), self.flattened_channels, 1, -1
)
reshape2dto1d = reshape2dto1d.squeeze(2)
# 2D -> 1D
conv2dto1d_layer = self.conv2dto1dLayer(reshape2dto1d)
conv2dto1d_layer = self.conv2dto1dLayer_tfan(conv2dto1d_layer)
# Residual Blocks
residual_layer_1 = self.residualLayer1(conv2dto1d_layer)
residual_layer_2 = self.residualLayer2(residual_layer_1)
residual_layer_3 = self.residualLayer3(residual_layer_2)
residual_layer_4 = self.residualLayer4(residual_layer_3)
residual_layer_5 = self.residualLayer5(residual_layer_4)
residual_layer_6 = self.residualLayer6(residual_layer_5)
# 1D -> 2D
conv1dto2d_layer = self.conv1dto2dLayer(residual_layer_6)
conv1dto2d_layer = self.conv1dto2dLayer_tfan(conv1dto2d_layer)
# Reshape
reshape1dto2d = conv1dto2d_layer.unsqueeze(2)
reshape1dto2d = reshape1dto2d.view(reshape1dto2d.size(0), 256, 20, -1)
# UpSampling
upsample_layer_1 = self.upSample1(reshape1dto2d)
upsample_layer_2 = self.upSample2(upsample_layer_1)
# Conv2d
output = self.lastConvLayer(upsample_layer_2)
output = output.squeeze(1)
return output
class Discriminator(nn.Module):
    """PatchGAN discriminator.

    Scores a 2D feature map (shape ``[batch, num_features, frames]``, per the
    comment in :meth:`forward`) with a stack of Conv2d + GLU blocks and three
    InstanceNorm'd downsampling stages, ending in a sigmoid-activated
    single-channel convolution map.
    """
    def __init__(self, input_shape=(80, 64), residual_in_channels=256):
        # NOTE(review): ``input_shape`` is accepted but never used anywhere in
        # this class — confirm whether callers rely on it before removing.
        super(Discriminator, self).__init__()
        # Initial conv keeps spatial size (stride 1, padding 1) and halves the
        # channel count after the GLU gate.
        self.convLayer1 = nn.Sequential(
            nn.Conv2d(
                in_channels=1,
                out_channels=residual_in_channels // 2,
                kernel_size=(3, 3),
                stride=(1, 1),
                padding=(1, 1),
            ),
            GLU(),
        )
        # Downsampling Layers
        self.downSample1 = self.downsample(
            in_channels=residual_in_channels // 2,
            out_channels=residual_in_channels,
            kernel_size=(3, 3),
            stride=(2, 2),
            padding=1,
        )
        self.downSample2 = self.downsample(
            in_channels=residual_in_channels,
            out_channels=residual_in_channels * 2,
            kernel_size=(3, 3),
            stride=[2, 2],
            padding=1,
        )
        self.downSample3 = self.downsample(
            in_channels=residual_in_channels * 2,
            out_channels=residual_in_channels * 4,
            kernel_size=[3, 3],
            stride=[2, 2],
            padding=1,
        )
        # NOTE(review): downSample4 is constructed here but never applied in
        # forward() — confirm whether this is intentional (its parameters are
        # still registered and will appear in optimizers/state dicts).
        self.downSample4 = self.downsample(
            in_channels=residual_in_channels * 4,
            out_channels=residual_in_channels * 4,
            kernel_size=[1, 10],
            stride=(1, 1),
            padding=(0, 2),
        )
        # Conv Layer
        self.outputConvLayer = nn.Sequential(
            nn.Conv2d(
                in_channels=residual_in_channels * 4,
                out_channels=1,
                kernel_size=(1, 3),
                stride=[1, 1],
                padding=[0, 1],
            )
        )
    def downsample(self, in_channels, out_channels, kernel_size, stride, padding):
        """Build one Conv2d -> InstanceNorm2d -> GLU downsampling block."""
        convLayer = nn.Sequential(
            nn.Conv2d(
                in_channels=in_channels,
                out_channels=out_channels,
                kernel_size=kernel_size,
                stride=stride,
                padding=padding,
            ),
            nn.InstanceNorm2d(num_features=out_channels, affine=True),
            GLU(),
        )
        return convLayer
    def forward(self, x):
        # x has shape [batch_size, num_features, frames]
        # discriminator requires shape [batchSize, 1, num_features, frames]
        x = x.unsqueeze(1)
        conv_layer_1 = self.convLayer1(x)
        downsample1 = self.downSample1(conv_layer_1)
        downsample2 = self.downSample2(downsample1)
        downsample3 = self.downSample3(downsample2)
        # Sigmoid squashes patch scores into (0, 1).
        output = flow.sigmoid(self.outputConvLayer(downsample3))
        return output
| [
"oneflow.nn.InstanceNorm2d",
"oneflow.nn.InstanceNorm1d",
"oneflow.nn.Conv1d",
"oneflow.sigmoid",
"oneflow.stack",
"oneflow.nn.PixelShuffle",
"oneflow.nn.Conv2d"
] | [((3504, 3624), 'oneflow.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(2)', 'out_channels': '(residual_in_channels // 2)', 'kernel_size': '(5, 15)', 'stride': '(1, 1)', 'padding': '(2, 7)'}), '(in_channels=2, out_channels=residual_in_channels // 2,\n kernel_size=(5, 15), stride=(1, 1), padding=(2, 7))\n', (3513, 3624), True, 'import oneflow.nn as nn\n'), ((3720, 3835), 'oneflow.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(2)', 'out_channels': '(residual_in_channels // 2)', 'kernel_size': '(5, 15)', 'stride': '(1)', 'padding': '(2, 7)'}), '(in_channels=2, out_channels=residual_in_channels // 2,\n kernel_size=(5, 15), stride=1, padding=(2, 7))\n', (3729, 3835), True, 'import oneflow.nn as nn\n'), ((4445, 4567), 'oneflow.nn.Conv1d', 'nn.Conv1d', ([], {'in_channels': 'self.flattened_channels', 'out_channels': 'residual_in_channels', 'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)'}), '(in_channels=self.flattened_channels, out_channels=\n residual_in_channels, kernel_size=1, stride=1, padding=0)\n', (4454, 4567), True, 'import oneflow.nn as nn\n'), ((4670, 4735), 'oneflow.nn.InstanceNorm1d', 'nn.InstanceNorm1d', ([], {'num_features': 'residual_in_channels', 'affine': '(True)'}), '(num_features=residual_in_channels, affine=True)\n', (4687, 4735), True, 'import oneflow.nn as nn\n'), ((6185, 6307), 'oneflow.nn.Conv1d', 'nn.Conv1d', ([], {'in_channels': 'residual_in_channels', 'out_channels': 'self.flattened_channels', 'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)'}), '(in_channels=residual_in_channels, out_channels=self.\n flattened_channels, kernel_size=1, stride=1, padding=0)\n', (6194, 6307), True, 'import oneflow.nn as nn\n'), ((6410, 6478), 'oneflow.nn.InstanceNorm1d', 'nn.InstanceNorm1d', ([], {'num_features': 'self.flattened_channels', 'affine': '(True)'}), '(num_features=self.flattened_channels, affine=True)\n', (6427, 6478), True, 'import oneflow.nn as nn\n'), ((7049, 7169), 'oneflow.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 
'(residual_in_channels // 2)', 'out_channels': '(1)', 'kernel_size': '(5, 15)', 'stride': '(1, 1)', 'padding': '(2, 7)'}), '(in_channels=residual_in_channels // 2, out_channels=1,\n kernel_size=(5, 15), stride=(1, 1), padding=(2, 7))\n', (7058, 7169), True, 'import oneflow.nn as nn\n'), ((8312, 8347), 'oneflow.stack', 'flow.stack', (['(x * mask, mask)'], {'dim': '(1)'}), '((x * mask, mask), dim=1)\n', (8322, 8347), True, 'import oneflow as flow\n'), ((196, 211), 'oneflow.sigmoid', 'flow.sigmoid', (['x'], {}), '(x)\n', (208, 211), True, 'import oneflow as flow\n'), ((899, 1017), 'oneflow.nn.Conv1d', 'nn.Conv1d', ([], {'in_channels': 'in_channels', 'out_channels': 'out_channels', 'kernel_size': 'kernel_size', 'stride': '(1)', 'padding': 'padding'}), '(in_channels=in_channels, out_channels=out_channels, kernel_size=\n kernel_size, stride=1, padding=padding)\n', (908, 1017), True, 'import oneflow.nn as nn\n'), ((1121, 1178), 'oneflow.nn.InstanceNorm1d', 'nn.InstanceNorm1d', ([], {'num_features': 'out_channels', 'affine': '(True)'}), '(num_features=out_channels, affine=True)\n', (1138, 1178), True, 'import oneflow.nn as nn\n'), ((1250, 1368), 'oneflow.nn.Conv1d', 'nn.Conv1d', ([], {'in_channels': 'in_channels', 'out_channels': 'out_channels', 'kernel_size': 'kernel_size', 'stride': '(1)', 'padding': 'padding'}), '(in_channels=in_channels, out_channels=out_channels, kernel_size=\n kernel_size, stride=1, padding=padding)\n', (1259, 1368), True, 'import oneflow.nn as nn\n'), ((1472, 1529), 'oneflow.nn.InstanceNorm1d', 'nn.InstanceNorm1d', ([], {'num_features': 'out_channels', 'affine': '(True)'}), '(num_features=out_channels, affine=True)\n', (1489, 1529), True, 'import oneflow.nn as nn\n'), ((1601, 1719), 'oneflow.nn.Conv1d', 'nn.Conv1d', ([], {'in_channels': 'out_channels', 'out_channels': 'in_channels', 'kernel_size': 'kernel_size', 'stride': '(1)', 'padding': 'padding'}), '(in_channels=out_channels, out_channels=in_channels, kernel_size=\n kernel_size, stride=1, 
padding=padding)\n', (1610, 1719), True, 'import oneflow.nn as nn\n'), ((1823, 1879), 'oneflow.nn.InstanceNorm1d', 'nn.InstanceNorm1d', ([], {'num_features': 'in_channels', 'affine': '(True)'}), '(num_features=in_channels, affine=True)\n', (1840, 1879), True, 'import oneflow.nn as nn\n'), ((2033, 2060), 'oneflow.sigmoid', 'flow.sigmoid', (['h1_gates_norm'], {}), '(h1_gates_norm)\n', (2045, 2060), True, 'import oneflow as flow\n'), ((2423, 2546), 'oneflow.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'in_channels', 'out_channels': 'out_channels', 'kernel_size': 'kernel_size', 'stride': 'stride', 'padding': 'padding'}), '(in_channels=in_channels, out_channels=out_channels, kernel_size=\n kernel_size, stride=stride, padding=padding)\n', (2432, 2546), True, 'import oneflow.nn as nn\n'), ((2650, 2707), 'oneflow.nn.InstanceNorm2d', 'nn.InstanceNorm2d', ([], {'num_features': 'out_channels', 'affine': '(True)'}), '(num_features=out_channels, affine=True)\n', (2667, 2707), True, 'import oneflow.nn as nn\n'), ((2777, 2900), 'oneflow.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'in_channels', 'out_channels': 'out_channels', 'kernel_size': 'kernel_size', 'stride': 'stride', 'padding': 'padding'}), '(in_channels=in_channels, out_channels=out_channels, kernel_size=\n kernel_size, stride=stride, padding=padding)\n', (2786, 2900), True, 'import oneflow.nn as nn\n'), ((3004, 3061), 'oneflow.nn.InstanceNorm2d', 'nn.InstanceNorm2d', ([], {'num_features': 'out_channels', 'affine': '(True)'}), '(num_features=out_channels, affine=True)\n', (3021, 3061), True, 'import oneflow.nn as nn\n'), ((7373, 7496), 'oneflow.nn.Conv1d', 'nn.Conv1d', ([], {'in_channels': 'in_channels', 'out_channels': 'out_channels', 'kernel_size': 'kernel_size', 'stride': 'stride', 'padding': 'padding'}), '(in_channels=in_channels, out_channels=out_channels, kernel_size=\n kernel_size, stride=stride, padding=padding)\n', (7382, 7496), True, 'import oneflow.nn as nn\n'), ((7600, 7657), 'oneflow.nn.InstanceNorm1d', 
'nn.InstanceNorm1d', ([], {'num_features': 'out_channels', 'affine': '(True)'}), '(num_features=out_channels, affine=True)\n', (7617, 7657), True, 'import oneflow.nn as nn\n'), ((7853, 7976), 'oneflow.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'in_channels', 'out_channels': 'out_channels', 'kernel_size': 'kernel_size', 'stride': 'stride', 'padding': 'padding'}), '(in_channels=in_channels, out_channels=out_channels, kernel_size=\n kernel_size, stride=stride, padding=padding)\n', (7862, 7976), True, 'import oneflow.nn as nn\n'), ((8080, 8113), 'oneflow.nn.PixelShuffle', 'nn.PixelShuffle', ([], {'upscale_factor': '(2)'}), '(upscale_factor=2)\n', (8095, 8113), True, 'import oneflow.nn as nn\n'), ((8127, 8189), 'oneflow.nn.InstanceNorm2d', 'nn.InstanceNorm2d', ([], {'num_features': '(out_channels // 4)', 'affine': '(True)'}), '(num_features=out_channels // 4, affine=True)\n', (8144, 8189), True, 'import oneflow.nn as nn\n'), ((10121, 10240), 'oneflow.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(1)', 'out_channels': '(residual_in_channels // 2)', 'kernel_size': '(3, 3)', 'stride': '(1, 1)', 'padding': '(1, 1)'}), '(in_channels=1, out_channels=residual_in_channels // 2,\n kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n', (10130, 10240), True, 'import oneflow.nn as nn\n'), ((11423, 11542), 'oneflow.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(residual_in_channels * 4)', 'out_channels': '(1)', 'kernel_size': '(1, 3)', 'stride': '[1, 1]', 'padding': '[0, 1]'}), '(in_channels=residual_in_channels * 4, out_channels=1, kernel_size\n =(1, 3), stride=[1, 1], padding=[0, 1])\n', (11432, 11542), True, 'import oneflow.nn as nn\n'), ((11774, 11897), 'oneflow.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'in_channels', 'out_channels': 'out_channels', 'kernel_size': 'kernel_size', 'stride': 'stride', 'padding': 'padding'}), '(in_channels=in_channels, out_channels=out_channels, kernel_size=\n kernel_size, stride=stride, padding=padding)\n', (11783, 11897), True, 'import 
oneflow.nn as nn\n'), ((12001, 12058), 'oneflow.nn.InstanceNorm2d', 'nn.InstanceNorm2d', ([], {'num_features': 'out_channels', 'affine': '(True)'}), '(num_features=out_channels, affine=True)\n', (12018, 12058), True, 'import oneflow.nn as nn\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import oneflow as flow
import oneflow.unittest
# Elementwise binary operators under test; each is expected to raise a
# RuntimeError for the incompatible broadcast shapes used in
# TestBroadcastOps below.
binary_ops = [
    flow.add,
    flow.sub,
    flow.mul,
    flow.div,
    flow.min,
    flow.minimum,
    flow.max,
    flow.maximum,
    flow.fmod,
    flow.pow,
    flow.eq,
    flow.ne,
    flow.gt,
    flow.ge,
    flow.lt,
    flow.le,
    flow.logical_and,
    flow.logical_or,
    flow.logical_xor,
]
@flow.unittest.skip_unless_1n1d()
class TestBroadcastOps(flow.unittest.TestCase):
    def test_broadcast_binary_ops(test_case):
        """Each binary op must reject shape (8, 10) vs (8): trailing dims 10
        and 8 cannot broadcast, so a RuntimeError with a descriptive message
        is expected."""
        expected_msg = (
            "The size of tensor a (10) must match the size of tensor b (8)"
            " at non-singleton dimension 1"
        )
        lhs = flow.Tensor(8, 10)
        rhs = flow.Tensor(8)
        for op in binary_ops:
            with test_case.assertRaises(RuntimeError) as ctx:
                op(lhs, rhs)
            test_case.assertTrue(expected_msg in str(ctx.exception))
# Allow running this test file directly with `python <file>`.
if __name__ == "__main__":
    unittest.main()
| [
"oneflow.unittest.skip_unless_1n1d",
"oneflow.Tensor"
] | [((968, 1000), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (998, 1000), True, 'import oneflow as flow\n'), ((1498, 1513), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1511, 1513), False, 'import unittest\n'), ((1107, 1125), 'oneflow.Tensor', 'flow.Tensor', (['(8)', '(10)'], {}), '(8, 10)\n', (1118, 1125), True, 'import oneflow as flow\n'), ((1138, 1152), 'oneflow.Tensor', 'flow.Tensor', (['(8)'], {}), '(8)\n', (1149, 1152), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import random
import sys
import traceback
from typing import List, Optional, Sequence, Tuple, Union
import oneflow as flow
import oneflow._oneflow_internal._C as _C
from oneflow.framework.tensor import Tensor
from oneflow.nn.common_types import _size_1_t, _size_2_t, _size_3_t, _size_any_t
from oneflow.nn.module import Module
from oneflow.nn.modules.utils import _pair, _reverse_repeat_tuple, _single, _triple
import oneflow.framework.id_util as id_util
def mirrored_gen_random_seed(seed=None):
    """Normalize an optional seed into a ``(seed, has_seed)`` pair.

    ``None`` maps to ``(-1, False)`` (i.e. let the backend choose a seed);
    any explicit value is passed through unchanged with ``has_seed=True``.
    """
    if seed is None:
        return (-1, False)
    return (seed, True)
class OFRecordReader(Module):
    """Reader module that streams batches of OFRecord examples from disk.

    Runs either in local mode (``device``) or in global/consistent mode
    (``placement`` + ``sbp``); the two are mutually exclusive (an explicit
    ``device`` is asserted to be ``None`` when ``placement`` is given).
    """
    def __init__(
        self,
        ofrecord_dir: str,
        batch_size: int = 1,
        data_part_num: int = 1,
        part_name_prefix: str = "part-",
        part_name_suffix_length: int = -1,
        random_shuffle: bool = False,
        shuffle_buffer_size: int = 1024,
        shuffle_after_epoch: bool = False,
        random_seed: int = -1,
        device: Union[flow.device, str] = None,
        placement: flow.placement = None,
        sbp: Union[flow.sbp.sbp, List[flow.sbp.sbp]] = None,
        name: Optional[str] = None,
    ):
        super().__init__()
        if name is not None:
            print("WARNING: name has been deprecated and has NO effect.\n")
        self.ofrecord_dir = ofrecord_dir
        self.batch_size = batch_size
        self.data_part_num = data_part_num
        self.part_name_prefix = part_name_prefix
        self.part_name_suffix_length = part_name_suffix_length
        self.random_shuffle = random_shuffle
        self.shuffle_buffer_size = shuffle_buffer_size
        self.shuffle_after_epoch = shuffle_after_epoch
        self.placement = placement
        if placement is None:
            # Local mode: default to CPU when no device was given.
            self.device = device or flow.device("cpu")
        else:
            # Global mode: a device must not be passed alongside a placement.
            assert device is None
        if placement is not None:
            assert isinstance(sbp, (flow.sbp.sbp, tuple, list)), "sbp: %s" % sbp
            if isinstance(sbp, flow.sbp.sbp):
                # Normalize a single sbp into a 1-tuple.
                sbp = (sbp,)
            else:
                for elem in sbp:
                    assert isinstance(elem, flow.sbp.sbp), "sbp: %s" % sbp
            # One sbp entry is required per placement hierarchy dimension.
            assert len(sbp) == len(placement.hierarchy)
        else:
            assert sbp is None, "sbp: %s" % sbp
        self.sbp = sbp
        # (seed, has_seed): -1/False means "backend picks the seed".
        (self.seed, self.has_seed) = mirrored_gen_random_seed(random_seed)
        self._op = flow.stateful_op("OFRecordReader").Output("out").Build()
    def forward(self):
        # Dispatch with placement/sbp in global mode, with device otherwise.
        if self.placement is not None:
            res = _C.dispatch_ofrecord_reader(
                self._op,
                data_dir=self.ofrecord_dir,
                data_part_num=self.data_part_num,
                part_name_prefix=self.part_name_prefix,
                part_name_suffix_length=self.part_name_suffix_length,
                batch_size=self.batch_size,
                shuffle_buffer_size=self.shuffle_buffer_size,
                random_shuffle=self.random_shuffle,
                shuffle_after_epoch=self.shuffle_after_epoch,
                seed=self.seed,
                sbp=self.sbp,
                placement=self.placement,
            )
        else:
            res = _C.dispatch_ofrecord_reader(
                self._op,
                data_dir=self.ofrecord_dir,
                data_part_num=self.data_part_num,
                part_name_prefix=self.part_name_prefix,
                part_name_suffix_length=self.part_name_suffix_length,
                batch_size=self.batch_size,
                shuffle_buffer_size=self.shuffle_buffer_size,
                random_shuffle=self.random_shuffle,
                shuffle_after_epoch=self.shuffle_after_epoch,
                seed=self.seed,
                device=self.device,
            )
        return res
class OFRecordRawDecoder(Module):
    """Decode a raw (bytes) feature of an OFRecord into a typed tensor.

    ``auto_zero_padding`` is deprecated; when set it is OR'ed into
    ``truncate`` at dispatch time so old callers keep working.
    """
    def __init__(
        self,
        blob_name: str,
        shape: Sequence[int],
        dtype: flow.dtype,
        dim1_varying_length: bool = False,
        truncate: bool = False,
        auto_zero_padding: bool = False,
        name: Optional[str] = None,
    ):
        super().__init__()
        if auto_zero_padding:
            print(
                "WARNING: auto_zero_padding has been deprecated, Please use truncate instead.\n"
            )
        if name is not None:
            print("WARNING: name has been deprecated and has NO effect.\n")
        self.blob_name = blob_name
        self.shape = shape
        self.dtype = dtype
        self.dim1_varying_length = dim1_varying_length
        self.truncate = truncate
        self.auto_zero_padding = auto_zero_padding
        self._op = (
            flow.stateful_op("ofrecord_raw_decoder").Input("in").Output("out").Build()
        )
    def forward(self, input):
        # Legacy auto_zero_padding folds into truncate (see class docstring).
        res = _C.dispatch_ofrecord_raw_decoder(
            self._op,
            input,
            name=self.blob_name,
            shape=self.shape,
            data_type=self.dtype,
            dim1_varying_length=self.dim1_varying_length,
            truncate=self.truncate or self.auto_zero_padding,
        )
        return res
class CoinFlip(Module):
    """Generate a batch of random coin-flip outcomes with the given probability.

    Runs either in local mode (``device``) or in global/consistent mode
    (``placement`` + ``sbp``); the two are mutually exclusive, mirroring
    OFRecordReader.
    """
    def __init__(
        self,
        batch_size: int = 1,
        random_seed: Optional[int] = None,
        probability: float = 0.5,
        device: Union[flow.device, str] = None,
        placement: flow.placement = None,
        sbp: Union[flow.sbp.sbp, List[flow.sbp.sbp]] = None,
    ):
        super().__init__()
        self.batch_size = batch_size
        self.probability = probability
        self.placement = placement
        if placement is None:
            # BUG FIX: previously ``self.device`` was only assigned when
            # ``device`` was None, so passing an explicit device raised an
            # AttributeError later in forward(). Match OFRecordReader's
            # handling: use the given device, defaulting to CPU.
            self.device = device or flow.device("cpu")
        else:
            # Global mode: a device must not be passed alongside a placement.
            assert device is None
        if placement is not None:
            assert isinstance(sbp, (flow.sbp.sbp, tuple, list)), "sbp: %s" % sbp
            if isinstance(sbp, flow.sbp.sbp):
                # Normalize a single sbp into a 1-tuple.
                sbp = (sbp,)
            else:
                for elem in sbp:
                    assert isinstance(elem, flow.sbp.sbp), "sbp: %s" % sbp
            # One sbp entry is required per placement hierarchy dimension.
            assert len(sbp) == len(placement.hierarchy)
        else:
            assert sbp is None, "sbp: %s" % sbp
        self.sbp = sbp
        # (seed, has_seed): -1/False means "backend picks the seed".
        (self.seed, self.has_seed) = mirrored_gen_random_seed(random_seed)
        self._op = flow.stateful_op("coin_flip").Output("out").Build()
    def forward(self):
        # Dispatch with placement/sbp in global mode, with device otherwise.
        if self.placement is not None:
            res = _C.dispatch_coin_flip(
                self._op,
                batch_size=self.batch_size,
                probability=self.probability,
                has_seed=self.has_seed,
                seed=self.seed,
                placement=self.placement,
                sbp=self.sbp,
            )
        else:
            res = _C.dispatch_coin_flip(
                self._op,
                batch_size=self.batch_size,
                probability=self.probability,
                has_seed=self.has_seed,
                seed=self.seed,
                device=self.device,
            )
        return res
class CropMirrorNormalize(Module):
    """Fused crop + (optional) horizontal mirror + normalize for image batches.

    Dispatches to one of four prebuilt ops depending on input dtype
    (``flow.uint8`` vs ``flow.tensor_buffer``) and on whether a ``mirror``
    tensor is supplied; any other input dtype raises ``NotImplementedError``.
    """
    def __init__(
        self,
        color_space: str = "BGR",
        output_layout: str = "NCHW",
        crop_h: int = 0,
        crop_w: int = 0,
        crop_pos_y: float = 0.5,
        crop_pos_x: float = 0.5,
        mean: Sequence[float] = [0.0],
        std: Sequence[float] = [1.0],
        output_dtype: flow.dtype = flow.float,
    ):
        # NOTE(review): ``mean``/``std`` use mutable list defaults; they are
        # only read here, but confirm no caller mutates them.
        super().__init__()
        if output_layout != "NCHW":
            print(
                "WARNING: output_layout has been deprecated. Please use Environment Variable ONEFLOW_ENABLE_NHWC, and make it equals 1."
            )
        # The env var overrides whatever layout the caller passed.
        if os.getenv("ONEFLOW_ENABLE_NHWC") == "1":
            output_layout = "NHWC"
        else:
            output_layout = "NCHW"
        self.color_space = color_space
        self.output_layout = output_layout
        self.mean = mean
        self.std = std
        self.crop_h = crop_h
        self.crop_w = crop_w
        self.crop_pos_y = crop_pos_y
        self.crop_pos_x = crop_pos_x
        self.output_dtype = output_dtype
        # Four op variants: {uint8, tensor_buffer} x {with, without mirror}.
        self._op_uint8_with_mirror = (
            flow.stateful_op("crop_mirror_normalize_from_uint8")
            .Input("in")
            .Input("mirror")
            .Output("out")
            .Build()
        )
        self._op_uint8_no_mirror = (
            flow.stateful_op("crop_mirror_normalize_from_uint8")
            .Input("in")
            .Output("out")
            .Build()
        )
        self._op_buffer_with_mirror = (
            flow.stateful_op("crop_mirror_normalize_from_tensorbuffer")
            .Input("in")
            .Input("mirror")
            .Output("out")
            .Build()
        )
        self._op_buffer_no_mirror = (
            flow.stateful_op("crop_mirror_normalize_from_tensorbuffer")
            .Input("in")
            .Output("out")
            .Build()
        )
    def forward(self, input, mirror=None):
        # Select the prebuilt op matching the input dtype / mirror presence.
        if input.dtype is flow.uint8:
            if mirror is not None:
                res = _C.dispatch_crop_mirror_normalize_from_uint8(
                    self._op_uint8_with_mirror,
                    (input, mirror),
                    color_space=self.color_space,
                    output_layout=self.output_layout,
                    mean=self.mean,
                    std=self.std,
                    crop_h=self.crop_h,
                    crop_w=self.crop_w,
                    crop_pos_x=self.crop_pos_x,
                    crop_pos_y=self.crop_pos_y,
                    output_dtype=self.output_dtype,
                )
            else:
                res = _C.dispatch_crop_mirror_normalize_from_uint8(
                    self._op_uint8_no_mirror,
                    (input,),
                    color_space=self.color_space,
                    output_layout=self.output_layout,
                    mean=self.mean,
                    std=self.std,
                    crop_h=self.crop_h,
                    crop_w=self.crop_w,
                    crop_pos_x=self.crop_pos_x,
                    crop_pos_y=self.crop_pos_y,
                    output_dtype=self.output_dtype,
                )
        elif input.dtype is flow.tensor_buffer:
            if mirror is not None:
                res = _C.dispatch_crop_mirror_normalize_from_tensorbuffer(
                    self._op_buffer_with_mirror,
                    (input, mirror),
                    color_space=self.color_space,
                    output_layout=self.output_layout,
                    mean=self.mean,
                    std=self.std,
                    crop_h=self.crop_h,
                    crop_w=self.crop_w,
                    crop_pos_x=self.crop_pos_x,
                    crop_pos_y=self.crop_pos_y,
                    output_dtype=self.output_dtype,
                )
            else:
                res = _C.dispatch_crop_mirror_normalize_from_tensorbuffer(
                    self._op_buffer_no_mirror,
                    (input,),
                    color_space=self.color_space,
                    output_layout=self.output_layout,
                    mean=self.mean,
                    std=self.std,
                    crop_h=self.crop_h,
                    crop_w=self.crop_w,
                    crop_pos_x=self.crop_pos_x,
                    crop_pos_y=self.crop_pos_y,
                    output_dtype=self.output_dtype,
                )
        else:
            print(
                "ERROR! oneflow.nn.CropMirrorNormalize module NOT support input dtype = ",
                input.dtype,
            )
            raise NotImplementedError
        return res
class OFRecordImageDecoderRandomCrop(Module):
    """Decode an OFRecord image feature with a random area/aspect-ratio crop."""
    def __init__(
        self,
        blob_name: str,
        color_space: str = "BGR",
        num_attempts: int = 10,
        random_seed: Optional[int] = None,
        random_area: Sequence[float] = [0.08, 1.0],
        random_aspect_ratio: Sequence[float] = [0.75, 1.333333],
    ):
        # NOTE(review): ``random_area``/``random_aspect_ratio`` use mutable
        # list defaults; they are only read here, but confirm no caller
        # mutates them.
        super().__init__()
        self.blob_name = blob_name
        self.color_space = color_space
        self.num_attempts = num_attempts
        self.random_area = random_area
        self.random_aspect_ratio = random_aspect_ratio
        # (seed, has_seed): -1/False means "backend picks the seed".
        (self.seed, self.has_seed) = mirrored_gen_random_seed(random_seed)
        self._op = (
            flow.stateful_op("ofrecord_image_decoder_random_crop")
            .Input("in")
            .Output("out")
            .Build()
        )
    def forward(self, input):
        res = _C.dispatch_ofrecord_image_decoder_random_crop(
            self._op,
            input,
            name=self.blob_name,
            color_space=self.color_space,
            num_attempts=self.num_attempts,
            random_area=self.random_area,
            random_aspect_ratio=self.random_aspect_ratio,
            has_seed=self.has_seed,
            seed=self.seed,
        )
        return res
class OFRecordImageDecoder(Module):
    """Decode an image feature of an OFRecord batch via the
    ``ofrecord_image_decoder`` op."""

    def __init__(self, blob_name: str, color_space: str = "BGR"):
        super().__init__()
        self.blob_name = blob_name
        self.color_space = color_space
        builder = flow.stateful_op("ofrecord_image_decoder").Input("in").Output("out")
        self._op = builder.Build()

    def forward(self, input):
        # Dispatch with the feature name and color space captured at init.
        return _C.dispatch_ofrecord_image_decoder(
            self._op, input, name=self.blob_name, color_space=self.color_space
        )
class OFRecordImageGpuDecoderRandomCropResize(Module):
    """GPU-accelerated fused image decode + random crop + resize.

    Only runnable inside ``nn.Graph`` (lazy mode); eager inputs raise
    ``NotImplementedError``. The implementation builds a raw op-conf through
    OneFlow internals rather than the usual stateful_op builder.
    """
    def __init__(
        self,
        target_width: int,
        target_height: int,
        num_attempts: Optional[int] = 10,
        seed: Optional[int] = 0,
        random_area: Optional[Sequence[float]] = [0.08, 1.0],
        random_aspect_ratio: Optional[Sequence[float]] = [0.75, 1.333333],
        num_workers: Optional[int] = 3,
        warmup_size: Optional[int] = 6400,
        max_num_pixels: Optional[int] = 67108864,
    ):
        super().__init__()
        self.target_width = target_width
        self.target_height = target_height
        self.num_attempts = num_attempts
        self.seed = seed
        # Both ranges must be (min, max) pairs; unpacked in forward().
        assert len(random_area) == 2
        self.random_area = random_area
        assert len(random_aspect_ratio) == 2
        self.random_aspect_ratio = random_aspect_ratio
        self.num_workers = num_workers
        self.warmup_size = warmup_size
        self.max_num_pixels = max_num_pixels
        gpu_decoder_conf = (
            flow._oneflow_internal.oneflow.core.operator.op_conf.ImageDecoderRandomCropResizeOpConf()
        )
        # Placeholder input name; the real input is bound at dispatch time.
        gpu_decoder_conf.set_in("error_input_need_to_be_replaced")
        gpu_decoder_conf.set_out("out")
        self._op = flow._oneflow_internal.one.ImageDecoderRandomCropResizeOpExpr(
            id_util.UniqueStr("ImageGpuDecoder"), gpu_decoder_conf, ["in"], ["out"]
        )
    def forward(self, input):
        # Eager tensors are rejected: this op only works in nn.Graph.
        if not input.is_lazy:
            print(
                "ERROR! oneflow.nn.OFRecordImageGpuDecoderRandomCropResize module ",
                "NOT support run as eager module, please use it in nn.Graph.",
            )
            raise NotImplementedError
        res = _C.dispatch_image_decoder_random_crop_resize(
            self._op,
            input,
            target_width=self.target_width,
            target_height=self.target_height,
            num_attempts=self.num_attempts,
            seed=self.seed,
            random_area_min=self.random_area[0],
            random_area_max=self.random_area[1],
            random_aspect_ratio_min=self.random_aspect_ratio[0],
            random_aspect_ratio_max=self.random_aspect_ratio[1],
            num_workers=self.num_workers,
            warmup_size=self.warmup_size,
            max_num_pixels=self.max_num_pixels,
        )
        # A CPU result means the CUDA fast path was unavailable and the op
        # silently fell back to CPU decoding.
        if not res.is_cuda:
            print(
                "WARNING! oneflow.nn.OFRecordImageGpuDecoderRandomCropResize ONLY support ",
                "CUDA runtime version >= 10.2, so now it degenerates into CPU decode version.",
            )
        return res
class TensorBufferToListOfTensors(Module):
    """Unpack a tensor-buffer input into ``out_num`` plain tensors with the
    given shapes and dtypes."""

    def __init__(
        self, out_shapes, out_dtypes, out_num: int = 1, dynamic_out: bool = False
    ):
        super().__init__()
        self.out_shapes = out_shapes
        self.out_dtypes = out_dtypes
        self.dynamic_out = dynamic_out
        builder = flow.stateful_op("tensor_buffer_to_list_of_tensors_v2")
        self._op = builder.Input("in").Output("out", out_num).Build()

    def forward(self, input):
        return _C.dispatch_tensor_buffer_to_list_of_tensors_v2(
            self._op,
            input,
            out_shapes=self.out_shapes,
            out_dtypes=self.out_dtypes,
            dynamic_out=self.dynamic_out,
        )
def tensor_buffer_to_list_of_tensors(tensor, out_shapes, out_dtypes):
    """Functional wrapper around :class:`TensorBufferToListOfTensors`:
    one output tensor per entry of ``out_shapes``."""
    shapes = [list(shape) for shape in out_shapes]
    module = TensorBufferToListOfTensors(shapes, out_dtypes, len(out_shapes))
    return module(tensor)
class ImageResize(Module):
    """Resize images either preserving aspect ratio (scalar ``target_size``)
    or to a fixed (width, height).

    Several legacy parameters (``color_space``, ``interp_type``,
    ``resize_shorter``, ``resize_x``/``resize_y``, ``name``) are still
    accepted; each prints a deprecation warning and is translated into its
    modern equivalent. forward() returns ``(image, scale, new_size)``;
    ``new_size`` is ``None`` in fixed-size mode.
    """
    def __init__(
        self,
        target_size: Union[int, Sequence[int]] = None,
        min_size: Optional[int] = None,
        max_size: Optional[int] = None,
        keep_aspect_ratio: bool = False,
        resize_side: str = "shorter",
        channels: int = 3,
        dtype: Optional[flow.dtype] = None,
        interpolation_type: str = "auto",
        name: Optional[str] = None,
        color_space: Optional[str] = None,
        interp_type: Optional[str] = None,
        resize_shorter: int = 0,
        resize_x: int = 0,
        resize_y: int = 0,
    ):
        super().__init__()
        if name is not None:
            print("WARNING: name has been deprecated and has NO effect.\n")
        # NOTE(review): deprecated_param_used is set below but never read in
        # this class — confirm whether it was meant to gate something.
        deprecated_param_used = False
        # Legacy color_space -> channels.
        if color_space is not None:
            print(
                "WARNING: color_space has been deprecated. Please use channels instead."
            )
            print(traceback.format_stack()[-2])
            deprecated_param_used = True
            assert isinstance(color_space, str)
            if color_space.upper() == "RGB" or color_space.upper() == "BGR":
                channels = 3
            elif color_space.upper() == "GRAY":
                channels = 1
            else:
                raise ValueError("invalid color_space")
        self.channels = channels
        # Legacy interp_type -> interpolation_type.
        if interp_type is not None:
            print(
                "WARNING: interp_type has been deprecated. Please use interpolation_type instead."
            )
            print(traceback.format_stack()[-2])
            deprecated_param_used = True
            assert isinstance(interp_type, str)
            if interp_type == "Linear":
                interpolation_type = "bilinear"
            elif interp_type == "NN":
                interpolation_type = "nearest_neighbor"
            elif interp_type == "Cubic":
                interpolation_type = "bicubic"
            else:
                raise ValueError("invalid interp_type")
        self.interpolation_type = interpolation_type
        # Legacy resize_x/resize_y -> fixed target_size (overrides caller's
        # keep_aspect_ratio).
        if resize_x > 0 and resize_y > 0:
            print(
                "WARNING: resize_x and resize_y has been deprecated. Please use target_size instead."
            )
            print(traceback.format_stack()[-2])
            deprecated_param_used = True
            target_size = (resize_x, resize_y)
            keep_aspect_ratio = False
        # Legacy resize_shorter -> scalar target_size with aspect preserved.
        # Note: this branch runs after resize_x/resize_y and overrides them.
        if resize_shorter > 0:
            print(
                "WARNING: resize_shorter has been deprecated. Please use target_size instead."
            )
            print(traceback.format_stack()[-2])
            deprecated_param_used = True
            target_size = resize_shorter
            keep_aspect_ratio = True
            resize_side = "shorter"
        self.keep_aspect_ratio = keep_aspect_ratio
        if self.keep_aspect_ratio:
            # Aspect-preserving mode: scalar target plus min/max bounds.
            if not isinstance(target_size, int):
                raise ValueError(
                    "target_size must be an int when keep_aspect_ratio is True"
                )
            if min_size is None:
                min_size = 0
            if max_size is None:
                max_size = 0
            if resize_side == "shorter":
                resize_longer = False
            elif resize_side == "longer":
                resize_longer = True
            else:
                raise ValueError('resize_side must be "shorter" or "longer"')
            self.target_size = target_size
            self.min_size = min_size
            self.max_size = max_size
            self.resize_longer = resize_longer
            self._op = (
                flow.stateful_op("image_resize_keep_aspect_ratio")
                .Input("in")
                .Output("out")
                .Output("size")
                .Output("scale")
                .Build()
            )
        else:
            # Fixed-size mode: target_size must be a (width, height) pair.
            if (
                not isinstance(target_size, (list, tuple))
                or len(target_size) != 2
                or (not all((isinstance(size, int) for size in target_size)))
            ):
                raise ValueError(
                    "target_size must be a form like (width, height) when keep_aspect_ratio is False"
                )
            if dtype is None:
                dtype = flow.uint8
            self.dtype = dtype
            (self.target_w, self.target_h) = target_size
            self._op = (
                flow.stateful_op("image_resize_to_fixed")
                .Input("in")
                .Output("out")
                .Output("scale")
                .Build()
            )
    def forward(self, input):
        if self.keep_aspect_ratio:
            res = _C.dispatch_image_resize_keep_aspect_ratio(
                self._op,
                input,
                target_size=self.target_size,
                min_size=self.min_size,
                max_size=self.max_size,
                resize_longer=self.resize_longer,
                interpolation_type=self.interpolation_type,
            )
            # Outputs 1 and 2 are tensor buffers: per-image (w, h) size and
            # (x, y) scale; convert them to plain tensors.
            new_size = flow.tensor_buffer_to_tensor(
                res[1], dtype=flow.int32, instance_shape=(2,)
            )
            scale = flow.tensor_buffer_to_tensor(
                res[2], dtype=flow.float32, instance_shape=(2,)
            )
        else:
            res = _C.dispatch_image_resize_to_fixed(
                self._op,
                input,
                target_width=self.target_w,
                target_height=self.target_h,
                channels=self.channels,
                data_type=self.dtype,
                interpolation_type=self.interpolation_type,
            )
            # Fixed-size mode has no per-image output size.
            new_size = None
            scale = res[1]
        res_image = res[0]
        return (res_image, scale, new_size)
def raw_decoder(
    input_record,
    blob_name: str,
    shape: Sequence[int],
    dtype: flow.dtype,
    dim1_varying_length: bool = False,
    truncate: bool = False,
    auto_zero_padding: bool = False,
    name: Optional[str] = None,
):
    """Functional wrapper around :class:`OFRecordRawDecoder`.

    ``auto_zero_padding`` is deprecated and folded into ``truncate``;
    ``name`` is deprecated and has no effect (the decoder warns if set).
    """
    if auto_zero_padding:
        print(
            "WARNING: auto_zero_padding has been deprecated, Please use truncate instead.\n "
        )
    # BUG FIX: ``name`` was previously passed positionally, landing in the
    # decoder's ``auto_zero_padding`` slot — a truthy name string silently
    # forced truncate=True and printed a bogus deprecation warning. Pass it
    # by keyword so it reaches the ``name`` parameter.
    return OFRecordRawDecoder(
        blob_name,
        shape,
        dtype,
        dim1_varying_length,
        truncate or auto_zero_padding,
        name=name,
    ).forward(input_record)
def get_ofrecord_handle(
    ofrecord_dir: str,
    batch_size: int = 1,
    data_part_num: int = 1,
    part_name_prefix: str = "part-",
    part_name_suffix_length: int = -1,
    random_shuffle: bool = False,
    shuffle_buffer_size: int = 1024,
    shuffle_after_epoch: bool = False,
    name: Optional[str] = None,
):
    """Construct an :class:`OFRecordReader` and immediately read one batch.

    ``name`` is deprecated and has no effect (the reader warns if set).
    """
    # BUG FIX: ``name`` was previously passed positionally as the ninth
    # argument, landing in OFRecordReader's ``random_seed`` slot (a string
    # where an int is expected). Pass it by keyword so it reaches the
    # ``name`` parameter.
    return OFRecordReader(
        ofrecord_dir,
        batch_size,
        data_part_num,
        part_name_prefix,
        part_name_suffix_length,
        random_shuffle,
        shuffle_buffer_size,
        shuffle_after_epoch,
        name=name,
    )()
class ImageFlip(Module):
    """This operator flips the images.
    The flip code corresponds to the different flip mode:
    0 (0x00): Non Flip
    1 (0x01): Horizontal Flip
    2 (0x02): Vertical Flip
    3 (0x03): Both Horizontal and Vertical Flip
    Args:
        images: The input images.
        flip_code: The flip code.
    Returns:
        The result image.
    For example:
    .. code-block:: python
        >>> import numpy as np
        >>> import oneflow as flow
        >>> import oneflow.nn as nn
        >>> arr = np.array([
        ...    [[[1, 2, 3], [3, 2, 1]],
        ...     [[2, 3, 4], [4, 3, 2]]],
        ...    [[[3, 4, 5], [5, 4, 3]],
        ...     [[4, 5, 6], [6, 5, 4]]]])
        >>> image_tensors = flow.Tensor(arr, device=flow.device("cpu"))
        >>> image_tensor_buffer = flow.tensor_to_tensor_buffer(image_tensors, instance_dims=3)
        >>> flip_code = flow.ones(arr.shape[0], dtype=flow.int8)
        >>> output = nn.image.flip()(image_tensor_buffer, flip_code).numpy()
        >>> output[0]
        array([[[3., 2., 1.],
                [1., 2., 3.]],
        <BLANKLINE>
               [[4., 3., 2.],
                [2., 3., 4.]]], dtype=float32)
        >>> output[1]
        array([[[5., 4., 3.],
                [3., 4., 5.]],
        <BLANKLINE>
               [[6., 5., 4.],
                [4., 5., 6.]]], dtype=float32)
    """
    def __init__(self):
        # Stateless: all work happens in the functional call in forward().
        super().__init__()
    def forward(self, images, flip_code):
        # Delegate to the functional C API; flip_code selects the mode per
        # the table in the class docstring.
        return flow._C.image_flip(images, flip_code=flip_code)
class ImageDecode(Module):
    """Decode encoded image bytes via the ``image_decode`` op, producing
    output with the configured dtype and color space."""

    def __init__(self, dtype: flow.dtype = flow.uint8, color_space: str = "BGR"):
        super().__init__()
        self.color_space = color_space
        self.dtype = dtype
        builder = flow.stateful_op("image_decode").Input("in").Output("out")
        self._op = builder.Build()

    def forward(self, input):
        return _C.dispatch_image_decode(
            self._op, input, color_space=self.color_space, data_type=self.dtype
        )
class ImageNormalize(Module):
    """Apply the ``image_normalize`` op with the configured mean and std."""

    def __init__(self, std: Sequence[float], mean: Sequence[float]):
        super().__init__()
        self.std = std
        self.mean = mean
        builder = flow.stateful_op("image_normalize").Input("in").Output("out")
        self._op = builder.Build()

    def forward(self, input):
        # mean/std were captured at construction time.
        return _C.dispatch_image_normalize(
            self._op, input, mean=self.mean, std=self.std
        )
class COCOReader(Module):
    """Reader module for COCO-format detection datasets.

    Emits seven outputs per batch: image, image_id, image_size, gt_bbox,
    gt_label, gt_segm, gt_segm_index. Runs locally (``device``) or globally
    (``placement`` + ``sbp``).
    """
    def __init__(
        self,
        annotation_file: str,
        image_dir: str,
        batch_size: int,
        shuffle: bool = True,
        random_seed: Optional[int] = None,
        group_by_aspect_ratio: bool = True,
        remove_images_without_annotations: bool = True,
        stride_partition: bool = True,
        device: Union[flow.device, str] = None,
        placement: flow.placement = None,
        sbp: Union[flow.sbp.sbp, List[flow.sbp.sbp]] = None,
    ):
        super().__init__()
        # NOTE(review): these helpers are defined elsewhere in this file;
        # forward() reads self.shuffle/self.random_seed and
        # self.device/self.placement/self.sbp, so they presumably set those
        # attributes — confirm against their definitions.
        _handle_shuffle_args(self, shuffle, random_seed)
        _handle_distributed_args(self, device, placement, sbp)
        self.annotation_file = annotation_file
        self.image_dir = image_dir
        self.batch_size = batch_size
        self.group_by_aspect_ratio = group_by_aspect_ratio
        self.remove_images_without_annotations = remove_images_without_annotations
        self.stride_partition = stride_partition
        self._op = (
            flow.stateful_op("COCOReader")
            .Output("image")
            .Output("image_id")
            .Output("image_size")
            .Output("gt_bbox")
            .Output("gt_label")
            .Output("gt_segm")
            .Output("gt_segm_index")
            .Build()
        )
    def forward(self):
        if self.placement is None:
            # local apply
            outputs = _C.dispatch_coco_reader(
                self._op,
                session_id=flow.current_scope().session_id,
                annotation_file=self.annotation_file,
                image_dir=self.image_dir,
                batch_size=self.batch_size,
                shuffle_after_epoch=self.shuffle,
                random_seed=self.random_seed,
                group_by_ratio=self.group_by_aspect_ratio,
                remove_images_without_annotations=self.remove_images_without_annotations,
                stride_partition=self.stride_partition,
                device=self.device,
            )
        else:
            # consistent apply
            outputs = _C.dispatch_coco_reader(
                self._op,
                session_id=flow.current_scope().session_id,
                annotation_file=self.annotation_file,
                image_dir=self.image_dir,
                batch_size=self.batch_size,
                shuffle_after_epoch=self.shuffle,
                random_seed=self.random_seed,
                group_by_ratio=self.group_by_aspect_ratio,
                remove_images_without_annotations=self.remove_images_without_annotations,
                stride_partition=self.stride_partition,
                placement=self.placement,
                sbp=self.sbp,
            )
        return outputs
class ImageBatchAlign(Module):
    """Batch images into one aligned tensor of the given shape and dtype.

    Args:
        shape: Static output shape passed to the underlying op.
        dtype: Output element type.
        alignment: Memory alignment (in bytes) for the batched tensor.
    """

    def __init__(self, shape: Sequence[int], dtype: flow.dtype, alignment: int):
        super().__init__()
        self.shape = shape
        self.dtype = dtype
        self.alignment = alignment
        self._op = (
            flow.stateful_op("image_batch_align").Input("in").Output("out").Build()
        )

    def forward(self, input):
        aligned = _C.dispatch_image_batch_align(
            self._op,
            input,
            shape=self.shape,
            data_type=self.dtype,
            alignment=self.alignment,
            dynamic_out=False,
        )
        return aligned
class OFRecordBytesDecoder(Module):
    r"""Read one OFRecord feature as a raw bytes tensor.

    The output usually needs further decoding downstream, e.g.
    ``cv2.imdecode()`` for images or ``decode("utf-8")`` for text,
    depending on the task.

    Args:
        blob_name: The name of the target feature in the OFRecord.
        name: Deprecated; has no effect.

    Returns:
        The result Tensor encoded with bytes.

    For example:

    .. code-block:: python

        >>> import numpy as np
        >>> import oneflow as flow
        >>> def example():
        ...     batch_size = 16
        ...     record_reader = flow.nn.OFRecordReader(
        ...         "dataset/",
        ...         batch_size=batch_size,
        ...         part_name_suffix_length=5,
        ...     )
        ...     val_record = record_reader()
        ...     bytesdecoder_img = flow.nn.OFRecordBytesDecoder("encoded")
        ...     image_bytes_batch = bytesdecoder_img(val_record)
        ...     image_bytes = image_bytes_batch.numpy()[0]
        ...     return image_bytes
        ... example()  # doctest: +SKIP
        array([255 216 255 ...  79 255 217], dtype=uint8)
    """

    def __init__(self, blob_name: str, name: Optional[str] = None):
        super().__init__()
        # ``name`` used to select the graph component; it is now ignored.
        if name is not None:
            print("WARNING: name has been deprecated and has NO effect.\n")
        self.blob_name = blob_name
        self._op = (
            flow.stateful_op("ofrecord_bytes_decoder").Input("in").Output("out").Build()
        )

    def forward(self, input):
        return _C.dispatch_ofrecord_bytes_decoder(self._op, input, name=self.blob_name)
class OneRecReader(Module):
    r"""
    nn.OneRecReader reads OneRec-format files into a Tensor carrying a
    TensorBuffer which can be decoded by the decode_onerec API afterwards.

    Parameters:
        files (List[str]): The file list to be read from the filesystem
        batch_size (int): batch size
        shuffle (bool): shuffle or not
        shuffle_mode (str): can be "batch" or "instance"
        random_seed (int, optional): seed used for shuffling; drawn randomly if None
        shuffle_buffer_size (int): shuffle buffer size, defaults to 1024
        shuffle_after_epoch (bool): if shuffle after each epoch
        verify_example (bool): if verify example, defaults to True
        placement (Optional[oneflow._oneflow_internal.placement]): The placement attribute allows you to specify which physical device the output tensor is stored on.
        sbp (Optional[Union[oneflow._oneflow_internal.sbp.sbp, List[oneflow._oneflow_internal.sbp.sbp]]]): When creating a consistent tensor, specify the SBP of the output tensor.

    For example:

    .. code-block:: python

        import oneflow as flow

        files = ['file01.onerec', 'file02.onerec']
        onerec_reader = flow.nn.OneRecReader(files, 10, True, "batch")
        readdata_1 = onerec_reader()
        # then decode readdata_1 ...

    .. code-block:: python

        import oneflow as flow

        files = ['file01.onerec', 'file02.onerec']
        onerec_reader2 = flow.nn.OneRecReader(
            files,
            batch_size=10,
            shuffle=True,
            shuffle_mode="batch",
            placement=flow.env.all_device_placement("cpu") ,
            sbp=[flow.sbp.split(0)],
        )
        readdata_2 = onerec_reader2()
        # then decode readdata_2 ...
    """

    def __init__(
        self,
        files: List[str],
        batch_size: int,
        shuffle: bool,
        shuffle_mode: str,
        random_seed: Optional[int] = None,
        shuffle_buffer_size: int = 1024,
        shuffle_after_epoch: bool = False,
        verify_example: bool = True,
        placement: flow.placement = None,
        sbp: Union[flow.sbp.sbp, List[flow.sbp.sbp]] = None,
    ):
        super().__init__()
        _handle_shuffle_args(self, shuffle, random_seed)
        # No explicit device is supported for this reader, hence device=None.
        _handle_distributed_args(self, None, placement, sbp)
        if shuffle_mode not in ["batch", "instance"]:
            raise ValueError("shuffle_mode should be 'batch' or 'instance'")
        self.files = files
        self.batch_size = batch_size
        self.shuffle_mode = shuffle_mode
        self.shuffle_buffer_size = shuffle_buffer_size
        self.shuffle_after_epoch = shuffle_after_epoch
        self.verify_example = verify_example
        self.op = flow.stateful_op("OneRecReader").Output("out").Build()

    def forward(self):
        # Keyword arguments shared by both execution paths. Previously this
        # nine-argument list was duplicated in the two branches, which made it
        # easy for the branches to drift apart.
        common_kwargs = dict(
            files=self.files,
            batch_size=self.batch_size,
            random_shuffle=self.shuffle,
            shuffle_mode=self.shuffle_mode,
            shuffle_buffer_size=self.shuffle_buffer_size,
            shuffle_after_epoch=self.shuffle_after_epoch,
            random_seed=self.random_seed,
            verify_example=self.verify_example,
        )
        if self.placement is None:
            # Local (single-device) execution.
            return _C.dispatch_onerec_reader(
                self.op, device=self.device, **common_kwargs
            )
        # Global (consistent) execution.
        return _C.dispatch_onerec_reader(
            self.op, placement=self.placement, sbp=self.sbp, **common_kwargs
        )
class GPTIndexedBinDataReader(Module):
    """Reader for Megatron-GPT indexed binary (mmap) datasets.

    Args:
        data_file_prefix: Path prefix of the indexed binary data files.
        seq_length: Token sequence length of each sample.
        num_samples: Total number of samples to draw.
        batch_size: Number of samples per batch.
        dtype: Output element type (default ``flow.int64``).
        shuffle: Whether to shuffle samples. Default: True.
        random_seed: Seed used for shuffling; drawn randomly if None.
        split_sizes: Relative sizes of the dataset splits; defaults to ``(1,)``.
        split_index: Index of the split to read; defaults to 0.
        device: Device for local (non-distributed) execution; mutually
            exclusive with ``placement``.
        placement: Placement for global (consistent) execution.
        sbp: SBP signature(s) matching ``placement``.

    Raises:
        ValueError: If ``split_index`` is out of range for ``split_sizes``.
    """

    def __init__(
        self,
        data_file_prefix: str,
        seq_length: int,
        num_samples: int,
        batch_size: int,
        dtype: flow.dtype = flow.int64,
        shuffle: bool = True,
        random_seed: Optional[int] = None,
        split_sizes: Optional[Sequence[str]] = None,
        split_index: Optional[int] = None,
        device: Union[flow.device, str] = None,
        placement: flow.placement = None,
        sbp: Union[flow.sbp.sbp, List[flow.sbp.sbp]] = None,
    ):
        super().__init__()
        _handle_shuffle_args(self, shuffle, random_seed)
        _handle_distributed_args(self, device, placement, sbp)
        self.data_file_prefix = data_file_prefix
        self.batch_size = batch_size
        self.num_samples = num_samples
        self.seq_length = seq_length
        self.dtype = dtype
        if split_index is None:
            split_index = 0
        self.split_index = split_index
        if split_sizes is None:
            split_sizes = (1,)
        self.split_sizes = split_sizes
        if split_index >= len(split_sizes):
            # BUG FIX: the original called the non-existent str method
            # ``formart``, so this path raised AttributeError instead of the
            # intended ValueError.
            raise ValueError(
                "split index {} is out of range, split_sizes {}".format(
                    split_index, split_sizes
                )
            )
        self.op_ = (
            flow.stateful_op("megatron_gpt_mmap_data_loader").Output("out").Build()
        )

    def forward(self):
        # Keyword arguments shared by both execution paths; previously this
        # list was duplicated in the two branches.
        common_kwargs = dict(
            data_file_prefix=self.data_file_prefix,
            seq_length=self.seq_length,
            label_length=1,
            num_samples=self.num_samples,
            batch_size=self.batch_size,
            dtype=self.dtype,
            shuffle=self.shuffle,
            random_seed=self.random_seed,
            split_sizes=self.split_sizes,
            split_index=self.split_index,
        )
        if self.placement is None:
            # Local (single-device) execution.
            return _C.dispatch_megatron_gpt_mmap_data_loader(
                self.op_, device=self.device, **common_kwargs
            )
        # Global (consistent) execution.
        return _C.dispatch_megatron_gpt_mmap_data_loader(
            self.op_, placement=self.placement, sbp=self.sbp, **common_kwargs
        )
def _handle_distributed_args(module, device, placement, sbp):
    # Configure a reader module for either local or global (consistent)
    # execution. At most one of `device` / `placement` may be given.
    module.placement = placement
    if placement is None:
        # Local path: fall back to CPU when no device was specified.
        module.device = device or flow.device("cpu")
    else:
        if device is not None:
            raise ValueError(
                "The 'device' and 'placement' arguments can't be specified at the same time."
            )
        module.device = None
        # Normalize `sbp` to a tuple and validate every element.
        if isinstance(sbp, (tuple, list)):
            for sbp_item in sbp:
                if not isinstance(sbp_item, flow.sbp.sbp):
                    raise ValueError(f"invalid sbp item: {sbp_item}")
        elif isinstance(sbp, flow.sbp.sbp):
            sbp = (sbp,)
        else:
            # NOTE(review): a global placement thus appears to *require* sbp;
            # sbp=None lands here and raises — confirm this is intended.
            raise ValueError(f"invalid 'sbp' argument: {sbp}")
        # One SBP entry is required per placement-hierarchy dimension.
        if len(sbp) != len(placement.hierarchy):
            raise ValueError(
                "Number of SBP's dimensions of sbp and number of placement hierarchy'dimensions must equal."
                f" {len(sbp)} vs. {len(placement.hierarchy)}"
            )
    # For the local path this stores the caller-supplied sbp unchanged
    # (typically None); readers only consult it on the global path.
    module.sbp = sbp
def _handle_shuffle_args(module, shuffle, random_seed):
    """Store the shuffle flag and a resolved random seed on ``module``.

    When ``random_seed`` is None, a fresh seed is drawn if shuffling is
    enabled; otherwise -1 (meaning "no seed") is stored.
    """
    module.shuffle = shuffle
    if random_seed is not None:
        assert isinstance(random_seed, int)
        module.random_seed = random_seed
    elif shuffle:
        module.random_seed = random.randrange(sys.maxsize)
    else:
        module.random_seed = -1
# Run this module's doctests when executed directly; fail fast on the
# first doctest error.
if __name__ == "__main__":
    import doctest

    doctest.testmod(raise_on_error=True)
| [
"oneflow._oneflow_internal._C.dispatch_coin_flip",
"oneflow._oneflow_internal._C.dispatch_ofrecord_reader",
"oneflow._oneflow_internal._C.dispatch_crop_mirror_normalize_from_uint8",
"oneflow._oneflow_internal._C.dispatch_ofrecord_image_decoder",
"oneflow._oneflow_internal._C.dispatch_tensor_buffer_to_list_o... | [((40022, 40058), 'doctest.testmod', 'doctest.testmod', ([], {'raise_on_error': '(True)'}), '(raise_on_error=True)\n', (40037, 40058), False, 'import doctest\n'), ((5422, 5637), 'oneflow._oneflow_internal._C.dispatch_ofrecord_raw_decoder', '_C.dispatch_ofrecord_raw_decoder', (['self._op', 'input'], {'name': 'self.blob_name', 'shape': 'self.shape', 'data_type': 'self.dtype', 'dim1_varying_length': 'self.dim1_varying_length', 'truncate': '(self.truncate or self.auto_zero_padding)'}), '(self._op, input, name=self.blob_name,\n shape=self.shape, data_type=self.dtype, dim1_varying_length=self.\n dim1_varying_length, truncate=self.truncate or self.auto_zero_padding)\n', (5454, 5637), True, 'import oneflow._oneflow_internal._C as _C\n'), ((13153, 13429), 'oneflow._oneflow_internal._C.dispatch_ofrecord_image_decoder_random_crop', '_C.dispatch_ofrecord_image_decoder_random_crop', (['self._op', 'input'], {'name': 'self.blob_name', 'color_space': 'self.color_space', 'num_attempts': 'self.num_attempts', 'random_area': 'self.random_area', 'random_aspect_ratio': 'self.random_aspect_ratio', 'has_seed': 'self.has_seed', 'seed': 'self.seed'}), '(self._op, input, name=self.\n blob_name, color_space=self.color_space, num_attempts=self.num_attempts,\n random_area=self.random_area, random_aspect_ratio=self.\n random_aspect_ratio, has_seed=self.has_seed, seed=self.seed)\n', (13199, 13429), True, 'import oneflow._oneflow_internal._C as _C\n'), ((13924, 14030), 'oneflow._oneflow_internal._C.dispatch_ofrecord_image_decoder', '_C.dispatch_ofrecord_image_decoder', (['self._op', 'input'], {'name': 'self.blob_name', 'color_space': 'self.color_space'}), '(self._op, input, name=self.blob_name,\n color_space=self.color_space)\n', (13958, 14030), True, 'import oneflow._oneflow_internal._C as _C\n'), ((15081, 15175), 'oneflow._oneflow_internal.oneflow.core.operator.op_conf.ImageDecoderRandomCropResizeOpConf', 
'flow._oneflow_internal.oneflow.core.operator.op_conf.ImageDecoderRandomCropResizeOpConf', ([], {}), '(\n )\n', (15168, 15175), True, 'import oneflow as flow\n'), ((15774, 16254), 'oneflow._oneflow_internal._C.dispatch_image_decoder_random_crop_resize', '_C.dispatch_image_decoder_random_crop_resize', (['self._op', 'input'], {'target_width': 'self.target_width', 'target_height': 'self.target_height', 'num_attempts': 'self.num_attempts', 'seed': 'self.seed', 'random_area_min': 'self.random_area[0]', 'random_area_max': 'self.random_area[1]', 'random_aspect_ratio_min': 'self.random_aspect_ratio[0]', 'random_aspect_ratio_max': 'self.random_aspect_ratio[1]', 'num_workers': 'self.num_workers', 'warmup_size': 'self.warmup_size', 'max_num_pixels': 'self.max_num_pixels'}), '(self._op, input, target_width=\n self.target_width, target_height=self.target_height, num_attempts=self.\n num_attempts, seed=self.seed, random_area_min=self.random_area[0],\n random_area_max=self.random_area[1], random_aspect_ratio_min=self.\n random_aspect_ratio[0], random_aspect_ratio_max=self.\n random_aspect_ratio[1], num_workers=self.num_workers, warmup_size=self.\n warmup_size, max_num_pixels=self.max_num_pixels)\n', (15818, 16254), True, 'import oneflow._oneflow_internal._C as _C\n'), ((17181, 17336), 'oneflow._oneflow_internal._C.dispatch_tensor_buffer_to_list_of_tensors_v2', '_C.dispatch_tensor_buffer_to_list_of_tensors_v2', (['self._op', 'input'], {'out_shapes': 'self.out_shapes', 'out_dtypes': 'self.out_dtypes', 'dynamic_out': 'self.dynamic_out'}), '(self._op, input, out_shapes\n =self.out_shapes, out_dtypes=self.out_dtypes, dynamic_out=self.dynamic_out)\n', (17228, 17336), True, 'import oneflow._oneflow_internal._C as _C\n'), ((26041, 26088), 'oneflow._C.image_flip', 'flow._C.image_flip', (['images'], {'flip_code': 'flip_code'}), '(images, flip_code=flip_code)\n', (26059, 26088), True, 'import oneflow as flow\n'), ((26425, 26522), 'oneflow._oneflow_internal._C.dispatch_image_decode', 
'_C.dispatch_image_decode', (['self._op', 'input'], {'color_space': 'self.color_space', 'data_type': 'self.dtype'}), '(self._op, input, color_space=self.color_space,\n data_type=self.dtype)\n', (26449, 26522), True, 'import oneflow._oneflow_internal._C as _C\n'), ((26852, 26926), 'oneflow._oneflow_internal._C.dispatch_image_normalize', '_C.dispatch_image_normalize', (['self._op', 'input'], {'mean': 'self.mean', 'std': 'self.std'}), '(self._op, input, mean=self.mean, std=self.std)\n', (26879, 26926), True, 'import oneflow._oneflow_internal._C as _C\n'), ((30067, 30203), 'oneflow._oneflow_internal._C.dispatch_image_batch_align', '_C.dispatch_image_batch_align', (['self._op', 'input'], {'shape': 'self.shape', 'data_type': 'self.dtype', 'alignment': 'self.alignment', 'dynamic_out': '(False)'}), '(self._op, input, shape=self.shape, data_type=\n self.dtype, alignment=self.alignment, dynamic_out=False)\n', (30096, 30203), True, 'import oneflow._oneflow_internal._C as _C\n'), ((31960, 32032), 'oneflow._oneflow_internal._C.dispatch_ofrecord_bytes_decoder', '_C.dispatch_ofrecord_bytes_decoder', (['self._op', 'input'], {'name': 'self.blob_name'}), '(self._op, input, name=self.blob_name)\n', (31994, 32032), True, 'import oneflow._oneflow_internal._C as _C\n'), ((3190, 3622), 'oneflow._oneflow_internal._C.dispatch_ofrecord_reader', '_C.dispatch_ofrecord_reader', (['self._op'], {'data_dir': 'self.ofrecord_dir', 'data_part_num': 'self.data_part_num', 'part_name_prefix': 'self.part_name_prefix', 'part_name_suffix_length': 'self.part_name_suffix_length', 'batch_size': 'self.batch_size', 'shuffle_buffer_size': 'self.shuffle_buffer_size', 'random_shuffle': 'self.random_shuffle', 'shuffle_after_epoch': 'self.shuffle_after_epoch', 'seed': 'self.seed', 'sbp': 'self.sbp', 'placement': 'self.placement'}), '(self._op, data_dir=self.ofrecord_dir,\n data_part_num=self.data_part_num, part_name_prefix=self.\n part_name_prefix, part_name_suffix_length=self.part_name_suffix_length,\n 
batch_size=self.batch_size, shuffle_buffer_size=self.\n shuffle_buffer_size, random_shuffle=self.random_shuffle,\n shuffle_after_epoch=self.shuffle_after_epoch, seed=self.seed, sbp=self.\n sbp, placement=self.placement)\n', (3217, 3622), True, 'import oneflow._oneflow_internal._C as _C\n'), ((3835, 4247), 'oneflow._oneflow_internal._C.dispatch_ofrecord_reader', '_C.dispatch_ofrecord_reader', (['self._op'], {'data_dir': 'self.ofrecord_dir', 'data_part_num': 'self.data_part_num', 'part_name_prefix': 'self.part_name_prefix', 'part_name_suffix_length': 'self.part_name_suffix_length', 'batch_size': 'self.batch_size', 'shuffle_buffer_size': 'self.shuffle_buffer_size', 'random_shuffle': 'self.random_shuffle', 'shuffle_after_epoch': 'self.shuffle_after_epoch', 'seed': 'self.seed', 'device': 'self.device'}), '(self._op, data_dir=self.ofrecord_dir,\n data_part_num=self.data_part_num, part_name_prefix=self.\n part_name_prefix, part_name_suffix_length=self.part_name_suffix_length,\n batch_size=self.batch_size, shuffle_buffer_size=self.\n shuffle_buffer_size, random_shuffle=self.random_shuffle,\n shuffle_after_epoch=self.shuffle_after_epoch, seed=self.seed, device=\n self.device)\n', (3862, 4247), True, 'import oneflow._oneflow_internal._C as _C\n'), ((7050, 7229), 'oneflow._oneflow_internal._C.dispatch_coin_flip', '_C.dispatch_coin_flip', (['self._op'], {'batch_size': 'self.batch_size', 'probability': 'self.probability', 'has_seed': 'self.has_seed', 'seed': 'self.seed', 'placement': 'self.placement', 'sbp': 'self.sbp'}), '(self._op, batch_size=self.batch_size, probability=\n self.probability, has_seed=self.has_seed, seed=self.seed, placement=\n self.placement, sbp=self.sbp)\n', (7071, 7229), True, 'import oneflow._oneflow_internal._C as _C\n'), ((7379, 7538), 'oneflow._oneflow_internal._C.dispatch_coin_flip', '_C.dispatch_coin_flip', (['self._op'], {'batch_size': 'self.batch_size', 'probability': 'self.probability', 'has_seed': 'self.has_seed', 'seed': 'self.seed', 'device': 
'self.device'}), '(self._op, batch_size=self.batch_size, probability=\n self.probability, has_seed=self.has_seed, seed=self.seed, device=self.\n device)\n', (7400, 7538), True, 'import oneflow._oneflow_internal._C as _C\n'), ((8290, 8322), 'os.getenv', 'os.getenv', (['"""ONEFLOW_ENABLE_NHWC"""'], {}), "('ONEFLOW_ENABLE_NHWC')\n", (8299, 8322), False, 'import os\n'), ((15382, 15418), 'oneflow.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""ImageGpuDecoder"""'], {}), "('ImageGpuDecoder')\n", (15399, 15418), True, 'import oneflow.framework.id_util as id_util\n'), ((22282, 22511), 'oneflow._oneflow_internal._C.dispatch_image_resize_keep_aspect_ratio', '_C.dispatch_image_resize_keep_aspect_ratio', (['self._op', 'input'], {'target_size': 'self.target_size', 'min_size': 'self.min_size', 'max_size': 'self.max_size', 'resize_longer': 'self.resize_longer', 'interpolation_type': 'self.interpolation_type'}), '(self._op, input, target_size=\n self.target_size, min_size=self.min_size, max_size=self.max_size,\n resize_longer=self.resize_longer, interpolation_type=self.\n interpolation_type)\n', (22324, 22511), True, 'import oneflow._oneflow_internal._C as _C\n'), ((22648, 22723), 'oneflow.tensor_buffer_to_tensor', 'flow.tensor_buffer_to_tensor', (['res[1]'], {'dtype': 'flow.int32', 'instance_shape': '(2,)'}), '(res[1], dtype=flow.int32, instance_shape=(2,))\n', (22676, 22723), True, 'import oneflow as flow\n'), ((22774, 22851), 'oneflow.tensor_buffer_to_tensor', 'flow.tensor_buffer_to_tensor', (['res[2]'], {'dtype': 'flow.float32', 'instance_shape': '(2,)'}), '(res[2], dtype=flow.float32, instance_shape=(2,))\n', (22802, 22851), True, 'import oneflow as flow\n'), ((22914, 23120), 'oneflow._oneflow_internal._C.dispatch_image_resize_to_fixed', '_C.dispatch_image_resize_to_fixed', (['self._op', 'input'], {'target_width': 'self.target_w', 'target_height': 'self.target_h', 'channels': 'self.channels', 'data_type': 'self.dtype', 'interpolation_type': 
'self.interpolation_type'}), '(self._op, input, target_width=self.\n target_w, target_height=self.target_h, channels=self.channels,\n data_type=self.dtype, interpolation_type=self.interpolation_type)\n', (22947, 23120), True, 'import oneflow._oneflow_internal._C as _C\n'), ((34814, 35152), 'oneflow._oneflow_internal._C.dispatch_onerec_reader', '_C.dispatch_onerec_reader', (['self.op'], {'files': 'self.files', 'batch_size': 'self.batch_size', 'random_shuffle': 'self.shuffle', 'shuffle_mode': 'self.shuffle_mode', 'shuffle_buffer_size': 'self.shuffle_buffer_size', 'shuffle_after_epoch': 'self.shuffle_after_epoch', 'random_seed': 'self.random_seed', 'verify_example': 'self.verify_example', 'device': 'self.device'}), '(self.op, files=self.files, batch_size=self.\n batch_size, random_shuffle=self.shuffle, shuffle_mode=self.shuffle_mode,\n shuffle_buffer_size=self.shuffle_buffer_size, shuffle_after_epoch=self.\n shuffle_after_epoch, random_seed=self.random_seed, verify_example=self.\n verify_example, device=self.device)\n', (34839, 35152), True, 'import oneflow._oneflow_internal._C as _C\n'), ((35344, 35702), 'oneflow._oneflow_internal._C.dispatch_onerec_reader', '_C.dispatch_onerec_reader', (['self.op'], {'files': 'self.files', 'batch_size': 'self.batch_size', 'random_shuffle': 'self.shuffle', 'shuffle_mode': 'self.shuffle_mode', 'shuffle_buffer_size': 'self.shuffle_buffer_size', 'shuffle_after_epoch': 'self.shuffle_after_epoch', 'random_seed': 'self.random_seed', 'verify_example': 'self.verify_example', 'placement': 'self.placement', 'sbp': 'self.sbp'}), '(self.op, files=self.files, batch_size=self.\n batch_size, random_shuffle=self.shuffle, shuffle_mode=self.shuffle_mode,\n shuffle_buffer_size=self.shuffle_buffer_size, shuffle_after_epoch=self.\n shuffle_after_epoch, random_seed=self.random_seed, verify_example=self.\n verify_example, placement=self.placement, sbp=self.sbp)\n', (35369, 35702), True, 'import oneflow._oneflow_internal._C as _C\n'), ((37407, 37769), 
'oneflow._oneflow_internal._C.dispatch_megatron_gpt_mmap_data_loader', '_C.dispatch_megatron_gpt_mmap_data_loader', (['self.op_'], {'data_file_prefix': 'self.data_file_prefix', 'seq_length': 'self.seq_length', 'label_length': '(1)', 'num_samples': 'self.num_samples', 'batch_size': 'self.batch_size', 'dtype': 'self.dtype', 'shuffle': 'self.shuffle', 'random_seed': 'self.random_seed', 'split_sizes': 'self.split_sizes', 'split_index': 'self.split_index', 'device': 'self.device'}), '(self.op_, data_file_prefix=self.\n data_file_prefix, seq_length=self.seq_length, label_length=1,\n num_samples=self.num_samples, batch_size=self.batch_size, dtype=self.\n dtype, shuffle=self.shuffle, random_seed=self.random_seed, split_sizes=\n self.split_sizes, split_index=self.split_index, device=self.device)\n', (37448, 37769), True, 'import oneflow._oneflow_internal._C as _C\n'), ((37993, 38380), 'oneflow._oneflow_internal._C.dispatch_megatron_gpt_mmap_data_loader', '_C.dispatch_megatron_gpt_mmap_data_loader', (['self.op_'], {'data_file_prefix': 'self.data_file_prefix', 'seq_length': 'self.seq_length', 'label_length': '(1)', 'num_samples': 'self.num_samples', 'batch_size': 'self.batch_size', 'dtype': 'self.dtype', 'shuffle': 'self.shuffle', 'random_seed': 'self.random_seed', 'split_sizes': 'self.split_sizes', 'split_index': 'self.split_index', 'placement': 'self.placement', 'sbp': 'self.sbp'}), '(self.op_, data_file_prefix=self.\n data_file_prefix, seq_length=self.seq_length, label_length=1,\n num_samples=self.num_samples, batch_size=self.batch_size, dtype=self.\n dtype, shuffle=self.shuffle, random_seed=self.random_seed, split_sizes=\n self.split_sizes, split_index=self.split_index, placement=self.\n placement, sbp=self.sbp)\n', (38034, 38380), True, 'import oneflow._oneflow_internal._C as _C\n'), ((38759, 38777), 'oneflow.device', 'flow.device', (['"""cpu"""'], {}), "('cpu')\n", (38770, 38777), True, 'import oneflow as flow\n'), ((39794, 39823), 'random.randrange', 
'random.randrange', (['sys.maxsize'], {}), '(sys.maxsize)\n', (39810, 39823), False, 'import random\n'), ((2431, 2449), 'oneflow.device', 'flow.device', (['"""cpu"""'], {}), "('cpu')\n", (2442, 2449), True, 'import oneflow as flow\n'), ((6295, 6313), 'oneflow.device', 'flow.device', (['"""cpu"""'], {}), "('cpu')\n", (6306, 6313), True, 'import oneflow as flow\n'), ((9678, 10008), 'oneflow._oneflow_internal._C.dispatch_crop_mirror_normalize_from_uint8', '_C.dispatch_crop_mirror_normalize_from_uint8', (['self._op_uint8_with_mirror', '(input, mirror)'], {'color_space': 'self.color_space', 'output_layout': 'self.output_layout', 'mean': 'self.mean', 'std': 'self.std', 'crop_h': 'self.crop_h', 'crop_w': 'self.crop_w', 'crop_pos_x': 'self.crop_pos_x', 'crop_pos_y': 'self.crop_pos_y', 'output_dtype': 'self.output_dtype'}), '(self._op_uint8_with_mirror, (\n input, mirror), color_space=self.color_space, output_layout=self.\n output_layout, mean=self.mean, std=self.std, crop_h=self.crop_h, crop_w\n =self.crop_w, crop_pos_x=self.crop_pos_x, crop_pos_y=self.crop_pos_y,\n output_dtype=self.output_dtype)\n', (9722, 10008), True, 'import oneflow._oneflow_internal._C as _C\n'), ((10269, 10589), 'oneflow._oneflow_internal._C.dispatch_crop_mirror_normalize_from_uint8', '_C.dispatch_crop_mirror_normalize_from_uint8', (['self._op_uint8_no_mirror', '(input,)'], {'color_space': 'self.color_space', 'output_layout': 'self.output_layout', 'mean': 'self.mean', 'std': 'self.std', 'crop_h': 'self.crop_h', 'crop_w': 'self.crop_w', 'crop_pos_x': 'self.crop_pos_x', 'crop_pos_y': 'self.crop_pos_y', 'output_dtype': 'self.output_dtype'}), '(self._op_uint8_no_mirror, (\n input,), color_space=self.color_space, output_layout=self.output_layout,\n mean=self.mean, std=self.std, crop_h=self.crop_h, crop_w=self.crop_w,\n crop_pos_x=self.crop_pos_x, crop_pos_y=self.crop_pos_y, output_dtype=\n self.output_dtype)\n', (10313, 10589), True, 'import oneflow._oneflow_internal._C as _C\n'), ((10916, 11254), 
'oneflow._oneflow_internal._C.dispatch_crop_mirror_normalize_from_tensorbuffer', '_C.dispatch_crop_mirror_normalize_from_tensorbuffer', (['self._op_buffer_with_mirror', '(input, mirror)'], {'color_space': 'self.color_space', 'output_layout': 'self.output_layout', 'mean': 'self.mean', 'std': 'self.std', 'crop_h': 'self.crop_h', 'crop_w': 'self.crop_w', 'crop_pos_x': 'self.crop_pos_x', 'crop_pos_y': 'self.crop_pos_y', 'output_dtype': 'self.output_dtype'}), '(self.\n _op_buffer_with_mirror, (input, mirror), color_space=self.color_space,\n output_layout=self.output_layout, mean=self.mean, std=self.std, crop_h=\n self.crop_h, crop_w=self.crop_w, crop_pos_x=self.crop_pos_x, crop_pos_y\n =self.crop_pos_y, output_dtype=self.output_dtype)\n', (10967, 11254), True, 'import oneflow._oneflow_internal._C as _C\n'), ((11515, 11844), 'oneflow._oneflow_internal._C.dispatch_crop_mirror_normalize_from_tensorbuffer', '_C.dispatch_crop_mirror_normalize_from_tensorbuffer', (['self._op_buffer_no_mirror', '(input,)'], {'color_space': 'self.color_space', 'output_layout': 'self.output_layout', 'mean': 'self.mean', 'std': 'self.std', 'crop_h': 'self.crop_h', 'crop_w': 'self.crop_w', 'crop_pos_x': 'self.crop_pos_x', 'crop_pos_y': 'self.crop_pos_y', 'output_dtype': 'self.output_dtype'}), '(self.\n _op_buffer_no_mirror, (input,), color_space=self.color_space,\n output_layout=self.output_layout, mean=self.mean, std=self.std, crop_h=\n self.crop_h, crop_w=self.crop_w, crop_pos_x=self.crop_pos_x, crop_pos_y\n =self.crop_pos_y, output_dtype=self.output_dtype)\n', (11566, 11844), True, 'import oneflow._oneflow_internal._C as _C\n'), ((18562, 18586), 'traceback.format_stack', 'traceback.format_stack', ([], {}), '()\n', (18584, 18586), False, 'import traceback\n'), ((19157, 19181), 'traceback.format_stack', 'traceback.format_stack', ([], {}), '()\n', (19179, 19181), False, 'import traceback\n'), ((19869, 19893), 'traceback.format_stack', 'traceback.format_stack', ([], {}), '()\n', (19891, 19893), 
False, 'import traceback\n'), ((20202, 20226), 'traceback.format_stack', 'traceback.format_stack', ([], {}), '()\n', (20224, 20226), False, 'import traceback\n'), ((3052, 3086), 'oneflow.stateful_op', 'flow.stateful_op', (['"""OFRecordReader"""'], {}), "('OFRecordReader')\n", (3068, 3086), True, 'import oneflow as flow\n'), ((6917, 6946), 'oneflow.stateful_op', 'flow.stateful_op', (['"""coin_flip"""'], {}), "('coin_flip')\n", (6933, 6946), True, 'import oneflow as flow\n'), ((28424, 28444), 'oneflow.current_scope', 'flow.current_scope', ([], {}), '()\n', (28442, 28444), True, 'import oneflow as flow\n'), ((29093, 29113), 'oneflow.current_scope', 'flow.current_scope', ([], {}), '()\n', (29111, 29113), True, 'import oneflow as flow\n'), ((34679, 34711), 'oneflow.stateful_op', 'flow.stateful_op', (['"""OneRecReader"""'], {}), "('OneRecReader')\n", (34695, 34711), True, 'import oneflow as flow\n'), ((37245, 37294), 'oneflow.stateful_op', 'flow.stateful_op', (['"""megatron_gpt_mmap_data_loader"""'], {}), "('megatron_gpt_mmap_data_loader')\n", (37261, 37294), True, 'import oneflow as flow\n'), ((5292, 5332), 'oneflow.stateful_op', 'flow.stateful_op', (['"""ofrecord_raw_decoder"""'], {}), "('ofrecord_raw_decoder')\n", (5308, 5332), True, 'import oneflow as flow\n'), ((8985, 9037), 'oneflow.stateful_op', 'flow.stateful_op', (['"""crop_mirror_normalize_from_uint8"""'], {}), "('crop_mirror_normalize_from_uint8')\n", (9001, 9037), True, 'import oneflow as flow\n'), ((9396, 9455), 'oneflow.stateful_op', 'flow.stateful_op', (['"""crop_mirror_normalize_from_tensorbuffer"""'], {}), "('crop_mirror_normalize_from_tensorbuffer')\n", (9412, 9455), True, 'import oneflow as flow\n'), ((12970, 13024), 'oneflow.stateful_op', 'flow.stateful_op', (['"""ofrecord_image_decoder_random_crop"""'], {}), "('ofrecord_image_decoder_random_crop')\n", (12986, 13024), True, 'import oneflow as flow\n'), ((13718, 13760), 'oneflow.stateful_op', 'flow.stateful_op', (['"""ofrecord_image_decoder"""'], {}), 
"('ofrecord_image_decoder')\n", (13734, 13760), True, 'import oneflow as flow\n'), ((16874, 16929), 'oneflow.stateful_op', 'flow.stateful_op', (['"""tensor_buffer_to_list_of_tensors_v2"""'], {}), "('tensor_buffer_to_list_of_tensors_v2')\n", (16890, 16929), True, 'import oneflow as flow\n'), ((26312, 26344), 'oneflow.stateful_op', 'flow.stateful_op', (['"""image_decode"""'], {}), "('image_decode')\n", (26328, 26344), True, 'import oneflow as flow\n'), ((26736, 26771), 'oneflow.stateful_op', 'flow.stateful_op', (['"""image_normalize"""'], {}), "('image_normalize')\n", (26752, 26771), True, 'import oneflow as flow\n'), ((29850, 29887), 'oneflow.stateful_op', 'flow.stateful_op', (['"""image_batch_align"""'], {}), "('image_batch_align')\n", (29866, 29887), True, 'import oneflow as flow\n'), ((31792, 31834), 'oneflow.stateful_op', 'flow.stateful_op', (['"""ofrecord_bytes_decoder"""'], {}), "('ofrecord_bytes_decoder')\n", (31808, 31834), True, 'import oneflow as flow\n'), ((8771, 8823), 'oneflow.stateful_op', 'flow.stateful_op', (['"""crop_mirror_normalize_from_uint8"""'], {}), "('crop_mirror_normalize_from_uint8')\n", (8787, 8823), True, 'import oneflow as flow\n'), ((9173, 9232), 'oneflow.stateful_op', 'flow.stateful_op', (['"""crop_mirror_normalize_from_tensorbuffer"""'], {}), "('crop_mirror_normalize_from_tensorbuffer')\n", (9189, 9232), True, 'import oneflow as flow\n'), ((22024, 22065), 'oneflow.stateful_op', 'flow.stateful_op', (['"""image_resize_to_fixed"""'], {}), "('image_resize_to_fixed')\n", (22040, 22065), True, 'import oneflow as flow\n'), ((21237, 21287), 'oneflow.stateful_op', 'flow.stateful_op', (['"""image_resize_keep_aspect_ratio"""'], {}), "('image_resize_keep_aspect_ratio')\n", (21253, 21287), True, 'import oneflow as flow\n'), ((27951, 27981), 'oneflow.stateful_op', 'flow.stateful_op', (['"""COCOReader"""'], {}), "('COCOReader')\n", (27967, 27981), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow
from oneflow.framework.docstr.utils import add_docstr
# Attach the user-facing docstring to oneflow.linalg.vector_norm; the text
# (option table + doctest examples) mirrors the torch.linalg API docs.
# Fixes: "multidimensonal" typo and the malformed role "attr:`input`"
# (missing leading colon), which would not render as a cross-reference.
add_docstr(
    oneflow.linalg.vector_norm,
    """linalg.vector_norm(input, ord=2, dim=None, keepdim=False, *, dtype=None, out=None) -> Tensor
    Computes a vector norm.
    Supports input of float, double dtypes.
    This function does not necessarily treat multidimensional :attr:`input` as a batch of
    vectors, instead:
    - If :attr:`dim`\\ `= None`, :attr:`input` will be flattened before the norm is computed.
    - If :attr:`dim` is an `int` or a `tuple`, the norm will be computed over these dimensions and the other dimensions will be treated as batch dimensions.
    This behavior is for consistency with :func:`flow.linalg.norm`.
    :attr:`ord` defines the vector norm that is computed. The following norms are supported:
    ====================== ========================================================
    :attr:`ord`              vector norm
    ====================== ========================================================
    `2` (default)            `2`-norm (see below)
    `inf`                    `max(abs(x))`
    `-inf`                   `min(abs(x))`
    `0`                      `sum(x != 0)`
    other `int` or `float`   `sum(abs(x)^{ord})^{(1 / ord)}`
    ====================== ========================================================
    where `inf` refers to `float('inf')`, NumPy's `inf` object, or any equivalent object.
    Args:
        input (Tensor): tensor, flattened by default, but this behavior can be
            controlled using :attr:`dim`.
        ord (int, float, inf, -inf, 'fro', 'nuc', optional): order of norm. Default: `2`
        dim (int, Tuple[int], optional): dimensions over which to compute
            the norm. See above for the behavior when :attr:`dim`\\ `= None`.
            Default: `None`
        keepdim (bool, optional): If set to `True`, the reduced dimensions are retained
            in the result as dimensions with size one. Default: `False`
    Returns:
        A real-valued tensor.
    Examples:
    .. code-block:: python
        >>> import oneflow as flow
        >>> from oneflow import linalg as LA
        >>> import numpy as np
        >>> a = flow.tensor(np.arange(9, dtype=np.float32) - 4)
        >>> a
        tensor([-4., -3., -2., -1., 0., 1., 2., 3., 4.], dtype=oneflow.float32)
        >>> b = a.reshape(3, 3)
        >>> b
        tensor([[-4., -3., -2.],
                [-1., 0., 1.],
                [ 2., 3., 4.]], dtype=oneflow.float32)
        >>> LA.vector_norm(a, ord=3.5)
        tensor(5.4345, dtype=oneflow.float32)
        >>> LA.vector_norm(b, ord=3.5)
        tensor(5.4345, dtype=oneflow.float32)
    """,
)
# Attach the user-facing docstring to oneflow.linalg.matrix_norm.
# Fix: "Support input of" -> "Supports input of" for grammar and for
# consistency with the sibling vector_norm docstring above.
add_docstr(
    oneflow.linalg.matrix_norm,
    """linalg.matrix_norm(input, ord='fro', dim=(-2, -1), keepdim=False, *, dtype=None, out=None) -> Tensor
    Computes a matrix norm.
    Supports input of float, double, cfloat and cdouble dtypes.
    Also supports batches of matrices: the norm will be computed over the
    dimensions specified by the 2-tuple :attr:`dim` and the other dimensions will
    be treated as batch dimensions. The output will have the same batch dimensions.
    :attr:`ord` defines the matrix norm that is computed. The following norms are supported:
    ====================== ========================================================
    :attr:`ord`              matrix norm
    ====================== ========================================================
    `'fro'` (default)        Frobenius norm
    `'nuc'`                  -- not supported yet --
    `inf`                    `max(sum(abs(x), dim=1))`
    `-inf`                   `min(sum(abs(x), dim=1))`
    `1`                      `max(sum(abs(x), dim=0))`
    `-1`                     `min(sum(abs(x), dim=0))`
    `2`                      -- not supported yet --
    `-2`                     -- not supported yet --
    ====================== ========================================================
    where `inf` refers to `float('inf')`, NumPy's `inf` object, or any equivalent object.
    Args:
        input (Tensor): tensor with two or more dimensions. By default its
            shape is interpreted as `(*, m, n)` where `*` is zero or more
            batch dimensions, but this behavior can be controlled using :attr:`dim`.
        ord (int, inf, -inf, 'fro', 'nuc', optional): order of norm. Default: `'fro'`
        dim (Tuple[int, int], optional): dimensions over which to compute the norm. Default: `(-2, -1)`
        keepdim (bool, optional): If set to `True`, the reduced dimensions are retained
            in the result as dimensions with size one. Default: `False`
    Returns:
        A real-valued tensor.
    Examples:
    .. code-block:: python
        >>> import oneflow as flow
        >>> from oneflow import linalg as LA
        >>> import numpy as np
        >>> a = flow.tensor(np.arange(9, dtype=np.float32)).reshape(3,3)
        >>> a
        tensor([[0., 1., 2.],
                [3., 4., 5.],
                [6., 7., 8.]], dtype=oneflow.float32)
        >>> LA.matrix_norm(a)
        tensor(14.2829, dtype=oneflow.float32)
        >>> LA.matrix_norm(a, ord=-1)
        tensor(9., dtype=oneflow.float32)
        >>> b = a.expand(2, -1, -1)
        >>> b
        tensor([[[0., 1., 2.],
                 [3., 4., 5.],
                 [6., 7., 8.]],
        <BLANKLINE>
                [[0., 1., 2.],
                 [3., 4., 5.],
                 [6., 7., 8.]]], dtype=oneflow.float32)
        >>> LA.matrix_norm(b, dim=(0, 2))
        tensor([ 3.1623, 10.0000, 17.2627], dtype=oneflow.float32)
    """,
)
# Attach the user-facing docstring to oneflow.linalg.norm.
# Fix: garbled phrase "the norm is calculated on of the absolute values"
# -> "on the absolute values".
add_docstr(
    oneflow.linalg.norm,
    """linalg.norm(input, ord=None, dim=None, keepdim=False, *, dtype=None, out=None) -> Tensor
    Returns the matrix norm or vector norm of a given tensor.
    This function can calculate one of eight different types of matrix norms, or one
    of an infinite number of vector norms, depending on both the number of reduction
    dimensions and the value of the `ord` parameter.
    Args:
        input (Tensor): The input tensor. If dim is None, input must be 1-D or 2-D, unless :attr:`ord`
            is None. If both :attr:`dim` and :attr:`ord` are None, the 2-norm of the input flattened to 1-D
            will be returned. Its data type must be either a floating point or complex type. For complex
            inputs, the norm is calculated on the absolute values of each element. If the input is
            complex and neither :attr:`dtype` nor :attr:`out` is specified, the result's data type will
            be the corresponding floating point type (e.g. float if :attr:`input` is complexfloat).
        ord (int, inf, -inf, 'fro', 'nuc', optional): order of norm. Default: `'None'`
            The following norms can be calculated:
            ============== ============================ =================================
            :attr:`ord`    norm for matrices            norm for vectors
            ============== ============================ =================================
            None           Frobenius norm               `2`-norm
            `'fro'`        Frobenius norm               -- not supported --
            `'nuc'`        -- not supported yet --      -- not supported --
            `inf`          `max(sum(abs(x), dim=1))`    `max(abs(x))`
            `-inf`         `min(sum(abs(x), dim=1))`    `min(abs(x))`
            `0`            -- not supported --          `sum(x != 0)`
            `1`            `max(sum(abs(x), dim=0))`    as below
            `-1`           `min(sum(abs(x), dim=0))`    as below
            `2`            -- not supported yet --      as below
            `-2`           -- not supported yet --      as below
            other          -- not supported --          `sum(abs(x)^{ord})^{(1 / ord)}`
            ============== ============================ =================================
            where `inf` refers to `float('inf')`, NumPy's `inf` object, or any equivalent object.
        dim (int, 2-tuple of ints, 2-list of ints, optional): If :attr:`dim` is an int,
            vector norm will be calculated over the specified dimension. If :attr:`dim`
            is a 2-tuple of ints, matrix norm will be calculated over the specified
            dimensions. If :attr:`dim` is None, matrix norm will be calculated
            when the input tensor has two dimensions, and vector norm will be
            calculated when the input tensor has one dimension. Default: ``None``
        keepdim (bool, optional): If set to True, the reduced dimensions are retained
            in the result as dimensions with size one. Default: ``False``
        out (Tensor, optional): The output tensor.
    For example:
    .. code-block:: python
        >>> import oneflow as flow
        >>> from oneflow import linalg as LA
        >>> import numpy as np
        >>> a = flow.tensor(np.arange(9, dtype=np.float32) - 4)
        >>> a
        tensor([-4., -3., -2., -1., 0., 1., 2., 3., 4.], dtype=oneflow.float32)
        >>> b = a.reshape(3, 3)
        >>> b
        tensor([[-4., -3., -2.],
                [-1., 0., 1.],
                [ 2., 3., 4.]], dtype=oneflow.float32)
        >>> LA.norm(a)
        tensor(7.7460, dtype=oneflow.float32)
        >>> LA.norm(b)
        tensor(7.7460, dtype=oneflow.float32)
        >>> LA.norm(b, 'fro')
        tensor(7.7460, dtype=oneflow.float32)
        >>> LA.norm(a, float('inf'))
        tensor(4., dtype=oneflow.float32)
        >>> LA.norm(b, float('inf'))
        tensor(9., dtype=oneflow.float32)
        >>> LA.norm(a, -float('inf'))
        tensor(0., dtype=oneflow.float32)
        >>> LA.norm(b, -float('inf'))
        tensor(2., dtype=oneflow.float32)
        >>> LA.norm(a, 1)
        tensor(20., dtype=oneflow.float32)
        >>> LA.norm(b, 1)
        tensor(7., dtype=oneflow.float32)
        >>> LA.norm(a, -1)
        tensor(0., dtype=oneflow.float32)
        >>> LA.norm(b, -1)
        tensor(6., dtype=oneflow.float32)
        >>> LA.norm(a, 2)
        tensor(7.7460, dtype=oneflow.float32)
        >>> LA.norm(a, -2)
        tensor(0., dtype=oneflow.float32)
        >>> LA.norm(a, 3)
        tensor(5.8480, dtype=oneflow.float32)
        >>> LA.norm(a, -3)
        tensor(0., dtype=oneflow.float32)
        >>> c = flow.tensor([[1., 2., 3.],
        ...                  [-1, 1, 4]])
        >>> LA.norm(c, dim=0)
        tensor([1.4142, 2.2361, 5.0000], dtype=oneflow.float32)
        >>> LA.norm(c, dim=1, keepdim = True)
        tensor([[3.7417],
                [4.2426]], dtype=oneflow.float32)
        >>> LA.norm(c, ord=1, dim=1)
        tensor([6., 6.], dtype=oneflow.float32)
    """,
)
add_docstr(
oneflow._C.normalize,
"""nn.functional.normalize(input: Tensor, p: float=2.0, dim: int=0, epsilon: float=1e-12) -> Tensor
Performs :math:`L_p` normalization of inputs over specified dimension
For a tensor :attr:`input` of sizes :math:`(n_0, ..., n_{dim}, ..., n_k)`, each
:math:`n_{dim}` -element vector :math:`v` along dimension :attr:`dim` is transformed as:
.. math::
v = \\frac{v}{\max(\\lVert v \\rVert_p, \\epsilon)}.
With the default arguments it uses the Euclidean norm over vectors along dimension :math:`1` for normalization.
But note that the gradient calculation of the input tensor has different results on different frameworks
when `input.shape[dim] = 1`.
Args:
input (oneflow.Tensor): input tensor of any shape
p (float): the exponent value in the norm formulation. Default: 2
dim (int): the dimension to reduce. Default: 1
eps (float): small value to avoid division by zero. Default: 1e-12
For example:
.. code-block:: python
>>> import oneflow as flow
>>> x = flow.tensor([[1, 2], [3, 4]], dtype=flow.float32)
>>> out = flow.nn.functional.normalize(x, 2, 0)
>>> out
tensor([[0.3162, 0.4472],
[0.9487, 0.8944]], dtype=oneflow.float32)
>>> out = flow.nn.functional.normalize(x, 2, 1)
>>> out
tensor([[0.4472, 0.8944],
[0.6000, 0.8000]], dtype=oneflow.float32)
""",
)
| [
"oneflow.framework.docstr.utils.add_docstr"
] | [((661, 3311), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.linalg.vector_norm', '"""linalg.vector_norm(input, ord=2, dim=None, keepdim=False, *, dtype=None, out=None) -> Tensor\n\n Computes a vector norm.\n\n Supports input of float, double dtypes.\n\n This function does not necessarily treat multidimensonal attr:`input` as a batch of\n vectors, instead:\n\n - If :attr:`dim`\\\\ `= None`, :attr:`input` will be flattened before the norm is computed.\n - If :attr:`dim` is an `int` or a `tuple`, the norm will be computed over these dimensions and the other dimensions will be treated as batch dimensions.\n\n This behavior is for consistency with :func:`flow.linalg.norm`.\n\n :attr:`ord` defines the vector norm that is computed. The following norms are supported:\n\n ====================== ========================================================\n :attr:`ord` vector norm\n ====================== ========================================================\n `2` (default) `2`-norm (see below)\n `inf` `max(abs(x))`\n `-inf` `min(abs(x))`\n `0` `sum(x != 0)`\n other `int` or `float` `sum(abs(x)^{ord})^{(1 / ord)}`\n ====================== ========================================================\n\n where `inf` refers to `float(\'inf\')`, NumPy\'s `inf` object, or any equivalent object.\n\n Args:\n input (Tensor): tensor, flattened by default, but this behavior can be\n controlled using :attr:`dim`.\n ord (int, float, inf, -inf, \'fro\', \'nuc\', optional): order of norm. Default: `2`\n dim (int, Tuple[int], optional): dimensions over which to compute\n the norm. See above for the behavior when :attr:`dim`\\\\ `= None`.\n Default: `None`\n keepdim (bool, optional): If set to `True`, the reduced dimensions are retained\n in the result as dimensions with size one. Default: `False`\n\n Returns:\n A real-valued tensor.\n\n Examples:\n\n .. 
code-block:: python\n\n >>> import oneflow as flow\n >>> from oneflow import linalg as LA\n >>> import numpy as np\n >>> a = flow.tensor(np.arange(9, dtype=np.float32) - 4)\n >>> a\n tensor([-4., -3., -2., -1., 0., 1., 2., 3., 4.], dtype=oneflow.float32)\n >>> b = a.reshape(3, 3)\n >>> b\n tensor([[-4., -3., -2.],\n [-1., 0., 1.],\n [ 2., 3., 4.]], dtype=oneflow.float32)\n >>> LA.vector_norm(a, ord=3.5)\n tensor(5.4345, dtype=oneflow.float32)\n >>> LA.vector_norm(b, ord=3.5)\n tensor(5.4345, dtype=oneflow.float32)\n \n """'], {}), '(oneflow.linalg.vector_norm,\n """linalg.vector_norm(input, ord=2, dim=None, keepdim=False, *, dtype=None, out=None) -> Tensor\n\n Computes a vector norm.\n\n Supports input of float, double dtypes.\n\n This function does not necessarily treat multidimensonal attr:`input` as a batch of\n vectors, instead:\n\n - If :attr:`dim`\\\\ `= None`, :attr:`input` will be flattened before the norm is computed.\n - If :attr:`dim` is an `int` or a `tuple`, the norm will be computed over these dimensions and the other dimensions will be treated as batch dimensions.\n\n This behavior is for consistency with :func:`flow.linalg.norm`.\n\n :attr:`ord` defines the vector norm that is computed. The following norms are supported:\n\n ====================== ========================================================\n :attr:`ord` vector norm\n ====================== ========================================================\n `2` (default) `2`-norm (see below)\n `inf` `max(abs(x))`\n `-inf` `min(abs(x))`\n `0` `sum(x != 0)`\n other `int` or `float` `sum(abs(x)^{ord})^{(1 / ord)}`\n ====================== ========================================================\n\n where `inf` refers to `float(\'inf\')`, NumPy\'s `inf` object, or any equivalent object.\n\n Args:\n input (Tensor): tensor, flattened by default, but this behavior can be\n controlled using :attr:`dim`.\n ord (int, float, inf, -inf, \'fro\', \'nuc\', optional): order of norm. 
Default: `2`\n dim (int, Tuple[int], optional): dimensions over which to compute\n the norm. See above for the behavior when :attr:`dim`\\\\ `= None`.\n Default: `None`\n keepdim (bool, optional): If set to `True`, the reduced dimensions are retained\n in the result as dimensions with size one. Default: `False`\n\n Returns:\n A real-valued tensor.\n\n Examples:\n\n .. code-block:: python\n\n >>> import oneflow as flow\n >>> from oneflow import linalg as LA\n >>> import numpy as np\n >>> a = flow.tensor(np.arange(9, dtype=np.float32) - 4)\n >>> a\n tensor([-4., -3., -2., -1., 0., 1., 2., 3., 4.], dtype=oneflow.float32)\n >>> b = a.reshape(3, 3)\n >>> b\n tensor([[-4., -3., -2.],\n [-1., 0., 1.],\n [ 2., 3., 4.]], dtype=oneflow.float32)\n >>> LA.vector_norm(a, ord=3.5)\n tensor(5.4345, dtype=oneflow.float32)\n >>> LA.vector_norm(b, ord=3.5)\n tensor(5.4345, dtype=oneflow.float32)\n \n """\n )\n', (671, 3311), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((3316, 6265), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.linalg.matrix_norm', '"""linalg.matrix_norm(input, ord=\'fro\', dim=(-2, -1), keepdim=False, *, dtype=None, out=None) -> Tensor\n\n Computes a matrix norm.\n\n Support input of float, double, cfloat and cdouble dtypes.\n Also supports batches of matrices: the norm will be computed over the\n dimensions specified by the 2-tuple :attr:`dim` and the other dimensions will\n be treated as batch dimensions. The output will have the same batch dimensions.\n\n :attr:`ord` defines the matrix norm that is computed. 
The following norms are supported:\n\n ====================== ========================================================\n :attr:`ord` matrix norm\n ====================== ========================================================\n `\'fro\'` (default) Frobenius norm\n `\'nuc\'` -- not supported yet --\n `inf` `max(sum(abs(x), dim=1))`\n `-inf` `min(sum(abs(x), dim=1))`\n `1` `max(sum(abs(x), dim=0))`\n `-1` `min(sum(abs(x), dim=0))`\n `2` -- not supported yet --\n `-2` -- not supported yet --\n ====================== ========================================================\n\n where `inf` refers to `float(\'inf\')`, NumPy\'s `inf` object, or any equivalent object.\n\n Args:\n input (Tensor): tensor with two or more dimensions. By default its\n shape is interpreted as `(*, m, n)` where `*` is zero or more\n batch dimensions, but this behavior can be controlled using :attr:`dim`.\n ord (int, inf, -inf, \'fro\', \'nuc\', optional): order of norm. Default: `\'fro\'`\n dim (Tuple[int, int], optional): dimensions over which to compute the norm. Default: `(-2, -1)`\n keepdim (bool, optional): If set to `True`, the reduced dimensions are retained\n in the result as dimensions with size one. Default: `False`\n\n Returns:\n A real-valued tensor.\n\n Examples:\n\n .. 
code-block:: python\n\n >>> import oneflow as flow\n >>> from oneflow import linalg as LA\n >>> import numpy as np\n >>> a = flow.tensor(np.arange(9, dtype=np.float32)).reshape(3,3)\n >>> a\n tensor([[0., 1., 2.],\n [3., 4., 5.],\n [6., 7., 8.]], dtype=oneflow.float32)\n >>> LA.matrix_norm(a)\n tensor(14.2829, dtype=oneflow.float32)\n >>> LA.matrix_norm(a, ord=-1)\n tensor(9., dtype=oneflow.float32)\n >>> b = a.expand(2, -1, -1)\n >>> b\n tensor([[[0., 1., 2.],\n [3., 4., 5.],\n [6., 7., 8.]],\n <BLANKLINE>\n [[0., 1., 2.],\n [3., 4., 5.],\n [6., 7., 8.]]], dtype=oneflow.float32)\n >>> LA.matrix_norm(b, dim=(0, 2))\n tensor([ 3.1623, 10.0000, 17.2627], dtype=oneflow.float32)\n \n """'], {}), '(oneflow.linalg.matrix_norm,\n """linalg.matrix_norm(input, ord=\'fro\', dim=(-2, -1), keepdim=False, *, dtype=None, out=None) -> Tensor\n\n Computes a matrix norm.\n\n Support input of float, double, cfloat and cdouble dtypes.\n Also supports batches of matrices: the norm will be computed over the\n dimensions specified by the 2-tuple :attr:`dim` and the other dimensions will\n be treated as batch dimensions. The output will have the same batch dimensions.\n\n :attr:`ord` defines the matrix norm that is computed. The following norms are supported:\n\n ====================== ========================================================\n :attr:`ord` matrix norm\n ====================== ========================================================\n `\'fro\'` (default) Frobenius norm\n `\'nuc\'` -- not supported yet --\n `inf` `max(sum(abs(x), dim=1))`\n `-inf` `min(sum(abs(x), dim=1))`\n `1` `max(sum(abs(x), dim=0))`\n `-1` `min(sum(abs(x), dim=0))`\n `2` -- not supported yet --\n `-2` -- not supported yet --\n ====================== ========================================================\n\n where `inf` refers to `float(\'inf\')`, NumPy\'s `inf` object, or any equivalent object.\n\n Args:\n input (Tensor): tensor with two or more dimensions. 
By default its\n shape is interpreted as `(*, m, n)` where `*` is zero or more\n batch dimensions, but this behavior can be controlled using :attr:`dim`.\n ord (int, inf, -inf, \'fro\', \'nuc\', optional): order of norm. Default: `\'fro\'`\n dim (Tuple[int, int], optional): dimensions over which to compute the norm. Default: `(-2, -1)`\n keepdim (bool, optional): If set to `True`, the reduced dimensions are retained\n in the result as dimensions with size one. Default: `False`\n\n Returns:\n A real-valued tensor.\n\n Examples:\n\n .. code-block:: python\n\n >>> import oneflow as flow\n >>> from oneflow import linalg as LA\n >>> import numpy as np\n >>> a = flow.tensor(np.arange(9, dtype=np.float32)).reshape(3,3)\n >>> a\n tensor([[0., 1., 2.],\n [3., 4., 5.],\n [6., 7., 8.]], dtype=oneflow.float32)\n >>> LA.matrix_norm(a)\n tensor(14.2829, dtype=oneflow.float32)\n >>> LA.matrix_norm(a, ord=-1)\n tensor(9., dtype=oneflow.float32)\n >>> b = a.expand(2, -1, -1)\n >>> b\n tensor([[[0., 1., 2.],\n [3., 4., 5.],\n [6., 7., 8.]],\n <BLANKLINE>\n [[0., 1., 2.],\n [3., 4., 5.],\n [6., 7., 8.]]], dtype=oneflow.float32)\n >>> LA.matrix_norm(b, dim=(0, 2))\n tensor([ 3.1623, 10.0000, 17.2627], dtype=oneflow.float32)\n \n """\n )\n', (3326, 6265), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((6270, 11410), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.linalg.norm', '"""linalg.norm(input, ord=None, dim=None, keepdim=False, *, dtype=None, out=None) -> Tensor\n Returns the matrix norm or vector norm of a given tensor.\n\n This function can calculate one of eight different types of matrix norms, or one\n of an infinite number of vector norms, depending on both the number of reduction\n dimensions and the value of the `ord` parameter.\n\n Args:\n input (Tensor): The input tensor. If dim is None, input must be 1-D or 2-D, unless :attr:`ord`\n is None. 
If both :attr:`dim` and :attr:`ord` are None, the 2-norm of the input flattened to 1-D\n will be returned. Its data type must be either a floating point or complex type. For complex\n inputs, the norm is calculated on of the absolute values of each element. If the input is\n complex and neither :attr:`dtype` nor :attr:`out` is specified, the result\'s data type will\n be the corresponding floating point type (e.g. float if :attr:`input` is complexfloat).\n\n ord (int, inf, -inf, \'fro\', \'nuc\', optional): order of norm. Default: `\'None\'`\n The following norms can be calculated:\n\n ============== ============================ =================================\n :attr:`ord` norm for matrices norm for vectors\n ============== ============================ =================================\n None Frobenius norm `2`-norm\n `\'fro\'` Frobenius norm -- not supported --\n `\'nuc\'` -- not supported yet -- -- not supported --\n `inf` `max(sum(abs(x), dim=1))` `max(abs(x))`\n `-inf` `min(sum(abs(x), dim=1))` `min(abs(x))`\n `0` -- not supported -- `sum(x != 0)`\n `1` `max(sum(abs(x), dim=0))` as below\n `-1` `min(sum(abs(x), dim=0))` as below\n `2` -- not supported yet -- as below\n `-2` -- not supported yet -- as below\n other -- not supported -- `sum(abs(x)^{ord})^{(1 / ord)}`\n ============== ============================ =================================\n\n where `inf` refers to `float(\'inf\')`, NumPy\'s `inf` object, or any equivalent object.\n\n dim (int, 2-tuple of ints, 2-list of ints, optional): If :attr:`dim` is an int,\n vector norm will be calculated over the specified dimension. If :attr:`dim`\n is a 2-tuple of ints, matrix norm will be calculated over the specified\n dimensions. If :attr:`dim` is None, matrix norm will be calculated\n when the input tensor has two dimensions, and vector norm will be\n calculated when the input tensor has one dimension. 
Default: ``None``\n\n keepdim (bool, optional): If set to True, the reduced dimensions are retained\n in the result as dimensions with size one. Default: ``False``\n\n out (Tensor, optional): The output tensor.\n\n For example:\n\n .. code-block:: python\n\n >>> import oneflow as flow\n >>> from oneflow import linalg as LA\n >>> import numpy as np\n >>> a = flow.tensor(np.arange(9, dtype=np.float32) - 4)\n >>> a\n tensor([-4., -3., -2., -1., 0., 1., 2., 3., 4.], dtype=oneflow.float32)\n >>> b = a.reshape(3, 3)\n >>> b\n tensor([[-4., -3., -2.],\n [-1., 0., 1.],\n [ 2., 3., 4.]], dtype=oneflow.float32)\n >>> LA.norm(a)\n tensor(7.7460, dtype=oneflow.float32)\n >>> LA.norm(b)\n tensor(7.7460, dtype=oneflow.float32)\n >>> LA.norm(b, \'fro\')\n tensor(7.7460, dtype=oneflow.float32)\n >>> LA.norm(a, float(\'inf\'))\n tensor(4., dtype=oneflow.float32)\n >>> LA.norm(b, float(\'inf\'))\n tensor(9., dtype=oneflow.float32)\n >>> LA.norm(a, -float(\'inf\'))\n tensor(0., dtype=oneflow.float32)\n >>> LA.norm(b, -float(\'inf\'))\n tensor(2., dtype=oneflow.float32)\n >>> LA.norm(a, 1)\n tensor(20., dtype=oneflow.float32)\n >>> LA.norm(b, 1)\n tensor(7., dtype=oneflow.float32)\n >>> LA.norm(a, -1)\n tensor(0., dtype=oneflow.float32)\n >>> LA.norm(b, -1)\n tensor(6., dtype=oneflow.float32)\n >>> LA.norm(a, 2)\n tensor(7.7460, dtype=oneflow.float32)\n >>> LA.norm(a, -2)\n tensor(0., dtype=oneflow.float32)\n >>> LA.norm(a, 3)\n tensor(5.8480, dtype=oneflow.float32)\n >>> LA.norm(a, -3)\n tensor(0., dtype=oneflow.float32)\n >>> c = flow.tensor([[1., 2., 3.],\n ... 
[-1, 1, 4]])\n >>> LA.norm(c, dim=0)\n tensor([1.4142, 2.2361, 5.0000], dtype=oneflow.float32)\n >>> LA.norm(c, dim=1, keepdim = True)\n tensor([[3.7417],\n [4.2426]], dtype=oneflow.float32)\n >>> LA.norm(c, ord=1, dim=1)\n tensor([6., 6.], dtype=oneflow.float32)\n\n """'], {}), '(oneflow.linalg.norm,\n """linalg.norm(input, ord=None, dim=None, keepdim=False, *, dtype=None, out=None) -> Tensor\n Returns the matrix norm or vector norm of a given tensor.\n\n This function can calculate one of eight different types of matrix norms, or one\n of an infinite number of vector norms, depending on both the number of reduction\n dimensions and the value of the `ord` parameter.\n\n Args:\n input (Tensor): The input tensor. If dim is None, input must be 1-D or 2-D, unless :attr:`ord`\n is None. If both :attr:`dim` and :attr:`ord` are None, the 2-norm of the input flattened to 1-D\n will be returned. Its data type must be either a floating point or complex type. For complex\n inputs, the norm is calculated on of the absolute values of each element. If the input is\n complex and neither :attr:`dtype` nor :attr:`out` is specified, the result\'s data type will\n be the corresponding floating point type (e.g. float if :attr:`input` is complexfloat).\n\n ord (int, inf, -inf, \'fro\', \'nuc\', optional): order of norm. 
Default: `\'None\'`\n The following norms can be calculated:\n\n ============== ============================ =================================\n :attr:`ord` norm for matrices norm for vectors\n ============== ============================ =================================\n None Frobenius norm `2`-norm\n `\'fro\'` Frobenius norm -- not supported --\n `\'nuc\'` -- not supported yet -- -- not supported --\n `inf` `max(sum(abs(x), dim=1))` `max(abs(x))`\n `-inf` `min(sum(abs(x), dim=1))` `min(abs(x))`\n `0` -- not supported -- `sum(x != 0)`\n `1` `max(sum(abs(x), dim=0))` as below\n `-1` `min(sum(abs(x), dim=0))` as below\n `2` -- not supported yet -- as below\n `-2` -- not supported yet -- as below\n other -- not supported -- `sum(abs(x)^{ord})^{(1 / ord)}`\n ============== ============================ =================================\n\n where `inf` refers to `float(\'inf\')`, NumPy\'s `inf` object, or any equivalent object.\n\n dim (int, 2-tuple of ints, 2-list of ints, optional): If :attr:`dim` is an int,\n vector norm will be calculated over the specified dimension. If :attr:`dim`\n is a 2-tuple of ints, matrix norm will be calculated over the specified\n dimensions. If :attr:`dim` is None, matrix norm will be calculated\n when the input tensor has two dimensions, and vector norm will be\n calculated when the input tensor has one dimension. Default: ``None``\n\n keepdim (bool, optional): If set to True, the reduced dimensions are retained\n in the result as dimensions with size one. Default: ``False``\n\n out (Tensor, optional): The output tensor.\n\n For example:\n\n .. 
code-block:: python\n\n >>> import oneflow as flow\n >>> from oneflow import linalg as LA\n >>> import numpy as np\n >>> a = flow.tensor(np.arange(9, dtype=np.float32) - 4)\n >>> a\n tensor([-4., -3., -2., -1., 0., 1., 2., 3., 4.], dtype=oneflow.float32)\n >>> b = a.reshape(3, 3)\n >>> b\n tensor([[-4., -3., -2.],\n [-1., 0., 1.],\n [ 2., 3., 4.]], dtype=oneflow.float32)\n >>> LA.norm(a)\n tensor(7.7460, dtype=oneflow.float32)\n >>> LA.norm(b)\n tensor(7.7460, dtype=oneflow.float32)\n >>> LA.norm(b, \'fro\')\n tensor(7.7460, dtype=oneflow.float32)\n >>> LA.norm(a, float(\'inf\'))\n tensor(4., dtype=oneflow.float32)\n >>> LA.norm(b, float(\'inf\'))\n tensor(9., dtype=oneflow.float32)\n >>> LA.norm(a, -float(\'inf\'))\n tensor(0., dtype=oneflow.float32)\n >>> LA.norm(b, -float(\'inf\'))\n tensor(2., dtype=oneflow.float32)\n >>> LA.norm(a, 1)\n tensor(20., dtype=oneflow.float32)\n >>> LA.norm(b, 1)\n tensor(7., dtype=oneflow.float32)\n >>> LA.norm(a, -1)\n tensor(0., dtype=oneflow.float32)\n >>> LA.norm(b, -1)\n tensor(6., dtype=oneflow.float32)\n >>> LA.norm(a, 2)\n tensor(7.7460, dtype=oneflow.float32)\n >>> LA.norm(a, -2)\n tensor(0., dtype=oneflow.float32)\n >>> LA.norm(a, 3)\n tensor(5.8480, dtype=oneflow.float32)\n >>> LA.norm(a, -3)\n tensor(0., dtype=oneflow.float32)\n >>> c = flow.tensor([[1., 2., 3.],\n ... 
[-1, 1, 4]])\n >>> LA.norm(c, dim=0)\n tensor([1.4142, 2.2361, 5.0000], dtype=oneflow.float32)\n >>> LA.norm(c, dim=1, keepdim = True)\n tensor([[3.7417],\n [4.2426]], dtype=oneflow.float32)\n >>> LA.norm(c, ord=1, dim=1)\n tensor([6., 6.], dtype=oneflow.float32)\n\n """\n )\n', (6280, 11410), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((11414, 12905), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow._C.normalize', '"""nn.functional.normalize(input: Tensor, p: float=2.0, dim: int=0, epsilon: float=1e-12) -> Tensor\n\n Performs :math:`L_p` normalization of inputs over specified dimension\n\n For a tensor :attr:`input` of sizes :math:`(n_0, ..., n_{dim}, ..., n_k)`, each\n :math:`n_{dim}` -element vector :math:`v` along dimension :attr:`dim` is transformed as:\n\n .. math::\n v = \\\\frac{v}{\\\\max(\\\\lVert v \\\\rVert_p, \\\\epsilon)}.\n\n With the default arguments it uses the Euclidean norm over vectors along dimension :math:`1` for normalization.\n\n But note that the gradient calculation of the input tensor has different results on different frameworks\n when `input.shape[dim] = 1`.\n\n Args:\n input (oneflow.Tensor): input tensor of any shape\n p (float): the exponent value in the norm formulation. Default: 2\n dim (int): the dimension to reduce. Default: 1\n eps (float): small value to avoid division by zero. Default: 1e-12 \n\n For example:\n\n .. 
code-block:: python\n\n >>> import oneflow as flow\n >>> x = flow.tensor([[1, 2], [3, 4]], dtype=flow.float32)\n >>> out = flow.nn.functional.normalize(x, 2, 0)\n >>> out\n tensor([[0.3162, 0.4472],\n [0.9487, 0.8944]], dtype=oneflow.float32)\n >>> out = flow.nn.functional.normalize(x, 2, 1)\n >>> out\n tensor([[0.4472, 0.8944],\n [0.6000, 0.8000]], dtype=oneflow.float32)\n\n """'], {}), '(oneflow._C.normalize,\n """nn.functional.normalize(input: Tensor, p: float=2.0, dim: int=0, epsilon: float=1e-12) -> Tensor\n\n Performs :math:`L_p` normalization of inputs over specified dimension\n\n For a tensor :attr:`input` of sizes :math:`(n_0, ..., n_{dim}, ..., n_k)`, each\n :math:`n_{dim}` -element vector :math:`v` along dimension :attr:`dim` is transformed as:\n\n .. math::\n v = \\\\frac{v}{\\\\max(\\\\lVert v \\\\rVert_p, \\\\epsilon)}.\n\n With the default arguments it uses the Euclidean norm over vectors along dimension :math:`1` for normalization.\n\n But note that the gradient calculation of the input tensor has different results on different frameworks\n when `input.shape[dim] = 1`.\n\n Args:\n input (oneflow.Tensor): input tensor of any shape\n p (float): the exponent value in the norm formulation. Default: 2\n dim (int): the dimension to reduce. Default: 1\n eps (float): small value to avoid division by zero. Default: 1e-12 \n\n For example:\n\n .. code-block:: python\n\n >>> import oneflow as flow\n >>> x = flow.tensor([[1, 2], [3, 4]], dtype=flow.float32)\n >>> out = flow.nn.functional.normalize(x, 2, 0)\n >>> out\n tensor([[0.3162, 0.4472],\n [0.9487, 0.8944]], dtype=oneflow.float32)\n >>> out = flow.nn.functional.normalize(x, 2, 1)\n >>> out\n tensor([[0.4472, 0.8944],\n [0.6000, 0.8000]], dtype=oneflow.float32)\n\n """\n )\n', (11424, 12905), False, 'from oneflow.framework.docstr.utils import add_docstr\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow as flow
import oneflow._oneflow_internal
from oneflow.python.nn.module import Module
from oneflow.python.oneflow_export import oneflow_export, experimental_api
from oneflow.python.framework.tensor import register_tensor_op
from typing import Optional
def _softmax_need_transpose(x, axis):
assert type(axis) is int
dim_num = len(x.shape)
assert dim_num >= 2
if axis < 0:
axis += dim_num
assert axis >= 0
assert axis < dim_num
need_transpose = False
permute = list(range(dim_num))
if axis != dim_num - 1:
need_transpose = True
permute[axis] = permute[-1]
permute[-1] = axis
return need_transpose, permute
@oneflow_export("nn.ReLU")
@experimental_api
class ReLU(Module):
r"""Applies the rectified linear unit function element-wise:
:math:`\text{ReLU}(x) = (x)^+ = \max(0, x)`
Args:
inplace: can optionally do the operation in-place. Default: ``False``
Shape:
- Input: :math:`(N, *)` where `*` means, any number of additional
dimensions
- Output: :math:`(N, *)`, same shape as the input
For example:
.. code-block:: python
>>> import oneflow.experimental as flow
>>> import numpy as np
>>> flow.enable_eager_execution()
>>> relu = flow.nn.ReLU()
>>> ndarr = np.asarray([1, -2, 3])
>>> x = flow.Tensor(ndarr)
>>> relu(x).numpy()
array([1., 0., 3.], dtype=float32)
"""
def __init__(self, inplace: bool = False):
super().__init__()
self._op = flow.builtin_op("relu").Input("in").Output("out").Build()
def forward(self, x):
res = self._op(x)[0]
return res
@oneflow_export("nn.ReLU6")
@experimental_api
class ReLU6(Module):
r"""Applies the element-wise function:
.. math::
\text{Relu6}(x) = \begin{cases}
6 & \text{ if } x > 6 \\
0 & \text{ if } x < 0 \\
x & \text{ otherwise } \\
\end{cases}
Args:
inplace: can optionally do the operation in-place. Default: ``False``
Shape:
- Input: :math:`(N, *)` where `*` means, any number of additional
dimensions
- Output: :math:`(N, *)`, same shape as the input
For example:
.. code-block:: python
>>> import numpy as np
>>> import oneflow.experimental as flow
>>> flow.enable_eager_execution()
>>> x = np.array([-0.5, 0, 0.5]).astype(np.float32)
>>> input = flow.Tensor(x)
>>> relu6 = flow.nn.ReLU6()
>>> out = relu6(input).numpy()
>>> print(out)
[0. 0. 0.5]
"""
def __init__(self, inplace: bool = False):
super().__init__()
self._op = (
flow.builtin_op("hardtanh")
.Input("in")
.Attr("min_val", 0.0)
.Attr("max_val", 6.0)
.Output("out")
.Build()
)
def forward(self, x):
res = self._op(x)[0]
return res
@oneflow_export("nn.Tanh")
@experimental_api
class Tanh(Module):
r"""This operator computes the hyperbolic tangent value of Tensor.
The equation is:
.. math::
out = \frac{e^x-e^{-x}}{e^x+e^{-x}}
Args:
x (oneflow.Tensor): A Tensor
Returns:
oneflow.Tensor: The result Tensor
For example:
.. code-block:: python
>>> import numpy as np
>>> import oneflow.experimental as flow
>>> flow.enable_eager_execution()
>>> x = np.array([-1, 0, 1]).astype(np.float32)
>>> input = flow.Tensor(x)
>>> tanh = flow.nn.Tanh()
>>> out = tanh(input).numpy()
>>> print(out)
[-0.7615942 0. 0.7615942]
"""
def __init__(self):
super().__init__()
self._op = flow.builtin_op("tanh").Input("x").Output("y").Build()
def forward(self, x):
res = self._op(x)[0]
return res
@oneflow_export("tanh")
@register_tensor_op("tanh")
@experimental_api
def tanh_op(x):
r"""This operator computes the hyperbolic tangent value of Tensor.
The equation is:
.. math::
out = \frac{e^x-e^{-x}}{e^x+e^{-x}}
Args:
x (oneflow.Tensor): A Tensor
Returns:
oneflow.Tensor: The result Tensor
For example:
.. code-block:: python
import oneflow as flow
import numpy as np
x = np.array([-1, 0, 1]).astype(np.float32)
input = flow.Tensor(x)
tanh = flow.nn.Tanh()
out = tanh(input).numpy()
# out [-0.7615942 0. 0.7615942]
"""
return Tanh()(x)
@oneflow_export("nn.ELU")
@experimental_api
class ELU(Module):
r"""Applies the element-wise function:
.. math::
\text{ELU}(x) = \begin{cases}
x & \text{ if } x \gt 0 \\
\alpha*(exp(x)-1) & \text{ if } x \le 0 \\
\end{cases}
Args:
alpha: the :math:`\alpha` value for the ELU formulation. Default: 1.0
inplace: can optionally do the operation in-place. Default: ``False``
Shape:
- Input: :math:`(N, *)` where `*` means, any number of additional
dimensions
- Output: :math:`(N, *)`, same shape as the input
For example:
.. code-block:: python
>>> import numpy as np
>>> import oneflow.experimental as flow
>>> flow.enable_eager_execution()
>>> x = np.array([-0.5, 0, 0.5]).astype(np.float32)
>>> input = flow.Tensor(x)
>>> elu = flow.nn.ELU()
>>> out = elu(input).numpy()
>>> print(out)
[-0.39346933 0. 0.5 ]
"""
def __init__(self, alpha: float = 1.0, inplace: bool = False):
super().__init__()
self._op = (
flow.builtin_op("elu")
.Input("in")
.Attr("alpha", alpha)
.Output("out")
.Build()
)
def forward(self, x):
res = self._op(x)[0]
return res
@oneflow_export("nn.GELU")
@experimental_api
class GELU(Module):
r"""Gelu activation operator.
The equation is:
.. math::
out = 0.5 * x * (1 + tanh(\sqrt{\frac{2}{\pi}} * (x + 0.044715x^{3})))
Args:
x (oneflow.Tensor): Input Tensor
Returns:
oneflow.Tensor: A Tensor.
For example:
.. code-block:: python
>>> import numpy as np
>>> import oneflow.experimental as flow
>>> flow.enable_eager_execution()
>>> x = np.array([-0.5, 0, 0.5]).astype(np.float32)
>>> input = flow.Tensor(x)
>>> gelu = flow.nn.GELU()
>>> out = gelu(input).numpy()
>>> print(out)
[-0.15426877 0. 0.34573123]
"""
def __init__(self):
super().__init__()
self._op = flow.builtin_op("gelu").Input("in").Output("out").Build()
def forward(self, x):
res = self._op(x)[0]
return res
@oneflow_export("gelu")
@register_tensor_op("gelu")
@experimental_api
def gelu_op(x):
r"""Gelu activation operator.
The equation is:
.. math::
out = 0.5 * x * (1 + tanh(\sqrt{\frac{2}{\pi}} * (x + 0.044715x^{3})))
Args:
x (oneflow.Tensor): Input Tensor
Returns:
oneflow.Tensor: A Tensor.
For example:
.. code-block:: python
>>> import numpy as np
>>> import oneflow.experimental as flow
>>> flow.enable_eager_execution()
>>> x = np.array([-0.5, 0, 0.5]).astype(np.float32)
>>> input = flow.Tensor(x)
>>> gelu = flow.nn.GELU()
>>> out = gelu(input).numpy()
>>> print(out)
[-0.15426877 0. 0.34573123]
"""
return GELU()(x)
@oneflow_export("nn.Sigmoid")
@experimental_api
class Sigmoid(Module):
r"""Applies the element-wise function:
.. math::
\text{Sigmoid}(x) = \sigma(x) = \frac{1}{1 + \exp(-x)}
Shape:
- Input: :math:`(N, *)` where `*` means, any number of additional
dimensions
- Output: :math:`(N, *)`, same shape as the input
For example:
.. code-block:: python
import oneflow.experimental as flow
import numpy as np
x = flow.Tensor(
np.array(
[
[0.81733328, 0.43621480, 0.10351428],
[-1.15555191, -0.67776406, 0.27372134],
]
)
)
m = flow.nn.Sigmoid() # or y = flow.sigmoid(x)
y = m(x)
# [[0.69366997, 0.60735673, 0.52585548],
# [0.23947647, 0.33676055, 0.56800622]]
"""
def __init__(self):
super().__init__()
self._op = flow.builtin_op("sigmoid").Input("in").Output("out").Build()
def forward(self, x):
return self._op(x)[0]
@oneflow_export("sigmoid")
@register_tensor_op("sigmoid")
@experimental_api
def sigmoid_op(x):
r"""Applies the element-wise function:
.. math::
\text{Sigmoid}(x) = \sigma(x) = \frac{1}{1 + \exp(-x)}
Shape:
- Input: :math:`(N, *)` where `*` means, any number of additional
dimensions
- Output: :math:`(N, *)`, same shape as the input
For example:
.. code-block:: python
import oneflow.experimental as flow
import numpy as np
x = flow.Tensor(
np.array(
[
[0.81733328, 0.43621480, 0.10351428],
[-1.15555191, -0.67776406, 0.27372134],
]
)
)
y = x.sigmoid()
# [[0.69366997, 0.60735673, 0.52585548],
# [0.23947647, 0.33676055, 0.56800622]]
"""
return Sigmoid()(x)
@oneflow_export("nn.Hardsigmoid")
@experimental_api
class Hardsigmoid(Module):
r"""Applies the element-wise function:
.. math::
\text{Hardsigmoid}(x) = \begin{cases}
0 & \text{ if } x \le -3 \\
1 & \text{ if } x \ge +3 \\
\frac{x}{6} + \frac{1}{2} & \text{ otherwise } \\
\end{cases}
Args:
inplace: can optionally do the operation in-place. Default: ``False``
Shape:
- Input: :math:`(N, *)` where `*` means, any number of additional
dimensions
- Output: :math:`(N, *)`, same shape as the input
For example:
.. code-block:: python
>>> import numpy as np
>>> import oneflow.experimental as flow
>>> flow.enable_eager_execution()
>>> x = np.array([-0.5, 0, 0.5]).astype(np.float32)
>>> input = flow.Tensor(x)
>>> hardsigmoid = flow.nn.Hardsigmoid()
>>> out = hardsigmoid(input).numpy()
>>> print(out)
[0.41666666 0.5 0.5833333 ]
"""
def __init__(self, inplace: bool = False):
super().__init__()
self._op = flow.builtin_op("hardsigmoid").Input("in").Output("out").Build()
def forward(self, x):
res = self._op(x)[0]
return res
@oneflow_export("nn.Softmax")
@experimental_api
class Softmax(Module):
def __init__(self, dim: Optional[int] = None):
super().__init__()
self.axis = -1 if dim is None else dim
self._op = flow.builtin_op("softmax").Input("in").Output("out").Build()
self._transpose_op = (
flow.builtin_op("transpose")
.Input("input")
.Output("output")
.Attr("perm", [])
.Build()
)
def forward(self, x):
need_transpose, permute = _softmax_need_transpose(x, self.axis)
if need_transpose:
x = self._transpose_op(x, perm=permute)[0]
res = self._op(x)[0]
if need_transpose:
res = self._transpose_op(res, perm=permute)[0]
return res
@oneflow_export("softmax")
@register_tensor_op("softmax")
@experimental_api
def softmax_op(tensor, dim=None):
r"""Applies the Softmax function to an n-dimensional input Tensor
rescaling them so that the elements of the n-dimensional output Tensor
lie in the range [0,1] and sum to 1.
Softmax is defined as:
.. math::
\text{Softmax}(x_{i}) = \frac{\exp(x_i)}{\sum_j \exp(x_j)}
When the input Tensor is a sparse tensor then the unspecifed
values are treated as ``-inf``.
Shape:
- Input: :math:`(*)` where `*` means, any number of additional
dimensions
- Output: :math:`(*)`, same shape as the input
Returns:
a Tensor of the same dimension and shape as the input with
values in the range [0, 1]
Args:
dim (int): A dimension along which Softmax will be computed (so every slice
along dim will sum to 1).
For example:
.. code-block:: python
import oneflow as flow
import numpy as np
m = flow.nn.Softmax(dim = 2)
x = flow.Tensor(
np.array(
[[[[-0.46716809, 0.40112534, 0.61984003],
[-1.31244969, -0.42528763, 1.47953856]]],
[[[ 1.02978742, -0.49383053, 1.88214159],
[ 1.35351622, -1.46251285, -1.40751374]]]]
)
)
y = m(x)
# [[[[0.6995764 0.6955959 0.29740235]
# [0.3004236 0.30440408 0.7025977 ]]]
# [[[0.4197673 0.7248568 0.96407217]
# [0.58023274 0.27514324 0.03592779]]]]
"""
return Softmax(dim)(tensor)
@oneflow_export("nn.LogSoftmax")
@experimental_api
class LogSoftmax(Module):
r"""Applies the :math:`\log(\text{Softmax}(x))` function to an n-dimensional
input Tensor.
The LogSoftmax formulation can be simplified as:
.. math::
\text{LogSoftmax}(x_{i}) = \log\left(\frac{\exp(x_i) }{ \sum_j \exp(x_j)} \right)
Args:
dim (int): A dimension along which LogSoftmax will be computed.
Shape:
- Input: :math:`(N, *)` where `*` means, any number of additional
dimensions
- Output: :math:`(N, *)`, same shape as the input
For example:
.. code-block:: python
import oneflow.experimental as flow
import numpy as np
m = flow.nn.LogSoftmax(dim=1)
x = flow.Tensor(
np.array(
[[ 0.4296, -1.1957, 2.5463],
[ 1.2552, -1.5747, 0.6923]]
)
)
y = m(x)
# [[-2.251349 -3.8766491 -0.13464898]
# [-0.48770458 -3.3176045 -1.0506046 ]]
"""
def __init__(
self, dim: Optional[int] = 1,
):
super().__init__()
self.dim = dim
self._op = (
flow.builtin_op("transpose")
.Input("input")
.Output("output")
.Attr("perm", [])
.Build()
)
def __setstate__(self, state):
self.__dict__.update(state)
if not hasattr(self, "dim"):
self.dim = None
def forward(self, x):
need_transpose, permute = _softmax_need_transpose(x, self.dim)
if need_transpose:
x = self._op(x, perm=permute)[0]
x = x.softmax()
res = x.log()
if need_transpose:
res = self._op(res, perm=permute)[0]
return res
def extra_repr(self):
return "dim={dim}".format(dim=self.dim)
@oneflow_export("nn.LogSigmoid")
@experimental_api
class LogSigmoid(Module):
r"""Applies the element-wise function:
.. math::
\text{LogSigmoid}(x) = \log\left(\frac{ 1 }{ 1 + \exp(-x)}\right)
Shape:
- Input: :math:`(N, *)` where `*` means, any number of additional
dimensions
- Output: :math:`(N, *)`, same shape as the input
For example:
.. code-block:: python
>>> import numpy as np
>>> import oneflow.experimental as flow
>>> flow.enable_eager_execution()
>>> x = np.array([-0.5, 0, 0.5]).astype(np.float32)
>>> input = flow.Tensor(x)
>>> logsigmoid = flow.nn.LogSigmoid()
>>> out = logsigmoid(input).numpy()
>>> print(out)
[-0.974077 -0.6931472 -0.47407696]
"""
def __init__(self):
super().__init__()
def forward(self, x):
sigmoid_res = flow.experimental.sigmoid(x)
res = flow.experimental.log(sigmoid_res)
return res
@oneflow_export("nn.Softplus")
@experimental_api
class Softplus(Module):
r"""Applies the element-wise function:
.. math::
\text{Softplus}(x) = \frac{1}{\beta} * \log(1 + \exp(\beta * x))
SoftPlus is a smooth approximation to the ReLU function and can be used
to constrain the output of a machine to always be positive.
For numerical stability the implementation reverts to the linear function
when :math:`input \times \beta > threshold`.
Args:
beta: the :math:`\beta` value for the Softplus formulation. Default: 1
threshold: values above this revert to a linear function. Default: 20
Shape:
- Input: :math:`(N, *)` where `*` means, any number of additional
dimensions
- Output: :math:`(N, *)`, same shape as the input
For example:
.. code-block:: python
>>> import numpy as np
>>> import oneflow.experimental as flow
>>> flow.enable_eager_execution()
>>> x = np.array([-0.5, 0, 0.5]).astype(np.float32)
>>> input = flow.Tensor(x)
>>> softplus = flow.nn.Softplus()
>>> out = softplus(input).numpy()
>>> print(out)
[0.474077 0.6931472 0.974077 ]
"""
def __init__(self, beta: int = 1, threshold: int = 20):
super().__init__()
self.beta = beta
self.threshold = threshold
def forward(self, x):
return flow.experimental.where(
x * self.beta > self.threshold,
x,
1
/ self.beta
* flow.experimental.log(1.0 + flow.experimental.exp(self.beta * x)),
)
@oneflow_export("nn.Hardswish")
@experimental_api
class Hardswish(Module):
r"""Applies the hardswish function, element-wise, as described in the paper:
`Searching for MobileNetV3`_.
.. math::
\text{Hardswish}(x) = \begin{cases}
0 & \text{ if } x \le -3 \\
x & \text{ if } x \ge +3 \\
x*(x+3)/6 & \text{ otherwise } \\
\end{cases}
Args:
inplace: can optionally do the operation in-place. Default: ``False``
Shape:
- Input: :math:`(N, *)` where `*` means, any number of additional
dimensions
- Output: :math:`(N, *)`, same shape as the input
.. code-block:: python
>>> import numpy as np
>>> import oneflow.experimental as flow
>>> flow.enable_eager_execution()
>>> x = np.array([-0.5, 0, 0.5]).astype(np.float32)
>>> input = flow.Tensor(x)
>>> hardswish = flow.nn.Hardswish()
>>> out = hardswish(input).numpy()
>>> print(out)
[-0.20833333 0. 0.29166666]
.. _`Searching for MobileNetV3`:
https://arxiv.org/abs/1905.02244
"""
def __init__(self, inplace: bool = False):
super().__init__()
self._op = flow.builtin_op("hardswish").Input("in").Output("out").Build()
def forward(self, x):
res = self._op(x)[0]
return res
@oneflow_export("nn.Hardtanh")
@experimental_api
class Hardtanh(Module):
r"""
Applies the HardTanh function element-wise
HardTanh is defined as:
.. math::
\text{HardTanh}(x) = \begin{cases}
1 & \text{ if } x > 1 \\
-1 & \text{ if } x < -1 \\
x & \text{ otherwise } \\
\end{cases}
The range of the linear region :math:`[-1, 1]` can be adjusted using
:attr:`min_val` and :attr:`max_val`.
Args:
min_val: minimum value of the linear region range. Default: -1
max_val: maximum value of the linear region range. Default: 1
inplace: can optionally do the operation in-place. Default: ``False``
Keyword arguments :attr:`min_value` and :attr:`max_value`
have been deprecated in favor of :attr:`min_val` and :attr:`max_val`.
Shape:
- Input: :math:`(N, *)` where `*` means, any number of additional
dimensions
- Output: :math:`(N, *)`, same shape as the input
For example:
.. code-block:: python
>>> import numpy as np
>>> import oneflow.experimental as flow
>>> flow.enable_eager_execution()
>>> m = flow.nn.Hardtanh()
>>> arr = np.array([0.2, 0.3, 3.0, 4.0])
>>> x = flow.Tensor(arr)
>>> out = m(x).numpy()
>>> print(out)
[0.2 0.3 1. 1. ]
"""
def __init__(
self,
min_val: float = -1,
max_val: float = 1,
inplace: bool = False,
min_value: Optional[float] = None,
max_value: Optional[float] = None,
):
super().__init__()
if min_value is not None:
warnings.warn(
"keyword argument min_value is deprecated and rename to min_val"
)
min_val = min_value
if max_value is not None:
warnings.warn(
"keyword argument max_value is deprecated and rename to max_val"
)
max_val = max_value
self._op = (
flow.builtin_op("hardtanh")
.Input("in")
.Attr("min_val", min_val)
.Attr("max_val", max_val)
.Output("out")
.Build()
)
def forward(self, x):
res = self._op(x)[0]
return res
@oneflow_export("nn.LeakyReLU")
@experimental_api
class LeakyReLU(Module):
r"""Applies the element-wise function:
.. math::
\text{LeakyReLU}(x) = \max(0, x) + \text{negative_slope} * \min(0, x)
or
.. math::
\text{LeakyRELU}(x) = \begin{cases}
x, & \text{ if } x \geq 0 \\
\text{negative_slope} \times x, & \text{ otherwise }
\end{cases}
Args:
negative_slope: Controls the angle of the negative slope. Default: 1e-2
inplace: can optionally do the operation in-place. Default: ``False``
Shape:
- Input: :math:`(N, *)` where `*` means, any number of additional
dimensions
- Output: :math:`(N, *)`, same shape as the input
For example:
.. code-block:: python
>>> import numpy as np
>>> import oneflow.experimental as flow
>>> flow.enable_eager_execution()
>>> m = flow.nn.LeakyReLU(0.1)
>>> arr = np.array([0.2, 0.3, 3.0, 4.0])
>>> x = flow.Tensor(arr)
>>> out = m(x).numpy()
>>> print(out)
[0.2 0.3 3. 4. ]
"""
def __init__(self, negative_slope: float = 1e-2, inplace: bool = False):
super().__init__()
self._op = (
flow.builtin_op("leaky_relu")
.Input("x")
.Attr("alpha", negative_slope)
.Output("y")
.Build()
)
def forward(self, x):
res = self._op(x)[0]
return res
if __name__ == "__main__":
import doctest
doctest.testmod()
| [
"oneflow.python.framework.tensor.register_tensor_op",
"oneflow.experimental.exp",
"oneflow.builtin_op",
"oneflow.experimental.sigmoid",
"oneflow.python.oneflow_export.oneflow_export",
"oneflow.experimental.log"
] | [((1286, 1311), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""nn.ReLU"""'], {}), "('nn.ReLU')\n", (1300, 1311), False, 'from oneflow.python.oneflow_export import oneflow_export, experimental_api\n'), ((2308, 2334), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""nn.ReLU6"""'], {}), "('nn.ReLU6')\n", (2322, 2334), False, 'from oneflow.python.oneflow_export import oneflow_export, experimental_api\n'), ((3618, 3643), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""nn.Tanh"""'], {}), "('nn.Tanh')\n", (3632, 3643), False, 'from oneflow.python.oneflow_export import oneflow_export, experimental_api\n'), ((4550, 4572), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""tanh"""'], {}), "('tanh')\n", (4564, 4572), False, 'from oneflow.python.oneflow_export import oneflow_export, experimental_api\n'), ((4574, 4600), 'oneflow.python.framework.tensor.register_tensor_op', 'register_tensor_op', (['"""tanh"""'], {}), "('tanh')\n", (4592, 4600), False, 'from oneflow.python.framework.tensor import register_tensor_op\n'), ((5228, 5252), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""nn.ELU"""'], {}), "('nn.ELU')\n", (5242, 5252), False, 'from oneflow.python.oneflow_export import oneflow_export, experimental_api\n'), ((6591, 6616), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""nn.GELU"""'], {}), "('nn.GELU')\n", (6605, 6616), False, 'from oneflow.python.oneflow_export import oneflow_export, experimental_api\n'), ((7527, 7549), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""gelu"""'], {}), "('gelu')\n", (7541, 7549), False, 'from oneflow.python.oneflow_export import oneflow_export, experimental_api\n'), ((7551, 7577), 'oneflow.python.framework.tensor.register_tensor_op', 'register_tensor_op', (['"""gelu"""'], {}), "('gelu')\n", (7569, 7577), False, 'from oneflow.python.framework.tensor import 
register_tensor_op\n'), ((8301, 8329), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""nn.Sigmoid"""'], {}), "('nn.Sigmoid')\n", (8315, 8329), False, 'from oneflow.python.oneflow_export import oneflow_export, experimental_api\n'), ((9371, 9396), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""sigmoid"""'], {}), "('sigmoid')\n", (9385, 9396), False, 'from oneflow.python.oneflow_export import oneflow_export, experimental_api\n'), ((9398, 9427), 'oneflow.python.framework.tensor.register_tensor_op', 'register_tensor_op', (['"""sigmoid"""'], {}), "('sigmoid')\n", (9416, 9427), False, 'from oneflow.python.framework.tensor import register_tensor_op\n'), ((10252, 10284), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""nn.Hardsigmoid"""'], {}), "('nn.Hardsigmoid')\n", (10266, 10284), False, 'from oneflow.python.oneflow_export import oneflow_export, experimental_api\n'), ((11526, 11554), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""nn.Softmax"""'], {}), "('nn.Softmax')\n", (11540, 11554), False, 'from oneflow.python.oneflow_export import oneflow_export, experimental_api\n'), ((12311, 12336), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""softmax"""'], {}), "('softmax')\n", (12325, 12336), False, 'from oneflow.python.oneflow_export import oneflow_export, experimental_api\n'), ((12338, 12367), 'oneflow.python.framework.tensor.register_tensor_op', 'register_tensor_op', (['"""softmax"""'], {}), "('softmax')\n", (12356, 12367), False, 'from oneflow.python.framework.tensor import register_tensor_op\n'), ((13929, 13960), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""nn.LogSoftmax"""'], {}), "('nn.LogSoftmax')\n", (13943, 13960), False, 'from oneflow.python.oneflow_export import oneflow_export, experimental_api\n'), ((15776, 15807), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""nn.LogSigmoid"""'], {}), 
"('nn.LogSigmoid')\n", (15790, 15807), False, 'from oneflow.python.oneflow_export import oneflow_export, experimental_api\n'), ((16784, 16813), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""nn.Softplus"""'], {}), "('nn.Softplus')\n", (16798, 16813), False, 'from oneflow.python.oneflow_export import oneflow_export, experimental_api\n'), ((18415, 18445), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""nn.Hardswish"""'], {}), "('nn.Hardswish')\n", (18429, 18445), False, 'from oneflow.python.oneflow_export import oneflow_export, experimental_api\n'), ((19793, 19822), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""nn.Hardtanh"""'], {}), "('nn.Hardtanh')\n", (19807, 19822), False, 'from oneflow.python.oneflow_export import oneflow_export, experimental_api\n'), ((22088, 22118), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""nn.LeakyReLU"""'], {}), "('nn.LeakyReLU')\n", (22102, 22118), False, 'from oneflow.python.oneflow_export import oneflow_export, experimental_api\n'), ((23622, 23639), 'doctest.testmod', 'doctest.testmod', ([], {}), '()\n', (23637, 23639), False, 'import doctest\n'), ((16684, 16712), 'oneflow.experimental.sigmoid', 'flow.experimental.sigmoid', (['x'], {}), '(x)\n', (16709, 16712), True, 'import oneflow as flow\n'), ((16727, 16761), 'oneflow.experimental.log', 'flow.experimental.log', (['sigmoid_res'], {}), '(sigmoid_res)\n', (16748, 16761), True, 'import oneflow as flow\n'), ((18363, 18399), 'oneflow.experimental.exp', 'flow.experimental.exp', (['(self.beta * x)'], {}), '(self.beta * x)\n', (18384, 18399), True, 'import oneflow as flow\n'), ((2172, 2195), 'oneflow.builtin_op', 'flow.builtin_op', (['"""relu"""'], {}), "('relu')\n", (2187, 2195), True, 'import oneflow as flow\n'), ((4417, 4440), 'oneflow.builtin_op', 'flow.builtin_op', (['"""tanh"""'], {}), "('tanh')\n", (4432, 4440), True, 'import oneflow as flow\n'), ((7391, 7414), 'oneflow.builtin_op', 
'flow.builtin_op', (['"""gelu"""'], {}), "('gelu')\n", (7406, 7414), True, 'import oneflow as flow\n'), ((9250, 9276), 'oneflow.builtin_op', 'flow.builtin_op', (['"""sigmoid"""'], {}), "('sigmoid')\n", (9265, 9276), True, 'import oneflow as flow\n'), ((11383, 11413), 'oneflow.builtin_op', 'flow.builtin_op', (['"""hardsigmoid"""'], {}), "('hardsigmoid')\n", (11398, 11413), True, 'import oneflow as flow\n'), ((11740, 11766), 'oneflow.builtin_op', 'flow.builtin_op', (['"""softmax"""'], {}), "('softmax')\n", (11755, 11766), True, 'import oneflow as flow\n'), ((19652, 19680), 'oneflow.builtin_op', 'flow.builtin_op', (['"""hardswish"""'], {}), "('hardswish')\n", (19667, 19680), True, 'import oneflow as flow\n'), ((6373, 6395), 'oneflow.builtin_op', 'flow.builtin_op', (['"""elu"""'], {}), "('elu')\n", (6388, 6395), True, 'import oneflow as flow\n'), ((11844, 11872), 'oneflow.builtin_op', 'flow.builtin_op', (['"""transpose"""'], {}), "('transpose')\n", (11859, 11872), True, 'import oneflow as flow\n'), ((15099, 15127), 'oneflow.builtin_op', 'flow.builtin_op', (['"""transpose"""'], {}), "('transpose')\n", (15114, 15127), True, 'import oneflow as flow\n'), ((23341, 23370), 'oneflow.builtin_op', 'flow.builtin_op', (['"""leaky_relu"""'], {}), "('leaky_relu')\n", (23356, 23370), True, 'import oneflow as flow\n'), ((3361, 3388), 'oneflow.builtin_op', 'flow.builtin_op', (['"""hardtanh"""'], {}), "('hardtanh')\n", (3376, 3388), True, 'import oneflow as flow\n'), ((21823, 21850), 'oneflow.builtin_op', 'flow.builtin_op', (['"""hardtanh"""'], {}), "('hardtanh')\n", (21838, 21850), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import unittest
import numpy as np
import oneflow as flow
import oneflow.unittest
class CrossEntropyModule(flow.nn.Module):
def __init__(self, pred):
super().__init__()
if pred.is_global:
self.param = flow.nn.Parameter(
flow.zeros(
*pred.shape,
dtype=pred.dtype,
placement=pred.placement,
sbp=pred.sbp,
)
)
else:
self.param = flow.nn.Parameter(
flow.zeros(*pred.shape, dtype=pred.dtype, device=pred.device)
)
def forward(self, pred, label):
pred = pred + self.param
loss = flow._C.sparse_softmax_cross_entropy(pred, label)
return loss.mean()
class CrossEntropyGraph(flow.nn.Graph):
def __init__(self, module):
super().__init__()
self.m = module
self.add_optimizer(flow.optim.SGD([module.param], lr=1.0, momentum=0.0))
def build(self, pred, label):
loss = self.m(pred, label)
loss.backward()
return loss
def _compare_with_nn_cross_entropy_loss(
test_case, pred, label, pred_sbp=None, label_sbp=None
):
if pred.is_global:
assert label.is_global
pred_ = pred.to_local().detach().clone()
label_ = label.to_local()
else:
pred_ = pred.detach().clone()
label_ = label
pred_.requires_grad = True
cross_entropy_loss = flow.nn.CrossEntropyLoss()
loss = cross_entropy_loss(pred_, label_)
loss.backward()
if pred_sbp is not None:
pred = pred.to_global(sbp=pred_sbp)
if label_sbp is not None:
label = label.to_global(sbp=label_sbp)
cross_entropy_module = CrossEntropyModule(pred)
cross_entropy_graph = CrossEntropyGraph(cross_entropy_module)
graph_loss = cross_entropy_graph(pred, label)
loss_a = loss.numpy()
grad_a = pred_.grad.numpy()
if graph_loss.is_local:
loss_b = graph_loss.numpy()
grad_b = -cross_entropy_module.param.numpy()
else:
graph_loss = graph_loss.to_global(
sbp=[flow.sbp.broadcast()] * len(graph_loss.sbp)
)
loss_b = graph_loss.to_local().numpy()
pred_grad = cross_entropy_module.param.to_global(
sbp=[flow.sbp.broadcast()] * len(cross_entropy_module.param.sbp)
)
grad_b = -pred_grad.to_local().numpy()
test_case.assertTrue(np.allclose(loss_a, loss_b), f"{loss_a} vs. {loss_b}")
test_case.assertTrue(np.allclose(grad_a, grad_b), f"\n{grad_a}\nvs.\n{grad_b}")
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
class TestSparseSoftmaxCrossEntropyGraph(oneflow.unittest.TestCase):
@flow.unittest.skip_unless_1n1d()
def test_local(test_case):
pred = flow.randn(8, 10).to("cuda")
label = flow.randint(0, 10, (8,)).to("cuda")
_compare_with_nn_cross_entropy_loss(test_case, pred, label)
@flow.unittest.skip_unless_1n2d()
def test_data_split(test_case):
pred = flow.randn(8, 10)
label = flow.randint(0, 10, (8,))
placement = flow.placement("cuda", list(range(flow.env.get_world_size())))
pred = pred.to_global(placement=placement, sbp=flow.sbp.broadcast())
label = label.to_global(placement=placement, sbp=flow.sbp.broadcast())
_compare_with_nn_cross_entropy_loss(
test_case, pred, label, flow.sbp.split(0), flow.sbp.split(0)
)
@flow.unittest.skip_unless_1n2d()
def test_model_split(test_case):
pred = flow.randn(8, 10)
label = flow.randint(0, 10, (8,))
placement = flow.placement("cuda", list(range(flow.env.get_world_size())))
pred = pred.to_global(placement=placement, sbp=flow.sbp.broadcast())
label = label.to_global(placement=placement, sbp=flow.sbp.broadcast())
_compare_with_nn_cross_entropy_loss(
test_case, pred, label, flow.sbp.split(1), flow.sbp.broadcast()
)
@flow.unittest.skip_unless_1n4d()
def test_2d_split(test_case):
pred = flow.randn(8, 10)
label = flow.randint(0, 10, (8,))
placement = flow.placement(
"cuda", np.array(range(flow.env.get_world_size())).reshape(2, 2)
)
pred = pred.to_global(
placement=placement, sbp=[flow.sbp.broadcast(), flow.sbp.broadcast()]
)
label = label.to_global(
placement=placement, sbp=[flow.sbp.broadcast(), flow.sbp.broadcast()]
)
_compare_with_nn_cross_entropy_loss(
test_case,
pred,
label,
[flow.sbp.split(0), flow.sbp.split(1)],
[flow.sbp.split(0), flow.sbp.broadcast()],
)
if __name__ == "__main__":
unittest.main()
| [
"oneflow._C.sparse_softmax_cross_entropy",
"oneflow.env.get_world_size",
"oneflow.sbp.broadcast",
"oneflow.unittest.skip_unless_1n4d",
"oneflow.sbp.split",
"oneflow.optim.SGD",
"oneflow.unittest.skip_unless_1n2d",
"oneflow.nn.CrossEntropyLoss",
"oneflow.zeros",
"oneflow.randn",
"oneflow.unittest... | [((2066, 2092), 'oneflow.nn.CrossEntropyLoss', 'flow.nn.CrossEntropyLoss', ([], {}), '()\n', (2090, 2092), True, 'import oneflow as flow\n'), ((3335, 3367), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (3365, 3367), True, 'import oneflow as flow\n'), ((3570, 3602), 'oneflow.unittest.skip_unless_1n2d', 'flow.unittest.skip_unless_1n2d', ([], {}), '()\n', (3600, 3602), True, 'import oneflow as flow\n'), ((4087, 4119), 'oneflow.unittest.skip_unless_1n2d', 'flow.unittest.skip_unless_1n2d', ([], {}), '()\n', (4117, 4119), True, 'import oneflow as flow\n'), ((4608, 4640), 'oneflow.unittest.skip_unless_1n4d', 'flow.unittest.skip_unless_1n4d', ([], {}), '()\n', (4638, 4640), True, 'import oneflow as flow\n'), ((3202, 3236), 'os.getenv', 'os.getenv', (['"""ONEFLOW_TEST_CPU_ONLY"""'], {}), "('ONEFLOW_TEST_CPU_ONLY')\n", (3211, 3236), False, 'import os\n'), ((5376, 5391), 'unittest.main', 'unittest.main', ([], {}), '()\n', (5389, 5391), False, 'import unittest\n'), ((1300, 1349), 'oneflow._C.sparse_softmax_cross_entropy', 'flow._C.sparse_softmax_cross_entropy', (['pred', 'label'], {}), '(pred, label)\n', (1336, 1349), True, 'import oneflow as flow\n'), ((3044, 3071), 'numpy.allclose', 'np.allclose', (['loss_a', 'loss_b'], {}), '(loss_a, loss_b)\n', (3055, 3071), True, 'import numpy as np\n'), ((3124, 3151), 'numpy.allclose', 'np.allclose', (['grad_a', 'grad_b'], {}), '(grad_a, grad_b)\n', (3135, 3151), True, 'import numpy as np\n'), ((3654, 3671), 'oneflow.randn', 'flow.randn', (['(8)', '(10)'], {}), '(8, 10)\n', (3664, 3671), True, 'import oneflow as flow\n'), ((3688, 3713), 'oneflow.randint', 'flow.randint', (['(0)', '(10)', '(8,)'], {}), '(0, 10, (8,))\n', (3700, 3713), True, 'import oneflow as flow\n'), ((4172, 4189), 'oneflow.randn', 'flow.randn', (['(8)', '(10)'], {}), '(8, 10)\n', (4182, 4189), True, 'import oneflow as flow\n'), ((4206, 4231), 'oneflow.randint', 'flow.randint', (['(0)', '(10)', '(8,)'], 
{}), '(0, 10, (8,))\n', (4218, 4231), True, 'import oneflow as flow\n'), ((4690, 4707), 'oneflow.randn', 'flow.randn', (['(8)', '(10)'], {}), '(8, 10)\n', (4700, 4707), True, 'import oneflow as flow\n'), ((4724, 4749), 'oneflow.randint', 'flow.randint', (['(0)', '(10)', '(8,)'], {}), '(0, 10, (8,))\n', (4736, 4749), True, 'import oneflow as flow\n'), ((1529, 1581), 'oneflow.optim.SGD', 'flow.optim.SGD', (['[module.param]'], {'lr': '(1.0)', 'momentum': '(0.0)'}), '([module.param], lr=1.0, momentum=0.0)\n', (1543, 1581), True, 'import oneflow as flow\n'), ((4034, 4051), 'oneflow.sbp.split', 'flow.sbp.split', (['(0)'], {}), '(0)\n', (4048, 4051), True, 'import oneflow as flow\n'), ((4053, 4070), 'oneflow.sbp.split', 'flow.sbp.split', (['(0)'], {}), '(0)\n', (4067, 4070), True, 'import oneflow as flow\n'), ((4552, 4569), 'oneflow.sbp.split', 'flow.sbp.split', (['(1)'], {}), '(1)\n', (4566, 4569), True, 'import oneflow as flow\n'), ((4571, 4591), 'oneflow.sbp.broadcast', 'flow.sbp.broadcast', ([], {}), '()\n', (4589, 4591), True, 'import oneflow as flow\n'), ((870, 956), 'oneflow.zeros', 'flow.zeros', (['*pred.shape'], {'dtype': 'pred.dtype', 'placement': 'pred.placement', 'sbp': 'pred.sbp'}), '(*pred.shape, dtype=pred.dtype, placement=pred.placement, sbp=\n pred.sbp)\n', (880, 956), True, 'import oneflow as flow\n'), ((1139, 1200), 'oneflow.zeros', 'flow.zeros', (['*pred.shape'], {'dtype': 'pred.dtype', 'device': 'pred.device'}), '(*pred.shape, dtype=pred.dtype, device=pred.device)\n', (1149, 1200), True, 'import oneflow as flow\n'), ((3414, 3431), 'oneflow.randn', 'flow.randn', (['(8)', '(10)'], {}), '(8, 10)\n', (3424, 3431), True, 'import oneflow as flow\n'), ((3459, 3484), 'oneflow.randint', 'flow.randint', (['(0)', '(10)', '(8,)'], {}), '(0, 10, (8,))\n', (3471, 3484), True, 'import oneflow as flow\n'), ((3852, 3872), 'oneflow.sbp.broadcast', 'flow.sbp.broadcast', ([], {}), '()\n', (3870, 3872), True, 'import oneflow as flow\n'), ((3931, 3951), 
'oneflow.sbp.broadcast', 'flow.sbp.broadcast', ([], {}), '()\n', (3949, 3951), True, 'import oneflow as flow\n'), ((4370, 4390), 'oneflow.sbp.broadcast', 'flow.sbp.broadcast', ([], {}), '()\n', (4388, 4390), True, 'import oneflow as flow\n'), ((4449, 4469), 'oneflow.sbp.broadcast', 'flow.sbp.broadcast', ([], {}), '()\n', (4467, 4469), True, 'import oneflow as flow\n'), ((5239, 5256), 'oneflow.sbp.split', 'flow.sbp.split', (['(0)'], {}), '(0)\n', (5253, 5256), True, 'import oneflow as flow\n'), ((5258, 5275), 'oneflow.sbp.split', 'flow.sbp.split', (['(1)'], {}), '(1)\n', (5272, 5275), True, 'import oneflow as flow\n'), ((5291, 5308), 'oneflow.sbp.split', 'flow.sbp.split', (['(0)'], {}), '(0)\n', (5305, 5308), True, 'import oneflow as flow\n'), ((5310, 5330), 'oneflow.sbp.broadcast', 'flow.sbp.broadcast', ([], {}), '()\n', (5328, 5330), True, 'import oneflow as flow\n'), ((3768, 3793), 'oneflow.env.get_world_size', 'flow.env.get_world_size', ([], {}), '()\n', (3791, 3793), True, 'import oneflow as flow\n'), ((4286, 4311), 'oneflow.env.get_world_size', 'flow.env.get_world_size', ([], {}), '()\n', (4309, 4311), True, 'import oneflow as flow\n'), ((4942, 4962), 'oneflow.sbp.broadcast', 'flow.sbp.broadcast', ([], {}), '()\n', (4960, 4962), True, 'import oneflow as flow\n'), ((4964, 4984), 'oneflow.sbp.broadcast', 'flow.sbp.broadcast', ([], {}), '()\n', (4982, 4984), True, 'import oneflow as flow\n'), ((5067, 5087), 'oneflow.sbp.broadcast', 'flow.sbp.broadcast', ([], {}), '()\n', (5085, 5087), True, 'import oneflow as flow\n'), ((5089, 5109), 'oneflow.sbp.broadcast', 'flow.sbp.broadcast', ([], {}), '()\n', (5107, 5109), True, 'import oneflow as flow\n'), ((2725, 2745), 'oneflow.sbp.broadcast', 'flow.sbp.broadcast', ([], {}), '()\n', (2743, 2745), True, 'import oneflow as flow\n'), ((2901, 2921), 'oneflow.sbp.broadcast', 'flow.sbp.broadcast', ([], {}), '()\n', (2919, 2921), True, 'import oneflow as flow\n'), ((4821, 4846), 'oneflow.env.get_world_size', 
'flow.env.get_world_size', ([], {}), '()\n', (4844, 4846), True, 'import oneflow as flow\n')] |
import math
# import torch
# import torch.nn as nn
# import torch.nn.functional as F
import oneflow as of
import oneflow.nn as nn
import oneflow.nn.functional as F
# from libs.components import attention
class TAP(nn.Module):
    """Temporal Average Pooling: collapse the time axis by its mean."""

    def __init__(self):
        super(TAP, self).__init__()

    def forward(self, feature_map):
        """Average a (B x C x T) feature map over time.

        params:
            feature_map: (B x C x T)
        returns:
            embedding: (B x C)
        """
        return of.mean(feature_map, dim=2)
class GAP(nn.Module):
    """Global Average Pooling over a 2-D (F x T) map via AdaptiveAvgPool2d."""

    def __init__(self, output_size):
        super(GAP, self).__init__()
        size_ok = type(output_size) == int or len(output_size) >= 1
        assert size_ok, 'output_size must be int or list (tuple) type'
        self.global_average_pooling = nn.AdaptiveAvgPool2d(output_size)

    def forward(self, feature_map):
        """Pool a (B x C x F x T) map down to the configured output size.

        params:
            feature_map: (B x C x F x T)
        returns:
            embedding: (B x C x *output_size)
        """
        return self.global_average_pooling(feature_map)
class STAT(nn.Module):
    """Statistics pooling: concatenate per-channel mean and std over time."""

    def __init__(self):
        super(STAT, self).__init__()

    def forward(self, feature_map):
        """Reduce the time axis to first- and second-order statistics.

        params:
            feature_map: (B x C x T)
        returns:
            embedding: (B x 2C x 1), mean stacked on std along channels
        """
        mu = of.mean(feature_map, dim=2, keepdim=True)
        sigma = of.std(feature_map, dim=2, keepdim=True)
        return of.cat([mu, sigma], dim=1)
class MultiHeadFFA(nn.Module):
    """Placeholder for a multi-head feed-forward attention pooling layer."""

    def __init__(self, input_dim, hidden_dim):
        super(MultiHeadFFA, self).__init__()
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim

    def forward(self, feature_map):
        """Not implemented yet; currently returns None.

        params:
            feature_map: (B x C x T)
        returns:
            embeddings: (B x C)
        """
        # TODO: implement the attention computation.
        pass
class AttentiveStatPooling(nn.Module):
    """Attentive statistics pooling: an attention-weighted mean and standard
    deviation over time, concatenated into a (B, 2C) utterance embedding.

    The attention logits come from an optional hidden layer (W, b) followed
    by a learned projection (v, k); all parameters are initialized with
    Kaiming-normal in `_initialize_parameters`.

    NOTE(review): `input_size` defaults to 1 but, given `W.matmul(x)` with
    x of shape (B, C, T), it presumably must equal the channel count C of
    the input — confirm against callers.
    """
    def __init__(self, hidden_size, input_size = 1, hidden_layer = True, bias = True):
        super(AttentiveStatPooling, self).__init__()
        self.hidden_size = hidden_size
        self.input_size = input_size
        self.activation = nn.ReLU()
        self.hidden_layer = hidden_layer
        self.bias = bias
        if hidden_layer:
            # Hidden projection: (hidden_size, input_size) applied as W @ x.
            self.W = nn.Parameter(of.Tensor(hidden_size, input_size), requires_grad = True)
            if bias:
                self.b = nn.Parameter(of.Tensor(hidden_size, 1), requires_grad = True)
        self.v = nn.Parameter(of.Tensor(hidden_size, 1), requires_grad = True)
        if bias:
            self.k = nn.Parameter(of.Tensor(1, 1), requires_grad = True)
        self._initialize_parameters()

    def _initialize_parameters(self):
        # of.Tensor(...) allocates uninitialized storage, so every parameter
        # must be explicitly initialized here.
        for parameter in self.parameters():
            nn.init.kaiming_normal_(parameter)

    def get_attention(self, x):
        '''
        Compute per-frame attention weights (softmax over time).

        params:
            x: (B, C, T)
        return:
            alpha: (B, T, 1)
        '''
        if self.hidden_layer:
            if self.bias:
                hidden_mat = self.W.matmul(x) + self.b # B, H, T
            else:
                hidden_mat = self.W.matmul(x)
            x = self.activation(hidden_mat)
        if self.bias:
            e = self.v.T.matmul(x) + self.k # B, 1, T
        else:
            e = self.v.T.matmul(x)
        e = e.squeeze(1) # B, T
        alpha = F.softmax(of.tanh(e), dim = 1) # B, T, torch.tanh is a key operation to prevent gradient vanish
        # alpha = F.softmax(e, dim = 1) # B, T, torch.tanh is a key operation to prevent gradient vanish
        return alpha.unsqueeze(-1)

    def forward(self, x):
        '''
        Pool frame-level features into an utterance-level embedding.

        params:
            x: (B, C, T)
        return:
            attention_embedding: (B, C * 2) -- weighted mean concatenated
            with weighted standard deviation
        '''
        alpha = self.get_attention(x) # B, T, 1
        attention_mean = x.matmul(alpha).squeeze(-1)
        attention_std = of.sqrt(((x - attention_mean.unsqueeze(-1)) ** 2).matmul(alpha)).squeeze(-1)
        attention_embedding = of.cat([attention_mean, attention_std], dim = 1)
        return attention_embedding#.unsqueeze(-1)
# class AttentiveStatisticsPooling(nn.Module):
# """ An attentive statistics pooling.
# Reference: Okabe, Koji, <NAME>, and <NAME>. 2018. "Attentive Statistics Pooling
# for Deep Speaker Embedding." ArXiv Preprint ArXiv:1803.10963.
# """
# def __init__(self, input_dim, affine_layers=2, hidden_size=64, context=[0], stddev=True, stddev_attention=True, eps=1.0e-10):
# super(AttentiveStatisticsPooling, self).__init__()
#
# self.stddev = stddev
# self.input_dim = input_dim
#
# if self.stddev :
# self.output_dim = 2 * input_dim
# else :
# self.output_dim = input_dim
#
# self.eps = eps
# self.stddev_attention = stddev_attention
#
# self.attention = attention.AttentionAlphaComponent(input_dim, num_head=1, share=True, affine_layers=affine_layers,
# hidden_size=hidden_size, context=context)
#
# def forward(self, inputs):
# """
# @inputs: a 3-dimensional tensor (a batch), including [samples-index, frames-dim-index, frames-index]
# """
# assert len(inputs.shape) == 3
# assert inputs.shape[1] == self.input_dim
#
# alpha = self.attention(inputs)
#
# # Weight avarage
# mean = torch.sum(alpha * inputs, dim=2, keepdim=True)
#
# if self.stddev :
# if self.stddev_attention:
# var = torch.sum(alpha * inputs**2, dim=2, keepdim=True) - mean**2
# std = torch.sqrt(var.clamp(min=self.eps))
# else:
# var = torch.mean((inputs - mean)**2, dim=2, keepdim=True)
# std = torch.sqrt(var.clamp(min=self.eps))
# return torch.cat((mean, std), dim=1)
# else :
# return mean
#
# def get_output_dim(self):
# return self.output_dim
#
# class LDEPooling(torch.nn.Module):
# """A novel learnable dictionary encoding layer.
# Reference: <NAME>, etc., "A NOVEL LEARNABLE DICTIONARY ENCODING LAYER FOR END-TO-END
# LANGUAGE IDENTIFICATION", icassp, 2018
# """
# def __init__(self, input_dim, c_num=64, eps=1.0e-10):
# super(LDEPooling, self).__init__()
#
# self.input_dim = input_dim
# self.output_dim = input_dim * c_num
# self.eps = eps
#
# self.mu = torch.nn.Parameter(torch.randn(input_dim, c_num))
# self.s = torch.nn.Parameter(torch.ones(c_num))
#
# self.softmax_for_w = torch.nn.Softmax(dim=3)
#
# def forward(self, inputs):
# """
# @inputs: a 3-dimensional tensor (a batch), including [samples-index, frames-dim-index, frames-index]
# """
# assert len(inputs.shape) == 3
# assert inputs.shape[1] == self.input_dim
#
# r = inputs.transpose(1,2).unsqueeze(3) - self.mu
# # Make sure beta=self.s**2+self.eps > 0
# w = self.softmax_for_w(- (self.s**2 + self.eps) * torch.sum(r**2, dim=2, keepdim=True))
# e = torch.mean(w * r, dim=1)
#
# return e.reshape(-1, self.output_dim, 1)
#
# def get_output_dim(self):
# return self.output_dim
#
# class MultiHeadAttentionPooling(torch.nn.Module):
# """Implement multi-head attention pooling based on AttentionAlphaComponent.
# Reference: Safari, Pooyan, and <NAME>. 2019. “Self Multi-Head Attention for Speaker
# Recognition.” ArXiv Preprint ArXiv:1906.09890.
# Note, in this paper, affine_layers is default to 1, and final_dim is 1 which means the weights are shared.
# """
# def __init__(self, input_dim, stddev=True, stddev_attention=True, num_head=4, share=True, affine_layers=1, **options):
# super(MultiHeadAttentionPooling, self).__init__()
#
# self.input_dim = input_dim
# self.stddev = stddev
# self.stddev_attention = stddev_attention
# self.num_head = num_head
#
# if self.stddev :
# self.output_dim = 2 * input_dim
# else :
# self.output_dim = input_dim
#
# if "split_input" in options.keys():
# if not options["split_input"]:
# raise ValueError("split_input==False is not valid for this MultiHeadAttentionPooling.")
# options.pop("split_input")
#
# # In this pooling, the special point is that inputs will be splited.
# self.attention = attention.AttentionAlphaComponent(input_dim, num_head=num_head, split_input=True, share=share,
# affine_layers=affine_layers, bias=False, **options)
#
# def forward(self, inputs):
# """
# @inputs: a 3-dimensional tensor (a batch), including [samples-index, frames-dim-index, frames-index]
# """
# assert len(inputs.shape) == 3
# assert inputs.shape[1] == self.input_dim
#
# batch_size = inputs.shape[0]
# chunk_size = inputs.shape[2] # a.k.a total frames
#
# # alpha: [batch, weight, frames]
# # When using the conv1d to implement the multi-multiple of multi-head, we can get
# # the weight distribution of multi-head: [h11, h12, h13, h21, h22, h23, ..., hn1, hn2, ...]
# # So, just reshape it to split different heads.
# alpha = self.attention(inputs)
#
# # In sharing weight case, the shape of alpha is [batch, head, 1, frames] and [batch, head, splited-features, frames]
# # for another case.
# # inputs: [batch, head, splited-features, frames]
# after_mul = alpha.reshape(batch_size, self.num_head, -1, chunk_size) * \
# inputs.reshape(batch_size, self.num_head, -1, chunk_size)
#
# # After multi-multipling alpha and inputs for multi-head case, the mean could be got by reshaping back.
# mean = torch.sum(after_mul.reshape(batch_size, -1, chunk_size), dim=2, keepdim=True)
#
# if self.stddev :
# if self.stddev_attention:
# after_mul_2 = alpha.reshape(batch_size, self.num_head, -1, chunk_size) * \
# inputs.reshape(batch_size, self.num_head, -1, chunk_size)**2
# var = torch.sum(after_mul_2.reshape(batch_size, -1, chunk_size), dim=2, keepdim=True) - mean**2
# std = torch.sqrt(var.clamp(min=1.0e-10))
# else:
# var = torch.mean((inputs - mean)**2, dim=2, keepdim=True)
# std = torch.sqrt(var.clamp(min=1.0e-10))
# return torch.cat((mean, std), dim=1)
# else :
# return mean
#
# def get_output_dim(self):
# return self.output_dim
#
#
# class GlobalMultiHeadAttentionPooling(torch.nn.Module):
# """Implement global multi-head attention pooling based on AttentionAlphaComponent.
# Reference: <NAME>, <NAME>, <NAME>, <NAME>. "MULTI-RESOLUTION MULTI-HEAD
# ATTENTION IN DEEP SPEAKER EMBEDDING." ICASSP, 2020.
# It is not equivalent to multi-head attention pooling even when
# input_dim of global multi-head = 1/num_head * input_dim of multi-head.
# """
# def __init__(self, input_dim, stddev=True, stddev_attention=True, num_head=4, share=True, affine_layers=2, **options):
# super(GlobalMultiHeadAttentionPooling, self).__init__()
#
# self.input_dim = input_dim
# self.num_head = num_head
# self.stddev = stddev
# self.stddev_attention = stddev_attention
#
# if self.stddev :
# self.output_dim = 2 * input_dim
# else :
# self.output_dim = input_dim
#
# if "split_input" in options.keys():
# if options["split_input"]:
# raise ValueError("split_input==True is not valid for GlobalMultiHeadAttentionPooling.")
# options.pop("split_input")
# if "temperature" in options.keys():
# if options["temperature"]:
# raise ValueError("temperature==True is not valid for GlobalMultiHeadAttentionPooling.")
# options.pop("temperature")
#
# # In this pooling, the special point is that all (global) features of inputs will be used.
# self.attention = attention.AttentionAlphaComponent(input_dim, num_head=num_head, split_input=False, share=share,
# temperature=False, affine_layers=affine_layers, bias=True, **options)
#
# def forward(self, inputs):
# """
# @inputs: a 3-dimensional tensor (a batch), including [samples-index, frames-dim-index, frames-index]
# """
# assert len(inputs.shape) == 3
# assert inputs.shape[1] == self.input_dim
#
# batch_size = inputs.shape[0]
# chunk_size = inputs.shape[2] # a.k.a total frames
#
# # alpha: [batch, weight, frames]
# # When using the conv1d to implement the multi-multiple of multi-head, we can get
# # the weight distribution of multi-head: [h11, h12, h13, h21, h22, h23, ..., hn1, hn2, ...]
# # So, just reshape it to split different heads.
# alpha = self.attention(inputs)
#
# # In sharing weight case, the shape of alpha is [batch, head, 1, frames] and [batch, head, all-features, frames]
# # for another case.
# # inputs: [batch, 1, all-features, frames]
# after_mul = alpha.reshape(batch_size, self.num_head, -1, chunk_size) * \
# inputs.reshape(batch_size, 1, -1, chunk_size)
#
# # After multi-multipling alpha and inputs for multi-head case, the mean could be got by reshaping back.
# mean = torch.sum(after_mul.reshape(batch_size, -1, chunk_size), dim=2, keepdim=True)
#
# if self.stddev :
# if self.stddev_attention:
# after_mul_2 = alpha.reshape(batch_size, self.num_head, -1, chunk_size) * \
# inputs.reshape(batch_size, 1, -1, chunk_size)**2
# var = torch.sum(after_mul_2.reshape(batch_size, -1, chunk_size), dim=2, keepdim=True) - mean**2
# std = torch.sqrt(var.clamp(min=1.0e-10))
# else:
# var = torch.mean((inputs - mean)**2, dim=2, keepdim=True)
# std = torch.sqrt(var.clamp(min=1.0e-10))
# return torch.cat((mean, std), dim=1)
# else :
# return mean
#
# def get_output_dim(self):
# return self.output_dim * self.num_head
#
#
# class MultiResolutionMultiHeadAttentionPooling(torch.nn.Module):
# """Implement multi-resolution global multi-head attention pooling based on AttentionAlphaComponent.
# Reference: <NAME>, <NAME>, <NAME>, <NAME>. "MULTI-RESOLUTION MULTI-HEAD
# ATTENTION IN DEEP SPEAKER EMBEDDING." ICASSP, 2020.
# """
# def __init__(self, input_dim, stddev=True, stddev_attention=True, num_head=4, share=True, affine_layers=2, **options):
# super(MultiResolutionMultiHeadAttentionPooling, self).__init__()
#
# self.input_dim = input_dim
# self.num_head = num_head
# self.stddev = stddev
# self.stddev_attention = stddev_attention
#
# if self.stddev :
# self.output_dim = 2 * input_dim
# else :
# self.output_dim = input_dim
#
# if "split_input" in options.keys():
# if options["split_input"]:
# raise ValueError("split_input==True is not valid for MultiResolutionMultiHeadAttentionPooling.")
# options.pop("split_input")
# if "temperature" in options.keys():
# if not options["temperature"]:
# raise ValueError("temperature==False is not valid for MultiResolutionMultiHeadAttentionPooling.")
# options.pop("temperature")
#
# # In this pooling, the special point is that all (global) features of inputs will be used and
# # the temperature will be added.
# self.attention = attention.AttentionAlphaComponent(input_dim, num_head=num_head, split_input=False, temperature=True,
# share=share, affine_layers=affine_layers, bias=True, **options)
#
# def forward(self, inputs):
# """
# @inputs: a 3-dimensional tensor (a batch), including [samples-index, frames-dim-index, frames-index]
# """
# assert len(inputs.shape) == 3
# assert inputs.shape[1] == self.input_dim
#
# batch_size = inputs.shape[0]
# chunk_size = inputs.shape[2] # a.k.a total frames
#
# # alpha: [batch, weight, frames]
# # When using the conv1d to implement the multi-multiple of multi-head, we can get
# # the weight distribution of multi-head: [h11, h12, h13, h21, h22, h23, ..., hn1, hn2, ...]
# # So, just reshape it to split different heads.
# alpha = self.attention(inputs)
#
# # In sharing weight case, the shape of alpha is [batch, head, 1, frames] and [batch, head, all-features, frames]
# # for another case.
# # inputs: [batch, 1, all-features, frames]
# after_mul = alpha.reshape(batch_size, self.num_head, -1, chunk_size) * \
# inputs.reshape(batch_size, 1, -1, chunk_size)
#
# # After multi-multipling alpha and inputs for multi-head case, the mean could be got by reshaping back.
# mean = torch.sum(after_mul.reshape(batch_size, -1, chunk_size), dim=2, keepdim=True)
#
# if self.stddev :
# if self.stddev_attention:
# after_mul_2 = alpha.reshape(batch_size, self.num_head, -1, chunk_size) * \
# inputs.reshape(batch_size, 1, -1, chunk_size)**2
# var = torch.sum(after_mul_2.reshape(batch_size, -1, chunk_size), dim=2, keepdim=True) - mean**2
# std = torch.sqrt(var.clamp(min=1.0e-10))
# else:
# var = torch.mean((inputs - mean)**2, dim=2, keepdim=True)
# std = torch.sqrt(var.clamp(min=1.0e-10))
# return torch.cat((mean, std), dim=1)
# else :
# return mean
#
# def get_output_dim(self):
# return self.output_dim * self.num_head
if __name__ == '__main__':
    # Smoke test: pool a random batch of (B=4, C=512, T=300) features.
    inputs = of.randn(4, 512, 300)
    # attention = AttentiveStatPooling(64, 512)
    pooling = STAT()
    output = pooling(inputs)
    print(output.shape)  # STAT concatenates mean and std -> (4, 1024, 1)
| [
"oneflow.Tensor",
"oneflow.cat",
"oneflow.std",
"oneflow.nn.init.kaiming_normal_",
"oneflow.nn.ReLU",
"oneflow.nn.AdaptiveAvgPool2d",
"oneflow.randn",
"oneflow.mean",
"oneflow.tanh"
] | [((18635, 18656), 'oneflow.randn', 'of.randn', (['(4)', '(512)', '(300)'], {}), '(4, 512, 300)\n', (18643, 18656), True, 'import oneflow as of\n'), ((470, 497), 'oneflow.mean', 'of.mean', (['feature_map'], {'dim': '(2)'}), '(feature_map, dim=2)\n', (477, 497), True, 'import oneflow as of\n'), ((769, 802), 'oneflow.nn.AdaptiveAvgPool2d', 'nn.AdaptiveAvgPool2d', (['output_size'], {}), '(output_size)\n', (789, 802), True, 'import oneflow.nn as nn\n'), ((1390, 1431), 'oneflow.mean', 'of.mean', (['feature_map'], {'dim': '(2)', 'keepdim': '(True)'}), '(feature_map, dim=2, keepdim=True)\n', (1397, 1431), True, 'import oneflow as of\n'), ((1448, 1488), 'oneflow.std', 'of.std', (['feature_map'], {'dim': '(2)', 'keepdim': '(True)'}), '(feature_map, dim=2, keepdim=True)\n', (1454, 1488), True, 'import oneflow as of\n'), ((1506, 1532), 'oneflow.cat', 'of.cat', (['[mean, std]'], {'dim': '(1)'}), '([mean, std], dim=1)\n', (1512, 1532), True, 'import oneflow as of\n'), ((2188, 2197), 'oneflow.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2195, 2197), True, 'import oneflow.nn as nn\n'), ((4102, 4148), 'oneflow.cat', 'of.cat', (['[attention_mean, attention_std]'], {'dim': '(1)'}), '([attention_mean, attention_std], dim=1)\n', (4108, 4148), True, 'import oneflow as of\n'), ((2612, 2637), 'oneflow.Tensor', 'of.Tensor', (['hidden_size', '(1)'], {}), '(hidden_size, 1)\n', (2621, 2637), True, 'import oneflow as of\n'), ((2884, 2918), 'oneflow.nn.init.kaiming_normal_', 'nn.init.kaiming_normal_', (['parameter'], {}), '(parameter)\n', (2907, 2918), True, 'import oneflow.nn as nn\n'), ((3486, 3496), 'oneflow.tanh', 'of.tanh', (['e'], {}), '(e)\n', (3493, 3496), True, 'import oneflow as of\n'), ((2323, 2357), 'oneflow.Tensor', 'of.Tensor', (['hidden_size', 'input_size'], {}), '(hidden_size, input_size)\n', (2332, 2357), True, 'import oneflow as of\n'), ((2712, 2727), 'oneflow.Tensor', 'of.Tensor', (['(1)', '(1)'], {}), '(1, 1)\n', (2721, 2727), True, 'import oneflow as of\n'), ((2533, 2558), 
'oneflow.Tensor', 'of.Tensor', (['hidden_size', '(1)'], {}), '(hidden_size, 1)\n', (2542, 2558), True, 'import oneflow as of\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import oneflow as flow
import oneflow.unittest
@flow.unittest.skip_unless_1n1d()
class TestPybind11Caster(flow.unittest.TestCase):
    """Exercise the pybind11 type casters exposed through test_api."""

    def test_optional(test_case):
        # Optional<int> argument: a value is incremented, None passes through.
        api = flow._oneflow_internal.test_api
        test_case.assertEqual(api.increase_if_not_none(1), 2)
        test_case.assertEqual(api.increase_if_not_none(None), None)

    def test_maybe(test_case):
        # Maybe<int>: a valid call unwraps; a failing one raises "Check failed".
        api = flow._oneflow_internal.test_api
        test_case.assertEqual(api.divide(6, 2), 3)
        with test_case.assertRaises(Exception) as ctx:
            api.divide(6, 0)
        test_case.assertIn("Check failed", str(ctx.exception))

    def test_maybe_void(test_case):
        # Maybe<void>: success is silent, failure raises.
        api = flow._oneflow_internal.test_api
        api.throw_if_zero(1)
        with test_case.assertRaises(Exception) as ctx:
            api.throw_if_zero(0)
        test_case.assertIn("Check failed", str(ctx.exception))

    def test_return_maybe_shared_ptr(test_case):
        # The singleton comes back as the very same object every time.
        api = flow._oneflow_internal.test_api
        first = api.get_singleton_a()
        before = first.get_x()
        first.inc_x()
        second = api.get_singleton_a()
        test_case.assertEqual(id(first), id(second))
        test_case.assertEqual(before + 1, second.get_x())

    def test_pass_optional_shared_ptr(test_case):
        # Passing the shared_ptr through Optional keeps identity and mutates in place.
        api = flow._oneflow_internal.test_api
        obj = api.get_singleton_a()
        before = obj.get_x()
        obj.inc_x()
        returned = api.increase_x_of_a_if_not_none(obj)
        test_case.assertEqual(id(obj), id(returned))
        test_case.assertEqual(before + 2, returned.get_x())
if __name__ == "__main__":
    # Run the test cases above through the unittest CLI.
    unittest.main()
| [
"oneflow._oneflow_internal.test_api.divide",
"oneflow._oneflow_internal.test_api.get_singleton_a",
"oneflow.unittest.skip_unless_1n1d",
"oneflow._oneflow_internal.test_api.increase_x_of_a_if_not_none",
"oneflow._oneflow_internal.test_api.increase_if_not_none",
"oneflow._oneflow_internal.test_api.throw_if_... | [((656, 688), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (686, 688), True, 'import oneflow as flow\n'), ((2300, 2315), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2313, 2315), False, 'import unittest\n'), ((1346, 1394), 'oneflow._oneflow_internal.test_api.throw_if_zero', 'flow._oneflow_internal.test_api.throw_if_zero', (['(1)'], {}), '(1)\n', (1391, 1394), True, 'import oneflow as flow\n'), ((1649, 1698), 'oneflow._oneflow_internal.test_api.get_singleton_a', 'flow._oneflow_internal.test_api.get_singleton_a', ([], {}), '()\n', (1696, 1698), True, 'import oneflow as flow\n'), ((1756, 1805), 'oneflow._oneflow_internal.test_api.get_singleton_a', 'flow._oneflow_internal.test_api.get_singleton_a', ([], {}), '()\n', (1803, 1805), True, 'import oneflow as flow\n'), ((1983, 2032), 'oneflow._oneflow_internal.test_api.get_singleton_a', 'flow._oneflow_internal.test_api.get_singleton_a', ([], {}), '()\n', (2030, 2032), True, 'import oneflow as flow\n'), ((2090, 2153), 'oneflow._oneflow_internal.test_api.increase_x_of_a_if_not_none', 'flow._oneflow_internal.test_api.increase_x_of_a_if_not_none', (['a1'], {}), '(a1)\n', (2149, 2153), True, 'import oneflow as flow\n'), ((816, 871), 'oneflow._oneflow_internal.test_api.increase_if_not_none', 'flow._oneflow_internal.test_api.increase_if_not_none', (['(1)'], {}), '(1)\n', (868, 871), True, 'import oneflow as flow\n'), ((928, 986), 'oneflow._oneflow_internal.test_api.increase_if_not_none', 'flow._oneflow_internal.test_api.increase_if_not_none', (['None'], {}), '(None)\n', (980, 986), True, 'import oneflow as flow\n'), ((1065, 1109), 'oneflow._oneflow_internal.test_api.divide', 'flow._oneflow_internal.test_api.divide', (['(6)', '(2)'], {}), '(6, 2)\n', (1103, 1109), True, 'import oneflow as flow\n'), ((1185, 1229), 'oneflow._oneflow_internal.test_api.divide', 'flow._oneflow_internal.test_api.divide', (['(6)', '(0)'], {}), '(6, 
0)\n', (1223, 1229), True, 'import oneflow as flow\n'), ((1466, 1514), 'oneflow._oneflow_internal.test_api.throw_if_zero', 'flow._oneflow_internal.test_api.throw_if_zero', (['(0)'], {}), '(0)\n', (1511, 1514), True, 'import oneflow as flow\n')] |
import oneflow as flow
import oneflow.nn as nn
import oneflow.nn.functional as F
class ConformerConvolutionModule(nn.Module):
    """Convolution module of a Conformer block.

    Pipeline: pointwise conv (Linear, 2x expansion) -> GLU -> depthwise
    Conv1d -> batch norm -> Swish -> pointwise conv (Linear) -> dropout,
    with padded (masked) frames zeroed before and after the convolutions.

    Args:
        channels: feature dimension of the input and output.
        kernel_size: depthwise kernel width; must be odd so "same" padding
            keeps the time length unchanged.
        bias: whether the linear/conv layers carry bias terms.
        dropout: dropout probability applied to the module output.
    """

    def __init__(self, channels, kernel_size, bias=True, dropout=0.0):
        super(ConformerConvolutionModule, self).__init__()
        assert kernel_size % 2 == 1
        self.pointwise_conv1 = nn.Linear(channels, 2 * channels, bias=bias)
        self.depthwise_conv = nn.Conv1d(
            channels,
            channels,
            kernel_size,
            stride=1,
            padding=(kernel_size - 1) // 2,
            groups=channels,
            bias=bias,
        )
        self.batch_norm = nn.BatchNorm1d(channels)
        self.pointwise_conv2 = nn.Linear(channels, channels, bias=bias)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x, mask):
        """
        Args:
            x: [batch_size, time, channels]
            mask: [batch_size, time]; nonzero marks valid frames.

        Returns:
            [batch_size, time, channels] with masked frames set to zero.
        """
        # Broadcast the frame mask across the channel dimension.
        mask = mask.unsqueeze(2).repeat([1, 1, x.size(-1)])
        x = self.pointwise_conv1(x)
        x = F.glu(x)  # halves the channel dim back to `channels`
        # Zero padded frames so the depthwise conv does not leak into them.
        x = x.masked_fill(mask == 0, 0.0)
        x = x.transpose(1, 2)  # -> [batch, channels, time] for Conv1d
        x = self.depthwise_conv(x)
        x = self.batch_norm(x)
        x = x * flow.sigmoid(x)  # Swish activation
        x = x.transpose(1, 2)  # back to [batch, time, channels]
        x = self.pointwise_conv2(x)
        # Bug fix: self.dropout was constructed but never applied, so the
        # `dropout` constructor argument had no effect. Default p=0.0 keeps
        # previous behavior.
        x = self.dropout(x)
        x = x.masked_fill(mask == 0, 0.0)
        return x
| [
"oneflow.nn.Conv1d",
"oneflow.sigmoid",
"oneflow.nn.Dropout",
"oneflow.nn.BatchNorm1d",
"oneflow.nn.functional.glu",
"oneflow.masked_fill",
"oneflow.nn.Linear"
] | [((327, 371), 'oneflow.nn.Linear', 'nn.Linear', (['channels', '(2 * channels)'], {'bias': 'bias'}), '(channels, 2 * channels, bias=bias)\n', (336, 371), True, 'import oneflow.nn as nn\n'), ((403, 519), 'oneflow.nn.Conv1d', 'nn.Conv1d', (['channels', 'channels', 'kernel_size'], {'stride': '(1)', 'padding': '((kernel_size - 1) // 2)', 'groups': 'channels', 'bias': 'bias'}), '(channels, channels, kernel_size, stride=1, padding=(kernel_size -\n 1) // 2, groups=channels, bias=bias)\n', (412, 519), True, 'import oneflow.nn as nn\n'), ((638, 662), 'oneflow.nn.BatchNorm1d', 'nn.BatchNorm1d', (['channels'], {}), '(channels)\n', (652, 662), True, 'import oneflow.nn as nn\n'), ((695, 735), 'oneflow.nn.Linear', 'nn.Linear', (['channels', 'channels'], {'bias': 'bias'}), '(channels, channels, bias=bias)\n', (704, 735), True, 'import oneflow.nn as nn\n'), ((760, 779), 'oneflow.nn.Dropout', 'nn.Dropout', (['dropout'], {}), '(dropout)\n', (770, 779), True, 'import oneflow.nn as nn\n'), ((1041, 1049), 'oneflow.nn.functional.glu', 'F.glu', (['x'], {}), '(x)\n', (1046, 1049), True, 'import oneflow.nn.functional as F\n'), ((1062, 1097), 'oneflow.masked_fill', 'flow.masked_fill', (['x', '(mask == 0)', '(0.0)'], {}), '(x, mask == 0, 0.0)\n', (1078, 1097), True, 'import oneflow as flow\n'), ((1306, 1341), 'oneflow.masked_fill', 'flow.masked_fill', (['x', '(mask == 0)', '(0.0)'], {}), '(x, mask == 0, 0.0)\n', (1322, 1341), True, 'import oneflow as flow\n'), ((1211, 1226), 'oneflow.sigmoid', 'flow.sigmoid', (['x'], {}), '(x)\n', (1223, 1226), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import numpy as np
import oneflow.experimental as flow
@unittest.skipIf(
    not flow.unittest.env.eager_execution_enabled(),
    ".numpy() doesn't work in lazy mode",
)
class TestArgmax(flow.unittest.TestCase):
    """Check flow.argmax / Tensor.argmax against numpy.argmax."""

    def test_argmax_v1(test_case):
        # Functional form along the last axis.
        x = flow.Tensor(np.random.randn(2, 6, 5, 3), dtype=flow.float32)
        got = flow.argmax(x, dim=-1)
        want = np.argmax(x.numpy(), axis=-1)
        test_case.assertTrue(np.array_equal(got.numpy().flatten(), want.flatten()))

    def test_tensor_argmax(test_case):
        # Tensor-method form along axis 0; also verify the result shape.
        x = flow.Tensor(np.random.randn(2, 6, 5, 3), dtype=flow.float32)
        got = x.argmax(dim=0)
        want = np.argmax(x.numpy(), axis=0)
        test_case.assertTrue(np.array_equal(got.numpy().shape, want.shape))
        test_case.assertTrue(np.array_equal(got.numpy().flatten(), want.flatten()))

    def test_argmax_v3(test_case):
        # Functional form along an interior axis.
        x = flow.Tensor(np.random.randn(2, 6, 5, 3), dtype=flow.float32)
        got = flow.argmax(x, dim=1)
        want = np.argmax(x.numpy(), axis=1)
        test_case.assertTrue(np.array_equal(got.numpy().flatten(), want.flatten()))

    def test_argmax_keepdims(test_case):
        # keepdim=True must retain the reduced axis with size 1.
        x = flow.Tensor(np.random.randn(2, 6, 5, 3), dtype=flow.float32)
        got = x.argmax(0, True)
        want = np.expand_dims(np.argmax(x.numpy(), axis=0), axis=0)
        test_case.assertTrue(np.array_equal(got.numpy().shape, want.shape))
        test_case.assertTrue(np.array_equal(got.numpy().flatten(), want.flatten()))

    def test_argmax_dim_equal_none(test_case):
        # Without dim, argmax reduces over the flattened tensor.
        x = flow.Tensor(np.random.randn(2, 6, 5, 3), dtype=flow.float32)
        got = x.argmax()
        want = np.argmax(x.numpy().flatten(), axis=0)
        test_case.assertTrue(np.array_equal(got.numpy().flatten(), want.flatten()))
if __name__ == "__main__":
    # Discover and run the tests in this module with the standard
    # unittest runner when the file is executed directly.
    unittest.main()
| [
"oneflow.experimental.argmax",
"oneflow.experimental.unittest.env.eager_execution_enabled"
] | [((2712, 2727), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2725, 2727), False, 'import unittest\n'), ((997, 1025), 'oneflow.experimental.argmax', 'flow.argmax', (['input'], {'dim': 'axis'}), '(input, dim=axis)\n', (1008, 1025), True, 'import oneflow.experimental as flow\n'), ((1727, 1755), 'oneflow.experimental.argmax', 'flow.argmax', (['input'], {'dim': 'axis'}), '(input, dim=axis)\n', (1738, 1755), True, 'import oneflow.experimental as flow\n'), ((2155, 2188), 'numpy.expand_dims', 'np.expand_dims', (['np_out'], {'axis': 'axis'}), '(np_out, axis=axis)\n', (2169, 2188), True, 'import numpy as np\n'), ((712, 755), 'oneflow.experimental.unittest.env.eager_execution_enabled', 'flow.unittest.env.eager_execution_enabled', ([], {}), '()\n', (753, 755), True, 'import oneflow.experimental as flow\n'), ((911, 938), 'numpy.random.randn', 'np.random.randn', (['(2)', '(6)', '(5)', '(3)'], {}), '(2, 6, 5, 3)\n', (926, 938), True, 'import numpy as np\n'), ((1241, 1268), 'numpy.random.randn', 'np.random.randn', (['(2)', '(6)', '(5)', '(3)'], {}), '(2, 6, 5, 3)\n', (1256, 1268), True, 'import numpy as np\n'), ((1642, 1669), 'numpy.random.randn', 'np.random.randn', (['(2)', '(6)', '(5)', '(3)'], {}), '(2, 6, 5, 3)\n', (1657, 1669), True, 'import numpy as np\n'), ((1973, 2000), 'numpy.random.randn', 'np.random.randn', (['(2)', '(6)', '(5)', '(3)'], {}), '(2, 6, 5, 3)\n', (1988, 2000), True, 'import numpy as np\n'), ((2442, 2469), 'numpy.random.randn', 'np.random.randn', (['(2)', '(6)', '(5)', '(3)'], {}), '(2, 6, 5, 3)\n', (2457, 2469), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.