repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
tvm | tvm-main/python/tvm/meta_schedule/testing/torchbench/run.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-variable
"""
This script is for benchmarking TVM performance on models from TorchBench.
It uses the TorchDynamo as the frontend to ingest models into TVM, and it also
leverages the benchmark util from TorchDynamo.
TorchDynamo (https://github.com/pytorch/torchdynamo) and TorchBench
(https://github.com/pytorch/benchmark) need to be in the parent directory of TVM.
We need a local clone of these repos because torchbench and the benchmark runner
in TorchDynamo aren't designed to be used as Python packages.
To setup the environment, run the following commands in the parent directory of TVM and with
the appropriate Python environment:
```bash
# torchdynamo requires nightly pytorch. If it fails to find the specified version, try
# installing the latest nightly pytorch.
pip3 install --pre \
--extra-index-url https://download.pytorch.org/whl/nightly/cu116 \
torch==1.13.0.dev20220926 \
torchvision==0.14.0.dev20220926 \
torchtext==0.14.0.dev20220926
git clone https://github.com/pytorch/torchdynamo
pushd torchdynamo
git checkout c537639f9712621dc04ca09908796dbbe86c354b
pip install -e .
popd
sudo apt install git-lfs # git lfs is used for TorchBench
git clone https://github.com/pytorch/benchmark
pushd benchmark
python install.py --continue_on_fail # fambench_xlmr might fail to install
popd
```
To run a benchmark, the script can be run under 'tune' mode by
```bash
python python/tvm/meta_schedule/testing/torchbench/run.py \
--mode tune \
--model resnet50 \
--target "nvidia/geforce-rtx-3070" \
--work-dir /path/to/work/dir/ \
--num-trials 20000 \
--rpc-host <rpc tracker host for tuning> \
--rpc-port <rpc tracker port for tuning> \
--rpc-key <rpc key> \
```
All available target tags (like nvidia/geforce-rtx-3070) can be found at
https://github.com/apache/tvm/blob/main/src/target/tag.cc
Then the script can be run under 'eval' mode to actually benchmark the performance,
using the tuning database under the work directory. This can be executed on a different
machine than the one executes tuning (the database json files need to be inside
of the work directory).
```bash
python python/tvm/meta_schedule/testing/torchbench/run.py \
--mode eval \
--model resnet50 \
--target "nvidia/geforce-rtx-3070" \
--work-dir /path/to/work/dir/ \
--num-trials 0
```
Alternatively, both tuning and evaluation can be done in a single run on the same machine,
by
```bash
python python/tvm/meta_schedule/testing/torchbench/run.py \
--mode all \
--model resnet50 \
--target "llvm -num-cores 6" \
--work-dir /path/to/work/dir/ \
--num-trials 0
```
"""
# pylint: disable=logging-format-interpolation
import argparse
import contextlib
import logging
import os
import pickle
import sys
import warnings
from collections import defaultdict
from enum import Enum
from typing import Callable, Dict, List, Tuple
import numpy as np # type: ignore
import torch # type: ignore
from scipy.stats import ttest_ind # type: ignore
import tvm
import tvm.relay
from tvm import meta_schedule as ms
from tvm._ffi import get_global_func
from tvm.contrib.graph_executor import GraphModule
from tvm.meta_schedule.testing.torchbench.utils import (
DisallowedOperator,
load_torchdynamo_benchmark_runner,
same,
timed,
)
from tvm.runtime.vm import VirtualMachine
from tvm.support import describe
# Needs to be imported after the .utils is executed
import torchdynamo # type: ignore # isort: skip, pylint: disable=wrong-import-order
class RunMode(Enum):
    """
    The running mode of this script. Available values are:
    - extract: Only import the model and extract tuning tasks from it.
    - tune: Only tune the tasks and create the tuning database.
    - eval: Only benchmark model using pre-existing tuning database.
    - all: Run both tuning and benchmark
    """

    ALL = "all"
    EXTRACT = "extract"
    TUNE = "tune"
    EVAL = "eval"

    @property
    def should_extract(self):
        """Whether this mode includes the task-extraction step."""
        return self is RunMode.ALL or self is RunMode.EXTRACT

    @property
    def should_tune(self):
        """Whether this mode includes the tuning step."""
        return self is RunMode.ALL or self is RunMode.TUNE

    @property
    def should_eval(self):
        """Whether this mode includes the benchmark/evaluation step."""
        return self is RunMode.ALL or self is RunMode.EVAL
class ResultComparisonMetric(Enum):
    """
    This changes how it compares the results with the expected value during
    accuracy check.
    - cosine: Use the cosine similarity. It should be greater than 0.99.
    - allclose-1e-4: Use the max elementwise absolute difference. It should be less than 1e-4.
    """

    # NOTE: this class docstring doubles as the --result-metric CLI help text,
    # and the member values are the accepted CLI strings.
    COSINE = "cosine"
    ALLCLOSE = "allclose-1e-4"
def parse_args():
    """
    Parse command line arguments into an ``argparse.Namespace``.

    Notes
    -----
    - ``disallowed_op`` is post-processed from a comma-separated string (or
      the literal "all") into a set of ``DisallowedOperator`` values.
    - ``sys.argv`` is trimmed to the program name afterwards so that argument
      parsers inside torchbench models (e.g. timm_efficientdet) are not
      confused by this script's flags.
    """
    args = argparse.ArgumentParser()
    args.add_argument(
        "--mode",
        type=RunMode,
        default=RunMode.ALL,
        help=RunMode.__doc__,
    )
    args.add_argument(
        "--batch-size",
        type=int,
        default=None,
        help="The batch size of model input. Use TorchBench's default value if not specified.",
    )
    args.add_argument(
        "--result-metric",
        type=ResultComparisonMetric,
        default=ResultComparisonMetric.ALLCLOSE,
        help=ResultComparisonMetric.__doc__,
    )
    args.add_argument(
        "--benchmark-repeat",
        type=int,
        default=10,
        help="The number of times to repeat the benchmark measurement.",
    )
    args.add_argument(
        "--benchmark-warmup-rounds",
        type=int,
        default=5,
        help="The number of rounds to warmup before starting to measure the performance.",
    )
    args.add_argument(
        "--disallowed-op",
        type=str,
        default="all",
        help=DisallowedOperator.__doc__,
    )
    # Model selection
    args.add_argument(
        "--model",
        type=str,
        required=True,
        # Fixed typo in user-facing help: "It should a" -> "It should be a".
        help="""
        The name of model to run. It should be a directory name under
        https://github.com/pytorch/benchmark/tree/main/torchbenchmark/models.
        """,
    )
    args.add_argument(
        "--float32",
        action="store_true",
        help="""
        Cast model and inputs to fp32
        """,
    )
    # Tuning-related config
    args.add_argument(
        "--target",
        type=tvm.target.Target,
        required=True,
        help="The target to tune and run benchmark for.",
    )
    args.add_argument(
        "--work-dir",
        type=str,
        required=True,
        help="""
        The working directory to save intermediate results and store databases for compilation.
        """,
    )
    args.add_argument(
        "--strategy",
        type=str,
        default="evolutionary",
        # Fixed typo in user-facing help: "MetaSchdule" -> "MetaSchedule".
        help="The search strategy used by MetaSchedule.",
    )
    args.add_argument(
        "--num-trials",
        type=int,
        required=True,
        help="The max number of trials to run MetaSchedule.",
    )
    args.add_argument(
        "--max-trials-per-task",
        type=int,
        default=None,
        help="""
        The max number of trials to run per task extracted in MetaSchedule.
        By default it's the same as --num-trials.
        """,
    )
    args.add_argument(
        "--backend",
        type=str,
        choices=["graph", "vm"],
        default="graph",
        help="The backend to use for relay compilation(graph / vm).",
    )
    # TODO(@yelite): Add a layout arg to transform the network after
    # ingesting into Relay and before feeding into MetaSchedule.
    # Evaluator-related config
    args.add_argument(
        "--number",
        type=int,
        default=3,
        help="The number of times to run the model for taking average in a single measurement.",
    )
    args.add_argument(
        "--repeat",
        type=int,
        default=1,
        help="The number of times to repeat the measurement.",
    )
    args.add_argument(
        "--min-repeat-ms",
        type=int,
        default=100,
        help="""
        Minimum repeat time in ms. The number of runs will be increased if the actual
        repeat time is lowered than this.
        """,
    )
    args.add_argument(
        "--adaptive-training",
        action="store_true",
        help="Whether to use adaptive training for cost model.",
    )
    args.add_argument(
        "--cpu-flush",
        action="store_true",
        help="Whether to perform CPU cache flush.",
    )
    # RPC-related args
    args.add_argument(
        "--rpc-host",
        type=str,
        help="Host of the RPC Tracker for tuning. Use LocalRunner if not provided",
    )
    args.add_argument(
        "--rpc-port",
        type=int,
        help="Port of the RPC Tracker for tuning",
    )
    args.add_argument(
        "--rpc-key",
        type=str,
        help="Key of the RPC Tracker for tuning",
    )
    parsed = args.parse_args()
    # "all" expands to every known disallowed operator; otherwise the flag is
    # a comma-separated list of DisallowedOperator values.
    if parsed.disallowed_op == "all":
        disallowed_op = set(DisallowedOperator)
    else:
        disallowed_op = {DisallowedOperator(v) for v in parsed.disallowed_op.split(",")}
    parsed.disallowed_op = disallowed_op
    # Trim all args, otherwise it confuses the arg parser of timm_efficientdet
    sys.argv = sys.argv[:1]
    return parsed
# Configure logging once at import time so MetaSchedule's DEBUG-level tuning
# logs are visible alongside this script's own messages.
logging.basicConfig(
    format="%(asctime)s.%(msecs)03d %(levelname)s %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S",
)
logging.getLogger("tvm.meta_schedule").setLevel(logging.DEBUG)
# Parse CLI args eagerly at module level; parse_args() also trims sys.argv so
# later model imports don't see this script's flags.
ARGS = parse_args()
IS_CUDA = ARGS.target.kind.name == "cuda"
logger = logging.getLogger(__name__)  # pylint: disable=invalid-name
logger.setLevel(logging.INFO)
# NOTE(review): the benchmark runner must be constructed after .utils has been
# imported (see the torchdynamo import comment above) — presumably utils sets
# up the torchdynamo/torchbench environment; confirm before reordering.
runner = load_torchdynamo_benchmark_runner(  # pylint: disable=invalid-name
    IS_CUDA,
    cosine_similarity=ARGS.result_metric == ResultComparisonMetric.COSINE,
    float32=ARGS.float32,
    disallowed_operators=ARGS.disallowed_op,
)
def get_meta_schedule_runner() -> ms.runner.PyRunner:
    """
    Get the Runner for MetaSchedule.

    Returns an RPCRunner when --rpc-host is given (which then also requires
    --rpc-port and --rpc-key); otherwise falls back to a LocalRunner.
    """
    if ARGS.rpc_host is None:
        warnings.warn("Falling back to MetaSchedule LocalRunner because --rpc-host isn't provided.")
        return ms.runner.LocalRunner()
    assert ARGS.rpc_port is not None, "Missing rpc_port"
    assert ARGS.rpc_key is not None, "Missing rpc_key"
    rpc_config = ms.runner.RPCConfig(
        tracker_host=ARGS.rpc_host,
        tracker_port=ARGS.rpc_port,
        tracker_key=ARGS.rpc_key,
        session_timeout_sec=600,
    )
    evaluator_config = ms.runner.EvaluatorConfig(
        number=ARGS.number,
        repeat=ARGS.repeat,
        min_repeat_ms=ARGS.min_repeat_ms,
        enable_cpu_cache_flush=ARGS.cpu_flush,
    )
    return ms.runner.RPCRunner(
        rpc_config=rpc_config,
        evaluator_config=evaluator_config,
        alloc_repeat=1,
    )
def get_graph_executor_forward(
    graph_executor_factory: tvm.runtime.Module, device: tvm.runtime.Device
) -> Callable:
    """
    Get the forward function for graph executor, in order to integrate with TorchDynamo.

    If the C++ PyTorch integration (built with USE_PT_TVMDSOOP) is available,
    the returned callable wraps the TorchScript custom operator; otherwise a
    pure-Python closure around GraphModule is returned.
    """
    # It has to lazily import this package, loading the C++ PyTorch integration
    # after the transformers package is imported when loading model. Otherwise
    # there will be segfault caused by the protobuf library.
    import tvm.contrib.torch  # pylint: disable=import-outside-toplevel, unused-import, redefined-outer-name

    save_runtime_mod = get_global_func("tvmtorch.save_runtime_mod", allow_missing=True)
    if save_runtime_mod is None:
        warnings.warn(
            "C++ PyTorch TVM integration is missing. Fallback to Python forward function."
            "Build TVM with 'USE_PT_TVMDSOOP' to enable the C++ custom operator"
        )
        mod = GraphModule(graph_executor_factory["default"](device))

        def forward(*args):
            if IS_CUDA:
                # Finish pending CUDA work before sharing tensors via DLPack.
                torch.cuda.synchronize()
            args = tuple(arg.detach().contiguous() for arg in args)
            # Inputs are bound by the "inp_{idx}" names used at Relay import time.
            for idx, arg in enumerate(args, 0):
                mod.set_input(
                    f"inp_{idx}",
                    tvm.nd.from_dlpack(arg),
                )
            mod.run()
            device.sync()
            result = [torch.from_dlpack(mod.get_output(i)) for i in range(mod.get_num_outputs())]
            return result

        return forward
    else:
        # Hand the compiled module to the C++ side, then fetch it back as a
        # TorchScript-compatible wrapper that TorchDynamo can call directly.
        save_runtime_mod(graph_executor_factory.module)
        module = torch.classes.tvm_torch.GraphExecutorFactoryWrapper()

        def forward(*args):  # type: ignore # isort: skip, pylint: disable=function-redefined
            return module.forward(args)

        return forward
def get_vm_forward(virtual_machine: VirtualMachine, device: tvm.runtime.Device) -> Callable:
    """
    Get the forward function for VM, in order to integrate with TorchDynamo.
    """

    def forward(*args):
        if IS_CUDA:
            # Finish pending CUDA work before sharing tensors via DLPack.
            torch.cuda.synchronize()
        tvm_args = tuple(tvm.nd.from_dlpack(a.detach().contiguous()) for a in args)
        outputs = virtual_machine.invoke("main", *tvm_args)
        device.sync()
        # A single-output program returns a bare NDArray; normalize to a list.
        if isinstance(outputs, tvm.nd.NDArray):
            outputs = [outputs]
        return [torch.from_dlpack(out) for out in outputs]

    return forward
def should_skip_subgraph(graph_module: torch.fx.GraphModule) -> bool:
    """
    Decide whether optimization of this subgraph should be skipped.

    A subgraph is skipped when it has no placeholder (input) nodes and every
    output node returns the empty tuple — i.e. the graph is empty or only
    contains calls made for their side effects.
    """
    nodes = list(graph_module.graph.nodes)
    has_inputs = any(node.op == "placeholder" for node in nodes)
    outputs_empty = all(node.args == ((),) for node in nodes if node.op == "output")
    return not has_inputs and outputs_empty
def create_tvm_task_collection_backend() -> Tuple[Callable, List[ms.ExtractedTask]]:
    """
    This torchdynamo backend only collects the extracted tasks from MetaSchedule.
    It doesn't tune the model.

    Returns
    -------
    backend : Callable
        A TorchDynamo backend that records each subgraph to disk, extracts its
        tuning tasks, and falls back to eager execution.
    collected_tasks : List[ms.ExtractedTask]
        The deduplicated tasks collected so far; the list grows in place as
        the backend is invoked on more subgraphs.
    """
    subgraph_idx = 0
    subgraphs_dir = os.path.join(ARGS.work_dir, "subgraphs")
    os.makedirs(subgraphs_dir, exist_ok=True)
    collected_tasks = []
    # Structural hash -> tasks with that hash; used to merge structurally
    # equal tasks from different subgraphs (their weights are summed).
    task_index: Dict[int, List[ms.ExtractedTask]] = defaultdict(list)

    def collect_task(task):
        # Merge into an existing structurally-equal task if one exists,
        # otherwise record the task as new.
        task_hash = tvm.ir.structural_hash(task.dispatched[0])
        for duplicate_task in task_index[task_hash]:
            if tvm.ir.structural_equal(duplicate_task.dispatched[0], task.dispatched[0]):
                duplicate_task.weight += task.weight
                return
        task_index[task_hash].append(task)
        collected_tasks.append(task)

    def backend(graph_module, example_inputs):
        nonlocal subgraph_idx
        # Persist each subgraph and its example inputs for offline debugging.
        torch.save(graph_module, os.path.join(subgraphs_dir, f"graph_module_{subgraph_idx}"))
        torch.save(example_inputs, os.path.join(subgraphs_dir, f"example_inputs_{subgraph_idx}"))
        if should_skip_subgraph(graph_module):
            return graph_module.forward
        jit_mod = torch.jit.trace(graph_module, example_inputs)
        # Input names must match the "inp_{idx}" convention used by the
        # compiled forward functions.
        shape_list = [(f"inp_{idx}", i.shape) for idx, i in enumerate(example_inputs)]
        ir_mod, params = tvm.relay.frontend.from_pytorch(jit_mod, shape_list)
        extracted_tasks = ms.relay_integration.extract_tasks(
            mod=ir_mod,
            target=ARGS.target,
            params=params,
        )
        old_tasks_count = len(collected_tasks)
        for task in extracted_tasks:
            collect_task(task)
        logger.info(
            "Extracted %d tasks from graph %d, with %d new tasks",
            len(extracted_tasks),
            subgraph_idx,
            len(collected_tasks) - old_tasks_count,
        )
        subgraph_idx += 1
        # Always run the subgraph eagerly; this backend only collects tasks.
        return graph_module.forward

    return backend, collected_tasks
def create_tvm_compilation_backend(database: ms.database.Database) -> Callable:
    """
    Create a TorchDynamo backend that compiles each subgraph with TVM, using
    the history-best records from the given MetaSchedule database.
    """

    def backend(graph_module, example_inputs):
        # Graphs without real inputs/outputs stay on eager execution.
        if should_skip_subgraph(graph_module):
            return graph_module.forward
        jit_mod = torch.jit.trace(graph_module, example_inputs)
        shape_list = [(f"inp_{idx}", tensor.shape) for idx, tensor in enumerate(example_inputs)]
        ir_mod, params = tvm.relay.frontend.from_pytorch(jit_mod, shape_list)
        lib = ms.relay_integration.compile_relay(
            database=database,
            mod=ir_mod,
            target=ARGS.target,
            params=params,
            backend=ARGS.backend,
        )
        device = tvm.cuda(0) if IS_CUDA else tvm.cpu(0)
        if ARGS.backend == "graph":
            return get_graph_executor_forward(lib, device)
        if ARGS.backend == "vm":
            virtual_machine = VirtualMachine(lib, device)
            return get_vm_forward(virtual_machine, device)
        raise RuntimeError(f"Unknown backend {ARGS.backend}")

    return backend
def format_time(seconds: float) -> str:
    """
    Format an elapsed time in seconds as a human-readable string.

    Durations of at least one second are rendered in seconds, shorter ones in
    milliseconds; three significant digits are kept either way.

    Parameters
    ----------
    seconds : float
        The elapsed time in seconds.

    Returns
    -------
    str
        The formatted duration, e.g. "1.23s" or "456ms".
    """
    # Use >= so exactly 1.0s prints as "1s" instead of "1e+03ms"
    # (the original `> 1` mis-rendered the boundary value).
    if seconds >= 1:
        return f"{seconds:.3g}s"
    return f"{seconds * 1000:.3g}ms"
def is_output_correct(output: torch.Tensor, expected: torch.Tensor) -> bool:
    """
    Check whether `output` matches `expected` under the CLI-selected metric
    (--result-metric): cosine similarity or elementwise closeness (tol=1e-4).
    """
    metric = ARGS.result_metric
    if metric is ResultComparisonMetric.COSINE:
        return same(expected, output, cosine_similarity=True)
    if metric is ResultComparisonMetric.ALLCLOSE:
        return same(expected, output, tol=1e-4)
    raise RuntimeError(f"Unknown comparison metric {metric}")
def inspect_output_error(output, expected):
    """
    Inspect the error between the actual output and expected output.

    Logs both tensors plus histogram tables of the elementwise absolute and
    relative errors. Non-tensor outputs are not inspected and must be checked
    manually from the saved output.pt.
    """
    if not isinstance(output, torch.Tensor):
        logger.info(
            f"Unsupported type for error inspection: {type(output).__name__}."
            f"Please manually check output.pt"
        )
        return
    output = output.cpu().float()
    expected = expected.cpu().float()
    abs_error = (output - expected).abs()
    # NOTE(review): dividing by `expected` makes zero entries produce inf/nan
    # relative errors; those values fall outside the histogram bins below —
    # confirm this is the intended handling.
    rel_error = (abs_error / expected).abs()

    def format_error_table(error, bins) -> str:
        # Histogram of `error` over the given bin edges, rendered one bin per
        # line as "< upper_edge <tab> count".
        bin_tensor = torch.as_tensor([float(b) for b in bins], dtype=error.dtype)
        error_hist = torch.histogram(error, bin_tensor).hist.int()
        return "\n".join(f"< {b}\t{e}" for e, b in zip(error_hist, bins[1:]))

    abs_error_bins = [
        "-1e10",
        "0",
        "1e-8",
        "1e-6",
        "1e-5",
        "1e-4",
        "1e-3",
        "1e-2",
        "1e-1",
        "1",
        "1e10",
    ]
    rel_error_bins = [
        "-1e10",
        "0",
        "1e-4",
        "1e-3",
        "1e-2",
        "1e-1",
        "1",
        "1e1",
        "1e2",
        "1e3",
        "1e100",
    ]
    # Positions whose relative error exceeds 1 are the most suspicious; also
    # report the largest absolute error among them.
    large_rel_error_idx = rel_error > 1
    abs_error_with_large_rel_error = abs_error[large_rel_error_idx]
    logger.error(f"Expected (PyTorch eager): {expected}")
    logger.error(f"Actual (Optimized): {output}")
    logger.error(f"Absolute Error\n{format_error_table(abs_error, abs_error_bins)}")
    logger.error(f"Relative Error\n{format_error_table(rel_error, rel_error_bins)}")
    logger.error(
        f"Max absolute error for position with large relative error (> 1):"
        f"{abs_error_with_large_rel_error.max()}"
    )
def performance_experiment(
    model_iter_fn: Callable,
    model: torch.nn.Module,
    example_inputs: Tuple[torch.Tensor],
) -> str:
    """
    Performs the actual benchmarking
    Simplified from https://github.com/pytorch/torchdynamo/blob/c537639f9712621dc04ca09908796dbbe86c354b/benchmarks/common.py#L494 pylint: disable=line-too-long

    Runs the eager model and the TorchDynamo-optimized model interleaved,
    checks output correctness on every repetition, and logs median latency,
    speedup, and the t-test p-value of the two timing samples.
    """
    # Column 0: eager timings; column 1: optimized timings.
    timings = np.zeros((ARGS.benchmark_repeat, 2), np.float64)
    if IS_CUDA:
        torch.cuda.empty_cache()
    is_correct = True
    # Re-run the model under the already-captured TorchDynamo optimization
    # (no re-compilation during measurement).
    frozen_model_iter_fn = torchdynamo.run(model_iter_fn)
    for _ in range(ARGS.benchmark_warmup_rounds):
        frozen_model_iter_fn(model, example_inputs)
        model_iter_fn(model, example_inputs)
    for rep in range(ARGS.benchmark_repeat):
        # interleave the runs to handle frequency scaling and load changes
        timings[rep, 0], expected_output = timed(
            model, model_iter_fn, example_inputs, return_result=True
        )
        timings[rep, 1], actual_output = timed(
            model, frozen_model_iter_fn, example_inputs, return_result=True
        )
        # NOTE(review): is_output_correct's signature is (output, expected),
        # so the arguments here appear swapped — harmless if the underlying
        # `same` check is symmetric, but worth confirming.
        is_correct = is_correct and is_output_correct(expected_output, actual_output)
    # Two-sample t-test: a low p-value means the timing difference between
    # eager and optimized runs is statistically significant.
    pvalue = ttest_ind(timings[:, 0], timings[:, 1]).pvalue
    median = np.median(timings, axis=0)
    speedup = median[0] / median[1]
    logger.info(
        f"eager:{format_time(median[0])} "
        f"optimized:{format_time(median[1])} "
        f"speedup:{speedup:.3f}x p:{pvalue:.3f}"
    )
    # Persist the last outputs so mismatches can be inspected offline.
    torch.save(actual_output, os.path.join(ARGS.work_dir, "output.pt"))
    torch.save(expected_output, os.path.join(ARGS.work_dir, "expected.pt"))
    if not is_correct:
        logger.error("Result is incorrect.")
        inspect_output_error(actual_output, expected_output)
    return ""
def get_torch_device_type(target: tvm.target.Target) -> str:
    """
    Map a TVM target kind to the torch device string used by torchbench.
    Only "llvm" (-> "cpu") and "cuda" (-> "cuda") are supported.
    """
    kind = target.kind.name
    if kind == "llvm":
        return "cpu"
    if kind == "cuda":
        return "cuda"
    raise RuntimeError(f"Unsupported target {target}")
def main():
    """
    Entry point of the benchmark

    Orchestrates the run according to --mode: extract tuning tasks from the
    model, tune them with MetaSchedule, and/or compile + benchmark using the
    tuning database under --work-dir.
    """
    describe()
    meta_schedule_work_dir = os.path.join(ARGS.work_dir, "meta_schedule")
    os.makedirs(meta_schedule_work_dir, exist_ok=True)
    database = ms.database.JSONDatabase(work_dir=meta_schedule_work_dir)
    if not ARGS.mode.should_tune:
        # Eval-only runs need a database populated by a previous tuning run.
        if len(database) == 0:
            raise RuntimeError(
                "Script is running in eval mode while the tuning database is empty. "
                "Please tune the model first."
            )
    if IS_CUDA and ARGS.cpu_flush:
        warnings.warn(
            "Benchmark is running on CUDA, while --cpu-flush is turned on. "
            "This flag will have no effect on CUDA."
        )
        ARGS.cpu_flush = False
    try:
        logger.info(f"Loading model with batch size: {ARGS.batch_size}")
        _, name, model, example_inputs, batch_size = runner.load_model(
            get_torch_device_type(ARGS.target),
            ARGS.model,
            batch_size=ARGS.batch_size,
        )
        model, example_inputs = runner.maybe_cast(model, example_inputs)
        logger.info(f"Got model with batch size: {batch_size}")
    except NotImplementedError:
        logger.exception(f"{ARGS.model} failed to load")
        raise
    with contextlib.ExitStack() as stack:
        profiler = stack.enter_context(ms.Profiler())
        stack.enter_context(torch.no_grad())
        tasks_path = os.path.join(ARGS.work_dir, "extracted_tasks")
        if ARGS.mode.should_extract:
            # Run the model once under the task-collection backend, then cache
            # the extracted tasks so later runs can skip extraction.
            task_collect_backend, extracted_tasks = create_tvm_task_collection_backend()
            task_collect_ctx = torchdynamo.optimize(task_collect_backend)
            task_collect_ctx(runner.model_iter_fn)(model, example_inputs)
            with open(tasks_path, "wb") as f:
                pickle.dump(extracted_tasks, f)
        else:
            # Reuse tasks cached by a previous extract/all run.
            with open(tasks_path, "rb") as f:
                extracted_tasks = pickle.load(f)
        if ARGS.mode.should_tune:
            tasks, task_weights = ms.relay_integration.extracted_tasks_to_tune_contexts(
                extracted_tasks=extracted_tasks,
                work_dir=ARGS.work_dir,
                strategy=ARGS.strategy,
            )
            database = ms.tune.tune_tasks(
                tasks=tasks,
                task_weights=task_weights,
                work_dir=ARGS.work_dir,
                max_trials_global=ARGS.num_trials,
                max_trials_per_task=ARGS.max_trials_per_task,
                runner=get_meta_schedule_runner(),  # type: ignore
                database=database,
                cost_model=ms.cost_model.XGBModel(  # type: ignore
                    extractor=ms.feature_extractor.PerStoreFeature(),
                    adaptive_training=ARGS.adaptive_training,
                ),
            )
        if ARGS.mode.should_eval:
            # Reset TorchDynamo so the model is re-captured with the
            # compilation backend instead of the task-collection one.
            torchdynamo.reset()
            model_compile_ctx = torchdynamo.optimize(create_tvm_compilation_backend(database))
            model_compile_ctx(runner.model_iter_fn)(model, example_inputs)
            with torch.no_grad():
                performance_experiment(runner.model_iter_fn, model, example_inputs)
        print(profiler.table())
# Script entry point; module-level setup above has already parsed ARGS and
# configured logging by the time this runs.
if __name__ == "__main__":
    main()
| 25,562 | 31.276515 | 160 | py |
tvm | tvm-main/python/tvm/meta_schedule/cost_model/mlp_model.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# type: ignore[import]
"""
Segment Sum MLP cost model
"""
import glob
import math
import os
import random
import tempfile
from collections import OrderedDict
from itertools import chain as itertools_chain
from typing import Dict, List, NamedTuple, Optional, Tuple
import numpy as np # type: ignore
import torch # type: ignore
import tvm
from ...contrib.tar import tar, untar
from ...runtime import NDArray
from ...target import Target
from ..cost_model import PyCostModel
from ..database import JSONDatabase
from ..feature_extractor import FeatureExtractor, PerStoreFeature
from ..logging import get_logger
from ..runner import RunnerResult
from ..search_strategy import MeasureCandidate
from ..tune_context import TuneContext
from ..utils import derived_object, shash2hex
logger = get_logger("mlp_model") # pylint: disable=invalid-name
# pylint: disable=no-member,import-outside-toplevel
class SegmentSumMLPConfig(NamedTuple):
    """Configuration of the SegmentSum MLP cost model.

    Parameters
    ----------
    input_dim : int
        The input dim for the model.
    hidden_dim : int
        The hidden dim for the model.
    output_dim : int
        The output dim for the model.
    use_norm : bool
        Whether to normalize the segment sum or not.
    use_sigmoid : bool
        Whether to use sigmoid on the final output or not.
    """

    input_dim: int = 172
    hidden_dim: int = 256
    output_dim: int = 1
    use_norm: bool = False
    use_sigmoid: bool = False

    def to_dict(self):
        """Return the configuration as a plain field-name -> value dict."""
        return dict(self._asdict())
class TrainerConfig(NamedTuple):
    """Configuration of the cost-model trainer.

    Parameters
    ----------
    batch_size : int
        The batch size.
    learning_rate : float
        The learning rate.
    weight_decay : float
        The weight decay.
    num_epoch_full : int
        The number of epochs used in full training.
    num_epoch_incremental : int
        The number of epochs used in incremental training.
    grad_clip_norm : float
        The norm of gradient clipping.
    train_verbose : int
        The verbose frequency for training in batches.
    test_interval : int
        The testing interval in epochs.
    test_split : float
        The fraction of data for testing.
    frozen : bool
        Determine whether to re-train the model or not.
    """

    batch_size: int = 128
    learning_rate: float = 7e-4
    weight_decay: float = 1e-6
    num_epoch_full: int = 50
    num_epoch_incremental: int = 5
    grad_clip_norm: float = 0.5
    train_verbose: int = 1000
    test_interval: int = 1
    test_split: float = 0.2
    frozen: bool = False

    def to_dict(self):
        """Return the configuration as a plain field-name -> value dict."""
        return dict(self._asdict())
# pylint: disable=too-few-public-methods
class FeatureGroup:
    """A group of feature vectors that share one workload hash.

    Parameters
    ----------
    group_hash : str
        The hash of the group
    features : List[np.ndarray]
        The features
    costs : np.ndarray
        The measured costs
    min_cost : float
        The minimum cost
    """

    group_hash: str
    features: List[np.ndarray]
    costs: np.ndarray
    min_cost: float

    def __init__(
        self,
        group_hash: str,
        features: List[np.ndarray],
        costs: np.ndarray,
    ) -> None:
        self.group_hash = group_hash
        self.features = features
        self.costs = costs
        # Cache the cheapest measured cost of the group.
        self.min_cost = np.min(costs)

    def append(  # pylint: disable=missing-function-docstring
        self,
        features: List[np.ndarray],
        costs: np.ndarray,
    ) -> None:
        # Accumulate the new samples and refresh the cached minimum.
        self.features += features
        self.costs = np.append(self.costs, costs)
        self.min_cost = np.min(self.costs)
# pylint: disable=too-many-instance-attributes
class SegmentDataLoader:
    """Dataloader for Segment Sum MLP model.

    Stores all segments' feature rows flattened into one tensor (plus per-
    segment sizes/offsets) and yields batches of
    (segment_sizes, features, results).

    Parameters
    ----------
    features : List[np.ndarray]
        The features
    results : np.ndarray
        The measured results, can be None.
    batch_size : int
        The batch size
    shuffle : bool
        Whether to shuffle the dataset or not
    """

    def __init__(
        self,
        features,
        results=None,
        batch_size=128,
        shuffle=True,
    ):
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.data_size = len(features)
        # flatten features and store the starting indices
        self.segment_sizes = torch.tensor([len(feature) for feature in features], dtype=torch.int32)
        self.feature_offsets = (
            torch.cumsum(self.segment_sizes, 0, dtype=torch.int32) - self.segment_sizes
        )
        features = torch.cat([torch.tensor(feature) for feature in features])
        # Normalize each feature column by its max over the whole dataset;
        # all-zero columns keep a divisor of 1 to avoid division by zero.
        norm, _ = features.max(dim=0)
        norm[norm == 0] = 1
        self.features = features / norm
        self.results = torch.tensor(results) if results is not None else None
        # Iteration state, (re)initialized by __iter__.
        self.iter_order = self.pointer = None

    def __len__(self):
        # Number of samples (segments), not number of batches.
        return self.data_size

    def __iter__(self):
        if self.shuffle:
            self.iter_order = torch.randperm(self.data_size)
        else:
            self.iter_order = torch.arange(self.data_size)
        self.pointer = 0
        return self

    def __next__(self):
        if self.pointer >= self.data_size:
            raise StopIteration
        batch_indices = self.iter_order[self.pointer : self.pointer + self.batch_size]
        self.pointer += self.batch_size
        return self._fetch_indices(batch_indices)

    def _fetch_indices(self, indices):
        # Gather the variable-length feature rows of each selected segment
        # into one flat tensor, preserving the order of `indices`.
        segment_sizes, feature_offsets = self.segment_sizes[indices], self.feature_offsets[indices]
        feature_indices = torch.empty(segment_sizes.sum(), dtype=torch.int32)
        idx = 0
        for offset, seg_size in zip(feature_offsets, segment_sizes):
            feature_indices[idx : idx + seg_size] = torch.arange(offset, offset + seg_size)
            idx += seg_size
        features = self.features[feature_indices.long()]
        results = None
        if self.results is not None:
            results = self.results[indices.long()]
        return segment_sizes, features, results
def lambda_rank_loss(  # pylint: disable=too-many-locals
    preds: "torch.Tensor",
    labels: "torch.Tensor",
    k: int = None,
    eps: float = 1e-10,
    sigma: float = 1.0,
) -> "torch.Tensor":
    """
    LambdaLoss: Metric-Driven Loss for Learning-to-Rank

    Parameters
    ----------
    preds : Tensor
        The predicted runtime for each candidate.
    labels : Tensor
        The measured runtime for each candidate.
    k : int
        Loss for top k.
        Default is None, which means computing all scores.
    eps : float
        The minimum value to the denominator and argument of log if they reach 0.
    sigma : float
        The scaling factor to the input of the sigmoid function.

    Returns
    -------
    loss : Tensor
        The lambda rank loss.
    """
    device = preds.device
    # Treat all candidates as one ranking problem: shapes become (1, n).
    y_pred, y_true = preds[None, :], labels[None, :]
    y_pred_sorted, indices_pred = y_pred.sort(descending=True, dim=-1)
    y_true_sorted, _ = y_true.sort(descending=True, dim=-1)
    # Ground-truth scores re-ordered by the predicted ranking.
    true_sorted_by_preds = torch.gather(y_true, dim=1, index=indices_pred)
    true_diffs = true_sorted_by_preds[:, :, None] - true_sorted_by_preds[:, None, :]
    # Keep only pairs (i, j) where item i truly outranks item j.
    padded_pairs_mask = torch.isfinite(true_diffs) & (true_diffs > 0)
    # Restrict the loss to the top-k predicted positions (k=None keeps all).
    ndcg_at_k_mask = torch.zeros(
        (y_pred.shape[1], y_pred.shape[1]), dtype=torch.bool, device=device
    )
    ndcg_at_k_mask[:k, :k] = 1
    true_sorted_by_preds.clamp_(min=0.0)
    y_true_sorted.clamp_(min=0.0)
    # Positional discount factors and the ideal DCG used to normalize gains.
    pos_idxs = torch.arange(1, y_pred.shape[1] + 1).to(device)
    D = torch.log2(1.0 + pos_idxs.float())[None, :]  # pylint: disable=invalid-name
    maxDCGs = torch.sum(  # pylint: disable=invalid-name
        ((torch.pow(2, y_true_sorted) - 1) / D)[:, :k], dim=-1
    ).clamp(min=eps)
    G = (torch.pow(2, true_sorted_by_preds) - 1) / maxDCGs[:, None]  # pylint: disable=invalid-name
    # Pair weights: how much swapping positions i and j would change the metric.
    weights = torch.abs(
        torch.pow(D[:, :, None], -1.0) - torch.pow(D[:, None, :], -1.0)
    ) * torch.abs(G[:, :, None] - G[:, None, :])
    # Pairwise prediction-score differences, clamped for numerical stability.
    scores_diffs = (y_pred_sorted[:, :, None] - y_pred_sorted[:, None, :]).clamp(min=-1e8, max=1e8)
    scores_diffs[torch.isnan(scores_diffs)] = 0.0
    weighted_probs = (torch.sigmoid(sigma * scores_diffs).clamp(min=eps) ** weights).clamp(min=eps)
    losses = torch.log2(weighted_probs)
    # Sum only over valid pairs inside the top-k window; negate since the
    # log-probabilities are <= 0.
    masked_losses = losses[padded_pairs_mask & ndcg_at_k_mask]
    loss = -torch.sum(masked_losses)
    return loss
def topk_score(
    pred_results: "torch.Tensor",
    gt_results: "torch.Tensor",
    k: int,
) -> float:
    """
    Evaluate the top-k score.

    Parameters
    ----------
    pred_results : Tensor
        The raw prediction
    gt_results : Tensor
        The measured results
    k : int
        The k in top k score

    Returns
    -------
    score : float
        Ratio of the true best latency to the best latency among the k
        candidates the model ranks cheapest; 1.0 means the model's top-k
        contains the true optimum.
    """
    # Never ask for more candidates than are available.
    k = min(k, len(pred_results))
    # The k candidates predicted to be fastest (smallest predicted cost).
    chosen = torch.topk(pred_results, k, largest=False).indices
    best_chosen = gt_results[chosen].min()
    return (gt_results.min() / best_chosen).item()
class SegmentSumMLP(torch.nn.Module):
    """Segment Sum MLP model.

    Encodes each feature row with an MLP, sums the encodings within each
    segment, and decodes the per-segment vector to a scalar score through two
    residual blocks.

    Parameters
    ----------
    input_dim : int
        The input dim for the model.
    hidden_dim : int
        The hidden dim for the model.
    output_dim : int
        The output dim for the model.
    use_norm : bool
        Whether to normalize the segment sum or not.
    use_sigmoid : bool
        Whether to use sigmoid on the final output or not.
    """

    input_dim: int
    hidden_dim: int
    output_dim: int
    use_norm: bool
    use_sigmoid: bool

    def __init__(  # pylint: disable=too-many-arguments
        self,
        input_dim: int = 172,
        hidden_dim: int = 256,
        output_dim: int = 1,
        use_norm: bool = False,
        use_sigmoid: bool = False,
    ):
        from torch import nn  # type: ignore

        super().__init__()
        # Row-wise encoder applied to every feature vector independently.
        self.encoder = nn.Sequential(
            nn.Linear(input_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, hidden_dim),
            nn.ReLU(),
        )
        self.norm = nn.BatchNorm1d(hidden_dim) if use_norm else nn.Identity()
        # Two residual blocks followed by a linear decoder.
        self.layer0 = nn.Sequential(
            nn.Linear(hidden_dim, hidden_dim),
            nn.ReLU(),
        )
        self.layer1 = nn.Sequential(
            nn.Linear(hidden_dim, hidden_dim),
            nn.ReLU(),
        )
        self.decoder = nn.Linear(hidden_dim, output_dim)
        self.sigmoid = nn.Sigmoid() if use_sigmoid else nn.Identity()

    def forward(  # pylint: disable=missing-function-docstring
        self,
        segment_sizes: "torch.Tensor",
        features: "torch.Tensor",
    ) -> "torch.Tensor":
        # `features` is the flat concatenation of all segments' rows;
        # segment_sizes[i] says how many consecutive rows belong to segment i.
        n_seg = len(segment_sizes)
        encoded_features = self.encoder(features)
        # Map each row to the index of the segment it belongs to.
        segment_indices = torch.repeat_interleave(
            torch.arange(n_seg, device=features.device),
            segment_sizes.long(),
        )
        n_dim = encoded_features.shape[1]
        # Sum the encoded rows of each segment into one vector per segment.
        segment_sum = torch.scatter_add(
            input=torch.zeros((n_seg, n_dim), dtype=encoded_features.dtype, device=features.device),
            dim=0,
            index=segment_indices.view(-1, 1).expand(-1, n_dim),
            src=encoded_features,
        )
        out = self.norm(segment_sum)
        out = self.layer0(out) + out
        out = self.layer1(out) + out
        out = self.decoder(out).squeeze()
        out = self.sigmoid(out)
        return out
def extract_features(
    context: TuneContext,
    candidates: List[MeasureCandidate],
    results: Optional[List[RunnerResult]] = None,
    extractor: Optional[FeatureExtractor] = None,
):
    """Extract feature vectors and compute mean costs.

    Parameters
    ----------
    context: TuneContext
        The tuning context.
    candidates: List[MeasureCandidate]
        The measure candidates.
    results: Optional[List[RunnerResult]]
        The measured results, can be None if used in prediction.
    extractor: Optional[FeatureExtractor]
        The feature extractor.

    Returns
    -------
    new_features: List[np.ndarray]
        The extracted features.
    new_mean_costs: np.ndarray
        The mean costs, or None if no results were given.
    """
    if extractor is None:
        extractor = PerStoreFeature(extract_workload=True)

    def _as_float32(feature: NDArray) -> np.ndarray:
        return feature.numpy().astype("float32")

    def _median_run_sec(res: RunnerResult) -> float:
        # Failed runs get a huge cost so they rank last.
        if not res.run_secs:
            return 1e10
        return float(np.median([float(s) for s in res.run_secs]))

    new_features = [_as_float32(f) for f in extractor.extract_from(context, candidates)]
    if results is None:
        new_mean_costs = None
    else:
        new_mean_costs = np.array([_median_run_sec(r) for r in results]).astype("float32")
    return new_features, new_mean_costs
class State:
    """State of the trainer: the cost model plus all data it has seen.

    Parameters
    ----------
    model: SegmentSumMLP
        The cost model.
    data: Dict[str, FeatureGroup]
        The data groups, keyed by the structural hash of the workload.
    data_size: int
        The size of all data.
    untrained_size: int
        The size of the data the model has not been trained on yet.
    """

    model: SegmentSumMLP
    data: Dict[str, FeatureGroup]
    data_size: int
    untrained_size: int
    # The feature extractor used both for loading raw data and for prediction.
    extractor: FeatureExtractor

    def __init__(
        self,
        model_config: Optional[SegmentSumMLPConfig] = None,
        extractor: Optional[FeatureExtractor] = None,
    ):
        model_config = model_config or SegmentSumMLPConfig()
        extractor = extractor or PerStoreFeature(extract_workload=True)
        self.model = SegmentSumMLP(**model_config.to_dict())
        self.data = OrderedDict()
        self.data_size = 0
        self.untrained_size = 0
        self.extractor = extractor

    def load(  # pylint: disable=too-many-locals
        self,
        path: str,
        target: str = "nvidia/nvidia-v100",
    ) -> None:
        """Load the cached model, cached features, or raw data.

        Parameters
        ----------
        path: str
            The path to the tar file containing cached model, cached features,
            or raw data.
        target: str
            The target for the tuning context.
        """
        with tempfile.TemporaryDirectory() as tmp_dir:
            model_path = os.path.join(tmp_dir, "model.pth")
            cache_path = os.path.join(tmp_dir, "cached_data.npy")
            raw_path = os.path.join(tmp_dir, "raw_data")
            untar(path, tmp_dir)
            # Case 1: a cached PyTorch state dict is present -> restore the model.
            if os.path.exists(model_path):
                self.model.load_state_dict(torch.load(model_path))
            # Case 2: pre-extracted features are present -> load them directly.
            if os.path.exists(cache_path):
                for group_hash, features, costs in np.load(cache_path, allow_pickle=True):
                    self.data[group_hash] = FeatureGroup(
                        group_hash=group_hash,
                        features=list(features),
                        costs=costs,
                    )
                    self.data_size += len(costs)
                    self.untrained_size += len(costs)
            # Case 3: only raw tuning logs are present -> replay each database
            # and extract features from scratch (slow; hence the progress bar).
            elif os.path.exists(raw_path):
                from tqdm import tqdm  # type: ignore

                model_dirs = glob.glob(os.path.join(raw_path, "*"))
                workload_paths = []
                for model_dir in model_dirs:
                    json_files = glob.glob(os.path.join(model_dir, "*.json"))
                    for json_file in json_files:
                        if json_file.endswith("_workload.json"):
                            workload_paths.append(json_file)
                for workload_path in tqdm(workload_paths):
                    try:
                        database = JSONDatabase(
                            path_workload=workload_path,
                            path_tuning_record=workload_path.replace(
                                "_workload.json", "_candidates.json"
                            ),
                        )
                    except tvm._ffi.base.TVMError:  # pylint: disable=protected-access
                        # Skip databases that fail to parse instead of aborting.
                        continue
                    candidates, results = [], []
                    tuning_records = database.get_all_tuning_records()
                    if len(tuning_records) == 0:
                        continue
                    for record in tuning_records:
                        candidates.append(record.as_measure_candidate())
                        results.append(RunnerResult(run_secs=record.run_secs, error_msg=None))
                    assert len(candidates) == len(results)
                    # All records of one workload share the same IRModule.
                    context = TuneContext(mod=tuning_records[0].workload.mod, target=Target(target))
                    features, mean_costs = extract_features(
                        context, candidates, results, self.extractor
                    )
                    self.add_to_group(features, mean_costs, shash2hex(context.mod))

    def save(self, path: str) -> None:
        """Cache the model and data into a single tar file.

        Parameters
        ----------
        path: str
            The path to the cached tar file.
        """
        with tempfile.TemporaryDirectory() as tmp_dir:
            model_path = os.path.join(tmp_dir, "model.pth")
            cache_path = os.path.join(tmp_dir, "cached_data.npy")
            torch.save(self.model.state_dict(), model_path)
            # Serialize the feature groups as an object array of
            # (hash, features, costs) tuples.
            data = [
                (
                    g.group_hash,
                    g.features,
                    g.costs,
                )
                for g in self.data.values()
            ]
            np.save(
                file=cache_path,
                arr=np.array(data, dtype=object),
            )
            tar(path, [x for x in [model_path, cache_path] if x is not None])
        logger.info("Saved MLPModel to %s", path)

    def add_to_group(
        self,
        features: List[np.ndarray],
        costs: np.ndarray,
        group_hash: str,
    ):
        """Add features and costs to the data groups with key group_hash.

        Parameters
        ----------
        features: List[np.ndarray]
            The feature vectors.
        costs: np.ndarray
            The measured results.
        group_hash: str
            The structural hash of the candidates.
        """
        group = self.data.get(group_hash, None)
        if group is None:
            group = FeatureGroup(
                group_hash=group_hash,
                features=features,
                costs=costs,
            )
        else:
            group.append(features, costs)
        self.data[group_hash] = group
        # New data has not been trained on yet, so both counters grow.
        self.data_size += len(features)
        self.untrained_size += len(features)
class SegmentSumMLPTrainer:
    """The trainer for the Segment Sum MLP model.

    Parameters
    ----------
    state: State
        The state of the trainer.
    batch_size : int
        The batch size.
    learning_rate : float
        The learning rate.
    weight_decay : float
        The weight decay.
    num_epoch_full : int
        The number of epochs used in full training.
    num_epoch_incremental : int
        The number of epochs used in incremental training.
    grad_clip_norm: float
        The norm of gradient clipping.
    train_verbose: int
        The verbose frequency for training in batches.
    test_interval: int
        The testing interval in epochs.
    test_split: float
        The fraction of data for testing.
    frozen: bool
        Determine whether to re-train the model or not.
    optimizer: "torch.optim.adam.Adam"
        The optimizer.
    scheduler: "torch.optim.lr_scheduler.StepLR"
        The scheduler.
    """

    # NOTE: the class-level values below are defaults only; __init__ overwrites
    # each of them with the matching entry of the given TrainerConfig.
    state: State
    batch_size: int = 128
    learning_rate: float = 7e-4
    weight_decay: float = 1e-6
    num_epoch_full: int = 50
    num_epoch_incremental: int = 5
    grad_clip_norm: float = 0.5
    train_verbose: int = 1000
    test_interval: int = 1
    test_split: float = 0.2
    frozen: bool = False
    optimizer: "torch.optim.adam.Adam"  # type: ignore
    scheduler: "torch.optim.lr_scheduler.StepLR"  # type: ignore

    def __init__(
        self,
        train_config: Optional[TrainerConfig] = None,
        state: Optional[State] = None,
    ):
        train_config = train_config or TrainerConfig()
        state = state or State()
        # Copy every config entry onto the attribute of the same name.
        config = train_config.to_dict()
        for attr in config:
            setattr(self, attr, config[attr])
        self.state = state
        self.device = "cuda" if torch.cuda.device_count() else "cpu"
        # Created lazily by train_full() / train_incremental().
        self.optimizer, self.scheduler = None, None

    def train_step(
        self,
        data: Tuple["torch.Tensor", "torch.Tensor", "torch.Tensor"],
        batch: int = 0,
        train_loss: Optional[float] = None,
    ) -> float:
        """Helper function for training on a single batch.

        Parameters
        ----------
        data: Tuple["torch.Tensor", "torch.Tensor", "torch.Tensor"]
            A batch of data, should be a tuple of (segment_sizes, features, gt_results).
        batch: int = 0
            The current batch number.
        train_loss: Optional[float] = None
            The previous averaged training loss, None if it is the first batch.

        Returns
        -------
        train_loss: float
            The averaged training loss after the current batch.
        """
        segment_sizes, features, gt_results = (
            data[0].to(self.device),
            data[1].to(self.device),
            data[2].to(self.device),
        )
        self.optimizer.zero_grad()
        pred_results = self.state.model(segment_sizes, features)
        loss = lambda_rank_loss(pred_results, gt_results)
        loss.backward()
        # Clip gradients to stabilize training before the optimizer step.
        torch.nn.utils.clip_grad_norm_(self.state.model.parameters(), self.grad_clip_norm)
        self.optimizer.step()
        loss = loss.detach().cpu()
        # Exponential moving average of the loss (0.95 decay) for logging.
        train_loss = (
            train_loss * 0.95 + loss.item() * 0.05 if train_loss is not None else loss.item()
        )
        # Move tensors back to CPU so they do not pin GPU memory between batches.
        segment_sizes, features, gt_results, pred_results = (
            segment_sizes.detach().cpu(),
            features.detach().cpu(),
            gt_results.detach().cpu(),
            pred_results.detach().cpu(),
        )
        if batch % self.train_verbose == 0:
            logger.info("Batch: %d, train loss: %6f", batch, train_loss)
        return train_loss

    def predict_step(
        self,
        data: Tuple["torch.Tensor", "torch.Tensor", "torch.Tensor"],
    ):
        """Helper function for predicting (validating) on a single batch.

        Parameters
        ----------
        data: Tuple["torch.Tensor", "torch.Tensor", "torch.Tensor"]
            A batch of data, should be a tuple of (segment_sizes, features, gt_results).
            gt_results can be None if it is used for predicting.

        Returns
        -------
        pred_results: np.ndarray
            The predicted results for the current batch.
        test_loss_batch: float
            If used for validation, return the test loss for the current batch.
        test_scores_batch: List[float]
            If used for validation, return the topk scores for the current batch.
        """
        test_loss_batch, test_scores_batch = None, []
        segment_sizes, features = (
            data[0].to(self.device),
            data[1].to(self.device),
        )
        # Ground truth stays on CPU; it is only compared against, never trained on.
        gt_results = data[2]
        pred_results = self.state.model(segment_sizes, features)
        segment_sizes, features, pred_results = (
            segment_sizes.detach().cpu(),
            features.detach().cpu(),
            pred_results.detach().cpu(),
        )
        # Loss and top-k scores are only meaningful when labels are available.
        if gt_results is not None:
            test_loss_batch = lambda_rank_loss(pred_results, gt_results).item()
            for k in [1, 5, 10]:
                test_scores_batch.append(topk_score(pred_results, gt_results, k))
        return pred_results.numpy(), test_loss_batch, test_scores_batch

    def train_full(self):  # pylint: disable=too-many-locals
        """Training on the full dataset."""
        # split into training and testing set -- split by workload group so a
        # workload never appears in both sets
        keys = list(self.state.data.keys())
        test_keys = random.sample(keys, k=math.floor(len(keys) * self.test_split))
        train_data = OrderedDict()
        test_data = OrderedDict()
        for key in keys:
            if key in test_keys:
                test_data[key] = self.state.data[key]
            else:
                train_data[key] = self.state.data[key]
        train_features = list(
            itertools_chain.from_iterable([g.features for g in train_data.values()])
        )
        test_features = list(
            itertools_chain.from_iterable([g.features for g in test_data.values()])
        )
        # Normalize costs within each group: best cost maps to 1, higher is better.
        train_results = np.concatenate([g.min_cost / g.costs for g in train_data.values()])
        test_results = np.concatenate([g.min_cost / g.costs for g in test_data.values()])
        train_loader = SegmentDataLoader(
            train_features, train_results, batch_size=self.batch_size, shuffle=True
        )
        test_loader = SegmentDataLoader(
            test_features, test_results, batch_size=self.batch_size, shuffle=False
        )
        self.optimizer = torch.optim.Adam(
            self.state.model.parameters(), lr=self.learning_rate, weight_decay=self.weight_decay
        )
        self.scheduler = torch.optim.lr_scheduler.StepLR(
            self.optimizer,
            step_size=self.num_epoch_full // 10,
            gamma=0.8,
            verbose=True,
        )
        self.state.model = self.state.model.to(self.device)
        min_test_loss = 1e10
        logger.info("Training size: %d; Testing size: %d", len(train_loader), len(test_loader))
        # NOTE(review): only the .name of the NamedTemporaryFile is kept; the
        # handle is discarded, so this relies on POSIX semantics -- confirm
        # this is acceptable on the supported platforms.
        model_cache_path = tempfile.NamedTemporaryFile().name  # pylint: disable=consider-using-with
        for epoch in range(self.num_epoch_full):
            logger.info("Epoch: %d", epoch)
            # training
            self.state.model.train()
            train_loss = None
            for batch, data in enumerate(train_loader):
                train_loss = self.train_step(data, batch, train_loss)
            self.scheduler.step()
            # testing
            if epoch % self.test_interval == 0:
                self.state.model.eval()
                test_losses, test_scores = [], []
                for data in test_loader:
                    _, test_loss_batch, test_scores_batch = self.predict_step(data)
                    test_losses.append(test_loss_batch)
                    test_scores.append(test_scores_batch)
                # Drop the last (possibly partial) batch from the average when
                # there is more than one batch.
                test_loss = (
                    np.array(test_losses[:-1]).mean() if len(test_losses) > 1 else test_losses[0]
                )
                logger.info(
                    "Average test loss: %6f, top1 score: %5f, top5 score: %5f, top10 score: %5f",
                    test_loss,
                    np.array(test_scores)[:, 0].mean(),
                    np.array(test_scores)[:, 1].mean(),
                    np.array(test_scores)[:, 2].mean(),
                )
                # Checkpoint the best model seen so far (by test loss).
                if test_loss < min_test_loss:
                    min_test_loss = test_loss
                    torch.save(self.state.model.state_dict(), model_cache_path)
        # Restore the best checkpoint and mark all data as trained.
        self.state.model.to("cpu").load_state_dict(torch.load(model_cache_path))
        self.state.untrained_size = 0

    def train_incremental(
        self,
        features: List[np.ndarray],
        results: np.ndarray,
    ):
        """Training on incremental data.

        Parameters
        ----------
        features: List[np.ndarray]
            The extracted features.
        results: np.ndarray
            The measured results.
        """
        # Normalize: best (smallest) cost maps to 1, higher is better.
        results = np.min(results) / results
        loader = SegmentDataLoader(features, results, batch_size=self.batch_size, shuffle=True)
        self.optimizer = torch.optim.Adam(
            self.state.model.parameters(), lr=self.learning_rate, weight_decay=self.weight_decay
        )
        self.state.model = self.state.model.to(self.device)
        logger.info("Incremental training size: %d", len(loader))
        for epoch in range(self.num_epoch_incremental):
            logger.info("Epoch: %d", epoch)
            self.state.model.train()
            loss = None
            for batch, data in enumerate(loader):
                loss = self.train_step(data, batch, loss)
        self.state.model.to("cpu")
        self.state.untrained_size = max(0, self.state.untrained_size - len(loader))

    def predict_incremental(
        self,
        features: List[np.ndarray],
        results: Optional[np.ndarray] = None,
    ) -> np.ndarray:
        """Predicting (validating) on incremental data.

        Parameters
        ----------
        features: List[np.ndarray]
            The extracted features.
        results: Optional[np.ndarray]
            The measured results, can be None if used for predicting.

        Returns
        -------
        pred_results: np.ndarray
            The predicted results.
        """
        if results is not None:
            # Normalize to "higher is better" scores, as in training.
            results = np.min(results) / results
        loader = SegmentDataLoader(features, results, batch_size=self.batch_size, shuffle=False)
        self.state.model = self.state.model.to(self.device).eval()
        logger.info("Incremental testing size: %d", len(loader))
        pred_results, losses, scores = [], [], []
        for data in loader:
            pred_results_batch, losses_batch, scores_batch = self.predict_step(data)
            pred_results.append(pred_results_batch)
            losses.append(losses_batch)
            scores.append(scores_batch)
        pred_results = np.concatenate(pred_results)
        if results is not None:
            # Same convention as train_full: drop the last (partial) batch
            # from the averaged loss when multiple batches exist.
            losses = np.array(losses[:-1]).mean() if len(losses) > 1 else losses[0]
            logger.info(
                "Average test loss: %6f, top1 score: %5f, top5 score: %5f, top10 score: %5f",
                losses,
                np.array(scores)[:, 0].mean(),
                np.array(scores)[:, 1].mean(),
                np.array(scores)[:, 2].mean(),
            )
        return pred_results

    def update(
        self,
        features: List[np.ndarray],
        costs: np.ndarray,
        group_hash: str,
    ):
        """Update the dataset and re-train the model if not frozen.

        Parameters
        ----------
        features: List[np.ndarray]
            The extracted features.
        costs: np.ndarray
            The measured results.
        group_hash: str
            The hash of the group.
        """
        self.state.add_to_group(features, costs, group_hash)
        if not self.frozen:
            # Log validation metrics on the new data before re-training.
            self.predict_incremental(features, costs)
            # Retrain from scratch once more than 20% of the data is untrained;
            # otherwise a few cheap incremental epochs suffice.
            if self.state.untrained_size / self.state.data_size > 0.2:
                self.train_full()
            else:
                self.train_incremental(features, costs)
@derived_object
class MLPModel(PyCostModel):
    """Cost model built on top of the Segment Sum MLP.

    Parameters
    ----------
    trainer: SegmentSumMLPTrainer
        The trainer for the model, handling the training interface.
    """

    trainer: SegmentSumMLPTrainer

    def __init__(
        self,
        *,
        trainer: Optional[SegmentSumMLPTrainer] = None,
    ):
        super().__init__()
        self.trainer = SegmentSumMLPTrainer() if trainer is None else trainer

    def load(self, path: str) -> None:
        """Load the cost model, cached data or raw data from given file location.

        Parameters
        ----------
        path : str
            The file path.
        """
        self.trainer.state.load(path)

    def save(self, path: str) -> None:
        """Save the cost model and data to given file location.

        Parameters
        ----------
        path : str
            The file path.
        """
        self.trainer.state.save(path)

    def update(
        self,
        context: TuneContext,
        candidates: List[MeasureCandidate],
        results: List[RunnerResult],
    ) -> None:
        """Update the dataset, re-train the cost model if not frozen.

        Parameters
        ----------
        context : TuneContext,
            The tuning context.
        candidates : List[MeasureCandidate]
            The measure candidates.
        results : List[RunnerResult]
            The running results of the measure candidates.
        """
        extractor = self.trainer.state.extractor
        features, mean_costs = extract_features(context, candidates, results, extractor)
        self.trainer.update(features, mean_costs, shash2hex(context.mod))

    def predict(self, context: TuneContext, candidates: List[MeasureCandidate]) -> np.ndarray:
        """Predict given the measure candidates.

        Parameters
        ----------
        context : TuneContext,
            The tuning context.
        candidates : List[MeasureCandidate]
            The measure candidates.

        Return
        ------
        result : np.ndarray
            The predicted normalized score.
        """
        extractor = self.trainer.state.extractor
        features, _ = extract_features(context, candidates, None, extractor)
        return self.trainer.predict_incremental(features)
| 34,492 | 32.949803 | 100 | py |
tvm | tvm-main/python/tvm/meta_schedule/cost_model/xgb_model.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""XGBoost-based cost model"""
import os
import tempfile
from collections import OrderedDict
from itertools import chain as itertools_chain
from typing import TYPE_CHECKING, Any, Callable, Dict, List, NamedTuple, Optional, Tuple
import numpy as np # type: ignore
from ...contrib.tar import tar, untar
from ...runtime import NDArray
from ..cost_model import PyCostModel
from ..feature_extractor import FeatureExtractor
from ..logging import get_logger
from ..runner import RunnerResult
from ..search_strategy import MeasureCandidate
from ..utils import cpu_count, derived_object, shash2hex
from .metric import max_curve
if TYPE_CHECKING:
import xgboost as xgb # type: ignore
from xgboost.callback import TrainingCallback # type: ignore
from ..tune_context import TuneContext
logger = get_logger(__name__) # pylint: disable=invalid-name
def make_metric_sorter(focused_metric):
    """Build a sort key that places the focused metric before all others."""

    def sort_key(item):
        name = item[0]
        # "!" sorts before alphanumeric characters, pulling the focused
        # metric to the front while keeping every other metric alphabetical.
        return "!" + name if name == focused_metric else name

    return sort_key
class PackSum:
    """The pack-sum format

    Parameters
    ----------
    dmatrix : xgb.DMatrix
        A float64 array of shape [n, m],
        where `n` is the packed number of blocks,
        and `m` is the length of feature vector on each block
    ids : np.ndarray
        An int64 array of shape [n] containing nonnegative integers,
        indicating which the index of a sample that a block belongs to
    """

    dmatrix: "xgb.DMatrix"  # type: ignore # pylint: disable=invalid-name
    ids: np.ndarray

    def __init__(
        self,
        xs: List[np.ndarray],  # pylint: disable=invalid-name
        ys: Optional[np.ndarray],  # pylint: disable=invalid-name
    ):
        """Create PackSum format given a batch of samples

        Parameters
        ----------
        xs : List[np.ndarray]
            A batch of input samples
        ys : Optional[List[float]]
            A batch of labels. None means no labels available.
        """
        import xgboost as xgb  # type: ignore # pylint: disable=import-outside-toplevel

        block_counts = [x.shape[0] for x in xs]
        stacked = np.concatenate(xs, axis=0)
        # `ids[j]` is the index of the sample that packed block `j` came from.
        self.ids = np.concatenate(
            [[sample_idx] * count for sample_idx, count in enumerate(block_counts)], axis=0
        )
        if ys is None:
            self.dmatrix = xgb.DMatrix(data=stacked, label=None)
            return
        # Broadcast each sample label onto all of its blocks.
        labels = np.concatenate([[y] * count for y, count in zip(ys, block_counts)], axis=0)
        self.dmatrix = xgb.DMatrix(data=stacked, label=labels)
        self.dmatrix.set_weight(labels)

    def predict_with_score(self, pred: np.ndarray) -> np.ndarray:
        """Predict the labels given the block level prediction scores.

        Parameters
        ----------
        pred : np.ndarray
            The block level predictions

        Returns
        -------
        result : np.ndarray
            The predictions for each candidate.
        """
        # A candidate's score is the sum of its blocks' scores.
        return np.bincount(self.ids, weights=pred)

    def obj_square_error(self, ys_pred: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
        """Implement square error loss on pack-sum format as
        a custom objective function for xgboost.

        Parameters
        ----------
        ys_pred: np.ndarray
            The predictions

        Returns
        -------
        gradient: np.ndarray
            The gradient according to the xgboost format
        hessian: np.ndarray
            The hessian according to the xgboost format
        """
        # Aggregate block scores per candidate, then broadcast each
        # candidate's prediction back onto its blocks.
        per_block_pred = self.predict_with_score(ys_pred)[self.ids]  # pylint: disable=invalid-sequence-index
        labels = self.dmatrix.get_label()  # type: ignore # pylint: disable=invalid-name
        gradient = per_block_pred - labels
        hessian = np.ones_like(gradient)
        # Scale by the labels, mirroring the weights set in __init__.
        return gradient * labels, hessian * labels

    def rmse(self, ys_pred: np.ndarray) -> Tuple[str, float]:
        """Evaluate RMSE (rooted mean square error) in the pack-sum format

        Parameters
        ----------
        ys_pred: np.ndarray
            The raw predictions

        Returns
        -------
        name: str
            The name of the metric
        score: float
            The score of the metric
        """
        # Aggregate per candidate, then broadcast back to block granularity.
        per_block_pred = self.predict_with_score(ys_pred)[self.ids]  # pylint: disable=invalid-sequence-index
        labels = self.dmatrix.get_label()  # type: ignore # pylint: disable=invalid-name
        rmse = np.sqrt(np.square(per_block_pred - labels).mean())
        return "p-rmse", rmse

    def average_peak_score(
        self,
        ys_pred: np.ndarray,
        n: int,
    ) -> Tuple[str, float]:
        """Evaluate average-peak-score@N in the pack-sum format

        Parameters
        ----------
        ys_pred: np.ndarray
            The raw prediction
        n : int
            The N in average-peak-score@N

        Returns
        -------
        name: str
            The name of the metric
        score: float
            The score of the metric
        """
        # Recover each candidate's ground-truth score: the per-block labels of
        # a candidate are identical, so summing and dividing by the block
        # count gives the label back.
        labels = self.dmatrix.get_label()  # type: ignore # pylint: disable=invalid-name
        gt_scores = self.predict_with_score(labels)
        gt_scores = gt_scores / np.unique(self.ids, return_counts=True)[1]
        predictions = self.predict_with_score(ys_pred)
        # Take the N candidates the model ranks highest.
        top_n = np.argsort(predictions)[::-1][:n]
        curve = max_curve(gt_scores[top_n]) / np.max(gt_scores)
        return f"a-peak@{n}", np.mean(curve)
class XGBConfig(NamedTuple):
    """XGBoost model configuration

    Parameters
    ----------
    max_depth : int
        The maximum depth.
    gamma : float
        The gamma.
    min_child_weight : float
        The minimum child weight.
    eta : float
        The eta, learning rate.
    seed : int
        The random seed.
    nthread : Optional[int],
        The number of threads to use.
        Default is None, which means to use physical number of cores.
    """

    max_depth: int = 10
    gamma: float = 0.001
    min_child_weight: float = 0
    eta: float = 0.2
    seed: int = 43
    nthread: Optional[int] = None

    def to_dict(self):
        """Return the configuration as a plain dict of xgboost parameters."""
        # NamedTuple._asdict already yields every field keyed by name,
        # in declaration order.
        return dict(self._asdict())
class FeatureGroup:
    """A group of feature vectors and measured costs sharing one workload hash.

    Parameters
    ----------
    group_hash : str
        The hash of the group
    features : List[np.ndarray]
        The features
    costs : List[float]
        The costs
    min_cost : float
        The minimum cost
    """

    group_hash: str
    features: List[np.ndarray]
    costs: np.ndarray
    min_cost: float

    def __init__(
        self,
        group_hash: str,
        features: List[np.ndarray],
        costs: np.ndarray,
    ) -> None:
        self.group_hash = group_hash
        self.features = features
        self.costs = costs
        self.min_cost = np.min(costs)

    def append(
        self,
        features: List[np.ndarray],
        costs: np.ndarray,
    ) -> None:
        """Extend the group with more features and costs, refreshing the minimum."""
        merged_costs = np.append(self.costs, costs)
        self.features.extend(features)
        self.costs = merged_costs
        self.min_cost = np.min(merged_costs)
@derived_object
class XGBModel(PyCostModel):
    """XGBoost model

    Parameters
    ----------
    extractor : FeatureExtractor
        The feature extractor for the model.
    config : XGBConfig
        The XGBoost model config.
    num_warmup_samples : int
        The number of samples that are used for warmup, i.e., the first few samples are predicted
        with random results.
    early_stopping_rounds : int
        The number of rounds for early stopping.
    verbose_eval : int
        The verbose level when doing evaluation.
    average_peak_n : int
        The number to calculate average peak score.
    adaptive_training : bool
        Whether use adaptive training to reduce tuning time.
    """

    # feature extractor
    extractor: FeatureExtractor
    # xgboost model config
    config: XGBConfig
    # behavior of randomness
    num_warmup_samples: int
    # evaluation
    early_stopping_rounds: int
    verbose_eval: int
    average_peak_n: int
    # states
    data: Dict[str, FeatureGroup]
    data_size: int
    booster: Optional["xgb.Booster"]
    # adaptive training
    adaptive_training: bool
    last_train_size: int

    def __init__(
        self,
        *,
        # feature extractor
        extractor: FeatureExtractor.FeatureExtractorType = "per-store-feature",
        # xgboost model config
        config: XGBConfig = XGBConfig(),
        # random result before enough samples
        num_warmup_samples: int = 100,
        # evaluation
        early_stopping_rounds: int = 50,
        verbose_eval: int = 25,
        average_peak_n: int = 32,
        adaptive_training: bool = True,
        num_tuning_cores: Optional[int] = None,
    ):
        super().__init__()
        if not isinstance(extractor, FeatureExtractor):
            extractor = FeatureExtractor.create(extractor)
        # feature extractor
        self.extractor = extractor
        # model-related
        if config.nthread is None:
            # use physical core number
            if num_tuning_cores is None:
                config = config._replace(nthread=cpu_count(logical=False))
            else:
                config = config._replace(nthread=num_tuning_cores)
        self.config = config
        # behavior of randomness
        self.num_warmup_samples = num_warmup_samples
        # evaluation
        self.early_stopping_rounds = early_stopping_rounds
        self.verbose_eval = verbose_eval
        self.average_peak_n = average_peak_n
        # states
        self.data = OrderedDict()
        self.data_size = 0
        self.booster = None
        # adaptive training
        self.adaptive_training = adaptive_training
        self.last_train_size = 0

    def load(self, path: str) -> None:
        """Load the cost model from given file location.

        Parameters
        ----------
        path : str
            The file path.

        Note
        ----
        Since XGBoost model trains from scratch, each time this method loads the model together with
        previously cached feature vectors and results, so that the subsequent training process could
        use all the existing data being stored on disk.
        """
        import xgboost as xgb  # pylint: disable=import-outside-toplevel

        # BUGFIX: `booster` must be initialized before the conditional; the old
        # code left it unbound when model.bin was missing from the tarball
        # (e.g. a model saved before any training), raising NameError below.
        booster = None
        with tempfile.TemporaryDirectory() as tmp_dir:
            model_path = os.path.join(tmp_dir, "model.bin")
            data_path = os.path.join(tmp_dir, "data.npy")
            # Step 1. Untar
            untar(path, tmp_dir)
            # Step 2. Load data
            data = OrderedDict()
            data_size = 0
            for group_hash, features, costs in np.load(data_path, allow_pickle=True):
                data[group_hash] = FeatureGroup(
                    group_hash=group_hash,
                    features=list(features),
                    costs=costs,
                )
                data_size += len(costs)
            # Step 3. Load the model, if one was saved
            if os.path.exists(model_path):
                booster = xgb.Booster()
                booster.load_model(model_path)
        self.data = data
        self.data_size = data_size
        self.booster = booster

    def save(self, path: str) -> None:
        """Save the cost model to given file location.

        Parameters
        ----------
        path : str
            The file path.

        Note
        ----
        Since XGBoost model trains from scratch, each time this method saves the model together with
        previously cached feature vectors and results, so that the subsequent training process could
        use all the existing data being stored on disk.
        """
        with tempfile.TemporaryDirectory() as tmp_dir:
            model_path = os.path.join(tmp_dir, "model.bin")
            data_path = os.path.join(tmp_dir, "data.npy")
            # Step 1. Save the model (skip it if no training happened yet)
            booster = self.booster
            if booster is not None:
                booster.save_model(model_path)
            else:
                model_path = None
            # Step 2. Save data as an object array of (hash, features, costs)
            data = [
                (
                    g.group_hash,
                    g.features,
                    g.costs,
                )
                for g in self.data.values()
            ]
            np.save(
                file=data_path,
                arr=np.array(data, dtype=object),
            )
            # Step 3. Tar it
            tar(path, [x for x in [model_path, data_path] if x is not None])
        logger.info("Saved XGBModel to %s", path)

    def update(
        self,
        context: "TuneContext",
        candidates: List[MeasureCandidate],
        results: List[RunnerResult],
    ) -> None:
        """Update the cost model given running results.

        Parameters
        ----------
        context : TuneContext
            The tuning context.
        candidates : List[MeasureCandidate]
            The measure candidates.
        results : List[RunnerResult]
            The running results of the measure candidates.
        """
        assert len(candidates) == len(results)
        if len(candidates) == 0:
            return

        # Step 1. Get the feature group
        new_group_hash = shash2hex(context.mod)
        group = self.data.get(new_group_hash, None)

        # Step 2. Extract features
        def _feature(x: NDArray) -> np.ndarray:
            return x.numpy().astype("float32")

        def _mean_cost(x: RunnerResult) -> float:
            # Failed runs get a huge cost so they rank last.
            if not x.run_secs:
                return 1e10
            return float(np.median([float(s) for s in x.run_secs]))

        new_features = [_feature(x) for x in self.extractor.extract_from(context, candidates)]
        new_mean_costs = [_mean_cost(x) for x in results]
        # Filter instances with no features (costs first: the filter indexes
        # into the still-unfiltered feature list)
        new_mean_costs = [c for i, c in enumerate(new_mean_costs) if len(new_features[i]) != 0]
        new_mean_costs_np = np.array(new_mean_costs).astype("float32")
        new_features = [f for f in new_features if len(f) != 0]
        if not new_features:
            return
        # Step 3. Run validation on the incoming batch before absorbing it
        if group is not None and self.booster is not None:
            logger.debug(
                "XGB validation: %s",
                "\t".join(
                    f"{key}: {score:.6f}"
                    for key, score in self._validate(
                        xs=new_features,
                        ys=group.min_cost / new_mean_costs_np,
                    )
                ),
            )
        # Step 4. Add the features into the data points
        if group is None:
            group = FeatureGroup(
                group_hash=new_group_hash,
                features=new_features,
                costs=new_mean_costs_np,
            )
        else:
            group.append(new_features, new_mean_costs_np)
        self.data[new_group_hash] = group
        self.data_size += len(new_features)

        if (
            self.adaptive_training
            and self.data_size - self.last_train_size < self.last_train_size / 5
        ):
            # Set a training threshold related to `last_train_size` to reduce the training
            # overhead when there're too many results
            return
        self.last_train_size = self.data_size

        # Step 5. Re-train the model from scratch on all data, with costs
        # normalized per group so that the best candidate scores 1.0
        self._train(
            xs=list(itertools_chain.from_iterable([g.features for g in self.data.values()])),
            ys=np.concatenate(
                [g.min_cost / g.costs for g in self.data.values()],
                axis=0,
            ),
        )

    def predict(
        self,
        context: "TuneContext",
        candidates: List[MeasureCandidate],
    ) -> np.ndarray:
        """Predict the normalized score using the cost model.

        Parameters
        ----------
        context : TuneContext
            The tuning context.
        candidates : List[MeasureCandidate]
            The measure candidates.

        Return
        ------
        result : np.ndarray
            The predicted normalized score.
        """
        if self.data_size >= self.num_warmup_samples and self.booster is not None:
            ret = self._predict(
                xs=[
                    x.numpy().astype("float32")
                    for x in self.extractor.extract_from(
                        context,
                        candidates,
                    )
                ]
            )
        else:
            # Not enough data yet: return random scores to encourage exploration.
            ret = np.random.uniform(
                low=0,
                high=1,
                size=(len(candidates),),
            )
        return ret.astype("float64")

    def _train(  # type: ignore # pylint: disable=invalid-name
        self,
        xs: List[np.ndarray],
        ys: np.ndarray,
    ) -> None:
        """Train the booster from scratch on the given pack-sum samples."""
        import xgboost as xgb  # type: ignore # pylint: disable=import-outside-toplevel

        # Stored on `self` so the closures below see the live training data.
        self.d_train = PackSum(xs=xs, ys=ys)

        def obj(ys_pred: np.ndarray, d_train: "xgb.DMatrix"):  # type: ignore # pylint: disable = unused-argument
            return self.d_train.obj_square_error(ys_pred)

        def rmse(ys_pred: np.ndarray, d_train: "xgb.DMatrix"):  # type: ignore # pylint: disable = unused-argument
            return self.d_train.rmse(ys_pred)

        def avg_peak_score(ys_pred: np.ndarray, d_train: "xgb.DMatrix"):  # type: ignore # pylint: disable = unused-argument
            return self.d_train.average_peak_score(ys_pred, self.average_peak_n)

        self.booster = xgb.train(
            self.config.to_dict(),
            self.d_train.dmatrix,
            num_boost_round=10000,
            obj=obj,
            callbacks=[
                _get_custom_call_back(
                    early_stopping_rounds=self.early_stopping_rounds,
                    verbose_eval=self.verbose_eval,
                    fevals=[rmse, avg_peak_score],
                    evals=[(self.d_train.dmatrix, "tr")],
                    cvfolds=None,
                )
            ],
        )

        del self.d_train

    def _predict(  # type: ignore # pylint: disable=invalid-name
        self,
        xs: List[np.ndarray],
    ) -> np.ndarray:
        """Run the booster on pack-sum samples and aggregate per candidate."""
        d_test = PackSum(xs=xs, ys=None)
        pred = self.booster.predict(d_test.dmatrix)
        ret = d_test.predict_with_score(pred)
        return ret

    def _validate(  # type: ignore # pylint: disable=invalid-name
        self,
        xs: List[np.ndarray],
        ys: np.ndarray,
    ) -> List[Tuple[str, float]]:
        """Evaluate the score of inputs.

        Parameters
        ----------
        xs : List[np.ndarray]
            A batch of input samples
        ys : List[float]
            A batch of labels

        Returns
        -------
        scores: np.ndarray
            The predicted result for all inputs.
        """
        assert self.booster is not None

        d_valid = PackSum(xs=xs, ys=ys)

        def average_peak_score(ys_pred: np.ndarray):
            return d_valid.average_peak_score(ys_pred, n=self.average_peak_n)

        ys_pred = self.booster.predict(d_valid.dmatrix)
        eval_result: List[Tuple[str, float]] = [
            feval(ys_pred)
            for feval in (
                average_peak_score,
                d_valid.rmse,
            )
        ]
        # Put the focused metric (p-rmse) first for stable log output.
        eval_result.sort(key=make_metric_sorter("p-rmse"))
        return eval_result
def _get_custom_call_back(
    early_stopping_rounds: int,
    verbose_eval: int,
    fevals: List[Callable],
    evals: List[Tuple["xgb.DMatrix", str]],
    focused_metric: str = "tr-p-rmse",
    cvfolds: List["xgb.training.CVPack"] = None,
) -> "TrainingCallback":
    """Get a customized callback function for XGBoost. Work around xgboost import.

    Parameters
    ----------
    early_stopping_rounds : int
        Stop training when `focused_metric` has not improved for this many rounds.
    verbose_eval : int
        Log the evaluation result every `verbose_eval` iterations (0 disables logging).
    fevals : List[Callable]
        Custom evaluation functions, each returning a (name, score) pair.
    evals : List[Tuple["xgb.DMatrix", str]]
        The (dataset, name) pairs to evaluate on.
    focused_metric : str
        The metric monitored for early stopping (lower is better).
    cvfolds : List["xgb.training.CVPack"]
        Cross-validation folds; None when training on a single dataset.
    """
    def optional_xgboost_callback(cls):
        """Decorator for importing TrainingCallback from xgboost"""
        # pylint:disable = import-outside-toplevel
        try:
            from xgboost.callback import TrainingCallback  # type: ignore
        # pylint:enable = import-outside-toplevel
        except ImportError:
            # Older xgboost has no TrainingCallback; fall back to a stub base class.
            class TrainingCallback:  # type: ignore
                pass
        class OptXGBoostCustomCallback(cls, TrainingCallback):  # type: ignore
            pass
        return OptXGBoostCustomCallback
    @optional_xgboost_callback
    class XGBoostCustomCallback:
        """Custom callback class for xgboost to support multiple custom evaluation functions"""
        def __init__(
            self,
            early_stopping_rounds: int,
            verbose_eval: int,
            fevals: List[Callable],
            evals: List[Tuple["xgb.DMatrix", str]],
            focused_metric: str = "tr-p-rmse",
            cvfolds: List["xgb.training.CVPack"] = None,
        ):
            self.early_stopping_rounds = early_stopping_rounds
            self.verbose_eval = verbose_eval
            self.fevals = fevals
            self.evals = evals
            # Lazily initialized best-so-far bookkeeping; see `init`.
            self.state: Dict[str, Any] = {}
            self.focused_metric = focused_metric
            self.sort_key = make_metric_sorter(focused_metric=focused_metric)
            self.cvfolds = cvfolds
            if cvfolds is not None:
                self.aggregated_cv = None
        def __call__(self, env: "xgb.core.CallbackEnv"):
            # Compatibility with xgboost < 1.3
            return self.after_iteration(env.model, env.iteration, env.evaluation_result_list)
        def init(self, model: "xgb.Booster"):
            """Internal function for initialization"""
            booster: "xgb.Booster" = model
            self.state["best_iteration"] = 0
            self.state["best_score"] = float("inf")
            if booster is None:
                # Cross-validation mode: there is no single booster to restore from.
                assert self.cvfolds is not None
                return
            if booster.attr("best_score") is not None:
                # Resume best-so-far from attributes saved by a previous session.
                self.state["best_score"] = float(booster.attr("best_score"))
                self.state["best_iteration"] = int(booster.attr("best_iteration"))
                self.state["best_msg"] = booster.attr("best_msg")
            else:
                booster.set_attr(best_iteration=str(self.state["best_iteration"]))
                booster.set_attr(best_score=str(self.state["best_score"]))
        def after_iteration(
            self, model: "xgb.Booster", epoch: int, evals_log: Dict
        ):  # pylint: disable = unused-argument
            """Internal function for after_iteration"""
            # pylint:disable = import-outside-toplevel
            try:
                from xgboost.callback import _fmt_metric  # type: ignore
            except ImportError:
                # Compatibility with xgboost >= 1.6
                def _fmt_metric(value, show_stdv=True):
                    if len(value) == 2:
                        return f"{value[0]}:{value[1]:.5f}"
                    if len(value) == 3:
                        if show_stdv:
                            return f"{value[0]}:{value[1]:.5f}+{value[2]:.5f}"
                        return f"{value[0]}:{value[1]:.5f}"
                    raise ValueError("wrong metric value", value)
            import xgboost as xgb
            from xgboost import rabit  # type: ignore
            try:
                from xgboost.training import aggcv  # type: ignore
            except ImportError:
                from xgboost.callback import _aggcv as aggcv  # type: ignore
            # pylint:enable = import-outside-toplevel
            if not self.state:
                self.init(model)
            booster: xgb.Booster = model
            iteration: int = epoch
            cvfolds: List[xgb.training.CVPack] = self.cvfolds
            ##### Evaluation #####
            # `eval_result` is a list of (key, score)
            eval_result: List[Tuple[str, float]] = []
            if cvfolds is None:
                # Single-dataset mode: parse "name:value" tokens out of the
                # eval_set report string (first token is the iteration tag).
                eval_result = list(
                    itertools_chain.from_iterable(
                        [
                            (key, float(value))
                            for key, value in map(
                                lambda x: x.split(":"),
                                booster.eval_set(
                                    evals=self.evals,
                                    iteration=iteration,
                                    feval=feval,
                                ).split()[1:],
                            )
                        ]
                        for feval in self.fevals
                    )
                )
            else:
                # Cross-validation mode: aggregate per-fold results, dropping stddev.
                eval_result = list(
                    itertools_chain.from_iterable(
                        [
                            (key, score)
                            for key, score, _std in aggcv(
                                fold.eval(
                                    iteration=iteration,
                                    feval=feval,
                                )
                                for fold in cvfolds
                            )
                        ]
                        for feval in self.fevals
                    )
                )
            eval_result = list(eval_result)
            eval_result.sort(key=self.sort_key)
            ##### Print eval result #####
            if self.verbose_eval and iteration % self.verbose_eval == 0:
                info = []
                for key, score in eval_result:
                    if "null" not in key:
                        info.append(f"{key}: {score:.6f}")
                logger.debug("XGB iter %3d: %s", iteration, "\t".join(info))
            ##### Choose score and do early stopping #####
            score = None
            for key, _score in eval_result:
                if key == self.focused_metric:
                    score = _score
                    break
            assert score is not None
            best_score = self.state["best_score"]
            best_iteration = self.state["best_iteration"]
            # Lower is better for the focused metric.
            if score < best_score:
                tab = "\t"  # to work with f-string
                msg = f"[{epoch}] {tab.join([_fmt_metric(x) for x in eval_result])}"
                self.state["best_msg"] = msg
                self.state["best_score"] = score
                self.state["best_iteration"] = epoch
                # save the property to attributes, so they will occur in checkpoint.
                if model is not None:
                    model.set_attr(
                        best_score=str(self.state["best_score"]),
                        best_iteration=str(self.state["best_iteration"]),
                        best_msg=self.state["best_msg"],
                    )
            elif epoch - best_iteration >= self.early_stopping_rounds:
                best_msg = self.state["best_msg"]
                if self.verbose_eval and rabit.get_rank() == 0:
                    logger.debug("XGB stopped. Best iteration: %s ", best_msg)
                # instead of raising EarlyStopException, returning True to end the training
                return True
            # False to indicate training should not stop.
            return False
    return XGBoostCustomCallback(
        early_stopping_rounds=early_stopping_rounds,
        verbose_eval=verbose_eval,
        fevals=fevals,
        evals=evals,
        focused_metric=focused_metric,
        cvfolds=cvfolds,
    )
| 28,769 | 33.047337 | 124 | py |
tvm | tvm-main/python/tvm/auto_scheduler/cost_model/xgb_model.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Cost model based on xgboost"""
import multiprocessing
import logging
from typing import Dict
from collections import defaultdict
import numpy as np
from tvm.autotvm.tuner.metric import max_curve
from .cost_model import PythonBasedModel
from ..feature import get_per_store_features_from_measure_pairs, get_per_store_features_from_states
from ..measure_record import RecordReader
try:
from xgboost.callback import TrainingCallback # type: ignore
except ImportError:
class TrainingCallback: # type: ignore
pass
xgb = None
logger = logging.getLogger("auto_scheduler")
class XGBDMatrixContext:
    """A process-wide side table attaching extra attributes to xgb.DMatrix objects.

    DMatrix instances cannot carry arbitrary Python attributes, so the extra
    data is keyed on the address of the underlying C handle instead.
    """

    def __init__(self):
        # attribute name -> {DMatrix handle address -> value}
        self.context_dict = defaultdict(dict)

    def get(self, key, matrix, default=None):
        """
        Get an attribute of a xgb.DMatrix

        Parameters
        ----------
        key: str
            The name of the attribute
        matrix: xgb.DMatrix
            The matrix
        default: Optional[Any]
            The default value if the item does not exist
        """
        per_key = self.context_dict[key]
        return per_key.get(matrix.handle.value, default)

    def set(self, key, matrix, value):
        """
        Set an attribute for a xgb.DMatrix

        Parameters
        ----------
        key: str
            The name of the attribute
        matrix: xgb.DMatrix
            The matrix
        value: Optional[Any]
            The new value
        """
        handle = matrix.handle.value
        self.context_dict[key][handle] = value
# Module-level singleton shared by the pack-sum helpers below to attach
# pack_ids / group_sizes metadata to DMatrix objects.
dmatrix_context = XGBDMatrixContext()
class XGBModel(PythonBasedModel):
    """Train a XGBoost model to predict the normalized throughputs of programs.
    Let the normalized throughput be the score of a program (higher is better). We predict
    the (approximate) score of a program = the sum of the scores of all stages in this program.
    i.e. score(P) = score_s0 + score_s1 + ... + score_sn,
    where score_si is the score of Stage i in Program P.
    We extract feature for each stage and let the xgboost predict the score for each stage.
    We then sum up the predictions as the score of the whole program.
    We use RMSE as the loss function.  i.e. loss(P, y) = 1/2 * (score(P) - y)^2,
    where P is the program and y is the normalized throughput according to
    the ground truth (measurement).
    XGBoost does not support this loss function because `score(P)` is a sum of the prediction
    of several samples, so we implemented a custom loss function and call it pack-sum-rmse.
    It is called "pack-sum" because we combine several samples into a "pack" and sum up
    their predictions.
    Parameters
    ----------
    verbose_eval: int = 25
        Print training log every `verbose_eval` iterations.
    num_warmup_sample: int = 100
        The minimum number of samples to start to use the trained model.
        If the number of samples is less than this number, the model outputs random predictions.
    seed: Optional[int]
        The random seed
    model_file: Optional[str]
        If is not None, save model to this file after every update.
    adaptive_training: bool = False
        Whether to use adaptive training, which reduces the training frequency when there are
        too many logs.
    """
    def __init__(
        self,
        verbose_eval=25,
        num_warmup_sample=100,
        seed=None,
        model_file=None,
        adaptive_training=False,
    ):
        global xgb
        try:
            if xgb is None:
                xgb = __import__("xgboost")
        except ImportError:
            # add "from None" to silence
            # "During handling of the above exception, another exception occurred"
            raise ImportError(
                "XGBoost is required for XGBModel. "
                "Please install its python package first. "
                "Help: (https://xgboost.readthedocs.io/en/latest/) "
            ) from None
        self.xgb_params = {
            "max_depth": 10,
            "gamma": 0.001,
            "min_child_weight": 0,
            "eta": 0.2,
            # todo(merrymercy): automatically decrease learning rate when the loss is too large
            "n_gpus": 0,
            "nthread": multiprocessing.cpu_count() // 2,
            "verbosity": 0,
            "seed": seed or 43,
            "disable_default_eval_metric": 1,
        }
        self.bst = None
        # Number of top candidates used by the average-peak-score metric.
        self.plan_size = 32
        self.num_warmup_sample = num_warmup_sample
        self.verbose_eval = verbose_eval
        self.model_file = model_file
        self.adaptive_training = adaptive_training
        super().__init__()
        # cache measurement input/result pairs and extracted features
        self.inputs = []
        self.results = []
        self.last_train_length = 0
        self.inputs_feature_cache = []
    def update(self, inputs, results):
        """Update the cost model according to new measurement results (training data).
        XGBoost does not support incremental training, so we re-train a new model every time.
        Parameters
        ----------
        inputs : List[MeasureInput]
            The measurement inputs
        results : List[MeasureResult]
            The measurement results
        """
        if len(inputs) <= 0:
            return
        assert len(inputs) == len(results)
        self.inputs.extend(inputs)
        self.results.extend(results)
        if (
            self.adaptive_training
            and len(self.inputs) - self.last_train_length < self.last_train_length / 5
        ):
            # Set a training threshold related to `last_train_length` to reduce the training
            # overhead when there're too many logs
            return
        self.last_train_length = len(self.inputs)
        # extract feature
        n_cached = len(self.inputs_feature_cache)
        features, normalized_throughputs, task_ids = get_per_store_features_from_measure_pairs(
            self.inputs, self.results, skip_first_n_feature_extraction=n_cached
        )
        if n_cached > 0:
            # Reuse the cached features for the inputs extracted previously.
            features = list(features)
            features[:n_cached] = self.inputs_feature_cache
            features = np.array(features, dtype=object)
        self.inputs_feature_cache = features
        dtrain = pack_sum_xgbmatrix(
            features, normalized_throughputs, task_ids, normalized_throughputs
        )
        # train xgb model
        self.bst = xgb.train(
            self.xgb_params,
            dtrain,
            num_boost_round=10000,
            obj=pack_sum_square_error,
            callbacks=[
                # Early stopping on "tr-p-rmse" bounds the effective round count.
                CustomCallback(
                    stopping_rounds=50,
                    metric="tr-p-rmse",
                    fevals=[pack_sum_rmse, pack_sum_average_peak_score(self.plan_size)],
                    evals=[(dtrain, "tr")],
                    maximize=False,
                    verbose_eval=self.verbose_eval,
                )
            ],
        )
        # Update the model file if it has been set
        if self.model_file:
            self.save(self.model_file)
    def predict(self, task, states):
        """Predict the scores of states
        Parameters
        ----------
        task : SearchTask
            The search task of states
        states : List[State]
            The input states
        Returns
        -------
        scores: List[float]
            The predicted scores for all states
        """
        features = get_per_store_features_from_states(states, task)
        if self.bst is not None and len(self.inputs) > self.num_warmup_sample:
            dtest, pack_ids = feature_to_pack_sum_xgbmatrix(features)
            raw_preds = self.bst.predict(dtest)
            ret = predict_throughput_pack_sum(raw_preds, pack_ids)
        else:
            # Not enough training data yet: output random predictions.
            ret = np.random.uniform(0, 1, (len(states),))
        # Predict -inf for invalid states that failed to be lowered.
        for idx, feature in enumerate(features):
            if feature.min() == feature.max() == 0:
                ret[idx] = float("-inf")
        return ret
    def predict_stages(self, task, states):
        """Predict the scores of all stages in states. This is the breakdown version of `predict`.
        Parameters
        ----------
        task : SearchTask
            The search task of states
        states : List[State]
            The input states
        Returns
        -------
        scores: List[float]
            The predicted scores for all stages in all states in the packed format
        Note
        ----
        For faster data copy between c++ and python, the python part returns scores in a
        single flatten array using a packed format. The c++ part then unpacks the flatten array.
        The packed format is:
        {
            float scores[N];                    // scores[i] is the score for states[i].
            int   n_stage_0;                    // the number of stages in states[0]
            float stage_scores_0[n_stage_0];    // the scores for all stages in states[0]
            int   n_stage_1;                    // the number of stages in states[1]
            float stage_scores_1[n_stage_1];    // the scores for all stages in states[1]
            ...
            int   n_stage_i;                    // the number of stages in states[i]
            float stage_scores_i[n_stage_i];    // the scores for all stages in states[i]
            ...                                 // until i == N - 1
        }
        To implement this format, we also store int as float, so we can store all numbers
        into a single float array.
        """
        features = get_per_store_features_from_states(states, task)
        if self.bst is not None and len(self.inputs) > self.num_warmup_sample:
            dtest, pack_ids = feature_to_pack_sum_xgbmatrix(features)
            raw_preds = self.bst.predict(dtest)
            breakdown = predict_throughput_pack_sum(raw_preds, pack_ids)
            # Group the per-stage predictions back by their owning state.
            stage_scores = [[] for _ in range(len(states))]
            for pred, pack_id in zip(raw_preds, pack_ids):
                stage_scores[pack_id].append(pred)
            for idx, stage_score in enumerate(stage_scores):
                # Append the stage count followed by the per-stage scores.
                breakdown = np.append(breakdown, len(stage_score))
                breakdown = np.concatenate((breakdown, np.array(stage_score)))
        else:
            # Not enough training data yet: random state scores, zero stage counts.
            breakdown = np.concatenate(
                (np.random.uniform(0, 1, (len(states),)), np.zeros(len(states)))
            )
        # Predict -inf for invalid states that failed to be lowered.
        for idx, feature in enumerate(features):
            if feature.min() == feature.max() == 0:
                breakdown[idx] = float("-inf")
        return breakdown
    def update_from_file(self, file_name, n_lines=None):
        """Load measure records from a log file to update the cost model.
        This function can be used to pre-train the cost model with history log files.
        Parameters
        ----------
        file_name: str
            The filename
        n_lines: Optional[int]
            Only load first n lines of the log file
        """
        inputs, results = RecordReader(file_name).read_lines(n_lines)
        logger.info("XGBModel: Loaded %s measurement records from %s", len(inputs), file_name)
        self.update(inputs, results)
    def save(self, file_name: str):
        """Save the model to a file
        Parameters
        ----------
        file_name: str
            The filename
        """
        self.bst.save_model(file_name)
    def load(self, file_name: str):
        """Load the model from a file
        Parameters
        ----------
        file_name: str
            The filename
        """
        if self.bst is None:
            self.bst = xgb.Booster(self.xgb_params)
        self.bst.load_model(file_name)
        # Disable the random warm-up fallback: the loaded model is used directly.
        self.num_warmup_sample = -1
def feature_to_pack_sum_xgbmatrix(xs):
    """Flatten per-stage feature matrices into one DMatrix in pack-sum format.

    Parameters
    ----------
    xs: np.ndarray
        One feature matrix (rows = stages) per program.

    Returns
    -------
    dmatrix: xgb.DMatrix
        The flattened DMatrix with one row per stage.
    pack_ids: List[int]
        For each row, the index of the program (pack) it belongs to.
    """
    rows = []
    pack_ids = []
    for pack_id, stage_features in enumerate(xs):
        # Every stage row of this program shares the same pack id.
        for stage_row in stage_features:
            rows.append(stage_row)
            pack_ids.append(pack_id)
    return xgb.DMatrix(np.array(rows)), pack_ids
def pack_sum_xgbmatrix(xs, ys, gids=None, weights=None):
    """Convert (feature, label) pairs into a xgb matrix with pack-sum format.

    Parameters
    ----------
    xs: np.ndarray
        The feature vectors, one per-stage matrix per program.
    ys: np.ndarray
        The normalized throughput labels.
    gids: Optional[List[int]]
        Group id (task id) per program.
    weights: Optional[np.ndarray]
        The weight of samples.

    Returns
    -------
    dmatrix: xgb.DMatrix
        The DMatrix with pack-sum information attached via `dmatrix_context`.
    """
    if gids is not None:
        # Sort samples so members of the same group sit next to each other.
        order = gids.argsort()
        xs, ys = xs[order], ys[order]
        group_sizes = np.bincount(gids)
        if weights is not None:
            weights = weights[order]
    else:
        # Without group ids everything belongs to a single group.
        group_sizes = [len(xs)]
    flat_features = []
    flat_labels = []
    flat_weights = []
    pack_ids = []
    has_weights = weights is not None
    if has_weights:
        triples = zip(xs, ys, weights)
    else:
        triples = ((x, y, None) for x, y in zip(xs, ys))
    for pack_id, (x, y, w) in enumerate(triples):
        # Duplicate the program-level label/weight onto every stage row.
        for row in x:
            flat_features.append(row)
            flat_labels.append(y)
            if has_weights:
                flat_weights.append(w)
            pack_ids.append(pack_id)
    matrix = xgb.DMatrix(np.array(flat_features), flat_labels)
    if has_weights:
        matrix.set_weight(flat_weights)
    dmatrix_context.set("pack_ids", matrix, np.array(pack_ids))
    dmatrix_context.set("group_sizes", matrix, group_sizes)
    return matrix
def predict_throughput_pack_sum(raw_preds, pack_ids):
    """Sum per-stage predictions into per-program throughput scores.

    Parameters
    ----------
    raw_preds: np.ndarray
        The raw per-stage predictions.
    pack_ids: List[int]
        The pack id of each prediction row.

    Returns
    -------
    throughputs: np.ndarray
        One summed score per pack (program).
    """
    return np.bincount(pack_ids, weights=raw_preds)
def pack_sum_square_error(preds, dtrain):
    """Custom xgboost objective: square error on the pack-sum predictions.

    Parameters
    ----------
    preds: np.ndarray
        The per-row predictions.
    dtrain: xgb.DMatrix
        The training set (labels are duplicated per row of a pack).

    Returns
    -------
    gradient: np.ndarray
    hessian: np.ndarray
        gradient and hessian according to the xgboost format
    """
    pack_ids = dmatrix_context.get("pack_ids", dtrain)
    sample_weight = dtrain.get_weight()
    # Broadcast each pack's summed prediction back onto its rows.
    pack_pred = np.bincount(pack_ids, weights=preds)[pack_ids]
    gradient = pack_pred - dtrain.get_label()
    hessian = np.ones_like(gradient)
    if len(sample_weight) == 0:
        return gradient, hessian
    return gradient * sample_weight, hessian * sample_weight
def pack_sum_rmse(raw_preds, labels):
    """Custom xgboost metric: RMSE of the pack-sum predictions.

    Parameters
    ----------
    raw_preds: np.ndarray
        The raw per-row predictions.
    labels: xgb.DMatrix
        The ground-truth label matrix (labels duplicated per row of a pack).

    Returns
    -------
    name: str
    score: float
        The name and score of this metric
    """
    pack_ids = dmatrix_context.get("pack_ids", labels)
    # Summed pack prediction broadcast back onto every row of the pack.
    per_row_pred = predict_throughput_pack_sum(raw_preds, pack_ids)[pack_ids]
    error = per_row_pred - labels.get_label()
    return "p-rmse", np.sqrt(np.mean(np.square(error)))
def pack_sum_average_peak_score(N):
    """Return the evaluation function for average-peak-score@N
    Parameters
    ----------
    N: int
        The "N" in "average-peak-score@N"
    Returns
    -------
    The evaluation function
    """
    def feval(preds, labels):
        """Evaluate average-peak-score@N in the pack-sum format
        Parameters
        ----------
        preds: np.ndarray
            The raw prediction
        labels: xgb.DMatrix
            The ground-truth label matrix
        Returns
        -------
        name: str
        score: float
            The name and score of this metric
        """
        group_sizes = dmatrix_context.get("group_sizes", labels, [len(preds)])
        pack_ids = dmatrix_context.get("pack_ids", labels)
        # Sum the per-row predictions back into one score per program.
        preds = predict_throughput_pack_sum(preds, pack_ids)
        # Collapse the duplicated per-row labels back into one label per program
        # (sum over each pack divided by the pack's row count).
        labels = (
            np.bincount(pack_ids, weights=labels.get_label())
            / np.unique(pack_ids, return_counts=True)[1]
        )
        scores = []
        offset = 0
        for size in group_sizes:
            preds_group = preds[offset : offset + size]
            labels_group = labels[offset : offset + size]
            offset += size
            # Pick the N programs the model ranks highest and measure how close
            # their best true label gets to the group's true optimum.
            trials = np.argsort(preds_group)[::-1][:N]
            trial_scores = labels_group[trials]
            curve = max_curve(trial_scores) / np.max(labels_group)
            scores.append(np.mean(curve))
        return f"a-peak@{N}", np.mean(scores)
    return feval
class XGBoostCallback(TrainingCallback):
    """Base class for XGBoost callbacks."""

    def __call__(self, env: "xgb.core.CallbackEnv"):
        # Old-style (xgboost < 1.3) callbacks are invoked with a CallbackEnv;
        # forward its fields to the new-style hook so subclasses implement
        # only one code path.
        booster, epoch, eval_results = env.model, env.iteration, env.evaluation_result_list
        return self.after_iteration(booster, epoch, eval_results)

    def after_iteration(self, model: "xgb.Booster", epoch: int, evals_log: Dict):
        """Per-iteration hook; subclasses must override."""
        raise NotImplementedError
class CustomCallback(XGBoostCallback):
    """
    Callback function for xgboost.
    Support custom evaluation function and early-stopping.
    """
    def __init__(
        self,
        stopping_rounds,
        metric,
        fevals,
        evals=(),
        log_file=None,
        maximize=False,
        verbose_eval=True,
        skip_every=2,
    ):
        """Init function
        Parameters
        ----------
        stopping_rounds: int
            Stop training when `metric` has not improved for this many epochs.
        metric: str
            The fully-qualified metric name to monitor (e.g. "tr-p-rmse").
        fevals: List[Callable]
            Custom evaluation functions, each returning a (name, score) pair.
        evals: Sequence[Tuple[xgb.DMatrix, str]]
            The (dataset, name) pairs to evaluate on.
        log_file: Optional[str]
            If set, also append the evaluation log lines to this file.
        maximize: bool
            Whether a larger `metric` value is better.
        verbose_eval: Union[bool, int]
            When an int, log the evaluation results every `verbose_eval` epochs.
        skip_every: int
            Skip evaluation on epochs where `epoch % skip_every == 1`.
        """
        self.stopping_rounds = stopping_rounds
        self.metric = metric
        # e.g. "tr-p-rmse" -> "p" (used to sort the metric family first)
        self.metric_shortname = metric.split("-")[1]
        self.fevals = fevals
        self.evals = evals
        self.log_file = log_file
        self.maximize = maximize
        self.verbose_eval = verbose_eval
        self.skip_every = skip_every
        # Lazily initialized best-so-far bookkeeping; see after_iteration.
        self.state = {}
    def after_iteration(self, model: "xgb.Booster", epoch: int, evals_log: Dict):
        """Run after each iteration.  Return True when training should stop."""
        # pylint:disable = import-outside-toplevel
        try:
            from xgboost.callback import _fmt_metric  # type: ignore
        except ImportError:
            # Compatibility with xgboost >= 1.6
            def _fmt_metric(value, show_stdv=True):
                """format metric string"""
                if len(value) == 2:
                    return f"{value[0]}:{value[1]:.5f}"
                if len(value) == 3:
                    if show_stdv:
                        return f"{value[0]}:{value[1]:.5f}+{value[2]:.5f}"
                    return f"{value[0]}:{value[1]:.5f}"
                raise ValueError("wrong metric value", value)
        ##### init state #####
        if not self.state:
            self.state["maximize_score"] = self.maximize
            self.state["best_iteration"] = 0
            if self.maximize:
                self.state["best_score"] = float("-inf")
            else:
                self.state["best_score"] = float("inf")
            assert model is not None
            if model.attr("best_score") is not None:
                # Resume best-so-far from attributes saved by a previous session.
                self.state["best_score"] = float(model.attr("best_score"))
                self.state["best_iteration"] = int(model.attr("best_iteration"))
                self.state["best_msg"] = model.attr("best_msg")
            else:
                model.set_attr(best_iteration=str(self.state["best_iteration"]))
                model.set_attr(best_score=str(self.state["best_score"]))
        res_dict = {}
        # Periodically skip evaluation to reduce its overhead.
        if epoch % self.skip_every == 1:
            return False
        ##### evaluation #####
        # Parse "name:value" tokens from each eval_set report (first token is
        # the iteration tag).
        for feval in self.fevals:
            bst_eval = model.eval_set(self.evals, epoch, feval)
            res = [x.split(":") for x in bst_eval.split()]
            for kv in res[1:]:
                res_dict[kv[0]] = [float(kv[1])]
        eval_res = []
        keys = list(res_dict.keys())
        # Order keys so entries of the monitored metric family come first.
        keys.sort(key=lambda x: x if self.metric_shortname not in x else "a" + x)
        for key in keys:
            v = res_dict[key]
            eval_res.append([key] + v)
        ##### print eval result #####
        if (
            not isinstance(self.verbose_eval, bool)
            and self.verbose_eval
            and epoch % self.verbose_eval == 0
        ):
            infos = [f"XGB iter: {epoch:3d}"]
            for item in eval_res:
                if "null" in item[0]:
                    continue
                infos.append(f"{item[0]}: {item[1]:.6f}")
            logger.debug("\t".join(infos))
            if self.log_file:
                with open(self.log_file, "a") as fout:
                    fout.write("\t".join(infos) + "\n")
        ##### choose score and do early stopping #####
        score = None
        for item in eval_res:
            if item[0] == self.metric:
                score = item[1]
                break
        assert score is not None
        best_score = self.state["best_score"]
        best_iteration = self.state["best_iteration"]
        maximize_score = self.state["maximize_score"]
        if (maximize_score and score > best_score) or (not maximize_score and score < best_score):
            msg = f"[{epoch}] " + "\t".join([_fmt_metric(x) for x in eval_res])
            self.state["best_msg"] = msg
            self.state["best_score"] = score
            self.state["best_iteration"] = epoch
            # save the property to attributes, so they will occur in checkpoint.
            if model is not None:
                model.set_attr(
                    best_score=str(self.state["best_score"]),
                    best_iteration=str(self.state["best_iteration"]),
                    best_msg=self.state["best_msg"],
                )
        elif epoch - best_iteration >= self.stopping_rounds:
            best_msg = self.state["best_msg"]
            if self.verbose_eval:
                logger.debug("XGB stopped. Best iteration: %s ", best_msg)
            # Returning True tells xgboost to end training.
            return True
        return False
| 23,300 | 33.065789 | 99 | py |
tvm | tvm-main/python/tvm/contrib/mxnet.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""MXNet bridge wrap Function MXNet's async function."""
from __future__ import absolute_import as _abs
import tvm._ffi.registry
import tvm.runtime._ffi_api
from tvm.runtime import Module
# pylint: disable=invalid-name
_wrap_async = None
def to_mxnet_func(func, const_loc=None):
    """Wrap a TVM function as MXNet function

    MXNet function runs asynchrously via its engine.

    Parameters
    ----------
    func : Function
        A TVM function that can take positional arguments

    const_loc : list of int
        List of integers indicating the argument position
        of read only NDArray argument.
        The NDArray argument location that are not annotated
        will be viewed as mutable arrays in MXNet's engine.

    Returns
    -------
    async_func : Function
        A function that can take MXNet NDArray as argument
        in places that used to expect TVM NDArray.
        Run asynchrously in MXNet's async engine.
    """
    # only import mxnet when wrap get called.
    # pylint: disable=import-self, import-outside-toplevel
    import mxnet

    if isinstance(func, Module):
        func = func.entry_func

    def _load_bridge():
        """Fetch the WrapAsyncCall extension function from libmxnet."""
        if not mxnet.base._LIB.MXTVMBridge:
            raise RuntimeError(
                "MXTVMBridge not exist in mxnet package," " please update to latest version"
            )
        ext_funcs = tvm._ffi.registry.extract_ext_funcs(mxnet.base._LIB.MXTVMBridge)
        wrap = ext_funcs["WrapAsyncCall"]
        wrap.is_global = True
        return wrap

    global _wrap_async
    if _wrap_async is None:
        # First call: fetch the bridge function and register NDArray as a
        # TVM extension type.
        _wrap_async = _load_bridge()
        tvm._ffi.registry.register_extension(mxnet.nd.NDArray)

    locs = const_loc if const_loc else []
    return _wrap_async(func, tvm.runtime._ffi_api.TVMSetStream, len(locs), *locs)
| 2,693 | 33.101266 | 92 | py |
tvm | tvm-main/python/tvm/contrib/dlpack.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Wrapping functions to bridge frameworks with DLPack support to TVM"""
from tvm.runtime import ndarray
def convert_func(tvm_func, tensor_type, to_dlpack_func):
    """Convert a tvm function into one that accepts a tensor from another
    framework, provided the other framework supports DLPACK

    Parameters
    ----------
    tvm_func: Function
        Built tvm function operating on arrays

    tensor_type: Type
        Type of the tensors of the target framework

    to_dlpack_func: Function
        Function to convert the source tensors to DLPACK
    """
    assert callable(tvm_func)

    def _wrapper(*args):
        # Translate every framework tensor through DLPack; pass everything
        # else through untouched.
        converted = []
        for arg in args:
            if isinstance(arg, tensor_type):
                arg = ndarray.from_dlpack(to_dlpack_func(arg))
            converted.append(arg)
        return tvm_func(*converted)

    return _wrapper
def to_pytorch_func(tvm_func):
    """Convert a tvm function into one that accepts PyTorch tensors

    Parameters
    ----------
    tvm_func: Function
        Built tvm function operating on arrays

    Returns
    -------
    wrapped_func: Function
        Wrapped tvm function that operates on PyTorch tensors
    """
    # pylint: disable=import-outside-toplevel
    import torch
    import torch.utils.dlpack

    tensor_cls = torch.Tensor
    exporter = torch.utils.dlpack.to_dlpack
    return convert_func(tvm_func, tensor_cls, exporter)
| 2,115 | 31.060606 | 93 | py |
tvm | tvm-main/python/tvm/contrib/torch/optimize_torch.py | # pylint: disable=inconsistent-return-statements
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-module-docstring
# pylint: disable=missing-class-docstring
# pylint: disable=missing-function-docstring
"""
optimize_torch: a function similar to `torch.jit.trace`,
which is used to optimize the `torch.nn.module` by TVM metaSchedule,
and returns a custom TorchScript operator
"""
import base64
import contextlib
import tempfile
from typing import Optional, Tuple, Union
import torch
import torch.utils.dlpack
import tvm
from tvm import meta_schedule as ms
from tvm import relay
from tvm._ffi import get_global_func, register_func
from tvm.target import Target
class GraphExecutorFactoryWrapper(torch.nn.Module):
    """Thin nn.Module adapter around a TVM graph-executor factory module."""

    def __init__(self, module: tvm.runtime.Module):
        super().__init__()
        # The underlying TVM runtime module that performs the real computation.
        self.inner_module = module

    def forward(self, *torch_inputs: Tuple[torch.Tensor]):
        """Run the wrapped TVM module, unwrapping singleton outputs."""
        outputs = self.inner_module.forward(torch_inputs)
        return outputs[0] if len(outputs) == 1 else outputs
@register_func("script_torch.save_to_base64")
def save_to_base64(obj) -> bytes:
    """Serialize a runtime module to base64 via a temporary shared library."""
    with tempfile.NamedTemporaryFile(suffix=".so") as shared_lib:
        obj.export_library(shared_lib.name)
        with open(shared_lib.name, "rb") as reader:
            blob = reader.read()
    return base64.b64encode(blob)
def optimize_torch(
    func,
    example_inputs,
    *,
    max_trials_global: int,
    work_dir=None,
    target: Union[str, Target] = "cpu",
    max_trials_per_task: Optional[int] = None,
    num_trials_per_iter: int = 64,
    builder: ms.Builder.BuilderType = "local",
    runner: ms.Runner.RunnerType = "local",
    database: ms.Database.DatabaseType = "json",
    cost_model: ms.CostModel.CostModelType = "xgb",
    measure_callbacks: ms.MeasureCallback.CallbackListType = "default",
    task_scheduler: ms.TaskScheduler.TaskSchedulerType = "gradient",
    space: ms.SpaceGenerator.SpaceGeneratorType = "post-order-apply",
    strategy: ms.SearchStrategy.SearchStrategyType = "evolutionary",
    seed: Optional[int] = None,
):
    """Load PyTorch model that could be traced by TorchScript, then optimize it via MetaSchedule.

    Parameters
    ----------
    func : callable or torch.nn.Module
        A Python function or nn.Module that could run by TorchScript's trace.
        (ie: torch.jit.trace(model, input))
    example_inputs : tuple or torch.Tensor
        Inputs to `torch.jit.trace`.
    max_trials_global : int
        The maximum number of trials to run globally.
    work_dir : Optional[str]
        The working directory to save intermediate results.
        When omitted, a temporary directory is used and discarded afterwards.
    target : Optional[Union[str, Target]]
        The target of the compilation.
        If user doesn't set the target, the module will be built for the CPU target.
    max_trials_per_task : Optional[int]
        The maximum number of trials to run per task.
    num_trials_per_iter : int
        The number of trials to run per iteration
    builder : Builder.BuilderType
        The builder.
    runner : Runner.RunnerType
        The runner.
    database : Database.DatabaseType
        The database.
    cost_model : CostModel.CostModelType
        The cost model.
    measure_callbacks : MeasureCallback.CallbackListType
        The measure callbacks.
    task_scheduler : TaskScheduler.TaskSchedulerType
        The task scheduler.
    space : SpaceGenerator.SpaceGeneratorType
        The space generator to use.
    strategy : SearchStrategy.SearchStrategyType
        The search strategy to use.
    seed : Optional[int]
        The random seed to use.

    Returns
    -------
    mod : GraphExecutorFactoryWrapper
        It will return an object of GraphExecutorFactoryWrapper,
        which is the subclass of the original nn.Module.

    Raises
    ------
    ValueError
        If TVM was built without the "USE_PT_TVMDSOOP" flag.
    """
    if target == "cpu":
        # Default to the host LLVM target using all physical cores.
        target = f"llvm --num-cores {ms.utils.cpu_count(logical=False)}"
    if not isinstance(target, Target):
        target = Target(target)
    # If `func` is already a traced module this statement makes no effect
    jit_mod = torch.jit.trace(func, example_inputs)
    if isinstance(example_inputs, torch.Tensor):
        example_inputs = [example_inputs]
    shape_list = [(f"inp_{idx}", i.shape) for idx, i in enumerate(example_inputs)]
    mod, params = relay.frontend.from_pytorch(jit_mod, shape_list)  # IRModule
    if work_dir:
        context_manager = contextlib.nullcontext(work_dir)
    else:
        # No working directory supplied: tune into a throwaway temporary directory.
        context_manager = tempfile.TemporaryDirectory()
    with context_manager as work_dir:  # pylint: disable=redefined-argument-from-local
        database = ms.relay_integration.tune_relay(
            mod=mod,
            params=params,
            target=target,
            work_dir=work_dir,
            max_trials_global=max_trials_global,
            max_trials_per_task=max_trials_per_task,
            num_trials_per_iter=num_trials_per_iter,
            builder=builder,
            runner=runner,
            database=database,
            cost_model=cost_model,
            measure_callbacks=measure_callbacks,
            task_scheduler=task_scheduler,
            space=space,
            strategy=strategy,
            seed=seed,
        )
        executor_factory = ms.relay_integration.compile_relay(
            database=database,
            mod=mod,
            target=target,
            params=params,
            backend="graph",
        )
        save_runtime_mod = get_global_func("tvmtorch.save_runtime_mod", allow_missing=True)
        if save_runtime_mod is None:
            # Fix: the message previously rendered stray slashes ("/\"...\"/") around the flag.
            raise ValueError(
                'optimize_torch requires the flag "USE_PT_TVMDSOOP" set in config.cmake'
            )
        save_runtime_mod(executor_factory.module)
        return GraphExecutorFactoryWrapper(torch.classes.tvm_torch.GraphExecutorFactoryWrapper())
| 6,488 | 35.869318 | 100 | py |
tvm | tvm-main/python/tvm/contrib/torch/module.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Module container of PyTorch custom class"""
import warnings
from typing import List
import torch
class GraphModule(torch.nn.Module):
    r"""Module container of Pytorch class which wraps exported
    TVM op implementation library to be called on Pytorch side"""

    @classmethod
    def shape_repr(cls, input_shapes):
        # Canonical string key for a set of input shapes (delegates to the dsoop op).
        return torch.ops.tvm_dsoop.tvm_shape_repr(input_shapes)

    def __init__(self, num_inputs, num_outputs, device=None):
        # Deprecated API; kept for backward compatibility until 0.11.
        warnings.warn(
            "This module will be removed at TVM version 0.11",
            DeprecationWarning,
            stacklevel=2,
        )
        super().__init__()
        # Zero-sized parameter whose device records where this module currently lives.
        self.dummy_param = torch.nn.Parameter(torch.empty(0))
        # NOTE: `engine` must exist before `self.to(device)` below, because moving the
        # module runs the `_apply` override, which reads `self.engine`.
        self.engine = None
        if device is not None:
            self.to(device)
        self.engine = torch.classes.tvm_dsoop.TvmGraphModule(num_inputs, num_outputs, self.device)

    def init(self, input_shapes, lib_path, graph_path, params_path):
        r"""Load tvm module (compiled library, graph json and params) into the engine"""
        self.engine.load_tvm_module(input_shapes, lib_path, graph_path, params_path)

    def forward(self, inputs: List[torch.Tensor]):
        r"""Call tvm module to forward"""
        return self.engine.forward(inputs)

    @property
    def device(self):
        r"""Get the device string, e.g. "cpu" or "cuda:0" """
        return str(self.dummy_param.device)

    def _apply(self, fn):
        r"""Override to device function, manually move tvm module to desired device"""
        super()._apply(fn)
        if self.engine is not None:
            self.engine.to(self.device)
        return self
class VMModule(torch.nn.Module):
    r"""Module container of Pytorch class which wraps exported
    TVM op implementation library to be called on Pytorch side"""

    @classmethod
    def shape_repr(cls, input_shapes):
        # Canonical string key for a set of input shapes (delegates to the dsoop op).
        return torch.ops.tvm_dsoop.tvm_shape_repr(input_shapes)

    def __init__(self, num_inputs, num_outputs, device=None):
        # Deprecated API; kept for backward compatibility until 0.11.
        warnings.warn(
            "This module will be removed at TVM version 0.11",
            DeprecationWarning,
            stacklevel=2,
        )
        super().__init__()
        # Zero-sized parameter whose device records where this module currently lives.
        self.dummy_param = torch.nn.Parameter(torch.empty(0))
        # NOTE: `engine` must exist before `self.to(device)` below, because moving the
        # module runs the `_apply` override, which reads `self.engine`.
        self.engine = None
        if device is not None:
            self.to(device)
        self.engine = torch.classes.tvm_dsoop.TvmVMModule(num_inputs, num_outputs, self.device)

    def init(self, input_shapes, lib_path, code_path):
        r"""Load tvm module (compiled library and VM bytecode) into the engine"""
        self.engine.load_tvm_module(input_shapes, lib_path, code_path)

    def forward(self, inputs: List[torch.Tensor]):
        r"""Call tvm module to forward"""
        return self.engine.forward(inputs)

    @property
    def device(self):
        r"""Get the device string, e.g. "cpu" or "cuda:0" """
        return str(self.dummy_param.device)

    def _apply(self, fn):
        r"""Override to device function, manually move tvm module to desired device"""
        super()._apply(fn)
        if self.engine is not None:
            self.engine.to(self.device)
        return self
class TraceTvmModule(torch.nn.Module):
    r"""Adapter that makes GraphModule/VMModule traceable.

    GraphModule and VMModule only accept a single List[Tensor] argument and
    therefore cannot be traced directly.  Wrapping one in this class exposes a
    variadic ``forward`` so that ``torch.jit.trace`` works with any number of
    tensor inputs.

    Example:
    import tvm.contrib.torch
    tvm_module = tvm.contrib.torch.GraphModule(1, 1, 'cuda:0')
    tvm_module.init(input_shapes, lib_path, graph_path, params_path)
    trace_wrapper = tvm.contrib.torch.TraceGraphModule(torch.jit.script(tvm_module))
    traced = torch.jit.trace(trace_wrapper, example_inputs)
    """

    def __init__(self, tvm_module):
        # Deprecated API; kept for backward compatibility until 0.11.
        warnings.warn(
            "This module will be removed at TVM version 0.11",
            DeprecationWarning,
            stacklevel=2,
        )
        super().__init__()
        self.tvm_module = tvm_module

    def forward(self, *inputs):
        """Pack the variadic inputs, run the wrapped module, and unwrap the result."""
        results = self.tvm_module(inputs)
        if len(results) == 1:
            return results[0]
        return tuple(results)
| 4,823 | 33.705036 | 98 | py |
tvm | tvm-main/python/tvm/contrib/torch/as_torch.py | # pylint: disable=inconsistent-return-statements
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-module-docstring
# pylint: disable=missing-class-docstring
# pylint: disable=missing-function-docstring
"""
as_torch: a decorator, which is used to wrap the TVMScript code to `torch.nn.module`.
"""
import tempfile
from typing import Callable, List, Optional, Union
# isort: off
from typing_extensions import Literal
# isort: on
import torch
import torch.utils.dlpack
import tvm
from tvm import meta_schedule as ms
from tvm.target.target import Target
from tvm.tir import PrimFunc
# python wrapper for OperatorModule
# python wrapper for OperatorModule
class OperatorModuleWrapper(torch.nn.Module):
    """Torch wrapper around a TVMScript IRModule/PrimFunc.

    The wrapped IR is built (and optionally tuned) with TVM, then executed
    through the `tvm_torch` PyTorch custom-class bridge.
    """

    def __init__(
        self,
        module: Union[
            tvm.ir.module.IRModule,
            tvm.tir.function.PrimFunc,
        ],
    ):
        super().__init__()
        self.rt_module = None  # runtime module, built lazily on first forward
        self.ir_module = module  # IR modules

    def tune(
        self,
        target: Union[str, Target] = "cpu",
        max_trials_global: int = 32,
        *,
        num_trials_per_iter: int = 32,
        builder: ms.Builder.BuilderType = "local",
        runner: ms.Runner.RunnerType = "local",
        database: ms.Database.DatabaseType = "json",
        cost_model: ms.CostModel.CostModelType = "xgb",
        measure_callbacks: ms.MeasureCallback.CallbackListType = "default",
        task_scheduler: ms.TaskScheduler.TaskSchedulerType = "round-robin",
        space: ms.SpaceGenerator.SpaceGeneratorType = "post-order-apply",
        strategy: ms.SearchStrategy.SearchStrategyType = "replay-trace",
        num_tuning_cores: Union[Literal["physical", "logical"], int] = "physical",
        seed: Optional[int] = None,
    ) -> None:
        """
        Tune the TVMScript code via MetaSchedule and rebuild the runtime module.

        Parameters
        ----------
        target : Optional[str, Target]
            The target to tune for.  "cpu" expands to the host LLVM target.
        max_trials_global : int
            The maximum number of tuning trials to run globally.

        The remaining keyword-only arguments are forwarded unchanged to
        `ms.tir_integration.tune_tir`.
        """
        if target == "cpu":
            # Default to the host LLVM target using all physical cores.
            target = f"llvm --num-cores {ms.utils.cpu_count(logical=False)}"
        with tempfile.TemporaryDirectory() as work_dir:
            database = ms.tir_integration.tune_tir(
                mod=self.ir_module,
                target=target,
                work_dir=work_dir,
                max_trials_global=max_trials_global,
                num_trials_per_iter=num_trials_per_iter,
                builder=builder,
                runner=runner,
                database=database,
                cost_model=cost_model,
                measure_callbacks=measure_callbacks,
                task_scheduler=task_scheduler,
                space=space,
                strategy=strategy,
                num_tuning_cores=num_tuning_cores,
                seed=seed,
            )
            sch = ms.tir_integration.compile_tir(database, self.ir_module, target)
            self.ir_module = sch.mod
            self.build(target)

    def script(self):
        """Return the TVMScript source of the current IR module."""
        return self.ir_module.script()

    def build(self, target=None):
        """Build the IR module for `target` and register it with the torch bridge.

        Raises
        ------
        ValueError
            If TVM was built without the "USE_PT_TVMDSOOP" flag.
        """
        runtime_module = tvm.build(self.ir_module, target=target)
        func = tvm.get_global_func("tvmtorch.save_runtime_mod", allow_missing=True)
        if func is None:
            # Fix: the message previously rendered stray slashes ("/\"...\"/") around the flag.
            raise ValueError('as_torch requires the flag "USE_PT_TVMDSOOP" set in config.cmake')
        func(runtime_module)
        self.rt_module = torch.classes.tvm_torch.OperatorModuleWrapper()

    def forward(self, *torch_inputs: List[torch.Tensor]) -> List[torch.Tensor]:
        """Build lazily for the inputs' device on first call, then run the TVM module."""
        if self.rt_module is None:
            if torch_inputs[0].is_cuda:
                self.build(target="cuda")
            elif torch_inputs[0].device.type == "cpu":
                self.build()
            else:
                raise Exception(f"the target {torch_inputs[0].device.type} is not supported yet")
        return self.rt_module.forward(torch_inputs)
def as_torch(func: Union[tvm.ir.module.IRModule, tvm.tir.function.PrimFunc, Callable]):
    """A decorator converting TensorIR into a PyTorch nn.Module.

    Parameters
    ----------
    func: Optional[tvm.ir.module.IRModule, tvm.tir.function.PrimFunc, Callable]
        The function written by TVMScript.

    Returns
    -------
    mod : Union[OperatorModuleWrapper, Callable]
        Either an OperatorModuleWrapper (a subclass of nn.Module), or a
        templated function producing one when `func` is a factory callable.
    """
    if isinstance(func, (tvm.ir.module.IRModule, PrimFunc)):
        return OperatorModuleWrapper(func)
    if callable(func):

        def deferred_wrapper(*args, **kwargs):
            # Instantiate the TVMScript factory lazily, then wrap its result.
            return OperatorModuleWrapper(func(*args, **kwargs))

        return deferred_wrapper
| 5,465 | 33.815287 | 98 | py |
tvm | tvm-main/python/tvm/contrib/torch/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=wrong-import-position,redefined-builtin,invalid-name
"""Module container of Pytorch custom class"""
import os
import platform
import warnings
import torch
from tvm._ffi import libinfo
def _load_platform_specific_library(lib_name):
    """Locate `lib_name` next to the TVM runtime library and load it into torch.

    Failures only emit a RuntimeWarning so that importing this package does not
    hard-fail when the custom ops were not built.
    """
    suffix_by_system = {"Darwin": ".dylib", "Windows": ".dll"}
    lib_file_name = lib_name + suffix_by_system.get(platform.system(), ".so")
    lib_dir = os.path.dirname(libinfo.find_lib_path()[0])
    lib_file_path = os.path.join(lib_dir, lib_file_name)
    try:
        torch.classes.load_library(lib_file_path)
    except OSError as err:
        errmsg = str(err)
        if "undefined symbol" in errmsg:
            reason = (
                "Got undefined symbol error, "
                "which might be due to the CXXABI incompatibility."
            )
        else:
            reason = errmsg
        warnings.warn(
            f"The library {lib_name} is not built successfully. {reason}",
            RuntimeWarning,
        )
# Load the PyTorch custom-class bridge libraries; failures only warn (see above)
# so that importing tvm.contrib.torch works even when the ops are not built.
_load_platform_specific_library("libpt_tvmdsoop")
_load_platform_specific_library("libpt_tvmdsoop_new")
from . import module

# Re-export the (deprecated) graph/VM wrappers at package level.
GraphModule = module.GraphModule
VMModule = module.VMModule
TraceTvmModule = module.TraceTvmModule
from . import pytorch_tvm

PyTorchTVMModule = pytorch_tvm.PyTorchTVMModule
# NOTE: shadows the `compile` builtin (see the pylint disable at the top of the file).
compile = pytorch_tvm.compile
from . import as_torch

TVMScriptIRModule = as_torch.OperatorModuleWrapper
# Rebind the submodule name to its decorator of the same name.
as_torch = as_torch.as_torch
from . import optimize_torch

GraphExecutorFactoryWrapper = optimize_torch.GraphExecutorFactoryWrapper
# Rebind the submodule name to its entry-point function of the same name.
optimize_torch = optimize_torch.optimize_torch
| 2,535 | 31.101266 | 74 | py |
tvm | tvm-main/python/tvm/contrib/torch/pytorch_tvm.py | #!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=redefined-builtin
"""`compile` api that convert torch module to torch tvm module"""
import os
import warnings
import tvm
import tvm.testing
from tvm import relay, autotvm
from tvm.runtime import load_module
from tvm.autotvm.tuner import XGBTuner, GATuner, RandomTuner, GridSearchTuner
from tvm.contrib import graph_executor
from tvm.contrib.debugger import debug_executor
from . import GraphModule
def tune_tasks(
    tasks,
    measure_option,
    tuner="xgb",
    n_trial=1000,
    early_stopping=None,
    log_filename="tuning.log",
    use_transfer_learning=True,
):
    """Tune tasks and generate tuning log to file.

    Parameters
    ----------
    tasks : list
        AutoTVM tasks extracted from the model; tuned in reverse order.
    measure_option : dict
        Measurement options from `autotvm.measure_option`.
    tuner : str
        Tuner name; one of the XGBoost variants below, or "ga", "random",
        "gridsearch".
    n_trial : int
        Max trials per task (capped by each task's config-space size).
    early_stopping : Optional[int]
        Early-stopping threshold forwarded to the tuner.
    log_filename : str
        Destination log file; best records are picked into it at the end.
    use_transfer_learning : bool
        Seed each tuner with the history accumulated so far in this run.

    Raises
    ------
    ValueError
        If `tuner` is not a recognized tuner name.
    """
    # (loss_type, feature_type) per XGBTuner variant.  `None` means "do not pass
    # feature_type", so the tuner's own default applies — this keeps behavior
    # identical to the previous explicit if/elif chain.
    xgb_variants = {
        "xgb": ("reg", None),
        "xgb_knob": ("reg", "knob"),
        "xgb_itervar": ("reg", "itervar"),
        "xgb_curve": ("reg", "curve"),
        "xgb_rank": ("rank", None),
        "xgb_rank_knob": ("rank", "knob"),
        "xgb_rank_itervar": ("rank", "itervar"),
        "xgb_rank_curve": ("rank", "curve"),
        "xgb_rank_binary": ("rank-binary", None),
        "xgb_rank_binary_knob": ("rank-binary", "knob"),
        "xgb_rank_binary_itervar": ("rank-binary", "itervar"),
        "xgb_rank_binary_curve": ("rank-binary", "curve"),
    }
    # create tmp log file
    tmp_log_file = log_filename + ".tmp"
    if os.path.exists(tmp_log_file):
        os.remove(tmp_log_file)
    for i, tsk in enumerate(reversed(tasks)):
        prefix = f"[Task {i + 1:2d}/{len(tasks):2d}] "
        # create tuner
        if tuner in xgb_variants:
            loss_type, feature_type = xgb_variants[tuner]
            kwargs = {"loss_type": loss_type}
            if feature_type is not None:
                kwargs["feature_type"] = feature_type
            tuner_obj = XGBTuner(tsk, **kwargs)
        elif tuner == "ga":
            tuner_obj = GATuner(tsk, pop_size=100)
        elif tuner == "random":
            tuner_obj = RandomTuner(tsk)
        elif tuner == "gridsearch":
            tuner_obj = GridSearchTuner(tsk)
        else:
            raise ValueError("Invalid tuner: " + tuner)
        if use_transfer_learning:
            if os.path.isfile(tmp_log_file):
                tuner_obj.load_history(autotvm.record.load_from_file(tmp_log_file))
        # do tuning
        tsk_trial = min(n_trial, len(tsk.config_space))
        tuner_obj.tune(
            n_trial=tsk_trial,
            early_stopping=early_stopping,
            measure_option=measure_option,
            callbacks=[
                autotvm.callback.progress_bar(tsk_trial, prefix=prefix),
                autotvm.callback.log_to_file(tmp_log_file),
            ],
        )
    # pick best records to a cache file
    if not os.path.exists(log_filename):
        with open(log_filename, "w", encoding="utf-8"):
            pass
    if os.path.exists(tmp_log_file):
        autotvm.record.pick_best(tmp_log_file, log_filename)
        os.remove(tmp_log_file)
def get_tuning_opt(log_file="tuning.log", n_trial=200):
    """Return the keyword arguments used to drive `tune_tasks`."""
    measure_option = autotvm.measure_option(
        builder=autotvm.LocalBuilder(timeout=10),
        runner=autotvm.LocalRunner(number=20, repeat=3, timeout=4, min_repeat_ms=150),
    )
    return {
        "log_filename": log_file,
        "tuner": "random",
        "n_trial": n_trial,
        "early_stopping": 60,
        "measure_option": measure_option,
    }
# File names of the three artifacts written by `_export_tvm` and read back by
# `load_tvm`: the compiled library, the execution graph, and the parameters.
TVM_ASSETS = ["mod.so", "graph.json", "params"]
class PyTorchTVMModule:
    """Helper class for compiling pytorch module to tvm module.

    Typical flow: `from_pytorch` -> `tune_tvm` -> `build_tvm` (which also
    exports the artifacts) -> `build_pytorch_module`, or `load_tvm` to restore
    previously exported artifacts.
    """

    def __init__(self, target="cuda", device=None) -> None:
        """Initialize an empty compilation pipeline.

        Parameters
        ----------
        target : str
            TVM compilation target.
        device : Optional[tvm.runtime.Device]
            Execution device; defaults to tvm.cuda(0).  Resolved here rather
            than in the signature so the device handle is not constructed at
            module-import time (the old `device=tvm.cuda(0)` default was
            evaluated when this file was imported).
        """
        self.script_module = None
        self.input_infos = None
        self.default_dtype = "float32"
        self.mod = None
        self.params = None
        self.tasks = None
        self.target = target
        self.dev = device if device is not None else tvm.cuda(0)
        self.log_file = None
        # Fix: previously never initialized, so `build_pytorch_module` raised
        # AttributeError instead of failing its assert when called too early.
        self.export_dir = None
        self.tvm_module = None
        self.tvm_graph = None
        self.tvm_lib = None
        self.tvm_params = None

    def from_pytorch(self, script_module, input_infos, default_dtype="float32"):
        """Import a TorchScript module into a Relay IRModule + params."""
        self.script_module = script_module
        self.input_infos = input_infos
        self.default_dtype = default_dtype
        self.mod, self.params = relay.frontend.from_pytorch(
            script_module, input_infos, default_dtype=default_dtype
        )

    def tune_tvm(self, log_file="tuning.log", n_trial=200):
        """Extract AutoTVM tasks from the imported model and tune them."""
        self.tasks = autotvm.task.extract_from_program(
            self.mod["main"],
            target=self.target,
            params=self.params,
        )
        self.log_file = log_file
        tuning_opt = get_tuning_opt(log_file, n_trial)
        tune_tasks(self.tasks, **tuning_opt)

    def build_tvm(self, export_dir, debug_runtime=False):
        """Build the graph executor and export its artifacts to `export_dir`."""
        tvm_mod = self._build_tvm(debug_runtime)
        self._export_tvm(export_dir)
        return tvm_mod

    def _build_tvm(self, debug_runtime=False):
        """Compile with the best tuning records and create the executor."""
        # compile kernels with history best records
        with autotvm.apply_history_best(self.log_file):
            with tvm.transform.PassContext(opt_level=3):
                self.tvm_graph, self.tvm_lib, self.tvm_params = relay.build(
                    self.mod, target=self.target, params=self.params
                )
        if not debug_runtime:
            self.tvm_module = graph_executor.create(self.tvm_graph, self.tvm_lib, device=self.dev)
        else:
            self.tvm_module = debug_executor.create(self.tvm_graph, self.tvm_lib, device=self.dev)
        self.tvm_module.set_input(**self.tvm_params)
        return self.tvm_module

    def _export_tvm(self, export_dir):
        """Write library, graph json and params (TVM_ASSETS) to `export_dir`."""
        if not os.path.isdir(export_dir):
            os.makedirs(export_dir)
        self.export_dir = export_dir
        self.tvm_lib.export_library(os.path.join(export_dir, TVM_ASSETS[0]))
        with open(os.path.join(export_dir, TVM_ASSETS[1]), "w", encoding="utf8") as fout:
            fout.write(self.tvm_graph)
        with open(os.path.join(export_dir, TVM_ASSETS[2]), "wb") as fout:
            fout.write(relay.save_param_dict(self.tvm_params))

    def load_tvm(self, export_dir):
        """Load tvm module from export directory"""
        self.export_dir = export_dir
        self.tvm_lib = load_module(os.path.join(export_dir, TVM_ASSETS[0]))
        with open(os.path.join(export_dir, TVM_ASSETS[1]), "r", encoding="utf8") as f:
            self.tvm_graph = f.read()
        with open(os.path.join(export_dir, TVM_ASSETS[2]), "rb") as f:
            self.tvm_params = relay.load_param_dict(f.read())
        self.tvm_module = graph_executor.create(self.tvm_graph, self.tvm_lib, device=self.dev)
        self.tvm_module.set_input(**self.tvm_params)
        return self.tvm_module

    def build_pytorch_module(self, num_inputs, num_outputs, input_infos=None):
        """Build pytorch module containing TVM Graph Module"""
        warnings.warn(
            " ".join(
                (
                    "This function will be removed at TVM version 0.11,",
                    "we suggest users to use `optimized_torch` for tuning Torch modules instead.",
                )
            ),
            DeprecationWarning,
            stacklevel=2,
        )
        assert self.export_dir, "you must build_tvm or load_tvm before"
        input_infos = input_infos or self.input_infos
        assert input_infos
        assert len(input_infos) == num_inputs
        assets = [os.path.join(self.export_dir, i) for i in TVM_ASSETS]
        input_shapes = [i[1] for i in input_infos]

        def _tvm_dev_to_pt_dev(device):
            """convert tvm device to pytorch device string"""
            if tvm.runtime.Device.MASK2STR[device.device_type] == "cpu":
                return "cpu"
            if tvm.runtime.Device.MASK2STR[device.device_type] == "cuda":
                return f"cuda:{device.device_id}"
            raise ValueError(f"unsupported device for pt graph module: {device}")

        mod = GraphModule(num_inputs=num_inputs, num_outputs=num_outputs).to(
            _tvm_dev_to_pt_dev(self.dev)
        )
        mod.init(input_shapes, *assets)
        return mod
def compile(script_module, option):
    """Compile a TorchScript module into a torch module backed by TVM.

    example:
    option = {
        "input_infos": [
            ("x", (1, 3, 244, 244)),
        ],
        "default_dtype": "float16",
        "export_dir": "pytorch_compiled",
        "num_outputs": 1,
        "tuning_n_trials": 20,  # set zero to skip tuning
        "tuning_log_file": "tuning.log",
        "target": "llvm",
        "device": tvm.cpu(),
    }
    script_module = torch.jit.script(model)
    pytorch_tvm_module = compile(script_module, option)
    pytorch_tvm_module("model_tvm.pt")
    """
    warnings.warn(
        "This function will be removed at TVM version 0.11, "
        "we suggest users to use `optimized_torch` for tuning Torch modules instead.",
        DeprecationWarning,
        stacklevel=2,
    )
    input_infos = option["input_infos"]
    default_dtype = option.get("default_dtype", "float32")
    export_dir = option.get("export_dir", "pytorch_compiled")
    tuning_log_file = option.get("tuning_log_file", "tuning.log")
    tuning_n_trials = option.get("tuning_n_trials", 20)
    num_outputs = option.get("num_outputs", 1)
    target = option.get("target", "cuda")
    device = option.get("device", tvm.cuda(0))

    tvm_module = PyTorchTVMModule(target=target, device=device)
    print("Converting...")
    tvm_module.log_file = tuning_log_file
    tvm_module.from_pytorch(script_module, input_infos, default_dtype)
    if tuning_n_trials > 0:
        print("Tuning...")
        tvm_module.tune_tvm(log_file=tuning_log_file, n_trial=tuning_n_trials)
    print("Building...")
    tvm_module.build_tvm(export_dir)
    return tvm_module.build_pytorch_module(num_inputs=len(input_infos), num_outputs=num_outputs)
| 11,168 | 37.119454 | 98 | py |
tvm | tvm-main/python/tvm/contrib/relay_viz/interface.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Abstract class used by :py:class:`tvm.contrib.relay_viz.RelayVisualizer`."""
import abc
from typing import (
Dict,
Union,
Tuple,
List,
)
import tvm
from tvm import relay
UNKNOWN_TYPE = "unknown"
class VizNode:
    """VizNode carries node information for the `VizGraph` interface.

    Parameters
    ----------
    node_id: str
        Unique identifier for this node.
    node_type: str
        Type of this node.
    node_detail: str
        Any supplement for this node such as attributes.
    """

    def __init__(self, node_id: str, node_type: str, node_detail: str):
        self._id = node_id
        self._type = node_type
        self._detail = node_detail

    @property
    def identity(self) -> str:
        return self._id

    @property
    def type_name(self) -> str:
        return self._type

    @property
    def detail(self) -> str:
        return self._detail

    def __repr__(self) -> str:
        # Newlines in the detail are flattened so the repr stays on one line.
        detail = self._detail.replace("\n", ", ")
        # Fix: close the parenthesis opened by "VizNode(" (was missing before).
        return f"VizNode(identity: {self._id}, type_name: {self._type}, detail: {detail})"
class VizEdge:
    """VizEdge connects two `VizNode` instances.

    Parameters
    ----------
    start_node: str
        The identifier of the node starting the edge.
    end_node: str
        The identifier of the node ending the edge.
    """

    def __init__(self, start_node: str, end_node: str):
        # Store the endpoints as an immutable (start, end) pair.
        self._endpoints = (start_node, end_node)

    @property
    def start(self) -> str:
        return self._endpoints[0]

    @property
    def end(self) -> str:
        return self._endpoints[1]
class VizParser(abc.ABC):
    """VizParser parses out a VizNode and VizEdges from a `relay.Expr`."""

    # Implementations decide how each relay construct maps to nodes/edges;
    # see `DefaultVizParser` below for the stock behavior.
    @abc.abstractmethod
    def get_node_edges(
        self,
        node: relay.Expr,
        relay_param: Dict[str, tvm.runtime.NDArray],
        node_to_id: Dict[relay.Expr, str],
    ) -> Tuple[Union[VizNode, None], List[VizEdge]]:
        """Get VizNode and VizEdges for a `relay.Expr`.

        Parameters
        ----------
        node : relay.Expr
            relay.Expr which will be parsed and generate a node and edges.
        relay_param: Dict[str, tvm.runtime.NDArray]
            relay parameters dictionary.
        node_to_id : Dict[relay.Expr, str]
            This is a mapping from relay.Expr to a unique id, generated by `RelayVisualizer`.

        Returns
        -------
        rv1 : Union[VizNode, None]
            VizNode represent the relay.Expr. If the relay.Expr is not intended to introduce a node
            to the graph, return None.
        rv2 : List[VizEdge]
            a list of VizEdges to describe the connectivity of the relay.Expr.
            Can be empty list to indicate no connectivity.
        """
class VizGraph(abc.ABC):
    """Abstract class for graph, which is composed of nodes and edges."""

    @abc.abstractmethod
    def node(self, viz_node: VizNode) -> None:
        """Add a node to the underlying graph.
        Nodes in a Relay IR Module are expected to be added in the post-order.

        Parameters
        ----------
        viz_node : VizNode
            A `VizNode` instance.
        """

    @abc.abstractmethod
    def edge(self, viz_edge: VizEdge) -> None:
        """Add an edge to the underlying graph.

        Parameters
        ----------
        viz_edge : VizEdge
            A `VizEdge` instance.
        """
class DefaultVizParser(VizParser):
    """DefaultVizParser provides a set of logics to parse various relay types.
    These logics are inspired and heavily based on
    `visualize` function in https://tvm.apache.org/2020/07/14/bert-pytorch-tvm
    """

    def get_node_edges(
        self,
        node: relay.Expr,
        relay_param: Dict[str, tvm.runtime.NDArray],
        node_to_id: Dict[relay.Expr, str],
    ) -> Tuple[Union[VizNode, None], List[VizEdge]]:
        # Dispatch on the concrete relay expression type; unknown types fall
        # through to a placeholder node so the graph is never silently truncated.
        if isinstance(node, relay.Function):
            return self._function(node, node_to_id)
        if isinstance(node, relay.expr.Call):
            return self._call(node, node_to_id)
        if isinstance(node, relay.expr.Var):
            return self._var(node, relay_param, node_to_id)
        if isinstance(node, relay.expr.Tuple):
            return self._tuple(node, node_to_id)
        if isinstance(node, relay.expr.TupleGetItem):
            return self._tuple_get_item(node, node_to_id)
        if isinstance(node, relay.expr.Constant):
            return self._constant(node, node_to_id)
        # GlobalVar possibly mean another global relay function,
        # which is expected to in "Graph" level, not in "Node" level.
        if isinstance(node, (relay.expr.GlobalVar, tvm.ir.Op)):
            return None, []
        viz_node = VizNode(node_to_id[node], UNKNOWN_TYPE, f"don't know how to parse {type(node)}")
        viz_edges = []
        return viz_node, viz_edges

    def _var(
        self,
        node: relay.Expr,
        relay_param: Dict[str, tvm.runtime.NDArray],
        node_to_id: Dict[relay.Expr, str],
    ) -> Tuple[Union[VizNode, None], List[VizEdge]]:
        """Render rule for a relay var node"""
        node_id = node_to_id[node]
        name_hint = node.name_hint
        node_detail = f"name_hint: {name_hint}"
        # A var backed by an entry in `relay_param` is a weight; otherwise a model input.
        node_type = "Var(Param)" if name_hint in relay_param else "Var(Input)"
        if node.type_annotation is not None:
            if hasattr(node.type_annotation, "shape"):
                shape = tuple(map(int, node.type_annotation.shape))
                dtype = node.type_annotation.dtype
                node_detail = f"{node_detail}\nshape: {shape}\ndtype: {dtype}"
            else:
                node_detail = f"{node_detail}\ntype_annotation: {node.type_annotation}"
        # only node
        viz_node = VizNode(node_id, node_type, node_detail)
        viz_edges = []
        return viz_node, viz_edges

    def _function(
        self,
        node: relay.Expr,
        node_to_id: Dict[relay.Expr, str],
    ) -> Tuple[Union[VizNode, None], List[VizEdge]]:
        """Render rule for a relay function node"""
        node_details = []
        name = ""
        func_attrs = node.attrs
        if func_attrs:
            node_details = [f"{k}: {func_attrs.get_str(k)}" for k in func_attrs.keys()]
            # "Composite" might from relay.transform.MergeComposite
            if "Composite" in func_attrs.keys():
                name = func_attrs["Composite"]
        node_id = node_to_id[node]
        # Body -> FunctionNode
        viz_node = VizNode(node_id, f"Func {name}", "\n".join(node_details))
        viz_edges = [VizEdge(node_to_id[node.body], node_id)]
        return viz_node, viz_edges

    def _call(
        self,
        node: relay.Expr,
        node_to_id: Dict[relay.Expr, str],
    ) -> Tuple[Union[VizNode, None], List[VizEdge]]:
        """Render rule for a relay call node"""
        node_id = node_to_id[node]
        op_name = UNKNOWN_TYPE
        node_detail = []
        # The callee determines both the display name and which attrs to show.
        if isinstance(node.op, tvm.ir.Op):
            op_name = node.op.name
            if node.attrs:
                node_detail = [f"{k}: {node.attrs.get_str(k)}" for k in node.attrs.keys()]
        elif isinstance(node.op, relay.Function):
            func_attrs = node.op.attrs
            op_name = "Anonymous Func"
            if func_attrs:
                node_detail = [f"{k}: {func_attrs.get_str(k)}" for k in func_attrs.keys()]
                # "Composite" might from relay.transform.MergeComposite
                if "Composite" in func_attrs.keys():
                    op_name = func_attrs["Composite"]
        elif isinstance(node.op, relay.GlobalVar):
            op_name = "GlobalVar"
            node_detail = [f"GlobalVar.name_hint: {node.op.name_hint}"]
        else:
            # Fallback: show the bare Python class name of the callee.
            op_name = str(type(node.op)).split(".")[-1].split("'")[0]
        # Arguments -> CallNode
        viz_node = VizNode(node_id, f"Call {op_name}", "\n".join(node_detail))
        args = [node_to_id[arg] for arg in node.args]
        viz_edges = [VizEdge(arg, node_id) for arg in args]
        return viz_node, viz_edges

    def _tuple(
        self,
        node: relay.Expr,
        node_to_id: Dict[relay.Expr, str],
    ) -> Tuple[Union[VizNode, None], List[VizEdge]]:
        node_id = node_to_id[node]
        # Fields -> TupleNode
        viz_node = VizNode(node_id, "Tuple", "")
        viz_edges = [VizEdge(node_to_id[field], node_id) for field in node.fields]
        return viz_node, viz_edges

    def _tuple_get_item(
        self,
        node: relay.Expr,
        node_to_id: Dict[relay.Expr, str],
    ) -> Tuple[Union[VizNode, None], List[VizEdge]]:
        node_id = node_to_id[node]
        # Tuple -> TupleGetItemNode
        viz_node = VizNode(node_id, "TupleGetItem", f"idx: {node.index}")
        viz_edges = [VizEdge(node_to_id[node.tuple_value], node_id)]
        return viz_node, viz_edges

    def _constant(
        self,
        node: relay.Expr,
        node_to_id: Dict[relay.Expr, str],
    ) -> Tuple[Union[VizNode, None], List[VizEdge]]:
        node_id = node_to_id[node]
        node_detail = f"shape: {node.data.shape}, dtype: {node.data.dtype}"
        # only node
        viz_node = VizNode(node_id, "Const", node_detail)
        viz_edges = []
        return viz_node, viz_edges
class Plotter(abc.ABC):
    """Plotter can render a collection of Graph interfaces to a file."""

    @abc.abstractmethod
    def create_graph(self, name: str) -> VizGraph:
        """Create a VizGraph

        Parameters
        ----------
        name : str
            the name of the graph

        Return
        ------
        rv1: an instance of class inheriting from VizGraph interface.
        """

    @abc.abstractmethod
    def render(self, filename: str) -> None:
        """Render the graph as a file.

        Parameters
        ----------
        filename : str
            see the definition of implemented class.
        """
| 10,681 | 31.567073 | 99 | py |
tvm | tvm-main/python/tvm/topi/searchsorted.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""searchsorted operator"""
from . import utils
from . import te
from ..tir import ir_builder
from .math import cast
def binary_search(ib, sequence_offset, search_range, sorted_sequence, value, right, out_dtype):
    """Emit TIR for a binary search shared by the CPU and GPU backends.

    `sorted_sequence` is an N-D buffer searched along its innermost dimension,
    whose extent is `search_range`. `sequence_offset` is a 1-D linearized offset
    selecting which innermost row to search, so the search for `value` runs over
    `sorted_sequence[sequence_offset:(sequence_offset + search_range)]`.
    Note that the N-D buffer is indexed by 1-D linearized indices.
    """
    low = ib.allocate(out_dtype, (1,), name="lo", scope="local")
    high = ib.allocate(out_dtype, (1,), name="hi", scope="local")
    low[0] = cast(0, out_dtype)
    high[0] = cast(search_range, out_dtype)

    def _advance(probe, target):
        # right=True keeps scanning past elements equal to `target`, so the
        # returned index is the rightmost insertion point.
        return probe <= target if right else probe < target

    # Reference: pytorch/aten/src/ATen/native/cuda/Bucketization.cu
    with ib.while_loop(low[0] < high[0]):
        mid = low[0] + (high[0] - low[0] >> 1)
        with ib.if_scope(_advance(sorted_sequence[sequence_offset + mid], value)):
            low[0] = mid + 1
        with ib.else_scope():
            high[0] = mid
    return low[0]
def searchsorted(sorted_sequence, values, right=False, out_dtype="int64"):
    """Find indices where elements should be inserted to maintain order.

    If `sorted_sequence` is N-dimensional, the innermost dimension of
    `values` are searched in the corresponding dimension of `sorted_sequence`.

    Parameters
    ----------
    sorted_sequence : te.Tensor
        N-D or 1-D Tensor, containing monotonically increasing sequence
        on the innermost dimension.

    values : te.Tensor
        N-D Tensor containing the search values. When `sorted_sequence` is 1-D,
        the shape of `values` can be arbitrary. Otherwise, ranks of `sorted_sequence`
        and `values` must be the same, and outer N-1 axes must have the same size.

    right : bool, optional
        Controls which index is returned if a value lands exactly on one of sorted values. If
        False, the index of the first suitable location found is given. If true, return the
        last such index. If there is no suitable index, return either 0 or N (where N is the
        size of the innermost dimension).

    out_dtype : string, optional
        The data type of the output indices.

    Returns
    -------
    indices : te.Tensor
        Tensor with same shape as values, representing the indices of
        elements of `values` if they are inserted in `sorted_sequence`.
    """

    def ir(sorted_sequence, values, indices):
        # Lower directly to TIR: one parallel loop over every element of
        # `values`; each iteration performs an independent binary search.
        ib = ir_builder.create()
        sorted_sequence_shape = sorted_sequence.shape
        values_shape = values.shape
        num_search = utils.prod(values_shape)
        search_range = sorted_sequence_shape[-1]
        sorted_sequence = ib.buffer_ptr(sorted_sequence)
        values = ib.buffer_ptr(values)
        indices = ib.buffer_ptr(indices)
        with ib.for_range(0, num_search, name="i", kind="parallel") as i:
            if len(sorted_sequence_shape) == 1:
                # Single sequence: every value searches the same row.
                sequence_offset = 0
            else:
                # N-D case: pick the row matching this value's outer index.
                sequence_id = i // values_shape[-1]
                sequence_offset = sequence_id * search_range
            indices[i] = binary_search(
                ib,
                sequence_offset,
                search_range,
                sorted_sequence,
                values[i],
                right,
                out_dtype,
            )
        return ib.get()

    return te.extern(
        values.shape,
        [sorted_sequence, values],
        lambda ins, outs: ir(ins[0], ins[1], outs[0]),
        name="searchsorted",
        dtype=out_dtype,
    )
| 4,777 | 36.328125 | 95 | py |
tvm | tvm-main/python/tvm/topi/nn/depth_to_space.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""TVM operator depth_to_space compute."""
from __future__ import absolute_import
import tvm
from tvm import te
from .. import tag
def depth_to_space(data, block_size, layout="NCHW", mode="DCR"):
    """Rearrange channel blocks of the input into spatial blocks.

    Parameters
    ----------
    data : tvm.te.Tensor
        4-D tensor in either NCHW or NHWC layout.

    block_size : int
        Size of blocks to compose from channel dimension.

    layout : string
        Either NCHW or NHWC, indicating data layout.

    mode : string
        Either DCR or CDR, indicates how channels should be accessed.
        In DCR, channels are interwoven in the Tensorflow style while
        in CDR channels are accessed sequentially as in Pytorch.

    Returns
    -------
    output : tvm.te.Tensor
        Output of shape [N, C / block_size**2, H * block_size, W * block_size]
    """
    if layout == "NCHW":
        batch, channels, height, width = data.shape
    elif layout == "NHWC":
        batch, height, width, channels = data.shape
    else:
        raise ValueError("Only NCHW and NHWC layouts are currently supported.")

    out_channels = tvm.tir.truncdiv(channels, (block_size * block_size))
    if layout == "NCHW":
        output_shape = [batch, out_channels, height * block_size, width * block_size]
    else:
        output_shape = [batch, height * block_size, width * block_size, out_channels]

    def _map_output_index(*indices):
        # Normalize either layout's indices to (n, c, y, x) order.
        if layout == "NCHW":
            n, c, y, x = indices
        else:
            n, y, x, c = indices
        src_y = tvm.tir.truncdiv(y, block_size)
        src_x = tvm.tir.truncdiv(x, block_size)
        off_y = tvm.tir.truncmod(y, block_size)
        off_x = tvm.tir.truncmod(x, block_size)
        if mode == "DCR":
            # TensorFlow-style: the in-block offset varies fastest over channels.
            src_c = out_channels * ((block_size * off_y) + off_x) + c
        else:
            # PyTorch-style: channels of one output channel stay contiguous.
            src_c = (c * block_size * block_size) + ((block_size * off_y) + off_x)
        if layout == "NCHW":
            return data(n, src_c, src_y, src_x)
        return data(n, src_y, src_x, src_c)

    return te.compute(output_shape, _map_output_index, name="depth_to_space", tag=tag.INJECTIVE)
| 3,249 | 35.931818 | 88 | py |
tvm | tvm-main/python/tvm/topi/image/grid_sample.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""affine_grid and grid_sample operator"""
from tvm import te, tir
def affine_grid(data, target_shape):
    """Generate a 2D sampling grid and apply an affine transformation to it.

    Described in https://arxiv.org/pdf/1506.02025.pdf: a uniform sampling grid
    is generated within the target shape, normalized to [-1, 1], and the given
    affine matrix is applied to every grid point.

    Parameters
    ----------
    data : tvm.Tensor
        3-D with shape [batch, 2, 3]. The affine matrix.

    target_shape: list/tuple of two int
        Specifies the output shape (H, W).

    Returns
    -------
    Output : tvm.Tensor
        4-D with shape [batch, 2, target_height, target_width]
    """
    assert target_shape is not None
    assert len(target_shape) == 2
    assert (
        target_shape[0] > 1 and target_shape[1] > 1
    ), "target height/width should be greater than 1"

    dtype = data.dtype
    # Grid points span [-1, 1]; the tiny bias keeps the endpoint strictly inside.
    row_step = tir.const((2.0 - 1e-7) / (target_shape[0] - 1), dtype=dtype)
    col_step = tir.const((2.0 - 1e-7) / (target_shape[1] - 1), dtype=dtype)
    origin = tir.const(-1.0, dtype=dtype)

    def _apply_affine(n, dim, i, j):
        y = origin + i * row_step
        x = origin + j * col_step
        return data[n, dim, 0] * x + data[n, dim, 1] * y + data[n, dim, 2]

    oshape = (data.shape[0], len(target_shape), *target_shape)
    return te.compute(oshape, _apply_affine, tag="affine_grid")
def _grid_sample_2d(
    data, grid, method="bilinear", layout="NCHW", padding_mode="zeros", align_corners=True
):
    """Applies bilinear/nearest/bicubic sampling to input feature map.

    Given :math:`data` and :math:`grid` assuming NCHW layout, then the output is computed by

    .. math::

        x_{src} = grid[batch, 0, y_{dst}, x_{dst}] \\
        y_{src} = grid[batch, 1, y_{dst}, x_{dst}] \\
        output[batch, channel, y_{dst}, x_{dst}] = G(data[batch, channel, y_{src}, x_{src}])

    :math:`x_{dst}`, :math:`y_{dst}` enumerate all spatial locations in :math:`output`, and
    :math:`G()` denotes the interpolation method.

    The out-boundary points will be padded with zeros if padding_mode is "zeros", or
    border pixel value if padding_mode is "border", or
    inner pixel value if padding_mode is "reflection".

    The left-top corner (-1, -1) and right-bottom corner (1, 1) in grid will be map to
    (0, 0) and (h - 1, w - 1) of data if align_corners is "True", or
    (-0.5, -0.5) and (h - 0.5, w - 0.5) of data if align_corners is "False".

    The shape of the output will be (data.shape[0], data.shape[1], grid.shape[2], grid.shape[3]).

    The operator assumes that :math:`grid` has been normalized to [-1, 1].

    grid_sample often cooperates with affine_grid which generates sampling grids for grid_sample.

    Parameters
    ----------
    data : tvm.Tensor
        4-D with shape [batch, in_channel, in_height, in_width]

    grid : tvm.Tensor
        4-D with shape [batch, 2, out_height, out_width]

    method : str
        The interpolation method "nearest", "bilinear", "bicubic" are supported.

    layout : str
        The layout of input data and the output.

    padding_mode : str
        The padding mode for outside grid values, "zeros", "border", "reflection" are supported.

    align_corners: bool
        Geometrically, we consider the pixels of the input as squares rather than points.
        If set to "True", the extrema ("-1" and "1") are considered as referring
        to the center points of the input corner pixels. If set to "False", they
        are instead considered as referring to the corner points of the input corner
        pixels, making the sampling more resolution agnostic.

    Returns
    -------
    Output : tvm.Tensor
        4-D with shape [batch, in_channel, out_height, out_width]
    """
    assert method in ("bilinear", "nearest", "bicubic"), f"{method} is not supported"
    assert padding_mode in ("zeros", "border", "reflection"), f"{padding_mode} is not supported"
    assert layout == "NCHW", f"{layout} is not supported"

    batch, in_channel, in_height, in_width = data.shape
    out_height, out_width = grid.shape[2:]

    def _get_pixel_value(n, c, h, w):
        # Out-of-bounds reads return 0. For "border"/"reflection" the
        # coordinates are clipped beforehand, so this effectively implements
        # the "zeros" padding mode.
        return te.if_then_else(
            te.all(h >= 0, w >= 0, h < in_height, w < in_width),
            data[n, c, h, w],
            tir.const(0.0, dtype=data.dtype),
        )

    def _unnormalize(h, w):
        # Map normalized grid coordinates in [-1, 1] to pixel coordinates.
        if align_corners:
            y = (h + 1) * (in_height - 1) / 2
            x = (w + 1) * (in_width - 1) / 2
        else:
            y = -0.5 + (h + 1) * in_height / 2
            x = -0.5 + (w + 1) * in_width / 2
        return (y, x)

    def _clip_coordinates(x, size):
        # Clamp to the valid index range [0, size - 1] ("border" behavior).
        return te.min(te.max(x, 0), size - 1)

    def _compute_source_index(n, h, w):
        # grid channel 0 holds x, channel 1 holds y.
        y = grid[n, 1, h, w]
        x = grid[n, 0, h, w]
        y, x = _unnormalize(y, x)

        if padding_mode == "reflection":
            y = _reflect_coordinates(y, in_height)
            x = _reflect_coordinates(x, in_width)
            # Reflection can still land slightly outside; clip afterwards.
            y = _clip_coordinates(y, in_height)
            x = _clip_coordinates(x, in_width)
        elif padding_mode == "border":
            y = _clip_coordinates(y, in_height)
            x = _clip_coordinates(x, in_width)
        # "zeros": leave the coordinate untouched; _get_pixel_value pads.

        return (y, x)

    def _reflect_coordinates(x, size):
        def __refelection(x, size, corner_start):
            def __reflect(index, size, corner_start):
                # Fold the coordinate back into the valid span; parity of how
                # many spans were crossed decides the reflection direction.
                index_align_corner = te.abs(corner_start - index)
                size_times = te.truncdiv(index_align_corner.astype("int32"), size).astype("int32")
                t = tir.Mod(size_times, 2)
                extra = index_align_corner - size_times * size
                return tir.if_then_else(
                    tir.EQ(t, 0), extra + corner_start, size - extra + corner_start
                )

            return tir.if_then_else(
                tir.all(x >= corner_start, x <= size + corner_start),
                x,
                __reflect(x, size, corner_start),
            )

        if align_corners:
            new_x = __refelection(x, size - 1, 0)
        else:
            new_x = __refelection(x, size, -0.5)
        return new_x

    def _bilinear_sample(n, c, h, w):
        # Weighted average of the 4 neighbors around the source coordinate.
        y, x = _compute_source_index(n, h, w)
        y0 = te.floor(y).astype("int32")
        x0 = te.floor(x).astype("int32")
        y1 = y0 + tir.const(1, "int32")
        x1 = x0 + tir.const(1, "int32")

        return (
            _get_pixel_value(n, c, y0, x0) * (1.0 - (y - y0)) * (1.0 - (x - x0))
            + _get_pixel_value(n, c, y0, x1) * (1.0 - (y - y0)) * (x - x0)
            + _get_pixel_value(n, c, y1, x0) * (y - y0) * (1.0 - (x - x0))
            + _get_pixel_value(n, c, y1, x1) * (y - y0) * (x - x0)
        )

    def _nearest_sample(n, c, h, w):
        y, x = _compute_source_index(n, h, w)
        y_new = te.nearbyint(y).astype("int32")
        x_new = te.nearbyint(x).astype("int32")

        return _get_pixel_value(n, c, y_new, x_new)

    def _bicubic_sample(n, c, h, w):
        A = -0.75  # -0.75 is used in pytorch, it maybe different in other frameworks

        def cubic_weight_1(fraction):
            return ((A + 2) * fraction - (A + 3)) * fraction * fraction + 1

        def cubic_weight_2(fraction):
            return ((A * fraction - 5 * A) * fraction + 8 * A) * fraction - 4 * A

        def cubic_interp_1d(pixel_0, pixel_1, pixel_2, pixel_3, fraction):
            weights = [0] * 4
            weights[0] = cubic_weight_2(fraction + 1)
            weights[1] = cubic_weight_1(fraction)
            weights[2] = cubic_weight_1(1 - fraction)
            weights[3] = cubic_weight_2(2 - fraction)
            return (
                pixel_0 * weights[0]
                + pixel_1 * weights[1]
                + pixel_2 * weights[2]
                + pixel_3 * weights[3]
            )

        y = grid[n, 1, h, w]
        x = grid[n, 0, h, w]
        y, x = _unnormalize(y, x)
        y_floor = te.floor(y).astype("int32")
        x_floor = te.floor(x).astype("int32")
        y_fraction = y - y_floor
        x_fraction = x - x_floor

        # 4x4 neighborhood: interpolate each of the 4 rows horizontally, then
        # interpolate the 4 row results vertically.
        coefficients = [0] * 4

        for i in range(4):
            y_ = y_floor - 1 + i
            x_0 = x_floor - 1
            x_1 = x_floor + 0
            x_2 = x_floor + 1
            x_3 = x_floor + 2

            if padding_mode == "border":
                y_ = _clip_coordinates(y_, in_height).astype("int32")
                x_0 = _clip_coordinates(x_0, in_width).astype("int32")
                x_1 = _clip_coordinates(x_1, in_width).astype("int32")
                x_2 = _clip_coordinates(x_2, in_width).astype("int32")
                x_3 = _clip_coordinates(x_3, in_width).astype("int32")

            elif padding_mode == "reflection":
                y_ = _reflect_coordinates(y_, in_height)
                x_0 = _reflect_coordinates(x_0, in_width)
                x_1 = _reflect_coordinates(x_1, in_width)
                x_2 = _reflect_coordinates(x_2, in_width)
                x_3 = _reflect_coordinates(x_3, in_width)

                y_ = _clip_coordinates(y_, in_height).astype("int32")
                x_0 = _clip_coordinates(x_0, in_width).astype("int32")
                x_1 = _clip_coordinates(x_1, in_width).astype("int32")
                x_2 = _clip_coordinates(x_2, in_width).astype("int32")
                x_3 = _clip_coordinates(x_3, in_width).astype("int32")

            coefficients[i] = cubic_interp_1d(
                _get_pixel_value(n, c, y_, x_0),
                _get_pixel_value(n, c, y_, x_1),
                _get_pixel_value(n, c, y_, x_2),
                _get_pixel_value(n, c, y_, x_3),
                x_fraction,
            )

        return cubic_interp_1d(
            coefficients[0], coefficients[1], coefficients[2], coefficients[3], y_fraction
        )

    if method == "bilinear":
        interpolation = _bilinear_sample
    elif method == "nearest":
        interpolation = _nearest_sample
    else:  # method == "bicubic"
        interpolation = _bicubic_sample

    return te.compute((batch, in_channel, out_height, out_width), interpolation, tag="grid_sample")
def _grid_sample_3d(
    data, grid, method="bilinear", layout="NCDHW", padding_mode="zeros", align_corners=True
):
    """Applies bilinear/nearest sampling to input feature map.

    Given :math:`data` and :math:`grid` assuming NCDHW layout, then the output is computed by

    .. math::

        x_{src} = grid[batch, 0, z_{dst}, y_{dst}, x_{dst}] \\
        y_{src} = grid[batch, 1, z_{dst}, y_{dst}, x_{dst}] \\
        z_{src} = grid[batch, 2, z_{dst}, y_{dst}, x_{dst}] \\
        output[batch, channel, z_{dst}, y_{dst}, x_{dst}]
        = G(data[batch, channel, z_{src}, y_{src}, x_{src}])

    :math:`x_{dst}`, :math:`y_{dst}`, :math:`z_{dst}` enumerate all spatial locations
    in :math:`output`, and :math:`G()` denotes the interpolation method.

    The out-boundary points will be padded with zeros if padding_mode is "zeros", or
    border pixel value if padding_mode is "border", or
    inner pixel value if padding_mode is "reflection".

    The left-top corner (-1, -1, -1) and right-bottom corner (1, 1, 1) in grid will be map to
    (0, 0, 0) and (d - 1, h - 1, w - 1) of data if align_corners is "True", or
    (-0.5, -0.5, -0.5) and (d - 0.5, h - 0.5, w - 0.5) of data if align_corners is "False".

    The shape of the output will be
    (data.shape[0], data.shape[1], grid.shape[2], grid.shape[3], grid.shape[4]).

    The operator assumes that :math:`grid` has been normalized to [-1, 1].

    grid_sample often cooperates with affine_grid which generates sampling grids for grid_sample.

    Parameters
    ----------
    data : tvm.Tensor
        5-D with shape [batch, in_channel, in_depth, in_height, in_width]

    grid : tvm.Tensor
        5-D with shape [batch, 3, out_depth, out_height, out_width]

    method : str
        The interpolation method "nearest", "bilinear"("trilinear") are supported.

    layout : str
        The layout of input data and the output.

    padding_mode : str
        The padding mode for outside grid values, "zeros", "border", "reflection" are supported.

    align_corners: bool
        Geometrically, we consider the pixels of the input as squares rather than points.
        If set to "True", the extrema ("-1" and "1") are considered as referring
        to the center points of the input corner pixels. If set to "False", they
        are instead considered as referring to the corner points of the input corner
        pixels, making the sampling more resolution agnostic.

    Returns
    -------
    Output : tvm.Tensor
        5-D with shape [batch, in_channel, out_depth, out_height, out_width]
    """
    assert method in ("bilinear", "nearest"), f"{method} is not supported"
    assert padding_mode in ("zeros", "border", "reflection"), f"{padding_mode} is not supported"
    assert layout == "NCDHW", f"{layout} is not supported"

    batch, in_channel, in_depth, in_height, in_width = data.shape
    out_depth, out_height, out_width = grid.shape[2:]

    def _get_pixel_value(n, c, d, h, w):
        # Out-of-bounds reads return 0 — implements "zeros" padding; the clip
        # modes never pass out-of-range indices here.
        return te.if_then_else(
            te.all(d >= 0, h >= 0, w >= 0, d < in_depth, h < in_height, w < in_width),
            data[n, c, d, h, w],
            tir.const(0.0, dtype=data.dtype),
        )

    def _compute_source_index(n, d, h, w):
        # grid channel 0 holds x, 1 holds y, 2 holds z.
        z = grid[n, 2, d, h, w]
        y = grid[n, 1, d, h, w]
        x = grid[n, 0, d, h, w]

        # Map normalized coordinates in [-1, 1] to voxel coordinates.
        if align_corners:
            z = (z + 1) * (in_depth - 1) / 2
            y = (y + 1) * (in_height - 1) / 2
            x = (x + 1) * (in_width - 1) / 2
        else:
            z = -0.5 + (z + 1) * in_depth / 2
            y = -0.5 + (y + 1) * in_height / 2
            x = -0.5 + (x + 1) * in_width / 2

        if padding_mode == "reflection":
            z = _reflect_coordinates(z, in_depth)
            y = _reflect_coordinates(y, in_height)
            x = _reflect_coordinates(x, in_width)
            # Reflection can still land slightly outside; clip afterwards.
            z = _clip_coordinates(z, in_depth)
            y = _clip_coordinates(y, in_height)
            x = _clip_coordinates(x, in_width)
        elif padding_mode == "border":
            z = _clip_coordinates(z, in_depth)
            y = _clip_coordinates(y, in_height)
            x = _clip_coordinates(x, in_width)

        return (z, y, x)

    def _clip_coordinates(x, size):
        # Clamp to the valid index range [0, size - 1] ("border" behavior).
        return te.min(te.max(x, 0), size - 1)

    def _reflect_coordinates(x, size):
        def __refelection(x, size, corner_start):
            def __reflect(index, size, corner_start):
                # Fold the coordinate back into the valid span; parity of how
                # many spans were crossed decides the reflection direction.
                index_align_corner = te.abs(corner_start - index)
                size_times = te.truncdiv(index_align_corner.astype("int32"), size).astype("int32")
                t = tir.Mod(size_times, 2)
                extra = index_align_corner - size_times * size
                return tir.if_then_else(
                    tir.EQ(t, 0), extra + corner_start, size - extra + corner_start
                )

            return tir.if_then_else(
                tir.all(x >= corner_start, x <= size + corner_start),
                x,
                __reflect(x, size, corner_start),
            )

        if align_corners:
            return __refelection(x, size - 1, 0)
        return __refelection(x, size, -0.5)

    def _trilinear_sample(n, c, d, h, w):
        # Weighted average of the 8 neighbors around the source coordinate.
        z, y, x = _compute_source_index(n, d, h, w)
        z0 = te.floor(z).astype("int32")
        y0 = te.floor(y).astype("int32")
        x0 = te.floor(x).astype("int32")
        z1 = z0 + tir.const(1, "int32")
        y1 = y0 + tir.const(1, "int32")
        x1 = x0 + tir.const(1, "int32")

        return (
            _get_pixel_value(n, c, z0, y0, x0) * (1 - (x - x0)) * (1 - (y - y0)) * (1 - (z - z0))
            + _get_pixel_value(n, c, z0, y0, x1) * (x - x0) * (1 - (y - y0)) * (1 - (z - z0))
            + _get_pixel_value(n, c, z1, y1, x0) * (1 - (x - x0)) * (y - y0) * (z - z0)
            + _get_pixel_value(n, c, z1, y1, x1) * (x - x0) * (y - y0) * (z - z0)
            + _get_pixel_value(n, c, z0, y1, x0) * (1 - (x - x0)) * (y - y0) * (1 - (z - z0))
            + _get_pixel_value(n, c, z1, y0, x1) * (x - x0) * (1 - (y - y0)) * (z - z0)
            + _get_pixel_value(n, c, z1, y0, x0) * (1 - (x - x0)) * (1 - (y - y0)) * (z - z0)
            + _get_pixel_value(n, c, z0, y1, x1) * (x - x0) * (y - y0) * (1 - (z - z0))
        )

    def _nearest_sample(n, c, d, h, w):
        z, y, x = _compute_source_index(n, d, h, w)
        z_new = te.nearbyint(z).astype("int32")
        y_new = te.nearbyint(y).astype("int32")
        x_new = te.nearbyint(x).astype("int32")

        return _get_pixel_value(n, c, z_new, y_new, x_new)

    if method == "bilinear":
        interpolation = _trilinear_sample
    else:  # method == "nearest"
        interpolation = _nearest_sample

    return te.compute(
        (batch, in_channel, out_depth, out_height, out_width), interpolation, tag="grid_sample"
    )
def grid_sample(
    data, grid, method="bilinear", layout="NCHW", padding_mode="zeros", align_corners=True
):
    """Sample the input feature map at the locations given by `grid`.

    Dispatches to the 2-D or 3-D implementation based on the rank implied by
    `layout`. The grid is assumed normalized to [-1, 1]; it is typically
    produced by `affine_grid`. Out-of-boundary points are handled according to
    `padding_mode` ("zeros", "border" or "reflection"), and `align_corners`
    chooses whether the grid extrema map to pixel centers or pixel corners.

    Parameters
    ----------
    data : tvm.Tensor
        4-D with shape [batch, in_channel, in_height, in_width], or
        5-D with shape [batch, in_channel, in_depth, in_height, in_width]

    grid : tvm.Tensor
        4-D with shape [batch, 2, out_height, out_width], or
        5-D with shape [batch, 3, out_depth, out_height, out_width]

    method : str
        The interpolation method, 4-D "nearest", "bilinear", "bicubic" and
        5-D "nearest", "bilinear"("trilinear") are supported.

    layout : str
        The layout of input data and the output.

    padding_mode : str
        The padding mode for outside grid values, "zeros", "border", "reflection" are supported.

    align_corners: bool
        If "True", the extrema ("-1" and "1") refer to the center points of the
        input corner pixels; if "False", to their corner points, which makes
        the sampling more resolution agnostic.

    Returns
    -------
    Output : tvm.Tensor
        4-D with shape [batch, in_channel, out_height, out_width], or
        5-D with shape [batch, in_channel, out_depth, out_height, out_width]
    """
    rank = len(layout)
    if rank == 4:
        sampler = _grid_sample_2d
    elif rank == 5:
        sampler = _grid_sample_3d
    else:
        msg = f"layout {layout} is not supported"
        raise ValueError(msg)

    return sampler(data, grid, method, layout, padding_mode, align_corners)
| 20,738 | 38.130189 | 99 | py |
tvm | tvm-main/python/tvm/topi/image/resize.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""TVM operator input resize compute."""
from __future__ import absolute_import
import tvm
from tvm import te
from tvm.topi.utils import nchw_pack_layout, nchw_xc_layout
from .. import tag
def can_convert_multiply_to_intdiv(origin_size, scaled_size):
    """Return True when the resize ratio is a compile-time exact integer small
    enough that the coordinate multiply can be replaced by an integer division."""
    # Only constant (IntImm) target sizes can be analysed at compile time.
    if not isinstance(scaled_size, tvm.tir.expr.IntImm):
        return False
    ratio = scaled_size / origin_size.astype("float")
    # The scale factor must be an exact integer.
    if ratio.value % 1 != 0:
        return False
    # Reject ratios large enough for float rounding error to matter.
    epsilon = 1e-5
    if ratio > 1 / (epsilon * origin_size + epsilon):
        return False
    return True
def get_1d_indices(indices, layout="NCW"):
    """Unpack te.compute indices for 1d resize into (n, c, x, cc, inum, ic)."""
    if layout == "NWC":
        n, x, c = indices
        return n, c, x, None, 0, 0
    if layout == "NCW":
        n, c, x = indices
        return n, c, x, None, 0, 0
    if ncw_pack_layout(layout):
        n, c, x, inum, ic = indices
        return n, c, x, 0, inum, ic
    # Only the packed-channel NCWxc layout remains.
    assert ncw_xc_layout(layout)
    n, c, x, cc = indices
    return n, c, x, cc, 0, 0
def get_2d_indices(indices, layout="NCHW"):
    """Unpack te.compute indices for 2d resize into (n, c, y, x, cc, inum, ic)."""
    if layout == "NHWC":
        n, y, x, c = indices
        return n, c, y, x, None, 0, 0
    if layout == "NCHW":
        n, c, y, x = indices
        return n, c, y, x, None, 0, 0
    if nchw_pack_layout(layout):
        n, c, y, x, inum, ic = indices
        return n, c, y, x, 0, inum, ic
    # Only the packed-channel NCHWxc layout remains.
    assert nchw_xc_layout(layout)
    n, c, y, x, cc = indices
    return n, c, y, x, cc, 0, 0
def get_3d_indices(indices, layout="NCDHW"):
    """Unpack te.compute indices for 3d resize into (n, c, z, y, x, cc)."""
    if layout == "NDHWC":
        n, z, y, x, c = indices
        return n, c, z, y, x, None
    if layout == "NCDHW":
        n, c, z, y, x = indices
        return n, c, z, y, x, None
    # Fall through: a packed NCDHWxc layout carries an inner channel index.
    n, c, z, y, x, cc = indices
    return n, c, z, y, x, cc
def get_1d_pixel(data, layout, image_width, n, c, x, cc, ib, ic):
    """Read one element for 1d resize; x is clamped to [0, image_width - 1]."""
    x = tvm.te.max(tvm.te.min(x, image_width - 1), 0)
    if layout == "NWC":
        pixel = data(n, x, c)
    elif layout == "NCW":
        pixel = data(n, c, x)
    elif ncw_pack_layout(layout):
        pixel = data(n, c, x, ib, ic)
    else:
        # Only the packed-channel NCWxc layout remains.
        assert ncw_xc_layout(layout)
        pixel = data(n, c, x, cc)
    return pixel.astype("float")
def get_2d_pixel(data, layout, image_height, image_width, n, c, y, x, cc, ib, ic):
    """Read one pixel for 2d resize; y/x are clamped to the valid image range."""
    y = tvm.te.max(tvm.te.min(y, image_height - 1), 0)
    x = tvm.te.max(tvm.te.min(x, image_width - 1), 0)
    if layout == "NHWC":
        pixel = data(n, y, x, c)
    elif layout == "NCHW":
        pixel = data(n, c, y, x)
    elif nchw_pack_layout(layout):
        pixel = data(n, c, y, x, ib, ic)
    else:
        # Only the packed-channel NCHWxc layout remains.
        assert nchw_xc_layout(layout)
        pixel = data(n, c, y, x, cc)
    return pixel.astype("float")
def get_3d_pixel(data, layout, image_depth, image_height, image_width, n, c, z, y, x, cc):
    """Read one voxel for 3d resize; z/y/x are clamped to the valid volume range."""
    z = tvm.te.max(tvm.te.min(z, image_depth - 1), 0)
    y = tvm.te.max(tvm.te.min(y, image_height - 1), 0)
    x = tvm.te.max(tvm.te.min(x, image_width - 1), 0)
    if layout == "NDHWC":
        pixel = data(n, z, y, x, c)
    elif layout == "NCDHW":
        pixel = data(n, c, z, y, x)
    else:
        # Fall through: packed NCDHWxc layout.
        pixel = data(n, c, z, y, x, cc)
    return pixel.astype("float")
def get_inx(
    x,
    image_width,
    target_width,
    coordinate_transformation_mode,
    start_x=0,
    end_x=-1,
    use_int_div=False,
):
    """Map an output coordinate back to an input coordinate for resize,
    according to the requested coordinate transformation mode.

    `start_x`/`end_x` are only meaningful for "tf_crop_and_resize";
    `use_int_div` only affects the "asymmetric" mode.
    """
    scale_x = te.div(image_width.astype("float"), target_width.astype("float"))
    mode = coordinate_transformation_mode
    if mode == "half_pixel":
        return (x + 0.5) * scale_x - 0.5
    if mode == "align_corners":
        return (image_width - 1).astype("float") / (target_width - 1) * x
    if mode == "asymmetric":
        if use_int_div:
            return te.div(x, te.div(target_width, image_width))
        return scale_x * x
    if mode == "pytorch_half_pixel":
        # PyTorch pins the single-output case to coordinate 0.
        return te.if_then_else(target_width > 1, (x + 0.5) * scale_x - 0.5, 0.0)
    if mode == "tf_half_pixel_for_nn":
        return (x + 0.5) * scale_x
    if mode == "tf_crop_and_resize":
        # Interpolate inside the crop box [start_x, end_x] in source space.
        return te.if_then_else(
            target_width > 1,
            start_x * (image_width - 1)
            + x * (end_x - start_x) * (image_width - 1).astype("float") / (target_width - 1),
            0.5 * (start_x + end_x) * (image_width - 1),
        )
    raise ValueError(
        f"Unsupported coordinate_transformation_mode: {coordinate_transformation_mode}"
    )
def get_closest_index(in_x, rounding_method, boxes, use_int_div=False):
    """Round a fractional source coordinate to an integer index using the
    given rounding method. When `boxes` is given (crop_and_resize), plain
    rounding is always used."""
    if use_int_div:
        return in_x.astype("int32")
    if rounding_method == "round" or boxes is not None:
        return te.round(in_x).astype("int32")
    if rounding_method == "round_prefer_floor":
        return te.ceil(in_x - 0.5).astype("int32")
    if rounding_method == "round_prefer_ceil":
        return te.floor(in_x + 0.5).astype("int32")
    # A small epsilon guards floor/ceil against GPU float rounding errors
    # at exact integer coordinates.
    epsilon = 1e-5
    if rounding_method == "floor":
        return te.floor(in_x + epsilon).astype("int32")
    if rounding_method == "ceil":
        return te.ceil(in_x - epsilon).astype("int32")
    raise ValueError(f"Unknown rounding method: {rounding_method}")
def _lerp(A, B, t):
"""Perform Linear interpolation in 1D"""
return A * (1.0 - t) + B * t
def _cubic_spline_weights(t, alpha):
"""create cubic spline weights in 1D"""
t2 = t * t
t3 = t * t * t
w1 = alpha * (t3 - 2 * t2 + t)
w2 = (alpha + 2) * t3 - (3 + alpha) * t2 + 1
w3 = -(alpha + 2) * t3 + (3 + 2 * alpha) * t2 - alpha * t
w4 = -alpha * t3 + alpha * t2
return [w1, w2, w3, w4]
def _cubic_kernel(inputs, w):
"""perform cubic interpolation in 1D"""
return sum([a_i * w_i for a_i, w_i in zip(inputs, w)])
def _resize_1d(
    indices,
    data,
    roi,
    image_width,
    target_width,
    boxes=None,
    box_indices=None,
    method=None,
    extrapolation_value=0.0,
    layout="NCW",
    coordinate_transformation_mode="align_corners",
    rounding_method="",
    alpha=-0.5,
    exclude_outside=0,
    out_dtype=None,
):
    """Perform resize operation on the data with selected method and options.

    Computes a single output element of a 1-D resize at the given output
    ``indices`` (used as the lambda body of a ``te.compute``).

    Parameters
    ----------
    indices : tuple
        The indices of input data
    data : tvm.te.Tensor
        inputs is a 3-D tensor with shape
        [batch, channel, in_width]
        or [batch, in_width, channel]
    roi: Tuple of Float or Expr
        The region of interest for cropping the input image. Expected to be of
        size 2, and format [start_w, end_w].
        Only used if coordinate_transformation_mode is tf_crop_and_resize.
    image_width : integer
        Input image width
    target_width : integer
        The target resized image width
    boxes : tvm.te.Tensor, optional
        A 2-D tensor of shape [num_boxes, 4]. Each row of the tensor specifies
        the coordinates of a box.
    box_indices : tvm.te.Tensor, optional
        A 1-D tensor of shape [num_boxes], box_indices[i] specifies the data that
        the i-th box refers to.
    extrapolation_value: float, optional
        Value used for extrapolation, when applicable.
    layout: string, optional
        "NCW", "NWC", or "NCWc".
    method: string, optional
        method of interpolation ("nearest", "linear", "bicubic")
    coordinate_transformation_mode : string, optional
        Describes how to transform the coordinate in the resized tensor
        to the coordinate in the original tensor.
        [half_pixel, align_corners, asymmetric, pytorch_half_pixel,
        tf_half_pixel_for_nn, and tf_crop_and_resize].
    rounding_method: string, optional
        indicates how to find the "nearest" pixel in nearest_neighbor method
        [round, floor, ceil]
    alpha: float, optional
        Bicubic spline coefficient
    exclude_outside: bool, optional:
        Exclude values outside the image for bicubic interpolation
    out_dtype: string, optional
        Type to return. If left None will be same as input type.

    Returns
    -------
    output : out_dtype
        The computed result with type out_dtype
    """

    def _cast_output(value, data_dtype="float32", out_dtype=None):
        # Cast the interpolated value to out_dtype, defaulting to the input dtype.
        if out_dtype:
            dtype = out_dtype
        else:
            dtype = data_dtype
        return value.astype(dtype)

    n, c, x, cc, inum, ic = get_1d_indices(indices, layout)
    box_idx = box_indices(n) if box_indices is not None else n
    if boxes is not None:
        # TODO(mbrookhart): Find an example of this
        raise NotImplementedError("resize1d with image boxes not yet implemented")
    # Map the output coordinate x back into the input image's coordinate space.
    in_x = get_inx(x, image_width, target_width, coordinate_transformation_mode, roi[0], roi[1])
    if method == "nearest_neighbor":
        if rounding_method == "":
            # Default rounding depends on the coordinate mode: round for
            # align_corners, floor for everything else.
            if coordinate_transformation_mode == "align_corners":
                rounding_method = "round"
            else:
                rounding_method = "floor"
        closest_x_index = get_closest_index(in_x, rounding_method, boxes)
        value = get_1d_pixel(data, layout, image_width, box_idx, c, closest_x_index, cc, inum, ic)
    elif method == "linear":
        # Blend the two neighboring pixels by the fractional offset x_lerp.
        x_int = te.floor(in_x).astype("int32")
        x_lerp = in_x - x_int
        p = [0 for i in range(2)]
        for i in range(2):
            p[i] = get_1d_pixel(data, layout, image_width, box_idx, c, x_int + i, cc, inum, ic)
        value = _lerp(*p, x_lerp)
    elif method == "cubic":
        # Convolve a 4-tap neighborhood (xint-1 .. xint+2) with cubic weights.
        xint = te.floor(in_x).astype("int32")
        xfract = in_x - te.floor(in_x)
        # Get the surrounding values
        p = [0 for i in range(4)]
        for i in range(4):
            p[i] = get_1d_pixel(data, layout, image_width, box_idx, c, xint + i - 1, cc, inum, ic)
        wx = _cubic_spline_weights(xfract, alpha)
        if exclude_outside:
            # Zero out the weights of out-of-image taps, then renormalize so
            # the remaining weights still sum to one.
            for i in range(4):
                wx[i] = te.if_then_else(
                    te.any(xint - 1 + i < 0, xint + i > image_width), 0.0, wx[i]
                )
            sum_wx = sum(wx)
            wx = [w / sum_wx for w in wx]
        value = _cubic_kernel(p, wx)
    else:
        raise ValueError("Unknown resize method:", method)
    if coordinate_transformation_mode == "tf_crop_and_resize":
        # use extrapolation_value if in_x is out of boundary
        value = tvm.tir.if_then_else(
            in_x < 0,
            extrapolation_value,
            tvm.tir.if_then_else(in_x > image_width - 1, extrapolation_value, value),
        )
    return _cast_output(value, data.dtype, out_dtype=out_dtype)
def resize1d(
    data,
    roi,
    size,
    layout="NCW",
    method="linear",
    coordinate_transformation_mode="half_pixel",
    rounding_method="",
    bicubic_alpha=-0.5,
    bicubic_exclude=0,
    extrapolation_value=0.0,
    out_dtype=None,
    output_shape=None,
):
    """Perform resize operation on the data.

    Parameters
    ----------
    data : tvm.te.Tensor
        inputs is a 3-D tensor with shape
        [batch, channel, in_width]
        or [batch, in_width, channel]
    roi: Tuple of Float or Expr
        The region of interest for cropping the input image. Expected to be of
        size 2, and format [start_w, end_w].
        Only used if coordinate_transformation_mode is tf_crop_and_resize.
    size: Tuple
        Output resolution scale to
    layout: string, optional
        "NCW", "NWC", or "NCWc".
    method: string, optional
        method of interpolation ("nearest", "linear", "bicubic")
    coordinate_transformation_mode : string, optional
        Describes how to transform the coordinate in the resized tensor
        to the coordinate in the original tensor.
        [half_pixel, align_corners, asymmetric, pytorch_half_pixel,
        tf_half_pixel_for_nn, and tf_crop_and_resize].
    rounding_method:
        Method for rounding coordinate locations
    bicubic_alpha: float, optional
        Bicubic spline coefficient
    bicubic_exclude: bool, optional:
        Exclude values outside the image for bicubic interpolation
    extrapolation_value: float, optional
        Value used for extrapolation, when applicable.
    out_dtype: string, optional
        Type to return. If left None will be same as input type.
    output_shape: tvm.tir.container.Array, optional
        Shape to return. If left None will be inferred
        (If shape is determined dynamically, pass out_dtype.shape as output_shape)

    Returns
    -------
    output : tvm.te.Tensor
        3-D with shape [batch, channel, in_width*scale]
        or [batch, in_width*scale, channel]
        or 5-D with shape [batch, channel-major, in_width*scale, channel-minor]
    """
    method = method.lower()
    # Unpack the input shape and infer the default output shape per layout.
    if layout == "NWC":
        batch, in_w, channel = data.shape
        inferred_shape = [batch, size[0], channel]
    elif layout == "NCW":
        batch, channel, in_w = data.shape
        inferred_shape = [batch, channel, size[0]]
    elif ncw_pack_layout(layout):  # NCWinic
        batch, channel, in_w, pack_inum, pack_ic = data.shape
        inferred_shape = [batch, channel, size[0], pack_inum, pack_ic]
    elif ncw_xc_layout(layout):  # NCWxc
        batch, channel, in_w, channel_minor = data.shape
        inferred_shape = [batch, channel, size[0], channel_minor]
    else:
        raise ValueError(f"{layout} layout is not supported.")
    if output_shape is None:
        output_shape = inferred_shape
    # Normalize the requested width to an int32 immediate (mutating in place,
    # matching the historical behavior for list inputs).
    if isinstance(size, tuple):
        size = list(size)
    if isinstance(size[0], int):
        size[0] = tvm.tir.IntImm("int32", size[0])

    def _compute(*indices):
        return _resize_1d(
            indices,
            data,
            roi,
            in_w,
            size[0],
            method=method,
            layout=layout,
            coordinate_transformation_mode=coordinate_transformation_mode,
            rounding_method=rounding_method,
            alpha=bicubic_alpha,
            exclude_outside=bicubic_exclude,
            extrapolation_value=extrapolation_value,
            out_dtype=out_dtype,
        )

    return te.compute(output_shape, _compute, name="resize", tag=tag.INJECTIVE)
def _resize_2d(
    indices,
    data,
    roi,
    image_height,
    image_width,
    target_height,
    target_width,
    boxes=None,
    box_indices=None,
    method=None,
    extrapolation_value=0.0,
    layout="NCHW",
    coordinate_transformation_mode="align_corners",
    rounding_method="",
    alpha=-0.5,
    exclude_outside=0,
    out_dtype=None,
):
    """Perform resize operation on the data with selected method and options.

    Computes a single output element of a 2-D resize at the given output
    ``indices`` (used as the lambda body of a ``te.compute``).

    Parameters
    ----------
    indices : tuple
        The indices of input data
    data : tvm.te.Tensor
        inputs is a 4-D tensor with shape
        [batch, channel, in_height, in_width]
        or [batch, in_height, in_width, channel]
    roi: Tuple of Float or Expr
        The region of interest for cropping the input image. Expected to be of
        size 4, and format [start_h, start_w, end_h, end_w].
        Only used if coordinate_transformation_mode is tf_crop_and_resize.
    image_height : integer
        Input image height
    image_width : integer
        Input image width
    target_height : integer
        The target resized image height
    target_width : integer
        The target resized image width
    boxes : tvm.te.Tensor, optional
        A 2-D tensor of shape [num_boxes, 4]. Each row of the tensor specifies
        the coordinates of a box.
    method: string, optional
        method of interpolation ("nearest", "linear", "bicubic")
    box_indices : tvm.te.Tensor, optional
        A 1-D tensor of shape [num_boxes], box_indices[i] specifies the data that
        the i-th box refers to.
    extrapolation_value: float, optional
        Value used for extrapolation, when applicable.
    layout: string, optional
        "NCHW", "NHWC", or "NCHWc".
    coordinate_transformation_mode : string, optional
        Describes how to transform the coordinate in the resized tensor
        to the coordinate in the original tensor.
        [half_pixel, align_corners, asymmetric, pytorch_half_pixel,
        tf_half_pixel_for_nn, and tf_crop_and_resize].
    rounding_method: string, optional
        indicates how to find the "nearest" pixel in nearest_neighbor method
        [round, floor, ceil]
    alpha: float, optional
        Bicubic spline coefficient
    exclude_outside: bool, optional:
        Exclude values outside the image for bicubic interpolation
    out_dtype: string, optional
        Type to return. If left None will be same as input type.

    Returns
    -------
    output : out_dtype
        The computed result with type out_dtype
    """

    def _cast_output(value, data_dtype="float32", out_dtype=None):
        # Cast the interpolated value to out_dtype, defaulting to the input dtype.
        if out_dtype:
            dtype = out_dtype
        else:
            dtype = data_dtype
        return value.astype(dtype)

    # Fast path: for asymmetric nearest-neighbor, coordinate scaling can be
    # rewritten as an integer division when the shapes permit it.
    height_use_int_div = False
    width_use_int_div = False
    if method == "nearest_neighbor" and coordinate_transformation_mode == "asymmetric":
        height_use_int_div = can_convert_multiply_to_intdiv(image_height, target_height)
        width_use_int_div = can_convert_multiply_to_intdiv(image_width, target_width)
    n, c, y, x, cc, inum, ic = get_2d_indices(indices, layout)
    box_idx = box_indices(n) if box_indices is not None else n
    if boxes is not None:
        # Crop-and-resize: map (y, x) into the normalized box for this batch
        # element (boxes hold [y1, x1, y2, x2] in normalized coordinates).
        y1, x1 = boxes(n, 0), boxes(n, 1)
        y2, x2 = boxes(n, 2), boxes(n, 3)
        in_h = (image_height - 1) * (y2 - y1)
        in_w = (image_width - 1) * (x2 - x1)
        h_scale = in_h.astype("float") / (target_height - 1)
        w_scale = in_w.astype("float") / (target_width - 1)
        in_y = y1 * (image_height - 1) + h_scale * y
        in_x = x1 * (image_width - 1) + w_scale * x
    else:
        # Map the output coordinates back into the input image's space.
        in_x = get_inx(
            x,
            image_width,
            target_width,
            coordinate_transformation_mode,
            roi[1],
            roi[3],
            width_use_int_div,
        )
        in_y = get_inx(
            y,
            image_height,
            target_height,
            coordinate_transformation_mode,
            roi[0],
            roi[2],
            height_use_int_div,
        )
    if method == "nearest_neighbor":
        if rounding_method == "":
            # Default rounding depends on the coordinate mode: round for
            # align_corners, floor for everything else.
            if coordinate_transformation_mode == "align_corners":
                rounding_method = "round"
            else:
                rounding_method = "floor"
        closest_x_index = get_closest_index(in_x, rounding_method, boxes, width_use_int_div)
        closest_y_index = get_closest_index(in_y, rounding_method, boxes, height_use_int_div)
        value = get_2d_pixel(
            data,
            layout,
            image_height,
            image_width,
            box_idx,
            c,
            closest_y_index,
            closest_x_index,
            cc,
            inum,
            ic,
        )
    elif method == "linear":
        # Bilinear: blend the 2x2 neighborhood with per-axis fractional weights.
        y_int = te.floor(in_y).astype("int32")
        x_int = te.floor(in_x).astype("int32")
        y_lerp = in_y - y_int
        x_lerp = in_x - x_int
        p = [[0 for i in range(2)] for j in range(2)]
        for j in range(2):
            for i in range(2):
                p[j][i] = get_2d_pixel(
                    data,
                    layout,
                    image_height,
                    image_width,
                    box_idx,
                    c,
                    y_int + j,
                    x_int + i,
                    cc,
                    inum,
                    ic,
                )
        top = _lerp(*p[0], x_lerp)
        bottom = _lerp(*p[1], x_lerp)
        value = _lerp(top, bottom, y_lerp)
    elif method == "cubic":
        # Bicubic: separable cubic convolution over a 4x4 neighborhood.
        xint = te.floor(in_x).astype("int32")
        xfract = in_x - te.floor(in_x)
        yint = te.floor(in_y).astype("int32")
        yfract = in_y - te.floor(in_y)
        # Get the surrounding values
        p = [[0 for i in range(4)] for j in range(4)]
        for j in range(4):
            for i in range(4):
                p[j][i] = get_2d_pixel(
                    data,
                    layout,
                    image_height,
                    image_width,
                    box_idx,
                    c,
                    yint + j - 1,
                    xint + i - 1,
                    cc,
                    inum,
                    ic,
                )
        wx = _cubic_spline_weights(xfract, alpha)
        wy = _cubic_spline_weights(yfract, alpha)
        if exclude_outside:
            # Zero out the weights of out-of-image taps along each axis, then
            # renormalize so each axis' weights still sum to one.
            for i in range(4):
                wx[i] = te.if_then_else(
                    te.any(xint - 1 + i < 0, xint + i > image_width), 0.0, wx[i]
                )
                wy[i] = te.if_then_else(
                    te.any(yint - 1 + i < 0, yint + i > image_height), 0.0, wy[i]
                )
            sum_wx = sum(wx)
            sum_wy = sum(wy)
            wx = [w / sum_wx for w in wx]
            wy = [w / sum_wy for w in wy]
        # Collapse along x first, then along y.
        col0 = _cubic_kernel(p[0], wx)
        col1 = _cubic_kernel(p[1], wx)
        col2 = _cubic_kernel(p[2], wx)
        col3 = _cubic_kernel(p[3], wx)
        value = _cubic_kernel([col0, col1, col2, col3], wy)
    else:
        raise ValueError("Unknown resize method:", method)
    if coordinate_transformation_mode == "tf_crop_and_resize":
        # Use extrapolation_value if in_y is out of boundary ...
        out = tvm.tir.if_then_else(
            in_y < 0,
            extrapolation_value,
            tvm.tir.if_then_else(in_y > image_height - 1, extrapolation_value, value),
        )
        # ... and likewise for in_x, wrapping the in_y result.
        value = tvm.tir.if_then_else(
            in_x < 0,
            extrapolation_value,
            tvm.tir.if_then_else(in_x > image_width - 1, extrapolation_value, out),
        )
    return _cast_output(value, data.dtype, out_dtype=out_dtype)
def resize2d(
    data,
    roi,
    size,
    layout="NCHW",
    method="linear",
    coordinate_transformation_mode="half_pixel",
    rounding_method="",
    bicubic_alpha=-0.5,
    bicubic_exclude=0,
    extrapolation_value=0.0,
    out_dtype=None,
    output_shape=None,
):
    """Perform resize operation on the data.

    Parameters
    ----------
    data : tvm.te.Tensor
        inputs is a 4-D tensor with shape
        [batch, channel, in_height, in_width]
        or [batch, in_height, in_width, channel]
    roi: Tuple of Float or Expr
        The region of interest for cropping the input image. Expected to be of
        size 4, and format [start_h, start_w, end_h, end_w].
        Only used if coordinate_transformation_mode is tf_crop_and_resize.
    size: Tuple
        Output resolution scale to
    layout: string, optional
        "NCHW", "NHWC", or "NCHWc".
    method: string, optional
        method of interpolation ("nearest", "linear", "bicubic")
    coordinate_transformation_mode : string, optional
        Describes how to transform the coordinate in the resized tensor
        to the coordinate in the original tensor.
        [half_pixel, align_corners, asymmetric, pytorch_half_pixel,
        tf_half_pixel_for_nn, and tf_crop_and_resize].
    rounding_method:
        Method for rounding coordinate locations
    bicubic_alpha: float, optional
        Bicubic spline coefficient
    bicubic_exclude: bool, optional:
        Exclude values outside the image for bicubic interpolation
    extrapolation_value: float, optional
        Value used for extrapolation, when applicable.
    out_dtype: string, optional
        Type to return. If left None will be same as input type.
    output_shape: tvm.tir.container.Array, optional
        Shape to return. If left None will be inferred
        (If shape is determined dynamically, pass out_dtype.shape as output_shape)

    Returns
    -------
    output : tvm.te.Tensor
        4-D with shape [batch, channel, in_height*scale, in_width*scale]
        or [batch, in_height*scale, in_width*scale, channel]
        or 5-D with shape [batch, channel-major, in_height*scale, in_width*scale, channel-minor]
    """
    method = method.lower()
    # Unpack the input shape and infer the default output shape per layout.
    if layout == "NHWC":
        batch, in_h, in_w, channel = data.shape
        inferred_shape = [batch, size[0], size[1], channel]
    elif layout == "NCHW":
        batch, channel, in_h, in_w = data.shape
        inferred_shape = [batch, channel, size[0], size[1]]
    elif nchw_pack_layout(layout):  # NCHWinic
        batch, channel, in_h, in_w, pack_inum, pack_ic = data.shape
        inferred_shape = [batch, channel, size[0], size[1], pack_inum, pack_ic]
    elif nchw_xc_layout(layout):  # NCHWxc
        batch, channel, in_h, in_w, channel_minor = data.shape
        inferred_shape = [batch, channel, size[0], size[1], channel_minor]
    else:
        raise ValueError(f"{layout} layout is not supported.")
    if output_shape is None:
        output_shape = inferred_shape
    # Normalize the requested height/width to int32 immediates (mutating in
    # place, matching the historical behavior for list inputs).
    if isinstance(size, tuple):
        size = list(size)
    for axis in range(2):
        if isinstance(size[axis], int):
            size[axis] = tvm.tir.IntImm("int32", size[axis])

    def _compute(*indices):
        return _resize_2d(
            indices,
            data,
            roi,
            in_h,
            in_w,
            size[0],
            size[1],
            method=method,
            layout=layout,
            coordinate_transformation_mode=coordinate_transformation_mode,
            rounding_method=rounding_method,
            alpha=bicubic_alpha,
            exclude_outside=bicubic_exclude,
            extrapolation_value=extrapolation_value,
            out_dtype=out_dtype,
        )

    return te.compute(output_shape, _compute, name="resize", tag=tag.INJECTIVE)
def crop_and_resize(
    data,
    boxes,
    box_indices,
    crop_size,
    layout="NCHW",
    method="bilinear",
    extrapolation_value=None,
    out_dtype=None,
):
    """Perform crop and resize operation on the data.

    Parameters
    ----------
    data : tvm.te.Tensor
        inputs is a 4-D tensor with shape
        [batch, channel, in_height, in_width]
        or [batch, in_height, in_width, channel]
    boxes : tvm.te.Tensor
        A 2-D tensor of shape [num_boxes, 4]. Each row of the tensor specifies
        the coordinates of a box.
    box_indices : tvm.te.Tensor
        A 1-D tensor of shape [num_boxes], box_indices[i] specifies the data that
        the i-th box refers to.
    crop_size : Tuple
        The target size of each box.
    layout : string, optional
        "NCHW", "NHWC"
    method : {"bilinear", "nearest_neighbor"}
        Method to be used for resizing.
    extrapolation_value: float, optional
        Value used for extrapolation, when applicable.
        NOTE(review): the default of None flows into tir.if_then_else inside
        _resize_2d on the tf_crop_and_resize path; presumably callers always
        supply a float -- confirm against the frontends using this op.
    out_dtype : string, optional
        Type to return. If left None will be same as input type.

    Returns
    -------
    output : tvm.te.Tensor
        4-D with shape [num_boxes, channel, crop_height, crop_width]
        or [num_boxes, crop_height, crop_width, channel]
    """
    method = method.lower()
    target_h = crop_size[0]
    target_w = crop_size[1]
    num_boxes = box_indices.shape[0]
    # Derive the output shape and input spatial extents per layout.
    if layout == "NHWC":
        output_shape = [num_boxes, target_h, target_w, data.shape[3]]
        image_h = data.shape[1].astype("int32")
        image_w = data.shape[2].astype("int32")
    elif layout == "NCHW":
        output_shape = [num_boxes, data.shape[1], target_h, target_w]
        image_h = data.shape[2].astype("int32")
        image_w = data.shape[3].astype("int32")
    elif layout.startswith("NCHW"):  # for NCHWxc
        output_shape = [
            num_boxes,
            data.shape[1],
            target_h,
            target_w,
            data.shape[4],
        ]
        image_h = data.shape[2].astype("int32")
        image_w = data.shape[3].astype("int32")
    else:
        raise ValueError(f"{layout} layout is not supported.")
    # "bilinear" is the TF-style name for what _resize_2d calls "linear".
    if method == "bilinear":
        method = "linear"

    def _compute(*indices):
        return _resize_2d(
            indices,
            data,
            [0.0] * 4,  # roi is unused here: per-box coordinates come from `boxes`
            image_h,
            image_w,
            target_h,
            target_w,
            boxes,
            box_indices,
            method=method,
            extrapolation_value=extrapolation_value,
            layout=layout,
            coordinate_transformation_mode="tf_crop_and_resize",
            out_dtype=out_dtype,
        )

    return te.compute(output_shape, _compute, name="crop_and_resize", tag=tag.INJECTIVE)
def _resize_3d(
    indices,
    data,
    roi,
    image_depth,
    image_height,
    image_width,
    target_depth,
    target_height,
    target_width,
    boxes=None,
    box_indices=None,
    method=None,
    extrapolation_value=0.0,
    layout="NCHW",
    coordinate_transformation_mode="align_corners",
    rounding_method="",
    alpha=-0.5,
    exclude_outside=0,
    out_dtype=None,
):
    """Perform resize operation on the data with selected method and options.

    Computes a single output element of a 3-D resize at the given output
    ``indices`` (used as the lambda body of a ``te.compute``).

    Parameters
    ----------
    indices : tuple
        The indices of input data
    data : tvm.te.Tensor
        inputs is a 5-D tensor with shape
        [batch, channel, in_depth, in_height, in_width]
        or [batch, in_depth, in_height, in_width, channel]
    roi: Tuple of Float or Expr
        The region of interest for cropping the input image. Expected to be of
        size 6, and format [start_d, start_h, start_w, end_d, end_h, end_w].
        Only used if coordinate_transformation_mode is tf_crop_and_resize.
    image_depth : integer
        Input image depth
    image_height : integer
        Input image height
    image_width : integer
        Input image width
    target_depth : integer
        The target resized image depth
    target_height : integer
        The target resized image height
    target_width : integer
        The target resized image width
    boxes : tvm.te.Tensor, optional
        A 2-D tensor of shape [num_boxes, 4]. Each row of the tensor specifies
        the coordinates of a box.
    box_indices : tvm.te.Tensor, optional
        A 1-D tensor of shape [num_boxes], box_indices[i] specifies the data that
        the i-th box refers to.
    method: string, optional
        method of interpolation ("nearest", "linear", "bicubic")
    extrapolation_value: float, optional
        Value used for extrapolation, when applicable.
    layout: string, optional
        "NCDHW", "NDHWC", or "NCDHWc".
    coordinate_transformation_mode : string, optional
        Describes how to transform the coordinate in the resized tensor
        to the coordinate in the original tensor.
        [half_pixel, align_corners, asymmetric, pytorch_half_pixel,
        tf_half_pixel_for_nn, and tf_crop_and_resize].
    rounding_method: string, optional
        indicates how to find the "nearest" pixel in nearest_neighbor method
        [round, floor, ceil]
    alpha: float, optional
        Bicubic spline coefficient
    exclude_outside: bool, optional:
        Exclude values outside the image for bicubic interpolation
    out_dtype: string, optional
        Type to return. If left None will be same as input type.

    Returns
    -------
    output : out_dtype
        The computed result with type out_dtype
    """

    def _cast_output(value, data_dtype="float32", out_dtype=None):
        # Cast the interpolated value to out_dtype, defaulting to the input dtype.
        if out_dtype:
            dtype = out_dtype
        else:
            dtype = data_dtype
        return value.astype(dtype)

    n, c, z, y, x, cc = get_3d_indices(indices, layout)
    box_idx = box_indices(n) if box_indices is not None else n
    if boxes is not None:
        # TODO(mbrookhart): Find an example of this
        raise NotImplementedError("resize3d with image boxes not yet implemented")
    # NOTE(review): this roi indexing (depth <- roi[2]/roi[5], width <- roi[0]/roi[3])
    # is reversed relative to the [start_d, start_h, start_w, ...] order documented
    # above; kept unchanged to preserve existing caller behavior -- confirm against
    # the frontend that constructs roi.
    in_z = get_inx(z, image_depth, target_depth, coordinate_transformation_mode, roi[2], roi[5])
    in_y = get_inx(y, image_height, target_height, coordinate_transformation_mode, roi[1], roi[4])
    in_x = get_inx(x, image_width, target_width, coordinate_transformation_mode, roi[0], roi[3])
    if method == "nearest_neighbor":
        if rounding_method == "":
            # Default rounding depends on the coordinate mode: round for
            # align_corners, floor for everything else.
            if coordinate_transformation_mode == "align_corners":
                rounding_method = "round"
            else:
                rounding_method = "floor"
        closest_z_index = get_closest_index(in_z, rounding_method, boxes)
        closest_y_index = get_closest_index(in_y, rounding_method, boxes)
        closest_x_index = get_closest_index(in_x, rounding_method, boxes)
        value = get_3d_pixel(
            data,
            layout,
            image_depth,
            image_height,
            image_width,
            box_idx,
            c,
            closest_z_index,
            closest_y_index,
            closest_x_index,
            cc,
        )
    elif method == "linear":
        # Trilinear: blend the 2x2x2 neighborhood with per-axis fractional weights.
        z_int = te.floor(in_z).astype("int32")
        y_int = te.floor(in_y).astype("int32")
        x_int = te.floor(in_x).astype("int32")
        z_lerp = in_z - z_int
        y_lerp = in_y - y_int
        x_lerp = in_x - x_int
        p = [[[0 for i in range(2)] for j in range(2)] for k in range(2)]
        for k in range(2):
            for j in range(2):
                for i in range(2):
                    p[k][j][i] = get_3d_pixel(
                        data,
                        layout,
                        image_depth,
                        image_height,
                        image_width,
                        box_idx,
                        c,
                        z_int + k,
                        y_int + j,
                        x_int + i,
                        cc,
                    )
        # Collapse along x, then y, then z.
        l = [[0 for i in range(2)] for j in range(2)]
        for j in range(2):
            for i in range(2):
                l[j][i] = _lerp(*p[j][i], x_lerp)
        top = _lerp(*l[0], y_lerp)
        bottom = _lerp(*l[1], y_lerp)
        value = _lerp(top, bottom, z_lerp)
    elif method == "cubic":
        # Tricubic: separable cubic convolution over a 4x4x4 neighborhood.
        zint = te.floor(in_z).astype("int32")
        zfract = in_z - te.floor(in_z)
        yint = te.floor(in_y).astype("int32")
        yfract = in_y - te.floor(in_y)
        xint = te.floor(in_x).astype("int32")
        xfract = in_x - te.floor(in_x)
        # Get the surrounding values
        p = [[[0 for i in range(4)] for j in range(4)] for k in range(4)]
        for k in range(4):
            for j in range(4):
                for i in range(4):
                    p[k][j][i] = get_3d_pixel(
                        data,
                        layout,
                        image_depth,
                        image_height,
                        image_width,
                        box_idx,
                        c,
                        zint + k - 1,
                        yint + j - 1,
                        xint + i - 1,
                        cc,
                    )
        wz = _cubic_spline_weights(zfract, alpha)
        wy = _cubic_spline_weights(yfract, alpha)
        wx = _cubic_spline_weights(xfract, alpha)
        if exclude_outside:
            # Zero out the weights of out-of-image taps along each axis, then
            # renormalize so each axis' weights still sum to one.
            # (Bug fix: the depth weights previously tested xint against
            # image_height and read wx[i] instead of using zint/image_depth/wz[i].)
            for i in range(4):
                wz[i] = te.if_then_else(
                    te.any(zint - 1 + i < 0, zint + i > image_depth), 0.0, wz[i]
                )
                wy[i] = te.if_then_else(
                    te.any(yint - 1 + i < 0, yint + i > image_height), 0.0, wy[i]
                )
                wx[i] = te.if_then_else(
                    te.any(xint - 1 + i < 0, xint + i > image_width), 0.0, wx[i]
                )
            sum_wz = sum(wz)
            sum_wy = sum(wy)
            sum_wx = sum(wx)
            wz = [w / sum_wz for w in wz]
            wy = [w / sum_wy for w in wy]
            wx = [w / sum_wx for w in wx]
        # Collapse along x, then y, then z.
        l = [[0 for i in range(4)] for j in range(4)]
        for j in range(4):
            for i in range(4):
                l[j][i] = _cubic_kernel(p[j][i], wx)
        col0 = _cubic_kernel(l[0], wy)
        col1 = _cubic_kernel(l[1], wy)
        col2 = _cubic_kernel(l[2], wy)
        col3 = _cubic_kernel(l[3], wy)
        value = _cubic_kernel([col0, col1, col2, col3], wz)
    else:
        raise ValueError("Unknown resize method:", method)
    if coordinate_transformation_mode == "tf_crop_and_resize":
        # Use extrapolation_value whenever any source coordinate is out of
        # bounds.  The three axis checks chain, each wrapping the previous
        # result so no axis test is lost.
        # (Bug fix: the in_y check previously wrapped `value` instead of the
        # in_z result `out`, silently dropping the depth-axis extrapolation.)
        out = tvm.tir.if_then_else(
            in_z < 0,
            extrapolation_value,
            tvm.tir.if_then_else(in_z > image_depth - 1, extrapolation_value, value),
        )
        out = tvm.tir.if_then_else(
            in_y < 0,
            extrapolation_value,
            tvm.tir.if_then_else(in_y > image_height - 1, extrapolation_value, out),
        )
        value = tvm.tir.if_then_else(
            in_x < 0,
            extrapolation_value,
            tvm.tir.if_then_else(in_x > image_width - 1, extrapolation_value, out),
        )
    return _cast_output(value, data.dtype, out_dtype=out_dtype)
def resize3d(
    data,
    roi,
    size,
    layout="NCDHW",
    method="linear",
    coordinate_transformation_mode="half_pixel",
    rounding_method="",
    bicubic_alpha=-0.5,
    bicubic_exclude=0,
    extrapolation_value=0.0,
    out_dtype=None,
    output_shape=None,
):
    """Perform resize operation on the data.

    Parameters
    ----------
    data : tvm.te.Tensor
        inputs is a 5-D tensor with shape
        [batch, channel, in_depth, in_height, in_width]
        or [batch, in_depth, in_height, in_width, channel]
    roi: Tuple of Float or Expr
        The region of interest for cropping the input image. Expected to be of
        size 6, and format [start_d, start_h, start_w, end_d, end_h, end_w].
        Only used if coordinate_transformation_mode is tf_crop_and_resize.
    size: Tuple
        Output resolution scale to
    layout: string, optional
        "NCDHW", "NDHWC", or "NCDHWc".
    method: string, optional
        method of interpolation ("nearest", "linear", "bicubic")
    coordinate_transformation_mode : string, optional
        Describes how to transform the coordinate in the resized tensor
        to the coordinate in the original tensor.
        [half_pixel, align_corners, asymmetric, pytorch_half_pixel,
        tf_half_pixel_for_nn, and tf_crop_and_resize].
    rounding_method:
        Method for rounding coordinate locations
    bicubic_alpha: float, optional
        Bicubic spline coefficient
    bicubic_exclude: bool, optional:
        Exclude values outside the image for bicubic interpolation
    extrapolation_value: float, optional
        Value used for extrapolation, when applicable.
    out_dtype: string, optional
        Type to return. If left None will be same as input type.
    output_shape: tvm.tir.container.Array, optional
        Shape to return. If left None will be inferred
        (If shape is determined dynamically, pass out_dtype.shape as output_shape)

    Returns
    -------
    output : tvm.te.Tensor
        5-D with shape [batch, channel, in_depth*scale, in_height*scale, in_width*scale]
        or [batch, in_depth*scale, in_height*scale, in_width*scale, channel]
        or 6-D with shape
        [batch, channel-major, in_depth*scale, in_height*scale, in_width*scale, channel-minor]
    """
    method = method.lower()
    # Unpack the input shape and infer the output shape per layout.
    # Bug fix: a caller-supplied output_shape was previously overwritten;
    # it is now honored, matching resize1d/resize2d and the docstring.
    if layout == "NDHWC":
        in_n, in_d, in_h, in_w, in_c = data.shape
        if output_shape is None:
            output_shape = [in_n, size[0], size[1], size[2], in_c]
    elif layout == "NCDHW":
        in_n, in_c, in_d, in_h, in_w = data.shape
        if output_shape is None:
            output_shape = [in_n, in_c, size[0], size[1], size[2]]
    # Otherwise layout must be NCDHWxc
    else:
        in_n, in_c, in_d, in_h, in_w, in_cc = data.shape
        if output_shape is None:
            output_shape = [in_n, in_c, size[0], size[1], size[2], in_cc]
    # Normalize the requested depth/height/width to int32 immediates.
    if isinstance(size, tuple):
        size = list(size)
    for i in range(3):
        if isinstance(size[i], int):
            size[i] = tvm.tir.IntImm("int32", size[i])

    def compute_func(*indices):
        return _resize_3d(
            indices,
            data,
            roi,
            in_d,
            in_h,
            in_w,
            size[0],
            size[1],
            size[2],
            method=method,
            layout=layout,
            coordinate_transformation_mode=coordinate_transformation_mode,
            rounding_method=rounding_method,
            alpha=bicubic_alpha,
            exclude_outside=bicubic_exclude,
            extrapolation_value=extrapolation_value,
            out_dtype=out_dtype,
        )

    return te.compute(output_shape, compute_func, name="resize", tag=tag.INJECTIVE)
| 42,595 | 31.099472 | 98 | py |
tvm | tvm-main/python/tvm/autotvm/tuner/xgboost_cost_model.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""XGBoost as cost model"""
import logging
import time
from typing import Dict
import numpy as np
from tvm.contrib.popen_pool import PopenPoolExecutor, StatusKind
from .. import feature
from ..utils import get_rank
from .metric import cover_curve, max_curve, recall_curve
from .model_based_tuner import CostModel, FeatureCache
# xgboost is an optional dependency: fall back to a no-op TrainingCallback base
# class so this module can still be imported when xgboost is absent.
try:
    from xgboost.callback import TrainingCallback  # type: ignore
except ImportError:

    class TrainingCallback:  # type: ignore
        pass


# The xgboost module itself is imported lazily in XGBoostCostModel.__init__.
xgb = None

logger = logging.getLogger("autotvm")
class XGBoostCostModel(CostModel):
"""XGBoost as cost model
Parameters
----------
task: Task
The tuning task
feature_type: str, optional
If is 'itervar', use features extracted from IterVar (loop variable).
If is 'knob', use flatten ConfigEntity directly.
If is 'curve', use sampled curve feature (relation feature).
Note on choosing feature type:
For single task tuning, 'itervar' and 'knob' are good.
'itervar' is more accurate but 'knob' is much faster.
There are some constraints on 'itervar', if you meet
problems with feature extraction when using 'itervar',
you can switch to 'knob'.
For cross-shape tuning (e.g. many convolutions with different shapes),
'itervar' and 'curve' has better transferability,
'knob' is faster.
For cross-device or cross-operator tuning, you can use 'curve' only.
loss_type: str
If is 'reg', use regression loss to train cost model.
The cost model predicts the normalized flops.
If is 'rank', use pairwise rank loss to train cost model.
The cost model predicts relative rank score.
If is 'rank-binary', use pairwise rank loss with binarized labels to train cost model.
The cost model predicts relative rank score.
num_threads: int, optional
The number of threads.
log_interval: int, optional
If is not none, the cost model will print training log every `log_interval` iterations.
upper_model: XGBoostCostModel, optional
The upper model used in transfer learning
"""
def __init__(
self,
task,
feature_type,
loss_type="reg",
num_threads=None,
log_interval=25,
upper_model=None,
):
global xgb
super(XGBoostCostModel, self).__init__()
try:
if xgb is None:
xgb = __import__("xgboost")
except ImportError:
raise ImportError(
"XGBoost is required for XGBoostCostModel. "
"Please install its python package first. "
"Help: (https://xgboost.readthedocs.io/en/latest/) "
)
self.task = task
self.target = task.target
self.space = task.config_space
self.fea_type = feature_type
self.loss_type = loss_type
self.num_threads = num_threads
self.log_interval = log_interval
self.loss_type = loss_type
if loss_type == "reg":
self.xgb_params = {
"max_depth": 3,
"gamma": 0.0001,
"min_child_weight": 1,
"subsample": 1.0,
"eta": 0.3,
"lambda": 1.00,
"alpha": 0,
"objective": "reg:linear",
}
elif loss_type in ("rank", "rank-binary"):
self.xgb_params = {
"max_depth": 3,
"gamma": 0.0001,
"min_child_weight": 1,
"subsample": 1.0,
"eta": 0.3,
"lambda": 1.00,
"alpha": 0,
"objective": "rank:pairwise",
}
else:
raise RuntimeError("Invalid loss type: " + loss_type)
self.xgb_params["verbosity"] = 0
if num_threads:
self.xgb_params["nthread"] = num_threads
self.bst = None
if feature_type == "itervar":
self.feature_extract_func = _extract_itervar_feature_index
elif feature_type == "knob":
self.feature_extract_func = _extract_knob_feature_index
elif feature_type == "curve":
self.feature_extract_func = _extract_curve_feature_index
else:
raise RuntimeError("Invalid feature type " + feature_type)
if upper_model: # share a same feature cache with upper model
self.feature_cache = upper_model.feature_cache
else:
self.feature_cache = FeatureCache()
self.upper_model = upper_model
self.feature_extra_ct = 0
self.pool = None
self.base_model = None
self._sample_size = 0
self._reset_pool(self.space, self.target, self.task)
def _reset_pool(self, config_space, build_target, tuning_task):
    """Recreate the feature-extraction worker pool for new task arguments."""
    if self.upper_model:
        # A base model never owns a pool; pool management is delegated upward.
        self.upper_model._reset_pool(config_space, build_target, tuning_task)
        return
    self._close_pool()
    self.pool = PopenPoolExecutor(
        max_workers=self.num_threads,
        initializer=_extract_popen_initializer,
        initargs=(config_space, build_target, tuning_task),
    )
def _close_pool(self):
    """Drop the reference to the worker pool, if one exists."""
    if not self.pool:
        return
    self.pool = None
def _get_pool(self):
    """Return the worker pool, resolving through the upper model when present."""
    owner = self.upper_model
    return owner._get_pool() if owner else self.pool
def _base_model_discount(self):
    """Weight applied to base-model predictions; halves for every 64 samples."""
    halvings = self._sample_size / 64.0
    return 1.0 / (2**halvings)
def fit(self, xs, ys, plan_size):
    """Fit the cost model on measured configs.

    Parameters
    ----------
    xs: Array of int
        Indexes of measured configs in the config space.
    ys: Array of float
        Measured scores for each config (higher is better).
    plan_size: int
        Tuner plan size; recall@plan_size drives early stopping.
    """
    tic = time.time()
    self._reset_pool(self.space, self.target, self.task)
    x_train = self._get_feature(xs)
    y_train = np.array(ys)
    # Normalize labels into (0, 1]; the max(..., 1e-8) guards an all-zero batch.
    y_max = np.max(y_train)
    y_train = y_train / max(y_max, 1e-8)
    valid_index = y_train > 1e-6  # successfully measured entries (logging only)
    index = np.random.permutation(len(x_train))
    dtrain = xgb.DMatrix(x_train[index], y_train[index])
    self._sample_size = len(x_train)
    if self.base_model:
        discount = self._base_model_discount()
        if discount < 0.05:  # discard base model
            self.base_model.upper_model = None
            self.base_model = None
        else:
            # Transfer learning: feed the discounted base-model prediction in
            # as the boosting margin so training refines it rather than
            # starting from scratch.
            dtrain.set_base_margin(discount * self.base_model.predict(xs, output_margin=True))
    self.bst = xgb.train(
        self.xgb_params,
        dtrain,
        num_boost_round=8000,
        callbacks=[
            CustomCallback(
                stopping_rounds=20,
                metric=f"tr-a-recall@{plan_size}",
                evals=[(dtrain, "tr")],
                maximize=True,
                fevals=[xgb_average_recalln_curve_score(plan_size)],
                verbose_eval=self.log_interval,
                loss_type=self.loss_type,
            )
        ],
    )
    logger.debug(
        "XGB train: %.2f\tobs: %d\terror: %d\tn_cache: %d",
        time.time() - tic,
        len(xs),
        len(xs) - np.sum(valid_index),
        self.feature_cache.size(self.fea_type),
    )
def fit_log(self, records, plan_size, min_seed_records=500):
    """Fit the cost model from historic tuning log records.

    Parameters
    ----------
    records: iterable of (input, result) pairs
        Historic measurement records; only entries whose task name matches
        this model's task are used.
    plan_size: int
        Tuner plan size (doubled below to widen the recall metric).
    min_seed_records: int
        Minimum number of usable records; below this, no model is trained.

    Returns
    -------
    bool
        True if a model was trained, False if there were too few records.
    """
    tic = time.time()
    # filter data, only pick the data with a same task
    data = []
    for inp, res in records:
        if inp.task.name == self.task.name:
            data.append((inp, res))
    logger.debug("XGB load %d entries from history log file", len(data))
    # extract feature
    self._reset_pool(self.space, self.target, self.task)
    pool = self._get_pool()
    if self.fea_type == "itervar":
        feature_extract_func = _extract_itervar_feature_log
    elif self.fea_type == "knob":
        feature_extract_func = _extract_knob_feature_log
    elif self.fea_type == "curve":
        feature_extract_func = _extract_curve_feature_log
    else:
        raise RuntimeError("Invalid feature type: " + self.fea_type)
    result = pool.map_with_error_catching(feature_extract_func, data)
    result = list(result)  # store results so we can iterate through them twice
    # get maximum feature length
    fea_len = -1
    for res in result:
        if res.status != StatusKind.COMPLETE:
            continue
        x, _ = res.value
        fea_len = max(fea_len, x.shape[0])
    xs, ys = [], []
    for res in result:
        if res.status != StatusKind.COMPLETE:
            continue
        x, y = res.value
        # Features may not be the same size, pad them until they are
        if fea_len > len(x):
            xs.append(np.pad(x, (0, fea_len - len(x))))
        else:
            xs.append(x)
        ys.append(y)
    if len(xs) < min_seed_records:  # no enough samples
        return False
    xs, ys = np.array(xs), np.array(ys)
    x_train = xs
    y_train = ys
    # Normalize labels into (0, 1]; the max(..., 1e-8) guards an all-zero batch.
    y_max = np.max(y_train)
    y_train = y_train / max(y_max, 1e-8)
    index = np.random.permutation(len(x_train))
    dtrain = xgb.DMatrix(x_train[index], y_train[index])
    plan_size *= 2
    self.bst = xgb.train(
        self.xgb_params,
        dtrain,
        num_boost_round=400,
        callbacks=[
            CustomCallback(
                stopping_rounds=100,
                metric=f"tr-a-recall@{plan_size}",
                evals=[(dtrain, "tr")],
                maximize=True,
                fevals=[xgb_average_recalln_curve_score(plan_size)],
                verbose_eval=self.log_interval,
                loss_type=self.loss_type,
            )
        ],
    )
    logger.debug("XGB train: %.2f\tobs: %d", time.time() - tic, len(xs))
    return True
def predict(self, xs, output_margin=False):
    """Score config indexes *xs* with the trained booster."""
    dtest = xgb.DMatrix(self._get_feature(xs))
    if self.base_model:
        # Blend the discounted transfer-learning prediction in as the margin.
        margin = self.base_model.predict(xs, output_margin=True)
        dtest.set_base_margin(self._base_model_discount() * margin)
    return self.bst.predict(dtest, output_margin=output_margin)
def load_basemodel(self, base_model):
    """Register *base_model* for transfer learning; it reuses this model's pool."""
    base_model._close_pool()  # the base model must not keep its own workers
    base_model.upper_model = self
    self.base_model = base_model
def spawn_base_model(self):
    """Create a child cost model that shares this model's cache and pool."""
    return XGBoostCostModel(
        self.task,
        self.fea_type,
        self.loss_type,
        self.num_threads,
        self.log_interval,
        upper_model=self,
    )
def _get_feature(self, indexes):
    """get features for indexes, run extraction if we do not have cache for them"""
    # free feature cache
    if self.feature_cache.size(self.fea_type) >= 100000:
        self.feature_cache.clear(self.fea_type)
    fea_cache = self.feature_cache.get(self.fea_type)
    indexes = np.array(indexes)
    need_extract = [x for x in indexes if x not in fea_cache]
    if need_extract:
        pool = self._get_pool()
        # Failed extractions are cached as None tombstones so they are not retried.
        feas = pool.map_with_error_catching(self.feature_extract_func, need_extract)
        for i, fea in zip(need_extract, feas):
            fea_cache[i] = fea.value if fea.status == StatusKind.COMPLETE else None
    # Feature vectors may differ in length; the longest one sets the row width.
    feature_len = -1
    for idx in indexes:
        if fea_cache[idx] is not None:
            feature_len = max(fea_cache[idx].shape[-1], feature_len)
    # NOTE(review): if every extraction failed, feature_len stays -1 and
    # np.empty below would raise — presumably unreachable in practice; confirm.
    ret = np.empty((len(indexes), feature_len), dtype=np.float32)
    for i, ii in enumerate(indexes):
        t = fea_cache[ii]
        # Pad short vectors with zeros; failed extractions become all-zero rows.
        if t is not None and t.shape[0] < feature_len:
            t = np.pad(t, (0, feature_len - t.shape[0]))
        ret[i, :] = t if t is not None else 0
    return ret
def __del__(self):
    """Best-effort cleanup: release the worker pool on garbage collection."""
    self._close_pool()
# Global variables for passing arguments to extract functions.
# They live at module scope so each pool worker process can be seeded once by
# _extract_popen_initializer and then read by the _extract_*_index functions.
_extract_space = None
_extract_target = None
_extract_task = None
def _extract_popen_initializer(config_space, build_target, tuning_task):
    """Worker-process initializer: stash extraction arguments in module globals."""
    global _extract_space, _extract_target, _extract_task
    _extract_space = config_space
    _extract_target = build_target
    _extract_task = tuning_task
def _extract_itervar_feature_index(args):
    """Extract iteration-variable features for one index of the config space."""
    config = _extract_space.get(args)
    with _extract_target:
        sched, sched_args = _extract_task.instantiate(config)
        flat = feature.get_itervar_feature_flatten(sched, sched_args, take_log=True)
        flat = np.concatenate((flat, list(config.get_other_option().values())))
    return flat
def _extract_itervar_feature_log(arg):
    """Extract iteration-variable features plus a throughput label from one log entry."""
    inp, res = arg
    config = inp.config
    with inp.target:
        sched, sched_args = inp.task.instantiate(config)
        flat = feature.get_itervar_feature_flatten(sched, sched_args, take_log=True)
        x = np.concatenate((flat, list(config.get_other_option().values())))
    # Failed measurements are labeled 0.0 (worst possible score).
    y = inp.task.flop / np.mean(res.costs) if res.error_no == 0 else 0.0
    return x, y
def _extract_knob_feature_index(args):
    """Extract the flattened knob feature for one index of the config space."""
    return _extract_space.get(args).get_flatten_feature()
def _extract_knob_feature_log(arg):
    """Extract knob features plus a throughput label from one log entry."""
    inp, res = arg
    x = inp.config.get_flatten_feature()
    if res.error_no != 0:
        # Failed measurements are labeled 0.0 (worst possible score).
        return x, 0.0
    with inp.target:  # necessary, for calculating flops of this task
        inp.task.instantiate(inp.config)
    return x, inp.task.flop / np.mean(res.costs)
def _extract_curve_feature_index(args):
    """Extract the sampled-curve feature for one index of the config space."""
    config = _extract_space.get(args)
    with _extract_target:
        sched, sched_args = _extract_task.instantiate(config)
        flat = feature.get_buffer_curve_sample_flatten(sched, sched_args, sample_n=20)
        flat = np.concatenate((flat, list(config.get_other_option().values())))
    return np.array(flat)
def _extract_curve_feature_log(arg):
    """Extract sampled-curve features plus a throughput label from one log entry."""
    inp, res = arg
    config = inp.config
    with inp.target:
        sched, sched_args = inp.task.instantiate(config)
        flat = feature.get_buffer_curve_sample_flatten(sched, sched_args, sample_n=20)
        x = np.concatenate((flat, list(config.get_other_option().values())))
    # Failed measurements are labeled 0.0 (worst possible score).
    y = inp.task.flop / np.mean(res.costs) if res.error_no == 0 else 0.0
    return x, y
def _binarize_evals(evals):
    """Return copies of *evals* whose labels are thresholded to {0.0, 1.0} at 0.5."""
    binarized = []
    for evalset in evals:
        dmatrix = evalset[0]
        # Work on copies so the caller's DMatrix is left untouched.
        labels = dmatrix.get_label().copy()
        labels[labels < 0.5] = 0.0
        labels[labels >= 0.5] = 1.0
        copied = xgb.DMatrix(dmatrix.get_data().copy(), labels)
        binarized.append((copied,) + tuple(evalset[1:]))
    return binarized
class XGBoostCallback(TrainingCallback):
    """Base class for XGBoost callbacks."""

    def __call__(self, env: "xgb.core.CallbackEnv"):
        # Compatibility with xgboost < 1.3, which invokes callbacks as plain
        # callables with a CallbackEnv instead of calling after_iteration.
        return self.after_iteration(env.model, env.iteration, env.evaluation_result_list)

    def after_iteration(self, model: "xgb.Booster", epoch: int, evals_log: Dict):
        """Run after each boosting iteration; return True to stop training."""
        raise NotImplementedError
class CustomCallback(XGBoostCallback):
    """
    Callback function for xgboost.
    Support custom evaluation function and early-stopping.
    """

    def __init__(
        self,
        stopping_rounds,
        metric,
        fevals,
        loss_type="reg",
        evals=(),
        log_file=None,
        maximize=False,
        verbose_eval=True,
        skip_every=2,
    ):
        """Init function

        Parameters
        ----------
        stopping_rounds: int
            Stop training when the monitored metric has not improved for this
            many rounds.
        metric: str
            Full metric name (e.g. "tr-a-recall@64") used for early stopping.
        fevals: list of callable
            Custom evaluation functions passed to ``Booster.eval_set``.
        loss_type: str
            "reg", "rank", or "rank-binary"; "rank-binary" binarizes the
            evaluation labels before scoring.
        evals: sequence of (DMatrix, str)
            Evaluation sets scored each round.
        log_file: str, optional
            If given, evaluation lines are also appended to this file.
        maximize: bool
            Whether a larger metric value is better.
        verbose_eval: bool or int
            If an int, log evaluation results every ``verbose_eval`` rounds.
        skip_every: int
            Skip evaluation on rounds where ``epoch % skip_every == 1``.
        """
        self.stopping_rounds = stopping_rounds
        self.metric = metric
        # e.g. "tr-a-recall@8" -> "a"; used below to order printed result keys
        self.metric_shortname = metric.split("-")[1]
        self.fevals = fevals
        self.evals = evals
        self.log_file = log_file
        self.maximize = maximize
        self.verbose_eval = verbose_eval
        self.loss_type = loss_type
        self.skip_every = skip_every
        # Early-stopping state (best score/iteration), lazily initialized on
        # the first call to after_iteration.
        self.state = {}

    def after_iteration(self, model: "xgb.Booster", epoch: int, evals_log: Dict):
        """Run after each iteration. Return True when training should stop."""
        # pylint:disable = import-outside-toplevel
        try:
            from xgboost.callback import _fmt_metric  # type: ignore
        except ImportError:
            # Compatibility with xgboost >= 1.6
            def _fmt_metric(value, show_stdv=True):
                """format metric string"""
                if len(value) == 2:
                    return f"{value[0]}:{value[1]:.5f}"
                if len(value) == 3:
                    if show_stdv:
                        return f"{value[0]}:{value[1]:.5f}+{value[2]:.5f}"
                    return f"{value[0]}:{value[1]:.5f}"
                raise ValueError("wrong metric value", value)

        ##### init state #####
        if not self.state:
            self.state["maximize_score"] = self.maximize
            self.state["best_iteration"] = 0
            if self.maximize:
                self.state["best_score"] = float("-inf")
            else:
                self.state["best_score"] = float("inf")

            assert model is not None
            # Resume best-so-far from booster attributes if present (e.g. when
            # the booster was restored from a checkpoint).
            if model.attr("best_score") is not None:
                self.state["best_score"] = float(model.attr("best_score"))
                self.state["best_iteration"] = int(model.attr("best_iteration"))
                self.state["best_msg"] = model.attr("best_msg")
            else:
                model.set_attr(best_iteration=str(self.state["best_iteration"]))
                model.set_attr(best_score=str(self.state["best_score"]))
        res_dict = {}

        # Evaluation is throttled: every skip_every-th round is skipped.
        if epoch % self.skip_every == 1:
            return False

        ##### evaluation #####
        mod_evals = self.evals
        if self.loss_type == "rank-binary":
            mod_evals = _binarize_evals(self.evals)

        if self.loss_type == "rank" and int(xgb.__version__[0]) >= 2:
            # since xgboost pr#8931
            raise RuntimeError(
                "Use 'rank-binary' instead of 'rank' loss_type with xgboost %s >= 2.0.0"
                % xgb.__version__
            )

        for feval in self.fevals:
            bst_eval = model.eval_set(mod_evals, epoch, feval)
            # eval_set returns e.g. "[0]\ttr-a-recall@8:0.123"; split into pairs.
            res = [x.split(":") for x in bst_eval.split()]
            for kv in res[1:]:
                res_dict[kv[0]] = [float(kv[1])]
        eval_res = []
        keys = list(res_dict.keys())
        # Sort so keys containing the monitored metric's shortname come first.
        keys.sort(key=lambda x: x if self.metric_shortname not in x else "a" + x)
        for key in keys:
            v = res_dict[key]
            eval_res.append([key] + v)

        ##### print eval result #####
        if (
            not isinstance(self.verbose_eval, bool)
            and self.verbose_eval
            and epoch % self.verbose_eval == 0
        ):
            infos = [f"XGB iter: {epoch:3d}"]
            for item in eval_res:
                if "null" in item[0]:
                    continue
                infos.append(f"{item[0]}: {item[1]:.6f}")

            logger.debug("\t".join(infos))
            if self.log_file:
                with open(self.log_file, "a") as fout:
                    fout.write("\t".join(infos) + "\n")

        ##### choose score and do early stopping #####
        score = None
        for item in eval_res:
            if item[0] == self.metric:
                score = item[1]
                break
        assert score is not None

        best_score = self.state["best_score"]
        best_iteration = self.state["best_iteration"]
        maximize_score = self.state["maximize_score"]

        if (maximize_score and score > best_score) or (not maximize_score and score < best_score):
            msg = f"[{epoch}] " + "\t".join([_fmt_metric(x) for x in eval_res])
            self.state["best_msg"] = msg
            self.state["best_score"] = score
            self.state["best_iteration"] = epoch
            # save the property to attributes, so they will occur in checkpoint.
            if model is not None:
                model.set_attr(
                    best_score=str(self.state["best_score"]),
                    best_iteration=str(self.state["best_iteration"]),
                    best_msg=self.state["best_msg"],
                )
        elif epoch - best_iteration >= self.stopping_rounds:
            best_msg = self.state["best_msg"]
            if self.verbose_eval:
                logger.debug("XGB stopped. Best iteration: %s ", best_msg)
            return True

        return False
# feval wrapper for xgboost
def xgb_max_curve_score(N):
    """Return an xgboost feval that reports the max-curve score at position N."""

    def feval(preds, labels):
        truth = labels.get_label()
        order = np.argsort(preds)[::-1]  # best-predicted first
        curve = max_curve(truth[order])
        return f"Smax@{N}", curve[N] / np.max(truth)

    return feval
def xgb_recalln_curve_score(N):
    """Return an xgboost feval reporting recall@N of the predicted ranking."""

    def feval(preds, labels):
        truth = labels.get_label()
        order = np.argsort(preds)[::-1]  # best-predicted first
        curve = recall_curve(get_rank(truth[order]))
        return f"recall@{N}", curve[N]

    return feval
def xgb_average_recalln_curve_score(N):
    """Return an xgboost feval reporting the recall curve averaged over 1..N."""

    def feval(preds, labels):
        truth = labels.get_label()
        order = np.argsort(preds)[::-1]  # best-predicted first
        curve = recall_curve(get_rank(truth[order]))
        return f"a-recall@{N}", np.sum(curve[:N]) / N

    return feval
def xgb_recallk_curve_score(N, topk):
    """Return an xgboost feval reporting recall of the true top-*topk* at position N."""

    def feval(preds, labels):
        truth = labels.get_label()
        order = np.argsort(preds)[::-1]  # best-predicted first
        curve = recall_curve(get_rank(truth[order]), topk)
        return f"recall@{topk}", curve[N]

    return feval
def xgb_cover_curve_score(N):
    """Return an xgboost feval reporting the cover-curve score at position N."""

    def feval(preds, labels):
        truth = labels.get_label()
        order = np.argsort(preds)[::-1]  # best-predicted first
        curve = cover_curve(get_rank(truth[order]))
        return f"cover@{N}", curve[N]

    return feval
def xgb_null_score(_):
    """Return a no-op feval whose score is always ("null", 0)."""

    def feval(__, ___):
        return "null", 0

    return feval
| 23,505 | 32.247525 | 98 | py |
tvm | tvm-main/python/tvm/autotvm/tuner/xgboost_tuner.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Tuner that uses xgboost as cost model"""
from .model_based_tuner import ModelBasedTuner, ModelOptimizer
from .xgboost_cost_model import XGBoostCostModel
from .sa_model_optimizer import SimulatedAnnealingOptimizer
class XGBTuner(ModelBasedTuner):
    """Tuner that uses xgboost as cost model

    Parameters
    ----------
    task: Task
        The tuning task
    plan_size: int
        The size of a plan. After `plan_size` trials, the tuner will refit a new cost model
        and do planing for the next `plan_size` trials.
    feature_type: str, optional
        If is 'itervar', use features extracted from IterVar (loop variable).
        If is 'knob', use flatten ConfigEntity directly.
        If is 'curve', use sampled curve feature (relation feature).

        Note on choosing feature type:
        For single task tuning, 'itervar' and 'knob' are good.
        'itervar' is more accurate but 'knob' is much faster.
        There are some constraints on 'itervar', if you meet
        problems with feature extraction when using 'itervar',
        you can switch to 'knob'.

        For cross-shape tuning (e.g. many convolutions with different shapes),
        'itervar' and 'curve' has better transferability,
        'knob' is faster.

        For cross-device or cross-operator tuning, you can use 'curve' only.
    loss_type: str
        If is 'reg', use regression loss to train cost model.
        The cost model predicts the normalized flops.
        If is 'rank', use pairwise rank loss to train cost model.
        The cost model predicts relative rank score.
        If is 'rank-binary', use pairwise rank loss with binarized labels to train cost model.
        The cost model predicts relative rank score.
    num_threads: int, optional
        The number of threads.
    optimizer: str or ModelOptimizer, optional
        If is 'sa', use a default simulated annealing optimizer.
        Otherwise it should be a ModelOptimizer object.
    diversity_filter_ratio: int or float, optional
        If is not None, the tuner will first select
        top-(plan_size * diversity_filter_ratio) candidates according to the cost model
        and then pick batch_size of them according to the diversity metric.
    log_interval: int = 50
        The verbose level.
        If is 0, output nothing.
        Otherwise, output debug information every `verbose` iterations.
    """

    def __init__(
        self,
        task,
        plan_size=64,
        feature_type="itervar",
        loss_type="reg",
        num_threads=None,
        optimizer="sa",
        diversity_filter_ratio=None,
        log_interval=50,
    ):
        cost_model = XGBoostCostModel(
            task,
            feature_type=feature_type,
            loss_type=loss_type,
            num_threads=num_threads,
            log_interval=log_interval // 2,
        )
        if optimizer == "sa":
            optimizer = SimulatedAnnealingOptimizer(task, log_interval=log_interval)
        else:
            # Fixed message: the original implicit string concatenation produced
            # "...name stringor a ModelOptimizer object."
            assert isinstance(
                optimizer, ModelOptimizer
            ), "Optimizer must be a supported name string or a ModelOptimizer object."

        super(XGBTuner, self).__init__(
            task, cost_model, optimizer, plan_size, diversity_filter_ratio
        )

    def tune(self, *args, **kwargs):  # pylint: disable=arguments-differ
        super(XGBTuner, self).tune(*args, **kwargs)

        # manually close pool to avoid multiprocessing issues
        self.cost_model._close_pool()
| 4,304 | 37.783784 | 94 | py |
tvm | tvm-main/python/tvm/autotvm/tuner/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
A tuner takes a task as input. It proposes some promising :any:`ConfigEntity`
in the :any:`ConfigSpace` and measure them on the real hardware. Then it
proposed the next batch of :any:`ConfigEntity` according to the measure results.
This tuning loop is repeated.
"""
from . import callback
from .tuner import Tuner
from .index_based_tuner import GridSearchTuner, RandomTuner
from .ga_tuner import GATuner
from .xgboost_tuner import XGBTuner
from .droplet_turner import DropletTuner
| 1,273 | 38.8125 | 80 | py |
tvm | tvm-main/tests/python/conftest.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Configure pytest"""
import sys
import pytest
# Tests that cannot run on Windows are skipped at collection time.
# (The duplicated commented-out test_auto_scheduler_measure.py entry from the
# original list has been removed.)
COLLECT_IGNORE = []
if sys.platform.startswith("win"):
    COLLECT_IGNORE.extend(
        [
            "frontend/caffe",
            "frontend/caffe2",
            "frontend/coreml",
            "frontend/darknet",
            "frontend/keras",
            "frontend/mxnet",
            "frontend/pytorch",
            "frontend/tensorflow",
            "frontend/tflite",
            "frontend/onnx",
            "driver/tvmc/test_autoscheduler.py",
            "unittest/test_auto_scheduler_cost_model.py",  # stack overflow
            "unittest/test_auto_scheduler_search_policy.py",  # stack overflow
            "unittest/test_tir_intrin.py",
        ]
    )
def pytest_addoption(parser):
    """Register the command-line flag that gates Corstone-300 FVP tests."""
    flag = "--enable-corstone300-tests"
    parser.addoption(
        flag,
        action="store_true",
        default=False,
        help="Run Corstone-300 FVP tests",
    )
def pytest_collection_modifyitems(config, items):
    """Skip corstone300-marked tests unless the enabling flag was passed."""
    if config.getoption("--enable-corstone300-tests"):
        return
    for item in items:
        if "corstone300" not in item.keywords:
            continue
        item.add_marker(
            pytest.mark.skip(reason="Need --enable-corstone300-tests option to run this test")
        )
| 2,404 | 39.083333 | 92 | py |
tvm | tvm-main/tests/python/unittest/test_target_texture_codegen_opencl.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# 'License'); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
import numpy as np
import pytest
import tvm
import tvm.testing
from tvm import autotvm
from tvm import te
from tvm.topi import testing
from tvm.topi.utils import get_const_tuple, simplify
from tvm.topi import nn
def compute_plus_one_rank3(shape):
    """Declare Y = X + 1 over a rank-3 float32 tensor of the given shape."""
    X = te.placeholder(shape, name="X", dtype="float32")
    Y = te.compute(shape, lambda a, b, c: X[a, b, c] + 1, name="Compute_Y")
    return X, Y
def schedule_plus_one_rank3(X, Y):
    """Schedule the rank-3 plus-one compute, staging X through texture memory."""
    s = te.create_schedule(Y.op)
    # Xt = s.cache_read(X, "texture", [Y])
    # Xt = s.cache_read(X, "global", [Y])
    Xt = s.cache_read(X, "global.texture", [Y])

    # copy to texture stage
    x, y, c = s[Xt].op.axis
    s[Xt].bind(x, te.thread_axis("blockIdx.x"))
    s[Xt].bind(y, te.thread_axis("threadIdx.x"))
    s[Xt].vectorize(c)

    # the compute stage
    x, y, c = s[Y].op.axis
    xo, yo, xi, yi = s[Y].tile(x, y, 4, 4)
    s[Y].bind(xo, te.thread_axis("blockIdx.x"))
    s[Y].bind(yo, te.thread_axis("threadIdx.x"))
    s[Y].vectorize(c)
    return s
def compute_plus_one_rank5(shape):
    """Declare Y = X + 1 over a rank-5 float32 tensor of the given shape."""
    X = te.placeholder(shape, name="X", dtype="float32")
    Y = te.compute(
        shape, lambda a, b, c, d, e: X[a, b, c, d, e] + 1, name="Compute_Y"
    )
    return X, Y
def schedule_plus_one_rank5(X, Y):
    """Schedule the rank-5 plus-one compute, staging X through texture memory."""
    s = te.create_schedule(Y.op)
    Xt = s.cache_read(X, "global.texture", [Y])

    # copy to texture stage
    a, b, c, d, e = s[Xt].op.axis
    abc = s[Xt].fuse(a, b, c)
    s[Xt].bind(abc, te.thread_axis("blockIdx.x"))
    s[Xt].bind(d, te.thread_axis("threadIdx.x"))
    s[Xt].vectorize(e)

    # the compute stage
    a, b, c, d, e = s[Y].op.axis
    abc = s[Y].fuse(a, b, c)
    xo, yo, xi, yi = s[Y].tile(abc, d, 4, 4)
    s[Y].bind(xo, te.thread_axis("blockIdx.x"))
    s[Y].bind(yo, te.thread_axis("threadIdx.x"))
    s[Y].vectorize(e)
    return s
def compute_matmul(shape):
    """Declare a matmul over block-packed inputs of shape (outer, K, inner)."""
    A = te.placeholder(shape, name="A", dtype="float32")
    B = te.placeholder(shape, name="B", dtype="float32")
    outer, reduce_len, inner = shape[0], shape[1], shape[2]
    k = te.reduce_axis((0, reduce_len), name="k")
    C = te.compute(
        (outer * inner, outer * inner),
        lambda i, j: te.sum(
            A[i // inner, k, i % inner].astype("float32")
            * B[j // inner, k, j % inner].astype("float32"),
            axis=[k],
        ),
        name="Compute_MatMul",
    )
    return A, B, C
def schedule_matmul(A, B, C, local=False):
    """Schedule the packed matmul; stage A/B through texture (and optionally local)."""
    s = te.create_schedule(C.op)
    At = s.cache_read(A, "global.texture", [C])
    Bt = s.cache_read(B, "global.texture", [C])
    if local:
        Al = s.cache_read(At, "local", [C])
        Bl = s.cache_read(Bt, "local", [C])
    Cl = s.cache_write(C, "local")

    bx = te.thread_axis("blockIdx.x")
    tx = te.thread_axis("threadIdx.x")

    def copy_to_texture(stage):
        # One (block, thread) per (outer, k); vectorize the inner packed axis.
        _io, _k, _ii = s[stage].op.axis
        s[stage].vectorize(_ii)
        s[stage].bind(_io, bx)
        s[stage].bind(_k, tx)

    copy_to_texture(At)
    copy_to_texture(Bt)

    # copy to global stage
    _i, _j = s[C].op.axis
    xo, yo, xi, yi = s[C].tile(_i, _j, 4, 4)
    s[C].unroll(xi)
    s[C].vectorize(yi)
    s[C].bind(xo, te.thread_axis("blockIdx.x"))
    s[C].bind(yo, te.thread_axis("threadIdx.x"))

    # the compute stage
    s[Cl].compute_at(s[C], yo)
    (_k,) = Cl.op.reduce_axis
    _x, _y = s[Cl].op.axis
    s[Cl].reorder(_k, _x, _y)
    s[Cl].unroll(_x)
    s[Cl].vectorize(_y)

    if local:
        s[Al].compute_at(s[Cl], _k)
        s[Al].vectorize(s[Al].op.axis[-1])
        s[Bl].compute_at(s[Cl], _k)
        s[Bl].vectorize(s[Bl].op.axis[-1])
    return s
def compute_matmul_inner(shape):
    """Declare a matmul whose K axis is packed as (K_outer, K_inner) blocks.

    e.g. (32, 256) x (32, 256) stored as (32, 64, 4) x (32, 64, 4) -> (32, 32).
    """
    A = te.placeholder(shape, name="A", dtype="float32")
    B = te.placeholder(shape, name="B", dtype="float32")
    rows, k_outer, k_inner = shape[0], shape[1], shape[2]
    k = te.reduce_axis((0, k_outer * k_inner), name="k")
    C = te.compute(
        (rows, rows),
        lambda i, j: te.sum(
            A[i, k // k_inner, k % k_inner].astype("float32")
            * B[j, k // k_inner, k % k_inner].astype("float32"),
            axis=[k],
        ),
        name="Compute_MatMul",
    )
    return A, B, C
def schedule_matmul_inner(A, B, C, local=False):
    """Schedule the K-inner-packed matmul; stage A/B through texture memory."""
    s = te.create_schedule(C.op)
    At = s.cache_read(A, "global.texture", [C])
    Bt = s.cache_read(B, "global.texture", [C])
    if local:
        Al = s.cache_read(At, "local", [C])
        Bl = s.cache_read(Bt, "local", [C])
    Cl = s.cache_write(C, "local")

    bx = te.thread_axis("blockIdx.x")
    tx = te.thread_axis("threadIdx.x")

    def copy_to_texture(stage):
        # One (block, thread) per (row, k_outer); vectorize the k_inner axis.
        _i, _ko, _ki = s[stage].op.axis
        s[stage].vectorize(_ki)
        s[stage].bind(_i, bx)
        s[stage].bind(_ko, tx)

    copy_to_texture(At)
    copy_to_texture(Bt)

    # copy to global stage
    _i, _j = s[C].op.axis
    xo, yo, xi, yi = s[C].tile(_i, _j, 4, 4)
    s[C].unroll(xi)
    s[C].vectorize(yi)
    s[C].bind(xo, te.thread_axis("blockIdx.x"))
    s[C].bind(yo, te.thread_axis("threadIdx.x"))

    # the compute stage
    s[Cl].compute_at(s[C], yo)
    (_k,) = Cl.op.reduce_axis
    _x, _y = s[Cl].op.axis
    s[Cl].reorder(_x, _y, _k)
    s[Cl].unroll(_x)
    # TODO(csullivan): consider whether the below error is worth resolving
    # s[Cl].vectorize(_y) # error

    if local:
        s[Al].compute_at(s[Cl], _x)
        s[Al].vectorize(s[Al].op.axis[-1])
        s[Bl].compute_at(s[Cl], _x)
        s[Bl].vectorize(s[Bl].op.axis[-1])
    return s
def compute_matmul_vector_accumulator(shapeA, shapeB):
    """Declare a matmul with vector-packed operands:
    (K/4, M, K%4) x (K, N/4, N%4) -> (M, N),
    e.g. (32, 64, 4) x (128, 16, 4) = (64, 64).
    """
    A = te.placeholder(shapeA, name="A", dtype="float32")
    B = te.placeholder(shapeB, name="B", dtype="float32")
    a_vec = shapeA[-1]
    b_vec = shapeB[-1]
    k = te.reduce_axis((0, shapeB[0]), name="k")
    C = te.compute(
        (shapeA[1], shapeB[1] * b_vec),
        lambda i, j: te.sum(
            A[k // a_vec, i, k % a_vec].astype("float32")
            * B[k, j // b_vec, j % b_vec].astype("float32"),
            axis=[k],
        ),
        name="Compute_MatMul",
    )
    return A, B, C
def schedule_matmul_vector_accumulator(A, B, C, local=False):
    """Schedule the vector-accumulator matmul; stage A/B through texture memory."""
    s = te.create_schedule(C.op)
    At = s.cache_read(A, "global.texture", [C])
    Bt = s.cache_read(B, "global.texture", [C])
    if local:
        Al = s.cache_read(At, "local", [C])
        Bl = s.cache_read(Bt, "local", [C])
    Cl = s.cache_write(C, "local")

    def copy_to_texture(stage):
        _y, _x, _v = s[stage].op.axis
        # TODO(csullivan): removing this vectorize results in numerical errors, autovectorize
        s[stage].vectorize(_v)
        s[stage].bind(_y, te.thread_axis("blockIdx.x"))
        s[stage].bind(_x, te.thread_axis("threadIdx.x"))

    copy_to_texture(At)
    copy_to_texture(Bt)

    # copy to global stage
    _i, _j = s[C].op.axis
    xo, yo, xi, yi = s[C].tile(_i, _j, 4, 4)
    s[C].unroll(xi)
    s[C].vectorize(yi)
    s[C].bind(xo, te.thread_axis("blockIdx.x"))
    s[C].bind(yo, te.thread_axis("threadIdx.x"))

    # the compute stage
    s[Cl].compute_at(s[C], yo)
    (_k,) = Cl.op.reduce_axis
    _a, _b = s[Cl].op.axis
    # Split the reduction by 4 so the inner chunk can be fully unrolled.
    _ko, _ki = s[Cl].split(_k, factor=4)
    s[Cl].reorder(_ko, _a, _ki, _b)
    s[Cl].unroll(_ki)
    s[Cl].unroll(_a)
    s[Cl].vectorize(_b)

    if local:
        s[Al].compute_at(s[Cl], _a)
        _aa, _ka, _ba = s[Al].op.axis
        # TODO(csullivan)[BEFORE PR]: removing this vectorize command causes a crash. This needs to be autovectorized.
        s[Al].vectorize(_ba)
        s[Bl].compute_at(s[Cl], _ko)
        _ab, _kb, _bb = s[Bl].op.axis
        s[Bl].vectorize(_bb)
        s[Bl].unroll(_ab)
    return s
def compute_conv2d_1x1_NCHWc_RSCKk(input_shape, filter_shape):
    """Declare a 1x1 conv: data [N, C, H, W, c] with filter [R, S, C*c, K, k]."""
    data = te.placeholder(input_shape, name="data", dtype="float32")
    filt = te.placeholder(filter_shape, name="filter", dtype="float32")
    vec = input_shape[-1]
    c = te.reduce_axis((0, input_shape[1]), name="C")
    c4 = te.reduce_axis((0, vec), name="c4")
    kh = te.reduce_axis((0, filter_shape[0]), name="kh")
    kw = te.reduce_axis((0, filter_shape[1]), name="kw")
    conv = te.compute(
        (input_shape[0], filter_shape[-2], input_shape[2], input_shape[3], filter_shape[-1]),
        lambda n, ko, i, j, ki: te.sum(
            data[n, c, i, j, c4].astype("float32")
            * filt[kh, kw, c * vec + c4, ko, ki].astype("float32"),
            axis=[kh, kw, c, c4],
        ),
        name="conv2d_1x1",
    )
    return data, filt, conv
def schedule_conv2d_1x1_NCHWc_RSCKk(data, filt, conv):
    """Schedule the NCHWc/RSCKk 1x1 conv; stage both operands through texture."""
    # inputs: (1, 128//4, 56, 56, 4), (1, 1, 128, 128//4, 4)
    # outputs:
    s = te.create_schedule(conv.op)
    A, B, C = data, filt, conv
    At = s.cache_read(A, "global.texture", [C])
    Bt = s.cache_read(B, "global.texture", [C])
    Al = s.cache_read(At, "local", [C])
    Bl = s.cache_read(Bt, "local", [C])
    Cl = s.cache_write(C, "local")

    def copy_to_texture(stage):
        # Fuse all but the packed axis, split into 32-thread groups, and
        # vectorize the packed axis.
        axes = s[stage].op.axis
        fused = s[stage].fuse(*axes[:-1])
        block, thread = s[stage].split(fused, factor=32)
        s[stage].vectorize(axes[-1])
        s[stage].bind(block, te.thread_axis("blockIdx.x"))
        s[stage].bind(thread, te.thread_axis("threadIdx.x"))

    copy_to_texture(At)
    copy_to_texture(Bt)

    _n, _ko, _h, _w, _ki = s[C].op.axis
    s[C].vectorize(_ki)
    s[C].bind(_n, te.thread_axis("blockIdx.x"))
    s[C].bind(_ko, te.thread_axis("threadIdx.x"))

    s[Cl].compute_at(s[C], _w)
    _nl, _kol, _hl, _wl, _kil = s[Cl].op.axis
    _khl, _kwl, _cl, _cl4 = s[Cl].op.reduce_axis
    # Split the channel reduction by 4 and unroll the inner chunks.
    _clo, _cli = s[Cl].split(_cl, factor=4)
    s[Cl].reorder(_clo, _cli, _cl4, _kil)
    s[Cl].unroll(_cli)
    s[Cl].unroll(_cl4)
    s[Cl].vectorize(_kil)

    s[Al].compute_at(s[Cl], _cli)
    s[Al].vectorize(s[Al].op.axis[-1])
    s[Bl].compute_at(s[Cl], _kwl)
    s[Bl].vectorize(s[Bl].op.axis[-1])
    return s
def compute_conv2d_1x1_WCHNc_CRSKk(input_shape, filter_shape):
    """Declare a 1x1 conv with repacked layouts (see axis maps below)."""
    # input_shape = [W, C, H, N, c] -> [W, C, H*N, c]
    # filter_shape = [C, R, S, K, k] -> [C, R*S*K, k]
    # output_shape: [WK, HN, k] -> [W, K, H, N, k]
    data = te.placeholder(input_shape, name="data", dtype="float32")
    filt = te.placeholder(filter_shape, name="filter", dtype="float32")

    # Fuse H and N into one axis of the packed data tensor.
    packed_data = te.compute(
        (input_shape[0], input_shape[1], input_shape[2] * input_shape[3], input_shape[4]),
        lambda i, j, k, l: data[i, j, k // input_shape[3], k % input_shape[3], l],
        name="packed_data",
    )

    # Logical transformation of Nd -> 3d tensor
    # CRSKk -> C|RSK|k
    # r = rsk // SK
    # sk = rsk % SK
    # s = sk // K == (rsk % SK) // K == (rsk // K) % S
    # k = sk % K == (rsk % SK) % K == rsk % K
    packed_filter = te.compute(
        (filter_shape[0], filter_shape[1] * filter_shape[2] * filter_shape[3], filter_shape[4]),
        lambda i, j, k: filt[
            i,
            j // (filter_shape[3] * filter_shape[2]),
            (j // filter_shape[3]) % filter_shape[2],
            j % filter_shape[3],
            k,
        ],
        name="packed_filter",
    )

    c = te.reduce_axis((0, input_shape[1]), name="C")
    c4 = te.reduce_axis((0, input_shape[-1]), name="c4")
    r = te.reduce_axis((0, filter_shape[1]), name="r")
    s = te.reduce_axis((0, filter_shape[2]), name="s")

    conv = te.compute(
        (input_shape[0], filter_shape[3], input_shape[2], input_shape[3], filter_shape[4]),
        lambda w, ko, h, n, ki: te.sum(
            packed_data[w, c, h * input_shape[3] + n, c4].astype("float32")
            * packed_filter[
                c * input_shape[-1] + c4, ((r * filter_shape[2]) + s) * filter_shape[3] + ko, ki
            ].astype("float32"),
            axis=[r, s, c, c4],
        ),
        name="conv2d_1x1",
    )
    return data, filt, packed_data, packed_filter, conv
def schedule_conv2d_1x1_WCHNc_CRSKk(data, filt, packed_data, packed_filter, conv):
    """Texture schedule for the WCHNc/CRSKk 1x1 conv2d.

    Stages both packed operands through "global.texture" caches and
    thread-local registers, tiles the output over block / vthread / thread
    axes, and vectorizes the innermost 4-wide channel block.
    """
    # data: [W, C, H*N, c]
    # filter: [C, R*S*K, k]
    # output: [W, K, H, N, k]
    # conv2d( [N, C, H, W, c] , [1, 1, C, K, k]
    # inputs: (1, 128//4, 56, 56, 4), (1, 1, 128, 128//4, 4)
    # data: (56, 128//4, 56*1, 4) = (56, 32, 56, 4)
    # filt: (128, 1*1*128//4, 4) = (128, 32, 4)
    # conv: (56, 32, 56, 1, 4)
    s = te.create_schedule(conv.op)
    cfg = autotvm.get_config()

    # Packing stages are folded into the texture copy stages created below.
    s[packed_data].compute_inline()
    s[packed_filter].compute_inline()
    A, B, C = packed_data, packed_filter, conv
    At = s.cache_read(A, "global.texture", [C])
    Bt = s.cache_read(B, "global.texture", [C])
    Al = s.cache_read(At, "local", [C])
    Bl = s.cache_read(Bt, "local", [C])
    Cl = s.cache_write(C, "local")

    def copy_to_texture(stage):
        # Flatten the outer axes onto a 1d grid and vectorize the trailing
        # 4-wide axis so each work item writes one texel.
        axes = s[stage].op.axis
        fused = s[stage].fuse(*axes[:-1])
        block, thread = s[stage].split(fused, factor=32)
        s[stage].vectorize(axes[-1])
        s[stage].bind(block, te.thread_axis("blockIdx.x"))
        s[stage].bind(thread, te.thread_axis("threadIdx.x"))

    copy_to_texture(At)
    copy_to_texture(Bt)

    _w, _ko, _h, _n, _ki = s[C].op.axis
    # nparts=1 creates a dummy outer axis that serves only as the scope for
    # the auto_unroll pragma applied at the end.
    kernel_scope, _n = s[C].split(_n, nparts=1)

    cfg.define_split("tile_f", _ko, num_outputs=4)
    cfg.define_split("tile_w", _w, num_outputs=4)
    cfg.define_split("tile_h", _h, num_outputs=4)
    cfg.define_knob("auto_unroll_max_step", [0, 512, 1500])
    bk, vk, tk, ki = cfg["tile_f"].apply(s, C, _ko)
    bw, vw, tw, wi = cfg["tile_w"].apply(s, C, _w)
    bh, vh, th, hi = cfg["tile_h"].apply(s, C, _h)
    s[C].reorder(bh, _n, vh, th, hi)
    bhn = s[C].fuse(bh, _n)

    s[C].bind(bk, te.thread_axis("blockIdx.z"))
    s[C].bind(bhn, te.thread_axis("blockIdx.y"))
    s[C].bind(bw, te.thread_axis("blockIdx.x"))
    s[C].bind(vk, te.thread_axis("vthread"))
    s[C].bind(vh, te.thread_axis("vthread"))
    s[C].bind(vw, te.thread_axis("vthread"))
    s[C].bind(tk, te.thread_axis("threadIdx.z"))
    s[C].bind(th, te.thread_axis("threadIdx.y"))
    s[C].bind(tw, te.thread_axis("threadIdx.x"))
    s[C].reorder(bw, bk, bhn, vw, vk, vh, tw, tk, th, ki, hi, wi, _ki)
    s[C].vectorize(_ki)

    # TODO(csullivan): Try uneven workgroup split
    # _wo, _wi = s[C].split(_w, factor=4)
    # #_hno, _hni = s[C].split(_hn, factor=8)
    # #s[C].reorder(_wo, _wi, _ko, _hno, _hni, _ki)
    # s[C].reorder(_wo, _ko, _hn, _ki, _wi)
    # s[C].unroll(_wi)

    # # mace:
    # # const int out_ch_blk = get_global_id(0);
    # # const int out_w_blk = get_global_id(1);
    # # const int out_hb = get_global_id(2);
    # bx = te.thread_axis("blockIdx.x")
    # by = te.thread_axis("blockIdx.y")
    # bz = te.thread_axis("blockIdx.z")
    # s[C].bind(_ko, bx)
    # s[C].bind(_wo, by)
    # s[C].bind(_hn, bz)

    # s[Cl].compute_at(s[C], _hn)
    s[Cl].compute_at(s[C], th)

    _wl, _kol, _hl, _nl, _kil = s[Cl].op.axis
    _khl, _kwl, _cl, _cl4 = s[Cl].op.reduce_axis

    # Tunable splits of the reduction axes for the local compute stage.
    cfg.define_split("tile_c", _cl, num_outputs=2)
    cfg.define_split("tile_kh", _khl, num_outputs=2)
    cfg.define_split("tile_kw", _kwl, num_outputs=2)
    _clo, _cli = cfg["tile_c"].apply(s, Cl, _cl)
    _khlo, _khli = cfg["tile_kh"].apply(s, Cl, _khl)
    _kwlo, _kwli = cfg["tile_kw"].apply(s, Cl, _kwl)
    # s[OL].reorder(rco, ryo, rxo, rci, ryi, rxi, n, f, y, x)
    s[Cl].reorder(_clo, _khlo, _kwlo, _cli, _cl4, _khli, _kwli, _kol, _hl, _nl, _kil, _wl)
    # s[Cl].reorder(_clo, _khlo, _kwlo, _cli, _cl4, _khli, _kwli)
    # s[Cl].reorder(_cl, _cl4, _kil, _wl)
    s[Cl].unroll(_cl4)
    s[Cl].unroll(_wl)
    s[Cl].vectorize(_kil)

    # Local register loads of both operands are anchored at the inner
    # channel split and vectorized along their 4-wide axes.
    _wla, _cla, _hnla, _cl4a = s[Al].op.axis
    s[Al].compute_at(s[Cl], _cli)
    s[Al].vectorize(_cl4a)
    s[Al].unroll(_wla)

    _clb, _rskolb, _kilb = s[Bl].op.axis
    s[Bl].compute_at(s[Cl], _cli)
    s[Bl].vectorize(_kilb)
    s[Bl].unroll(_clb)

    s[C].pragma(kernel_scope, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val)

    # Register the FLOP count so AutoTVM can report throughput.
    WO, K, HO, N, K4 = get_const_tuple(C.shape)
    RSC, _, _ = get_const_tuple(B.shape)
    cfg.add_flop(2 * N * K * K4 * HO * WO * RSC)
    return s
def compute_conv2d_NCHWc_KCRSk(Input, Filter, stride, padding, dilation, out_dtype=None):
    """Convolution operator in NCHWc layout with a KCRSk filter.

    The filter is repacked inline into a rank-3 [K, C*R*S, k] tensor
    ("packed_filter") suitable for 2d-texture backing. The result has shape
    [N, K_chunks, OH, OW, k].
    """
    if out_dtype is None:
        out_dtype = Input.dtype
    assert isinstance(stride, int) or len(stride) == 2
    assert isinstance(dilation, int) or len(dilation) == 2

    # Normalize scalar stride/dilation into (h, w) pairs.
    if isinstance(stride, int):
        stride_h = stride_w = stride
    else:
        stride_h, stride_w = stride

    if isinstance(dilation, int):
        dilation_h = dilation_w = dilation
    else:
        dilation_h, dilation_w = dilation

    batch, in_channel_chunk, in_height, in_width, in_channel_block = Input.shape
    num_filter_chunk, channel, kernel_h, kernel_w, num_filter_block = Filter.shape
    # compute the output shape
    dilated_kernel_h = (kernel_h - 1) * dilation_h + 1
    dilated_kernel_w = (kernel_w - 1) * dilation_w + 1
    pad_top, pad_left, pad_down, pad_right = nn.get_pad_tuple(
        padding, (dilated_kernel_h, dilated_kernel_w)
    )
    out_height = simplify((in_height - dilated_kernel_h + pad_top + pad_down) // stride_h + 1)
    out_width = simplify((in_width - dilated_kernel_w + pad_left + pad_right) // stride_w + 1)
    # compute graph
    pad_before = [0, 0, pad_top, pad_left, 0]
    pad_after = [0, 0, pad_down, pad_right, 0]
    temp = nn.pad(Input, pad_before, pad_after, name="pad_temp")

    rcc = te.reduce_axis((0, in_channel_chunk), name="rc")
    rcb = te.reduce_axis((0, in_channel_block), name="rc")
    ry = te.reduce_axis((0, kernel_h), name="ry")
    rx = te.reduce_axis((0, kernel_w), name="rx")

    # NCHWc x KCRSk
    # texture: NCH|W|c
    # texture: K|CRS|k
    # c = crs//RS
    # rs = crs % RS
    # r = rs // W == (crs // S) % R
    # s = rs % W == crs % S
    Filter = te.compute(
        (num_filter_chunk, channel * kernel_h * kernel_w, num_filter_block),
        lambda ffc, crs, ffb: Filter[
            ffc, crs // (kernel_h * kernel_w), (crs // kernel_w) % kernel_h, crs % kernel_w, ffb
        ],
        name="packed_filter",
    )

    # NOTE: the lambda parameter `nn` shadows the topi `nn` module, but only
    # inside this expression.
    return te.compute(
        (batch, num_filter_chunk, out_height, out_width, num_filter_block),
        lambda nn, ffc, yy, xx, ffb: te.sum(
            temp[
                nn, rcc, yy * stride_h + ry * dilation_h, xx * stride_w + rx * dilation_w, rcb
            ].astype(out_dtype)
            * Filter[
                ffc, ((rcc * in_channel_block + rcb) * kernel_h + ry) * kernel_w + rx, ffb
            ].astype(out_dtype),
            axis=[rcc, rcb, ry, rx],
        ),
        tag="conv2d_nchwc_kcrsk_texture",
    )
def schedule_conv2d_NCHWc_KCRSk(cfg, s, conv):
    """schedule optimized for batch size = 1

    Stages padded data and the packed filter through texture caches and
    shared memory, tiles the output over block / vthread / thread axes, and
    vectorizes the innermost 4-wide channel block.
    """
    ##### space definition begin #####
    n, fc, y, x, fb = s[conv].op.axis
    rcc, rcb, ry, rx = s[conv].op.reduce_axis
    cfg.define_split("tile_fc", fc, num_outputs=4)
    cfg.define_split("tile_y", y, num_outputs=4)
    cfg.define_split("tile_x", x, num_outputs=4)
    cfg.define_split("tile_rcc", rcc, num_outputs=2)
    cfg.define_split("tile_ry", ry, num_outputs=2)
    cfg.define_split("tile_rx", rx, num_outputs=2)
    cfg.define_knob("auto_unroll_max_step", [0, 512, 1500])

    pad_data, flattened_kernel = s[conv].op.input_tensors
    kernel = s[flattened_kernel].op.input_tensors[0]
    # Inline the padding and filter-packing stages; their values are
    # materialized by the texture copy stages created below.
    s[flattened_kernel].compute_inline()
    s[pad_data].compute_inline()
    if isinstance(kernel.op, tvm.te.ComputeOp) and "dilate" in kernel.op.tag:
        s[kernel].compute_inline()
    kernel = flattened_kernel

    # If conv is the final output we add a local cache stage; otherwise the
    # existing conv stage itself is demoted to local scope.
    if conv.op in s.outputs:
        output = conv
        OL = s.cache_write(conv, "local")
    else:
        output = s.outputs[0].output(0)
        s[conv].set_scope("local")
        OL = conv

    # create cache stage
    AT = s.cache_read(pad_data, "global.texture", [OL])
    WT = s.cache_read(kernel, "global.texture", [OL])

    def copy_to_texture(stage):
        # One work item per texel: fuse outer axes, vectorize the 4-wide tail.
        axes = s[stage].op.axis
        fused = s[stage].fuse(*axes[:-1])
        block, thread = s[stage].split(fused, factor=32)
        s[stage].vectorize(axes[-1])
        s[stage].bind(block, te.thread_axis("blockIdx.x"))
        s[stage].bind(thread, te.thread_axis("threadIdx.x"))

    copy_to_texture(AT)
    copy_to_texture(WT)

    AA = s.cache_read(AT, "shared", [OL])
    WW = s.cache_read(WT, "shared", [OL])

    # tile and bind spatial axes
    n, fc, y, x, fb = s[output].op.axis

    # nparts=1 yields a dummy axis used only as the auto_unroll pragma scope.
    kernel_scope, n = s[output].split(n, nparts=1)

    bf, vf, tf, fi = cfg["tile_fc"].apply(s, output, fc)
    by, vy, ty, yi = cfg["tile_y"].apply(s, output, y)
    bx, vx, tx, xi = cfg["tile_x"].apply(s, output, x)

    bf = s[output].fuse(n, bf)
    s[output].bind(bf, te.thread_axis("blockIdx.z"))
    s[output].bind(by, te.thread_axis("blockIdx.y"))
    s[output].bind(bx, te.thread_axis("blockIdx.x"))
    s[output].bind(vf, te.thread_axis("vthread"))
    s[output].bind(vy, te.thread_axis("vthread"))
    s[output].bind(vx, te.thread_axis("vthread"))
    s[output].bind(tf, te.thread_axis("threadIdx.z"))
    s[output].bind(ty, te.thread_axis("threadIdx.y"))
    s[output].bind(tx, te.thread_axis("threadIdx.x"))
    s[output].reorder(bf, by, bx, vf, vy, vx, tf, ty, tx, fi, yi, xi, fb)
    s[output].vectorize(fb)

    s[OL].compute_at(s[output], tx)

    # tile reduction axes
    n, fc, y, x, fb = s[OL].op.axis
    rcc, rcb, ry, rx = s[OL].op.reduce_axis
    rco, rci = cfg["tile_rcc"].apply(s, OL, rcc)
    ryo, ryi = cfg["tile_ry"].apply(s, OL, ry)
    rxo, rxi = cfg["tile_rx"].apply(s, OL, rx)

    # TODO(csullivan): check position of rcb
    s[OL].reorder(rco, ryo, rxo, rci, ryi, rxi, rcb, n, fc, y, x, fb)
    s[OL].vectorize(fb)
    s[OL].unroll(rcb)

    s[AA].compute_at(s[OL], rxo)
    s[WW].compute_at(s[OL], rxo)

    # cooperative fetching
    for load in [AA, WW]:
        # The packed filter (WW) is rank 3; the padded data (AA) is rank 5.
        if load == WW:
            n, fyx, v = s[load].op.axis
            fused = s[load].fuse(n, fyx)
        else:
            n, f, y, x, v = s[load].op.axis
            fused = s[load].fuse(n, f, y, x)
        tz, fused = s[load].split(fused, nparts=cfg["tile_fc"].size[2])
        ty, fused = s[load].split(fused, nparts=cfg["tile_y"].size[2])
        tx, fused = s[load].split(fused, nparts=cfg["tile_x"].size[2])
        s[load].bind(tz, te.thread_axis("threadIdx.z"))
        s[load].bind(ty, te.thread_axis("threadIdx.y"))
        s[load].bind(tx, te.thread_axis("threadIdx.x"))
        s[load].vectorize(v)

    # unroll
    s[output].pragma(kernel_scope, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val)

    # Register FLOPs only when the batch dimension is static.
    N, OCC, OH, OW, OCB = get_const_tuple(output.shape)
    _, ICKHKW, _ = get_const_tuple(kernel.shape)

    if isinstance(N, int):
        cfg.add_flop(2 * N * OH * OW * OCC * OCB * ICKHKW)
def compute_conv2d_NCHWc_KCRSk_acc32(Input, Filter, stride, padding, dilation, out_dtype=None):
    """Convolution operator in NCHWc layout.

    Variant of compute_conv2d_NCHWc_KCRSk: each product is cast to
    `out_dtype` before accumulation, and a separate output stage casts the
    accumulated result to float32.
    """
    if out_dtype is None:
        out_dtype = Input.dtype
    assert isinstance(stride, int) or len(stride) == 2
    assert isinstance(dilation, int) or len(dilation) == 2

    # Normalize scalar stride/dilation into (h, w) pairs.
    if isinstance(stride, int):
        stride_h = stride_w = stride
    else:
        stride_h, stride_w = stride

    if isinstance(dilation, int):
        dilation_h = dilation_w = dilation
    else:
        dilation_h, dilation_w = dilation

    batch, in_channel_chunk, in_height, in_width, in_channel_block = Input.shape
    num_filter_chunk, channel, kernel_h, kernel_w, num_filter_block = Filter.shape
    # compute the output shape
    dilated_kernel_h = (kernel_h - 1) * dilation_h + 1
    dilated_kernel_w = (kernel_w - 1) * dilation_w + 1
    pad_top, pad_left, pad_down, pad_right = nn.get_pad_tuple(
        padding, (dilated_kernel_h, dilated_kernel_w)
    )
    out_height = simplify((in_height - dilated_kernel_h + pad_top + pad_down) // stride_h + 1)
    out_width = simplify((in_width - dilated_kernel_w + pad_left + pad_right) // stride_w + 1)
    # compute graph
    pad_before = [0, 0, pad_top, pad_left, 0]
    pad_after = [0, 0, pad_down, pad_right, 0]
    temp = nn.pad(Input, pad_before, pad_after, name="pad_temp")

    rcc = te.reduce_axis((0, in_channel_chunk), name="rc")
    rcb = te.reduce_axis((0, in_channel_block), name="rc")
    ry = te.reduce_axis((0, kernel_h), name="ry")
    rx = te.reduce_axis((0, kernel_w), name="rx")

    # NCHWc x KCRSk
    # texture: NCH|W|c
    # texture: K|CRS|k
    # c = crs//RS
    # rs = crs % RS
    # r = rs // W == (crs // S) % R
    # s = rs % W == crs % S
    Filter = te.compute(
        (num_filter_chunk, channel * kernel_h * kernel_w, num_filter_block),
        lambda ffc, crs, ffb: Filter[
            ffc, crs // (kernel_h * kernel_w), (crs // kernel_w) % kernel_h, crs % kernel_w, ffb
        ],
        name="packed_filter",
    )
    conv = te.compute(
        (batch, num_filter_chunk, out_height, out_width, num_filter_block),
        lambda nn, ffc, yy, xx, ffb: te.sum(
            (
                temp[nn, rcc, yy * stride_h + ry * dilation_h, xx * stride_w + rx * dilation_w, rcb]
                * Filter[ffc, ((rcc * in_channel_block + rcb) * kernel_h + ry) * kernel_w + rx, ffb]
            ).astype(out_dtype),
            axis=[rcc, rcb, ry, rx],
        ),
        tag="conv2d_nchwc_kcrsk_texture",
    )
    # Final stage casts the accumulator to float32.
    output = te.compute(conv.shape, lambda n, fc, y, x, fb: conv[n, fc, y, x, fb].astype("float32"))
    return output
def schedule_conv2d_NCHWc_KCRSk_acc32(cfg, s, output):
    """schedule optimized for batch size = 1

    Same structure as schedule_conv2d_NCHWc_KCRSk, but the conv stage is
    recovered from the trailing float32 cast stage (`output`).
    """
    conv = output.op.input_tensors[0]

    ##### space definition begin #####
    n, fc, y, x, fb = s[conv].op.axis
    rcc, rcb, ry, rx = s[conv].op.reduce_axis
    cfg.define_split("tile_fc", fc, num_outputs=4)
    cfg.define_split("tile_y", y, num_outputs=4)
    cfg.define_split("tile_x", x, num_outputs=4)
    cfg.define_split("tile_rcc", rcc, num_outputs=2)
    cfg.define_split("tile_ry", ry, num_outputs=2)
    cfg.define_split("tile_rx", rx, num_outputs=2)
    cfg.define_knob("auto_unroll_max_step", [0, 512, 1500])

    pad_data, flattened_kernel = s[conv].op.input_tensors
    kernel = s[flattened_kernel].op.input_tensors[0]
    # Inline the padding and filter-packing stages; their values are
    # materialized by the texture copy stages created below.
    s[flattened_kernel].compute_inline()
    s[pad_data].compute_inline()
    if isinstance(kernel.op, tvm.te.ComputeOp) and "dilate" in kernel.op.tag:
        s[kernel].compute_inline()
    kernel = flattened_kernel

    # If conv is the final output we add a local cache stage; otherwise the
    # existing conv stage itself is demoted to local scope.
    if conv.op in s.outputs:
        output = conv
        OL = s.cache_write(conv, "local")
    else:
        output = s.outputs[0].output(0)
        s[conv].set_scope("local")
        OL = conv

    # create cache stage
    AT = s.cache_read(pad_data, "global.texture", [OL])
    WT = s.cache_read(kernel, "global.texture", [OL])

    def copy_to_texture(stage):
        # One work item per texel: fuse outer axes, vectorize the 4-wide tail.
        axes = s[stage].op.axis
        fused = s[stage].fuse(*axes[:-1])
        block, thread = s[stage].split(fused, factor=32)
        s[stage].vectorize(axes[-1])
        s[stage].bind(block, te.thread_axis("blockIdx.x"))
        s[stage].bind(thread, te.thread_axis("threadIdx.x"))

    copy_to_texture(AT)
    copy_to_texture(WT)

    AA = s.cache_read(AT, "shared", [OL])
    WW = s.cache_read(WT, "shared", [OL])

    # tile and bind spatial axes
    n, fc, y, x, fb = s[output].op.axis

    # nparts=1 yields a dummy axis used only as the auto_unroll pragma scope.
    kernel_scope, n = s[output].split(n, nparts=1)

    bf, vf, tf, fi = cfg["tile_fc"].apply(s, output, fc)
    by, vy, ty, yi = cfg["tile_y"].apply(s, output, y)
    bx, vx, tx, xi = cfg["tile_x"].apply(s, output, x)

    bf = s[output].fuse(n, bf)
    s[output].bind(bf, te.thread_axis("blockIdx.z"))
    s[output].bind(by, te.thread_axis("blockIdx.y"))
    s[output].bind(bx, te.thread_axis("blockIdx.x"))
    s[output].bind(vf, te.thread_axis("vthread"))
    s[output].bind(vy, te.thread_axis("vthread"))
    s[output].bind(vx, te.thread_axis("vthread"))
    s[output].bind(tf, te.thread_axis("threadIdx.z"))
    s[output].bind(ty, te.thread_axis("threadIdx.y"))
    s[output].bind(tx, te.thread_axis("threadIdx.x"))
    s[output].reorder(bf, by, bx, vf, vy, vx, tf, ty, tx, fi, yi, xi, fb)
    s[output].vectorize(fb)

    s[OL].compute_at(s[output], tx)

    # tile reduction axes
    n, fc, y, x, fb = s[OL].op.axis
    rcc, rcb, ry, rx = s[OL].op.reduce_axis
    rco, rci = cfg["tile_rcc"].apply(s, OL, rcc)
    ryo, ryi = cfg["tile_ry"].apply(s, OL, ry)
    rxo, rxi = cfg["tile_rx"].apply(s, OL, rx)

    # TODO(csullivan): check position of rcb
    s[OL].reorder(rco, ryo, rxo, rci, ryi, rxi, rcb, n, fc, y, x, fb)
    s[OL].vectorize(fb)
    s[OL].unroll(rcb)

    s[AA].compute_at(s[OL], rxo)
    s[WW].compute_at(s[OL], rxo)

    # cooperative fetching
    for load in [AA, WW]:
        # The packed filter (WW) is rank 3; the padded data (AA) is rank 5.
        if load == WW:
            n, fyx, v = s[load].op.axis
            fused = s[load].fuse(n, fyx)
        else:
            n, f, y, x, v = s[load].op.axis
            fused = s[load].fuse(n, f, y, x)
        tz, fused = s[load].split(fused, nparts=cfg["tile_fc"].size[2])
        ty, fused = s[load].split(fused, nparts=cfg["tile_y"].size[2])
        tx, fused = s[load].split(fused, nparts=cfg["tile_x"].size[2])
        s[load].bind(tz, te.thread_axis("threadIdx.z"))
        s[load].bind(ty, te.thread_axis("threadIdx.y"))
        s[load].bind(tx, te.thread_axis("threadIdx.x"))
        s[load].vectorize(v)

    # unroll
    s[output].pragma(kernel_scope, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val)

    # Register FLOPs only when the batch dimension is static.
    N, OCC, OH, OW, OCB = get_const_tuple(output.shape)
    _, ICKHKW, _ = get_const_tuple(kernel.shape)

    if isinstance(N, int):
        cfg.add_flop(2 * N * OH * OW * OCC * OCB * ICKHKW)
def compute_depthwise_conv2d_NCHWc_KCRSk_acc32(
    Input, Filter, stride, padding, dilation, out_dtype=None
):
    """Depthwise convolution operator in NCHWc layout.

    The CMRSc filter is repacked inline into a rank-3 [C, M*R*S, c] tensor
    ("packed_filter"); each product is cast to `out_dtype` before
    accumulation, then a final stage casts the result to float32.
    """
    if out_dtype is None:
        out_dtype = Input.dtype
    assert isinstance(stride, int) or len(stride) == 2
    assert isinstance(dilation, int) or len(dilation) == 2

    # Normalize scalar stride/dilation into (h, w) pairs.
    if isinstance(stride, int):
        stride_h = stride_w = stride
    else:
        stride_h, stride_w = stride

    if isinstance(dilation, int):
        dilation_h = dilation_w = dilation
    else:
        dilation_h, dilation_w = dilation

    batch, channel_chunk, in_height, in_width, channel_block = Input.shape
    _, channel_multiplier, kernel_h, kernel_w, _ = Filter.shape

    # compute the output shape
    dilated_kernel_h = (kernel_h - 1) * dilation_h + 1
    dilated_kernel_w = (kernel_w - 1) * dilation_w + 1
    pad_top, pad_left, pad_down, pad_right = nn.get_pad_tuple(
        padding, (dilated_kernel_h, dilated_kernel_w)
    )
    out_channel_chunk = simplify(channel_chunk * channel_multiplier)
    out_height = simplify((in_height - dilated_kernel_h + pad_top + pad_down) // stride_h + 1)
    out_width = simplify((in_width - dilated_kernel_w + pad_left + pad_right) // stride_w + 1)
    # compute graph
    pad_before = [0, 0, pad_top, pad_left, 0]
    pad_after = [0, 0, pad_down, pad_right, 0]
    temp = nn.pad(Input, pad_before, pad_after, name="pad_temp")

    ry = te.reduce_axis((0, kernel_h), name="ry")
    rx = te.reduce_axis((0, kernel_w), name="rx")

    # NCHWc x CMRSc = [N,(C//4)M,OH,OW, 4c]
    # NCHWc x CMRS
    # texture: NCH|W|c
    # texture: C|MRS|c
    # output: N
    # m = mrs//RS
    # rs = mrs % RS
    # r = rs // W == (mrs // S) % R
    # s = rs % W == mrs % S
    Filter = te.compute(
        (channel_chunk, channel_multiplier * kernel_h * kernel_w, channel_block),
        lambda ffc, mrs, ffb: Filter[
            ffc, mrs // (kernel_h * kernel_w), (mrs // kernel_w) % kernel_h, mrs % kernel_w, ffb
        ],
        name="packed_filter",
    )

    conv = te.compute(
        (batch, out_channel_chunk, out_height, out_width, channel_block),
        lambda nn, ffc, yy, xx, ffb: te.sum(
            (
                temp[
                    nn,
                    ffc // channel_multiplier,
                    yy * stride_h + ry * dilation_h,
                    xx * stride_w + rx * dilation_w,
                    ffb,
                ]
                * Filter[
                    ffc // channel_multiplier,
                    ((ffc % channel_multiplier) * kernel_h + ry) * kernel_w + rx,
                    ffb,
                ]
            ).astype(out_dtype),
            axis=[ry, rx],
        ),
        tag="depthwise_conv2d_nchwc_kcrsk_texture",
    )
    # Final stage casts the accumulator to float32.
    return te.compute(
        conv.shape, lambda n, ffc, y, x, ffb: conv[n, ffc, y, x, ffb].astype("float32")
    )
def schedule_depthwise_conv2d_NCHWc_KCRSk_acc32(cfg, s, output):
    """schedule optimized for batch size = 1

    Depthwise variant of schedule_conv2d_NCHWc_KCRSk_acc32; only the
    spatial filter axes (ry, rx) are reduced.
    """
    conv = output.op.input_tensors[0]

    ##### space definition begin #####
    n, fc, y, x, fb = s[conv].op.axis
    ry, rx = s[conv].op.reduce_axis
    cfg.define_split("tile_fc", fc, num_outputs=4)
    cfg.define_split("tile_y", y, num_outputs=4)
    cfg.define_split("tile_x", x, num_outputs=4)
    cfg.define_split("tile_ry", ry, num_outputs=2)
    cfg.define_split("tile_rx", rx, num_outputs=2)
    cfg.define_knob("auto_unroll_max_step", [0, 512, 1500])

    pad_data, flattened_kernel = s[conv].op.input_tensors
    kernel = s[flattened_kernel].op.input_tensors[0]
    # Inline the padding and filter-packing stages; their values are
    # materialized by the texture copy stages created below.
    s[flattened_kernel].compute_inline()
    s[pad_data].compute_inline()
    if isinstance(kernel.op, tvm.te.ComputeOp) and "dilate" in kernel.op.tag:
        s[kernel].compute_inline()
    kernel = flattened_kernel

    # If conv is the final output we add a local cache stage; otherwise the
    # existing conv stage itself is demoted to local scope.
    if conv.op in s.outputs:
        output = conv
        OL = s.cache_write(conv, "local")
    else:
        output = s.outputs[0].output(0)
        s[conv].set_scope("local")
        OL = conv

    # create cache stage
    AT = s.cache_read(pad_data, "global.texture", [OL])
    WT = s.cache_read(kernel, "global.texture", [OL])

    def copy_to_texture(stage):
        # One work item per texel: fuse outer axes, vectorize the 4-wide tail.
        axes = s[stage].op.axis
        fused = s[stage].fuse(*axes[:-1])
        block, thread = s[stage].split(fused, factor=32)
        s[stage].vectorize(axes[-1])
        s[stage].bind(block, te.thread_axis("blockIdx.x"))
        s[stage].bind(thread, te.thread_axis("threadIdx.x"))

    copy_to_texture(AT)
    copy_to_texture(WT)

    AA = s.cache_read(AT, "shared", [OL])
    WW = s.cache_read(WT, "shared", [OL])

    # tile and bind spatial axes
    n, fc, y, x, fb = s[output].op.axis

    # nparts=1 yields a dummy axis used only as the auto_unroll pragma scope.
    kernel_scope, n = s[output].split(n, nparts=1)

    bf, vf, tf, fi = cfg["tile_fc"].apply(s, output, fc)
    by, vy, ty, yi = cfg["tile_y"].apply(s, output, y)
    bx, vx, tx, xi = cfg["tile_x"].apply(s, output, x)

    bf = s[output].fuse(n, bf)
    s[output].bind(bf, te.thread_axis("blockIdx.z"))
    s[output].bind(by, te.thread_axis("blockIdx.y"))
    s[output].bind(bx, te.thread_axis("blockIdx.x"))
    s[output].bind(vf, te.thread_axis("vthread"))
    s[output].bind(vy, te.thread_axis("vthread"))
    s[output].bind(vx, te.thread_axis("vthread"))
    s[output].bind(tf, te.thread_axis("threadIdx.z"))
    s[output].bind(ty, te.thread_axis("threadIdx.y"))
    s[output].bind(tx, te.thread_axis("threadIdx.x"))
    s[output].reorder(bf, by, bx, vf, vy, vx, tf, ty, tx, fi, yi, xi, fb)
    s[output].vectorize(fb)

    s[OL].compute_at(s[output], tx)

    # tile reduction axes
    n, fc, y, x, fb = s[OL].op.axis
    ry, rx = s[OL].op.reduce_axis
    ryo, ryi = cfg["tile_ry"].apply(s, OL, ry)
    rxo, rxi = cfg["tile_rx"].apply(s, OL, rx)

    s[OL].reorder(ryo, rxo, ryi, rxi, n, fc, y, x, fb)
    s[OL].vectorize(fb)
    # s[OL].unroll()

    s[AA].compute_at(s[OL], rxo)
    s[WW].compute_at(s[OL], rxo)

    # cooperative fetching
    for load in [AA, WW]:
        # The packed filter (WW) is rank 3; the padded data (AA) is rank 5.
        if load == WW:
            n, fyx, v = s[load].op.axis
            fused = s[load].fuse(n, fyx)
        else:
            n, f, y, x, v = s[load].op.axis
            fused = s[load].fuse(n, f, y, x)
        tz, fused = s[load].split(fused, nparts=cfg["tile_fc"].size[2])
        ty, fused = s[load].split(fused, nparts=cfg["tile_y"].size[2])
        tx, fused = s[load].split(fused, nparts=cfg["tile_x"].size[2])
        s[load].bind(tz, te.thread_axis("threadIdx.z"))
        s[load].bind(ty, te.thread_axis("threadIdx.y"))
        s[load].bind(tx, te.thread_axis("threadIdx.x"))
        s[load].vectorize(v)

    # unroll
    s[output].pragma(kernel_scope, "auto_unroll_max_step", cfg["auto_unroll_max_step"].val)

    # FLOPs: recover kernel-window size (KHKW) from the packed filter shape
    # and the channel multiplier M; count only when batch is static.
    N, OCC, OH, OW, OCB = get_const_tuple(output.shape)
    ICC, MKHKW, ICB = get_const_tuple(kernel.shape)
    M = (OCC * OCB) // (ICC * ICB)
    KHKW = MKHKW // M

    if isinstance(N, int):
        cfg.add_flop(2 * N * OH * OW * OCC * OCB * KHKW)
def scheduler(compute, schedule, *args, **kwargs):
    """Build tensors with *compute*, then schedule them with *schedule*.

    Positional arguments are forwarded to the compute callback; keyword
    arguments go to the schedule callback. Returns (schedule, tensors).
    """
    tensors = compute(*args)
    sched = schedule(*tensors, **kwargs)
    return sched, tensors
def conv2d_1x1_NCHWc_RSCKk(input_shape, filter_shape):
    """Workload wrapper: 1x1 conv2d with NCHWc data and RSCKk filter packing."""
    tensors = compute_conv2d_1x1_NCHWc_RSCKk(input_shape, filter_shape)
    sched = schedule_conv2d_1x1_NCHWc_RSCKk(*tensors)
    return sched, tensors
def conv2d_1x1_WCHNc_CRSKk(input_shape, filter_shape):
    """Workload wrapper: 1x1 conv2d with WCHNc data and CRSKk filter packing."""
    tensors = compute_conv2d_1x1_WCHNc_CRSKk(input_shape, filter_shape)
    sched = schedule_conv2d_1x1_WCHNc_CRSKk(*tensors)
    # Expose only data, filter, and the conv result; the packed
    # intermediates are scheduled inline and not runner arguments.
    return sched, (tensors[0], tensors[1], tensors[-1])
def conv2d_NCHWc_KCRSk(input_shape, filter_shape):
    """Workload wrapper: NCHWc conv2d with KCRSk filter (unit stride, no pad)."""
    data = te.placeholder(input_shape, name="data", dtype="float32")
    filt = te.placeholder(filter_shape, name="filter", dtype="float32")
    conv = compute_conv2d_NCHWc_KCRSk(data, filt, [1, 1], [0, 0], [1, 1], "float32")
    sched = te.create_schedule([conv.op])
    schedule_conv2d_NCHWc_KCRSk(autotvm.get_config(), sched, conv)
    return sched, (data, filt, conv)
def conv2d_NCHWc_KCRSk_fp32_acc(input_shape, filter_shape):
    """Workload wrapper: NCHWc conv2d with KCRSk filter and fp32 accumulation."""
    data = te.placeholder(input_shape, name="data", dtype="float32")
    filt = te.placeholder(filter_shape, name="filter", dtype="float32")
    out = compute_conv2d_NCHWc_KCRSk_acc32(data, filt, [1, 1], [0, 0], [1, 1], "float32")
    sched = te.create_schedule([out.op])
    schedule_conv2d_NCHWc_KCRSk_acc32(autotvm.get_config(), sched, out)
    return sched, (data, filt, out)
def depthwise_conv2d_NCHWc_KCRSk_acc32(input_shape, filter_shape):
    """Workload wrapper: depthwise NCHWc conv2d with fp32 accumulation."""
    data = te.placeholder(input_shape, name="data", dtype="float32")
    filt = te.placeholder(filter_shape, name="filter", dtype="float32")
    out = compute_depthwise_conv2d_NCHWc_KCRSk_acc32(
        data, filt, [1, 1], [0, 0], [1, 1], "float32"
    )
    sched = te.create_schedule([out.op])
    schedule_depthwise_conv2d_NCHWc_KCRSk_acc32(autotvm.get_config(), sched, out)
    return sched, (data, filt, out)
def ref_convolution(data, kernel, stride, pad):
    """Reference dense convolution computed with MXNet (NCHW data, KCRS kernel)."""
    import mxnet as mx

    result = mx.nd.Convolution(
        data=mx.nd.array(data),
        weight=mx.nd.array(kernel),
        bias=None,
        no_bias=True,
        kernel=(kernel.shape[2], kernel.shape[3]),
        stride=stride,
        pad=pad,
        num_filter=kernel.shape[0],
        num_group=1,
    )
    return result.asnumpy()
def ref_depthwise_convolution(data, kernel, stride, pad):
    """Reference depthwise convolution computed with MXNet.

    The group count is set to the number of output channels
    (``kernel.shape[0]``), which turns MXNet's grouped ``Convolution`` into a
    depthwise convolution.

    Parameters
    ----------
    data : numpy.ndarray
        Input in NCHW layout.
    kernel : numpy.ndarray
        Depthwise filter in CMRS layout (channels, multiplier, kh, kw).
    stride, pad : list or tuple
        Forwarded to ``mx.nd.Convolution``; an empty sequence means the
        operator defaults.

    Returns
    -------
    numpy.ndarray
        The convolution result as a NumPy array.
    """
    import mxnet as mx

    # Depthwise: one group per channel of the input.
    groups = kernel.shape[0]
    kernel_size = (kernel.shape[2], kernel.shape[3])
    num_filter = kernel.shape[0]
    ref_res = mx.nd.Convolution(
        data=mx.nd.array(data),
        weight=mx.nd.array(kernel),
        bias=None,
        no_bias=True,
        kernel=kernel_size,
        stride=stride,
        pad=pad,
        num_filter=num_filter,
        num_group=groups,
    )
    return ref_res.asnumpy()
def validate(workload, target, dev, input_shapes, *args, **kwargs):
    """Build a workload, execute it, and compare against a NumPy/MXNet reference.

    The reference computation is selected by substring-matching the workload
    function's ``__name__``, so workload naming is load-bearing here.

    Parameters
    ----------
    workload : callable
        Returns (schedule, placeholders); the last placeholder is the output.
    target : str
        TVM build target.
    dev : tvm.runtime.Device
        Device used to allocate arrays and run the built function.
    input_shapes : list
        Positional shape arguments forwarded to `workload`.
    """
    s, placeholders = workload(*input_shapes, *args, **kwargs)
    func = tvm.driver.build(s, [*placeholders], target=target, name="TestFunction")

    # Random inputs for every placeholder except the last, plus a zeroed
    # output buffer as the final TVM argument.
    args_tvm = []
    args_np = []
    for var in placeholders[:-1]:
        var_np = np.random.uniform(size=[i.value for i in var.shape]).astype(var.dtype)
        args_np.append(var_np)
        args_tvm.append(tvm.nd.array(var_np, dev))
    args_tvm.append(
        tvm.nd.array(
            np.zeros([i.value for i in placeholders[-1].shape], dtype=placeholders[-1].dtype), dev
        )
    )
    func(*args_tvm)

    # Select the NumPy/MXNet reference by workload name and undo the packed
    # layouts so the comparison happens in plain NCHW.
    if "plus_one" in workload.__name__:
        np_result = args_np[0] + 1.0
    elif "matmul" in workload.__name__:
        if "inner" in workload.__name__:
            np_result = np.matmul(
                args_np[0].reshape(32, 256), args_np[1].reshape(32, 256).transpose(1, 0)
            )
        elif "accum" in workload.__name__:
            np_result = np.matmul(
                args_np[0].transpose((1, 0, 2)).reshape(64, 128), args_np[1].reshape(128, 64)
            )
        else:
            np_result = np.matmul(
                args_np[0].transpose((0, 2, 1)).reshape(128, 64),
                args_np[1].transpose(1, 0, 2).reshape(64, 128),
            )
    elif "conv2d_1x1_NCHWc_RSCKk" in workload.__name__:
        vec_length = args_np[1].shape[-1]
        # nchwc -> nchw
        args_np[0] = (
            args_np[0]
            .transpose((0, 1, 4, 2, 3))
            .reshape(
                args_np[0].shape[0],
                args_np[0].shape[1] * args_np[0].shape[-1],
                args_np[0].shape[2],
                args_np[0].shape[3],
            )
        )
        # rsckk -> rsck -> kcrs
        args_np[1] = (
            args_np[1]
            .reshape(
                args_np[1].shape[0],
                args_np[1].shape[1],
                args_np[1].shape[2],
                args_np[1].shape[3] * args_np[1].shape[4],
            )
            .transpose((3, 2, 0, 1))
        )
        np_result = testing.conv2d_nchw_python(args_np[0], args_np[1], 1, 0)
        # nkhw -> nkhwk
        np_result = np_result.reshape(
            np_result.shape[0],
            np_result.shape[1] // vec_length,
            vec_length,
            np_result.shape[2],
            np_result.shape[3],
        ).transpose(0, 1, 3, 4, 2)
    elif "conv2d_1x1_WCHNc_CRSKk" in workload.__name__:
        vec_length = args_np[1].shape[-1]
        # wchnc -> nchw
        args_np[0] = (
            args_np[0]
            .transpose((3, 1, 4, 2, 0))
            .reshape(
                args_np[0].shape[3],
                args_np[0].shape[1] * args_np[0].shape[-1],
                args_np[0].shape[2],
                args_np[0].shape[0],
            )
        )
        # crskk -> crsk -> kcrs
        args_np[1] = (
            args_np[1]
            .reshape(
                args_np[1].shape[0],
                args_np[1].shape[1],
                args_np[1].shape[2],
                args_np[1].shape[3] * args_np[1].shape[4],
            )
            .transpose((3, 0, 1, 2))
        )
        np_result = testing.conv2d_nchw_python(args_np[0], args_np[1], 1, 0)
        # nkhw -> nkkhw -> wkhnk
        np_result = np_result.reshape(
            np_result.shape[0],
            np_result.shape[1] // vec_length,
            vec_length,
            np_result.shape[2],
            np_result.shape[3],
        ).transpose(4, 1, 3, 0, 2)
    elif "NCHW_KCRS" in workload.__name__:
        np_result = testing.conv2d_nchw_python(args_np[0], args_np[1], 1, 0)
    elif "NCHWc_KCRSk" in workload.__name__:
        vec_length = args_np[1].shape[-1]
        # nchwc -> nchw
        args_np[0] = (
            args_np[0]
            .transpose((0, 1, 4, 2, 3))
            .reshape(
                args_np[0].shape[0],
                args_np[0].shape[1] * args_np[0].shape[-1],
                args_np[0].shape[2],
                args_np[0].shape[3],
            )
        )
        # kcrsk/cmrsc -> kcrs/cmrs
        args_np[1] = (
            args_np[1]
            .transpose((0, 4, 1, 2, 3))
            .reshape(
                args_np[1].shape[0] * args_np[1].shape[4],
                args_np[1].shape[1],
                args_np[1].shape[2],
                args_np[1].shape[3],
            )
        )
        if "depthwise" in workload.__name__:
            # np_result = testing.depthwise_conv2d_python_nchw(args_np[0], args_np[1], 1, "VALID")
            np_result = ref_depthwise_convolution(args_np[0], args_np[1], [], [])
        else:
            # np_result = testing.conv2d_nchw_python(args_np[0], args_np[1], 1, 0)
            np_result = ref_convolution(args_np[0], args_np[1], [], [])
        # nkhw -> nkhwk
        np_result = np_result.reshape(
            np_result.shape[0],
            np_result.shape[1] // vec_length,
            vec_length,
            np_result.shape[2],
            np_result.shape[3],
        ).transpose(0, 1, 3, 4, 2)
    # Loose tolerances: results may come from fp16/fast-math GPU paths.
    np.testing.assert_allclose(args_tvm[-1].asnumpy(), np_result, rtol=1e-2, atol=1e-2)
class BaseSingleShapeValidator:
    """Base class: runs a single-input workload through `validate` on OpenCL."""

    @tvm.testing.parametrize_targets("opencl")
    def test_unary(self, test_func, input_shape, target, dev):
        # Subclasses provide `test_func` and `input_shape` via tvm.testing.parameter.
        validate(test_func, target, dev, [input_shape])
class TestPlusOneRank3(BaseSingleShapeValidator):
    """Elementwise plus-one over a rank-3 texture-backed tensor."""

    input_shape = tvm.testing.parameter((32, 32, 4))

    def plus_one(input_shape):
        # Parameter factory (deliberately not a method; no `self`).
        return scheduler(compute_plus_one_rank3, schedule_plus_one_rank3, input_shape)

    test_func = tvm.testing.parameter(plus_one)
class TestPlusOneRank5(BaseSingleShapeValidator):
    """Elementwise plus-one over a rank-5 texture-backed tensor."""

    input_shape = tvm.testing.parameter((32, 2, 4, 4, 4))

    def plus_one(input_shape):
        # Parameter factory (deliberately not a method; no `self`).
        return scheduler(compute_plus_one_rank5, schedule_plus_one_rank5, input_shape)

    test_func = tvm.testing.parameter(plus_one)
class TestMatmul:
    """Texture matmul workloads, with and without local-memory caching."""

    input_shape = tvm.testing.parameter((32, 64, 4))
    local = tvm.testing.parameter(False, True)

    def matmul(input_shape, local):
        # Parameter factory (deliberately not a method; no `self`).
        return scheduler(compute_matmul, schedule_matmul, input_shape, local=local)

    def matmul_inner(input_shape, local):
        # Parameter factory (deliberately not a method; no `self`).
        return scheduler(compute_matmul_inner, schedule_matmul_inner, input_shape, local=local)

    test_func = tvm.testing.parameter(matmul, matmul_inner)

    @tvm.testing.parametrize_targets("opencl")
    def test_matmul(self, test_func, input_shape, local, target, dev):
        validate(test_func, target, dev, [input_shape], local=local)
class TestMatmulVectorAccumulator:
    """Matmul with a vectorized accumulator, optionally using local memory."""

    shapeA = tvm.testing.parameter((32, 64, 4))
    shapeB = tvm.testing.parameter((128, 16, 4))
    local = tvm.testing.parameter(False, True)

    def matmul_vector_accumulator(shapeA, shapeB, local):
        # Parameter factory (deliberately not a method; no `self`).
        return scheduler(
            compute_matmul_vector_accumulator,
            schedule_matmul_vector_accumulator,
            shapeA,
            shapeB,
            local=local,
        )

    test_func = tvm.testing.parameter(matmul_vector_accumulator)

    @tvm.testing.parametrize_targets("opencl")
    def test_matmul_vec_acc(self, test_func, shapeA, shapeB, local, target, dev):
        validate(test_func, target, dev, [shapeA, shapeB], local=local)
class BaseConv2DValidator:
    """Base class: runs a conv2d workload through `validate` on OpenCL."""

    @tvm.testing.parametrize_targets("opencl")
    def test_conv2d(self, test_func, input_shapes, target, dev):
        # Subclasses provide `test_func` and `input_shapes` via tvm.testing.parameter.
        validate(test_func, target, dev, input_shapes)
class TestConv2dNCHWcRSCKk(BaseConv2DValidator):
    """1x1 conv2d with NCHWc data and RSCKk filter packing."""

    input_shapes = tvm.testing.parameter([(1, 32, 56, 56, 4), (1, 1, 128, 32, 4)])
    test_func = tvm.testing.parameter(conv2d_1x1_NCHWc_RSCKk)
class TestConv2dWCHNcCRSKk(BaseConv2DValidator):
    """1x1 conv2d with WCHNc data and CRSKk filter packing."""

    input_shapes = tvm.testing.parameter([(56, 32, 56, 1, 4), (128, 1, 1, 32, 4)])
    test_func = tvm.testing.parameter(conv2d_1x1_WCHNc_CRSKk)
class TestConv2dNCHWcKCRSk(BaseConv2DValidator):
    """NCHWc/KCRSk conv2d, with and without the fp32-accumulation variant."""

    input_shapes = tvm.testing.parameter(
        [(1, 32, 56, 56, 4), (32, 128, 1, 1, 4)], [(1, 32, 112, 112, 4), (32, 128, 3, 3, 4)]
    )
    test_func = tvm.testing.parameter(conv2d_NCHWc_KCRSk, conv2d_NCHWc_KCRSk_fp32_acc)
class TestDepthwiseConv2dNCHWcKCRSk(BaseConv2DValidator):
    """Depthwise NCHWc conv2d with fp32 accumulation."""

    input_shapes = tvm.testing.parameter([(1, 24, 257, 257, 4), (24, 1, 3, 3, 4)])
    test_func = tvm.testing.parameter(depthwise_conv2d_NCHWc_KCRSk_acc32)
def simple_texture_to_scalar_common(
    target, input_info, output_info, find_patterns, dtype, cast_type
):
    """Build a texture/buffer cast pipeline and check the generated OpenCL.

    Constructs p0 -> p0_comp -> out(cast) -> dummy_out, assigns the memory
    scopes given in `input_info`/`output_info`, then verifies that every
    pattern in `find_patterns` occurs (in order) in the generated OpenCL
    source. Float32 results are additionally executed and checked
    numerically.

    Parameters
    ----------
    target : str
        OpenCL target string.
    input_info, output_info : [scope, shape]
        Memory scope ("" for buffer, "global.texture" for texture) and shape.
    find_patterns : list of str
        Code fragments expected to appear in the OpenCL source, in order.
    dtype : str
        Input data type.
    cast_type : str
        dtype the `out` stage casts to ("float16" or "float32").
    """

    def _compute():
        # NCHW4c (rank 5) input optionally squeezed to NCHW (rank 4) with a cast.
        p0 = te.placeholder(input_info[1], name="p0", dtype=dtype)
        p0_comp = te.compute(input_info[1], lambda *i: p0(*i), name="p0_comp")
        if len(output_info[1]) == 4 and len(input_info[1]) == 5:
            out = te.compute(
                output_info[1],
                lambda n, c, h, w: p0_comp[n][c // 4][h][w][c % 4].astype(cast_type),
                name="out",
            )
        elif len(output_info[1]) == 5 and len(input_info[1]) == 5:
            out = te.compute(
                output_info[1],
                lambda n, c, h, w, cb: p0_comp[n][c][h][w][cb].astype(cast_type),
                name="out",
            )
        else:
            raise Exception("Impossible case")
        dummy_out = te.compute(output_info[1], lambda *i: out(*i), name="dummy_out")
        return p0, dummy_out

    def _schedule(dummy_out):
        from tvm.topi.adreno.utils import bind_data_copy

        s = te.create_schedule(dummy_out.op)
        out = s[dummy_out].op.input_tensors[0]
        p0_comp = s[out].op.input_tensors[0]
        # Assign the requested memory scopes and bind each copy stage.
        s[p0_comp].set_scope(input_info[0])
        bind_data_copy(s[p0_comp])
        s[out].set_scope(output_info[0])
        bind_data_copy(s[out])
        bind_data_copy(s[dummy_out])
        return s

    p0, dummy_out = _compute()
    s = _schedule(dummy_out)
    fun = tvm.build(s, [p0, dummy_out], target)
    dev = tvm.device(target, 0)

    # Patterns must occur in order: each search starts where the last match
    # was found.
    opencl_source = fun.imported_modules[0].get_source()
    start_idx = 0
    for pattern in find_patterns:
        start_idx = opencl_source.find(pattern, start_idx)
        assert start_idx > -1

    input_np = np.random.uniform(size=[i for i in input_info[1]]).astype(dtype)
    input_tvm = tvm.nd.array(input_np, dev)
    c = tvm.nd.empty(output_info[1], dtype, dev)
    # Doesn't run OpenCL code for FP16 because GPUs in CI don't support FP16 inference
    if cast_type == "float32":
        fun(input_tvm, c)
        # For output len == 5 it makes no sense to check the accuracy
        if cast_type == "float32" and len(output_info[1]) == 4:
            np_result = input_np.transpose(0, 2, 3, 1, 4)  # NCHW4c -> NHWC4c
            np_result = np.squeeze(np_result, axis=3)
            np_result = np_result.transpose(0, 3, 1, 2)  # NHWC -> NCHW
            np.testing.assert_allclose(c.asnumpy(), np_result, rtol=1e-2, atol=1e-2)
class TestSimpleTextureToScalarFP16:
    """Codegen checks for texture/buffer reads combined with an FP16 cast.

    Each parametrized case pins the exact OpenCL fragments (texture reads,
    casted stores) expected for a given input/output scope combination.
    """

    # (input [scope, shape], output [scope, shape], [find_patterns])
    input_info, output_info, find_patterns = tvm.testing.parameters(
        # 1. Texture (NCHW4c) -> Cast(FP16) -> Buffer (NCHW)
        (
            ["global.texture", (1, 1, 40, 40, 4)],
            ["", (1, 4, 40, 40)],
            [
                "float4 v_ = READ_IMAGEF(p0_comp, image_sampler, ((int2)(((convert_int(get_local_id(0))) % 40), ((((convert_int(get_group_id(0))) & 1) * 20) + ((convert_int(get_local_id(0))) / 40)))));",
                "out[(((convert_int(get_group_id(0))) * 800) + (convert_int(get_local_id(0))))] = (convert_half(((float*)&v_)[((convert_int(get_group_id(0))) >> 1)]));",
            ],
        ),
        # 2. Buffer (NCHW4c) -> Cast(FP16) -> Buffer (NCHW)
        (
            ["", (1, 1, 40, 40, 4)],
            ["", (1, 4, 40, 40)],
            [
                "out[(((convert_int(get_group_id(0))) * 800) + (convert_int(get_local_id(0))))] = (convert_half(p0_comp[(((((convert_int(get_group_id(0))) & 1) * 3200) + ((convert_int(get_local_id(0))) * 4)) + ((convert_int(get_group_id(0))) >> 1))]));"
            ],
        ),
        # 3. Texture (NCHW4c) -> Cast(FP16) -> Texture (NCHW4c)
        (
            ["global.texture", (1, 1, 40, 40, 4)],
            ["global.texture", (1, 1, 40, 40, 4)],
            [
                "float4 v_ = READ_IMAGEF(p0_comp, image_sampler, ((int2)(((((convert_int(get_group_id(0))) * 24) + (convert_int(get_local_id(0)))) % 40), ((((convert_int(get_group_id(0))) * 8) + ((convert_int(get_local_id(0))) >> 3)) / 5))));",
                "write_imageh(out, (int2)(((((convert_int(get_group_id(0))) * 24) + (convert_int(get_local_id(0)))) % 40), ((((convert_int(get_group_id(0))) * 8) + ((convert_int(get_local_id(0))) >> 3)) / 5)), (convert_half4(v_)));",
            ],
        ),
    )
    dtype = tvm.testing.parameter("float32")

    @tvm.testing.parametrize_targets("opencl")
    def test_simple_texture_to_scalar_fp16(
        self, input_info, output_info, find_patterns, dtype, target
    ):
        """Run the shared checker with an FP16 cast (compile-only in CI)."""
        simple_texture_to_scalar_common(
            target, input_info, output_info, find_patterns, dtype, "float16"
        )
class TestSimpleTextureToScalarFP32:
    """Codegen checks for texture/buffer reads without any type cast (FP32).

    Same structure as TestSimpleTextureToScalarFP16; these kernels are also
    executed and checked numerically by the shared helper.
    """

    # (input [scope, shape], output [scope, shape], [find_patterns])
    input_info, output_info, find_patterns = tvm.testing.parameters(
        # 1. Texture (NCHW4c) -> Buffer (NCHW)
        (
            ["global.texture", (1, 1, 40, 40, 4)],
            ["", (1, 4, 40, 40)],
            [
                "float4 v_ = READ_IMAGEF(p0_comp, image_sampler, ((int2)(((convert_int(get_local_id(0))) % 40), ((((convert_int(get_group_id(0))) & 1) * 20) + ((convert_int(get_local_id(0))) / 40)))));",
                "out[(((convert_int(get_group_id(0))) * 800) + (convert_int(get_local_id(0))))] = ((float*)&v_)[((convert_int(get_group_id(0))) >> 1)];",
            ],
        ),
        # 2. Buffer (NCHW4c) -> Buffer (NCHW)
        (
            ["", (1, 1, 40, 40, 4)],
            ["", (1, 4, 40, 40)],
            [
                "out[(((convert_int(get_group_id(0))) * 800) + (convert_int(get_local_id(0))))] = p0_comp[(((((convert_int(get_group_id(0))) & 1) * 3200) + ((convert_int(get_local_id(0))) * 4)) + ((convert_int(get_group_id(0))) >> 1))];"
            ],
        ),
    )
    dtype = tvm.testing.parameter("float32")

    @tvm.testing.parametrize_targets("opencl")
    def test_simple_texture_to_scalar_fp32(
        self, input_info, output_info, find_patterns, dtype, target
    ):
        """Run the shared checker with an FP32 (identity) cast."""
        simple_texture_to_scalar_common(
            target, input_info, output_info, find_patterns, dtype, "float32"
        )
def texture_to_scalar_reuse_ssa_common(
    target, input_info, output_info, find_patterns, dtype, cast_type
):
    """Like :func:`simple_texture_to_scalar_common`, but each input element is
    consumed three times, so the generated code must reuse a single SSA value
    (one texture read / one buffer load) instead of re-reading the input.

    The TE graph is ``out_sum = out + out2`` with ``out = cast(x)`` and
    ``out2 = out + cast(x)``; both intermediates are inlined, so the result
    equals ``3 * x``. Parameters are the same as in
    :func:`simple_texture_to_scalar_common`.
    """

    def _compute():
        p0 = te.placeholder(input_info[1], name="p0", dtype=dtype)
        p0_comp = te.compute(input_info[1], lambda *i: p0(*i), name="p0_comp")
        if len(output_info[1]) == 4 and len(input_info[1]) == 5:
            # NCHW4c -> NCHW with cast; `out2` re-reads the same input element.
            out = te.compute(
                output_info[1],
                lambda n, c, h, w: p0_comp[n][c // 4][h][w][c % 4].astype(cast_type),
                name="out",
            )
            # Fixed: this compute was also named "out", shadowing the stage above.
            out2 = te.compute(
                output_info[1],
                lambda n, c, h, w: out[n][c][h][w]
                + p0_comp[n][c // 4][h][w][c % 4].astype(cast_type),
                name="out2",
            )
        elif len(output_info[1]) == 5 and len(input_info[1]) == 5:
            # NCHW4c -> NCHW4c with cast.
            out = te.compute(
                output_info[1],
                lambda n, c, h, w, cb: p0_comp[n][c][h][w][cb].astype(cast_type),
                name="out",
            )
            out2 = te.compute(
                output_info[1],
                lambda n, c, h, w, cb: out[n][c][h][w][cb]
                + p0_comp[n][c][h][w][cb].astype(cast_type),
                name="out2",
            )
        else:
            # The parametrized cases only cover 5-D inputs with 4-D/5-D outputs.
            raise ValueError("Impossible case")
        out_sum = te.compute(output_info[1], lambda *i: out(*i) + out2(*i), name="out_sum")
        dummy_out = te.compute(output_info[1], lambda *i: out_sum(*i), name="dummy_out")
        return p0, dummy_out

    def _schedule(dummy_out):
        from tvm.topi.adreno.utils import bind_data_copy  # pylint: disable=import-outside-toplevel

        s = te.create_schedule(dummy_out.op)
        out_sum = s[dummy_out].op.input_tensors[0]
        out, out2 = s[out_sum].op.input_tensors
        p0_comp = s[out].op.input_tensors[0]
        s[p0_comp].set_scope(input_info[0])
        bind_data_copy(s[p0_comp])
        s[out].set_scope(output_info[0])
        s[out2].set_scope(output_info[0])
        # Inline both intermediates so all three uses of the input collapse
        # into a single read in the generated kernel (the SSA reuse under test).
        s[out2].compute_inline()
        s[out].compute_inline()
        s[out_sum].set_scope(output_info[0])
        bind_data_copy(s[out_sum])
        bind_data_copy(s[dummy_out])
        return s

    p0, dummy_out = _compute()
    s = _schedule(dummy_out)
    fun = tvm.build(s, [p0, dummy_out], target)
    dev = tvm.device(target, 0)

    # Every expected fragment must occur, in order, in the OpenCL source.
    opencl_source = fun.imported_modules[0].get_source()
    start_idx = 0
    for pattern in find_patterns:
        start_idx = opencl_source.find(pattern, start_idx)
        assert start_idx > -1, f"Pattern not found in generated source: {pattern}"

    input_np = np.random.uniform(size=input_info[1]).astype(dtype)
    input_tvm = tvm.nd.array(input_np, dev)
    c = tvm.nd.empty(output_info[1], dtype, dev)
    # Doesn't run OpenCL code for FP16 because GPUs in CI don't support FP16 inference
    if cast_type == "float32":
        fun(input_tvm, c)
    # For output len == 5 it makes no sense to check the accuracy
    if cast_type == "float32" and len(output_info[1]) == 4:
        np_result = input_np * 3  # out + out2 == x + (x + x)
        np_result = np_result.transpose(0, 2, 3, 1, 4)  # NCHW4c -> NHWC4c
        np_result = np.squeeze(np_result, axis=3)
        np_result = np_result.transpose(0, 3, 1, 2)  # NHWC -> NCHW
        np.testing.assert_allclose(c.asnumpy(), np_result, rtol=1e-2, atol=1e-2)
class TestTextureToScalarReuseSSAFP16:
    """SSA-reuse checks with an FP16 cast.

    The patterns pin that the input is read once and the casted value is
    reused three times in the summed store expression.
    """

    # (input [scope, shape], output [scope, shape], [find_patterns])
    input_info, output_info, find_patterns = tvm.testing.parameters(
        # 1. Texture (NCHW4c) -> Cast(FP16) -> Buffer (NCHW)
        (
            ["global.texture", (1, 1, 40, 40, 4)],
            ["", (1, 4, 40, 40)],
            [
                "float4 v_ = READ_IMAGEF(p0_comp, image_sampler, ((int2)(((convert_int(get_local_id(0))) % 40), ((((convert_int(get_group_id(0))) & 1) * 20) + ((convert_int(get_local_id(0))) / 40)))));",
                "out_sum[(((convert_int(get_group_id(0))) * 800) + (convert_int(get_local_id(0))))] = ((convert_half(((float*)&v_)[((convert_int(get_group_id(0))) >> 1)])) + ((convert_half(((float*)&v_)[((convert_int(get_group_id(0))) >> 1)])) + (convert_half(((float*)&v_)[((convert_int(get_group_id(0))) >> 1)]))));",
            ],
        ),
        # 2. Buffer (NCHW4c) -> Cast(FP16) -> Buffer (NCHW)
        (
            ["", (1, 1, 40, 40, 4)],
            ["", (1, 4, 40, 40)],
            [
                " out_sum[(((convert_int(get_group_id(0))) * 800) + (convert_int(get_local_id(0))))] = ((convert_half(p0_comp[(((((convert_int(get_group_id(0))) & 1) * 3200) + ((convert_int(get_local_id(0))) * 4)) + ((convert_int(get_group_id(0))) >> 1))])) + ((convert_half(p0_comp[(((((convert_int(get_group_id(0))) & 1) * 3200) + ((convert_int(get_local_id(0))) * 4)) + ((convert_int(get_group_id(0))) >> 1))])) + (convert_half(p0_comp[(((((convert_int(get_group_id(0))) & 1) * 3200) + ((convert_int(get_local_id(0))) * 4)) + ((convert_int(get_group_id(0))) >> 1))]))));"
            ],
        ),
        # 3. Texture (NCHW4c) -> Cast(FP16) -> Texture (NCHW4c)
        (
            ["global.texture", (1, 1, 40, 40, 4)],
            ["global.texture", (1, 1, 40, 40, 4)],
            [
                "float4 v_ = READ_IMAGEF(p0_comp, image_sampler, ((int2)(((((convert_int(get_group_id(0))) * 24) + (convert_int(get_local_id(0)))) % 40), ((((convert_int(get_group_id(0))) * 8) + ((convert_int(get_local_id(0))) >> 3)) / 5))));",
                "write_imageh(out_sum, (int2)(((((convert_int(get_group_id(0))) * 24) + (convert_int(get_local_id(0)))) % 40), ((((convert_int(get_group_id(0))) * 8) + ((convert_int(get_local_id(0))) >> 3)) / 5)), ((convert_half4(v_)) + ((convert_half4(v_)) + (convert_half4(v_)))));",
            ],
        ),
    )
    dtype = tvm.testing.parameter("float32")

    @tvm.testing.parametrize_targets("opencl")
    def test_texture_to_scalar_reuse_ssa_fp16(
        self, input_info, output_info, find_patterns, dtype, target
    ):
        """Run the shared SSA-reuse checker with an FP16 cast (compile-only in CI)."""
        texture_to_scalar_reuse_ssa_common(
            target, input_info, output_info, find_patterns, dtype, "float16"
        )
class TestTextureToScalarReuseSSAFP32:
    """SSA-reuse checks without a cast (FP32); these kernels are also executed."""

    # (input [scope, shape], output [scope, shape], [find_patterns])
    input_info, output_info, find_patterns = tvm.testing.parameters(
        # 1. Texture (NCHW4c) -> Buffer (NCHW)
        (
            ["global.texture", (1, 1, 40, 40, 4)],
            ["", (1, 4, 40, 40)],
            [
                "float4 v_ = READ_IMAGEF(p0_comp, image_sampler, ((int2)(((convert_int(get_local_id(0))) % 40), ((((convert_int(get_group_id(0))) & 1) * 20) + ((convert_int(get_local_id(0))) / 40)))));",
                "out_sum[(((convert_int(get_group_id(0))) * 800) + (convert_int(get_local_id(0))))] = (((float*)&v_)[((convert_int(get_group_id(0))) >> 1)] + (((float*)&v_)[((convert_int(get_group_id(0))) >> 1)] + ((float*)&v_)[((convert_int(get_group_id(0))) >> 1)]));",
            ],
        ),
        # 2. Buffer (NCHW4c) -> Buffer (NCHW)
        (
            ["", (1, 1, 40, 40, 4)],
            ["", (1, 4, 40, 40)],
            [
                "out_sum[(((convert_int(get_group_id(0))) * 800) + (convert_int(get_local_id(0))))] = (p0_comp[(((((convert_int(get_group_id(0))) & 1) * 3200) + ((convert_int(get_local_id(0))) * 4)) + ((convert_int(get_group_id(0))) >> 1))] + (p0_comp[(((((convert_int(get_group_id(0))) & 1) * 3200) + ((convert_int(get_local_id(0))) * 4)) + ((convert_int(get_group_id(0))) >> 1))] + p0_comp[(((((convert_int(get_group_id(0))) & 1) * 3200) + ((convert_int(get_local_id(0))) * 4)) + ((convert_int(get_group_id(0))) >> 1))]));"
            ],
        ),
    )
    dtype = tvm.testing.parameter("float32")

    @tvm.testing.parametrize_targets("opencl")
    def test_texture_to_scalar_reuse_ssa_fp32(
        self, input_info, output_info, find_patterns, dtype, target
    ):
        """Run the shared SSA-reuse checker with an FP32 (identity) cast."""
        texture_to_scalar_reuse_ssa_common(
            target, input_info, output_info, find_patterns, dtype, "float32"
        )
class TestLocalArrayToTexture:
    """Checks that a conv2d that accumulates into a per-thread local array
    can read its operands from textures and store the result back to a
    texture with a vectorized (`vload4`/`write_imagef`) store."""

    # 1. conv2d(Texture(NCHW4c), Texture(OIHW4o)) -> local_array[4] -> Texture (NCHW4c)
    input_shape1, input_shape2, output_shape, find_patterns = tvm.testing.parameters(
        (
            (1, 1, 40, 40, 4),
            (2, 4, 3, 3, 4),
            (1, 2, 38, 38, 4),
            [
                "float out_local[4];",
                "float4 v_ = READ_IMAGEF(p1_comp, image_sampler, ((int2)(((((convert_int(get_group_id(0))) * 14) + (convert_int(get_local_id(0)))) % 38), (((((convert_int(get_group_id(0))) * 64) + ((convert_int(get_local_id(0))) >> 1)) % 722) / 19))));",
                "float4 v__1 = READ_IMAGEF(p2_comp, image_sampler, ((int2)(rw, (((((((convert_int(get_group_id(0))) * 32) + ((convert_int(get_local_id(0))) >> 2)) / 361) * 12) + (rcb * 3)) + rh))));",
                "out_local[cb_c] = (out_local[cb_c] + (((float*)&v_)[rcb] * ((float*)&v__1)[cb_c]));",
                "write_imagef(out, (int2)(((((convert_int(get_group_id(0))) * 14) + (convert_int(get_local_id(0)))) % 38), ((((convert_int(get_group_id(0))) * 64) + ((convert_int(get_local_id(0))) >> 1)) / 19)), vload4(0, out_local + 0));",
            ],
        ),
    )
    dtype = tvm.testing.parameter("float32")

    @tvm.testing.parametrize_targets("opencl")
    def test_local_array_to_texture(
        self, input_shape1, input_shape2, output_shape, find_patterns, dtype, target
    ):
        """Build the conv2d, verify codegen patterns, then execute the kernel."""

        def _compute():
            # Copy stages so both inputs can be placed into texture scope.
            p1 = te.placeholder(input_shape1, name="p1", dtype=dtype)
            p1_comp = te.compute(input_shape1, lambda *i: p1(*i), name="p1_comp")
            p2 = te.placeholder(input_shape2, name="p2", dtype=dtype)
            p2_comp = te.compute(input_shape2, lambda *i: p2(*i), name="p2_comp")
            # Reduction extents: kernel spatial dims and (blocked) input channels.
            KH, KW = input_shape2[2], input_shape2[3]
            IC, ICB = input_shape1[1], input_shape1[4]
            rh = te.reduce_axis((0, KH), name="rh")
            rw = te.reduce_axis((0, KW), name="rw")
            rc = te.reduce_axis((0, IC), name="rc")
            rcb = te.reduce_axis((0, ICB), name="rcb")
            # Direct (no padding/stride) NCHW4c convolution.
            out = te.compute(
                output_shape,
                lambda n, c, h, w, cb: te.sum(
                    (p1_comp[n, rc, h, w, rcb] * p2_comp[c, rc * ICB + rcb, rh, rw, cb]).astype(
                        dtype
                    ),
                    axis=[rh, rw, rc, rcb],
                ),
                name="out",
            )
            # Copy stage so `out` itself can be given texture scope.
            dummy_out = te.compute(output_shape, lambda *i: out(*i), name="dummy_out")
            return p1, p2, dummy_out

        def _schedule(dummy_out):
            from tvm.topi.adreno.utils import bind_data_copy  # pylint: disable=import-outside-toplevel

            s = te.create_schedule(dummy_out.op)
            out = s[dummy_out].op.input_tensors[0]
            p1_comp, p2_comp = s[out].op.input_tensors
            bind_data_copy(s[p1_comp])
            s[p1_comp].set_scope("global.texture")
            bind_data_copy(s[p2_comp])
            s[p2_comp].set_scope("global.texture")
            # Accumulate into a per-thread local array (`out_local` in OpenCL),
            # then write the 4-wide vector back to the output texture.
            OL = s.cache_write(out, "local")
            n, c, h, w, cb = s[out].op.axis
            fused = s[out].fuse(n, c, h, w)
            bx, tx = s[out].split(fused, 128)
            s[out].reorder(bx, tx, cb)
            s[out].vectorize(cb)
            s[out].set_scope("global.texture")
            s[out].bind(bx, te.thread_axis("blockIdx.x"))
            s[out].bind(tx, te.thread_axis("threadIdx.x"))
            s[OL].compute_at(s[out], tx)
            bind_data_copy(s[dummy_out])
            return s

        p1, p2, dummy_out = _compute()
        s = _schedule(dummy_out)
        fun = tvm.build(s, [p1, p2, dummy_out], target)
        dev = tvm.device(target, 0)
        opencl_source = fun.imported_modules[0].get_source()
        # All expected fragments must occur, in order, in the kernel source.
        start_idx = 0
        for pattern in find_patterns:
            start_idx = opencl_source.find(pattern, start_idx)
            assert start_idx > -1
        input_np1 = np.random.uniform(size=[i for i in input_shape1]).astype(dtype)
        input_np2 = np.random.uniform(size=[i for i in input_shape2]).astype(dtype)
        input_tvm1 = tvm.nd.array(input_np1, dev)
        input_tvm2 = tvm.nd.array(input_np2, dev)
        c = tvm.nd.empty(output_shape, dtype, dev)
        # Smoke-run only: no numerical comparison against a reference here.
        fun(input_tvm1, input_tvm2, c)
if __name__ == "__main__":
    # Dispatch to the pytest-based TVM test runner when executed as a script.
    tvm.testing.main()
| 66,834 | 36.611142 | 574 | py |
tvm | tvm-main/tests/python/unittest/test_meta_schedule_relay_integration.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Integration test for MetaSchedule"""
import platform
import tempfile
from typing import List
import numpy as np
import pytest
import tvm
import tvm.testing
from tvm import IRModule
from tvm import meta_schedule as ms
from tvm import relay, te, tir
from tvm._ffi import register_func
from tvm.contrib import graph_executor
from tvm.ir.transform import PassContext
from tvm.meta_schedule.database import TuningRecord, Workload
from tvm.meta_schedule.testing.relay_workload import get_network
from tvm.meta_schedule.testing.tlcbench import load_quantized_bert_base
from tvm.meta_schedule.tune_context import _normalize_mod
from tvm.script import tir as T
from tvm.target import Target
# pylint: disable=no-member,line-too-long,too-many-nested-blocks,unbalanced-tuple-unpacking,no-self-argument,missing-docstring,invalid-name
@tvm.script.ir_module
class MockModule:
    # A trivial one-block TIR module used as a stand-in workload.
    # NOTE: the block is named "matmul" but it simply copies A into B.
    @T.prim_func
    def main(a: T.handle, b: T.handle) -> None:  # type: ignore
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        A = T.match_buffer(a, (16,), "float32")
        B = T.match_buffer(b, (16,), "float32")
        for i in T.serial(0, 16):
            with T.block("matmul"):
                vi = T.axis.remap("S", [i])
                B[vi] = A[vi]
# pylint: enable=no-member,line-too-long,too-many-nested-blocks,unbalanced-tuple-unpacking,no-self-argument
@pytest.mark.skip("Integration tests")
def test_meta_schedule_dynamic_loop_extent():
    """A purely spatial adaptive_avg_pool2d must yield no tunable tasks."""
    data = relay.var("a", shape=(1, 8, 8, 512), dtype="float32")
    pooled = relay.nn.adaptive_avg_pool2d(data, (7, 7), "NHWC")
    func = relay.Function([data], pooled)
    tasks = ms.relay_integration.extract_tasks(
        IRModule({"main": func}), target="llvm", params={}
    )
    assert not tasks
@pytest.mark.skip("Integration tests")
@pytest.mark.skipif(
    platform.machine() == "aarch64",
    reason="Currently torch.jit.trace fails on AArch64",
)
@tvm.testing.requires_package("torch")
def test_meta_schedule_integration_extract_from_resnet():
    """Task extraction on ResNet-18 must produce exactly the expected set of
    fused kernels (every extracted task name appears in the table, and the
    counts match)."""
    mod, params, _ = get_network(name="resnet_18", input_shape=[1, 3, 224, 224])
    extracted_tasks = ms.relay_integration.extract_tasks(mod, target="llvm", params=params)
    expected_task_names = [
        "fused_" + s
        for s in [
            "nn_max_pool2d",
            "nn_adaptive_avg_pool2d",
            "nn_dense_add",
            "nn_conv2d_add",
            "nn_conv2d_add_1",
            "nn_conv2d_add_2",
            "nn_conv2d_add_add_nn_relu",
            "nn_conv2d_add_add_nn_relu_1",
            "nn_conv2d_add_nn_relu",
            "nn_conv2d_add_nn_relu_1",
            "nn_conv2d_add_nn_relu_2",
            "nn_conv2d_add_nn_relu_3",
            "nn_conv2d_add_nn_relu_4",
            "nn_conv2d_add_nn_relu_5",
            "nn_contrib_conv2d_winograd_without_weight_transform_add_add_nn_relu",
            "nn_contrib_conv2d_winograd_without_weight_transform_add_add_nn_relu_1",
            "nn_contrib_conv2d_winograd_without_weight_transform_add_nn_relu",
            "nn_contrib_conv2d_winograd_without_weight_transform_add_nn_relu_1",
            # The two tasks below are purely spatial and are ruled out by AutoScheduler
            "layout_transform",
            "layout_transform_reshape_squeeze",
        ]
    ]

    assert len(extracted_tasks) == len(expected_task_names)
    for t in extracted_tasks:
        assert t.task_name in expected_task_names, t.task_name
@pytest.mark.skip("Integration tests")
@pytest.mark.skipif(
    platform.machine() == "aarch64",
    reason="Currently torch.jit.trace fails on AArch64",
)
@tvm.testing.requires_package("torch")
def test_task_extraction_winograd_tensorcore():
    """ResNet-50 converted to FP16/NHWC on a tensorcore GPU must yield
    exactly four winograd conv2d tasks."""
    relay_mod, relay_params, _ = get_network(name="resnet_50", input_shape=[16, 3, 224, 224])
    passes = tvm.transform.Sequential(
        [
            relay.transform.ToMixedPrecision("float16"),
            relay.transform.ConvertLayout({"nn.conv2d": ["NHWC", "HWIO"]}),
        ]
    )
    with tvm.transform.PassContext(opt_level=3):
        relay_mod = passes(relay_mod)

    gpu_target = tvm.target.Target("nvidia/geforce-rtx-3070")
    tasks = ms.relay_integration.extract_tasks(relay_mod, target=gpu_target, params=relay_params)
    winograd_tasks = [task for task in tasks if "winograd" in task.task_name]
    assert len(winograd_tasks) == 4
@pytest.mark.skip("Integration tests")
@pytest.mark.skipif(
    platform.machine() == "aarch64",
    reason="Currently torch.jit.trace fails on AArch64",
)
@tvm.testing.requires_package("torch")
def test_task_extraction_anchor_block():
    """With "anchor-block" module equality, structurally-equal anchor convs
    are deduplicated, so ResNet-18's residual-block variants collapse into
    fewer tasks than plain extraction."""
    mod, params, _ = get_network(name="resnet_18", input_shape=[1, 3, 224, 224])
    extracted_tasks = ms.relay_integration.extract_tasks(
        mod, target="llvm", params=params, module_equality="anchor-block"
    )

    # Note that there is no task from residual blocks
    expected_task_names = [
        "fused_" + s
        for s in [
            "nn_max_pool2d",
            "nn_adaptive_avg_pool2d",
            "nn_dense_add",
            "nn_conv2d_add",
            "nn_conv2d_add_1",
            "nn_conv2d_add_2",
            "nn_conv2d_add_nn_relu",
            "nn_conv2d_add_nn_relu_1",
            "nn_conv2d_add_nn_relu_2",
            "nn_conv2d_add_nn_relu_3",
            "nn_conv2d_add_nn_relu_4",
            "nn_conv2d_add_nn_relu_5",
            "nn_contrib_conv2d_winograd_without_weight_transform_add_nn_relu",
            "nn_contrib_conv2d_winograd_without_weight_transform_add_nn_relu_1",
            "layout_transform",
            "layout_transform_reshape_squeeze",
        ]
    ]

    assert len(extracted_tasks) == len(expected_task_names)
    for t in extracted_tasks:
        assert t.task_name in expected_task_names, t.task_name
@pytest.mark.skip("Integration tests")
@tvm.testing.requires_package("torch")
def test_meta_schedule_integration_extract_from_bert_base():
    """Task extraction on BERT-base must yield the expected tasks, each with
    the expected weight (occurrence count) and argument buffer shapes."""
    pytest.importorskip(
        "transformers", reason="transformers package is required to import bert_base"
    )

    # task name -> (expected task weight, expected argument buffer shapes)
    expected = {
        "fused_nn_dense_2": (
            12,
            [[64, 3072], [768, 3072], [64, 768]],
        ),
        "fused_nn_dense": (
            48,
            [[64, 768], [768, 768], [64, 768]],
        ),
        "fused_nn_dense_1": (
            12,
            [[64, 768], [3072, 768], [64, 3072]],
        ),
        "fused_subtract_add_rsqrt_multiply_multiply_add": (
            25,
            [[1, 64, 768], [1, 64, 1], [1, 64, 1], [768], [768], [1, 64, 768]],
        ),
        "fused_nn_batch_matmul": (
            24,
            [[12, 64, 64], [12, 64, 64], [12, 64, 64]],
        ),
        "fused_reshape_add_add": (
            24,
            [[64, 768], [768], [1, 64, 768], [1, 64, 768]],
        ),
        "fused_variance": (
            25,
            [[1, 64, 768], [1, 64, 1], [1, 64, 1]],
        ),
        "fused_mean": (
            25,
            [[1, 64, 768], [1, 64, 1]],
        ),
        "fused_reshape_add_reshape_transpose_reshape": (
            12,
            [[64, 768], [768], [12, 64, 64]],
        ),
        "fused_reshape_add_multiply_fast_erf_multiply_add_multiply_reshape": (
            12,
            [[64, 3072], [3072], [64, 3072]],
        ),
        "fused_nn_fast_softmax": (
            12,
            [[1, 12, 64, 64], [1, 12, 64, 64]],
        ),
        "fused_reshape_add_reshape_transpose_reshape_1": (
            24,
            [[64, 768], [768], [12, 64, 64]],
        ),
        "fused_reshape_divide_add": (
            12,
            [[12, 64, 64], [1, 1, 1, 64], [1, 12, 64, 64]],
        ),
        "fused_reshape_transpose_reshape": (
            12,
            [[12, 64, 64], [64, 768]],
        ),
        "fused_nn_dense_add_fast_tanh": (
            1,
            [[1, 768], [768, 768], [1, 768], [1, 768]],
        ),
        "fused_cast_take_add": (
            1,
            [[1, 64], [30522, 768], [1, 64, 768], [1, 64, 768]],
        ),
        "fused_take": (
            1,
            [[1, 64, 768], [1, 768]],
        ),
        "fused_reshape": (
            12,
            [[1, 12, 64, 64], [12, 64, 64]],
        ),
        "fused_reshape_1": (
            24,
            [[1, 64, 768], [64, 768]],
        ),
    }
    mod, params, _ = get_network(name="bert_base", input_shape=[1, 64])
    extracted_tasks = ms.relay_integration.extract_tasks(mod, target="llvm", params=params)
    assert len(extracted_tasks) == len(expected)
    for t in extracted_tasks:
        # Take the (single) PrimFunc of the dispatched TIR module and collect
        # the shapes of all its buffer arguments for comparison.
        prim_func = None
        for _, v in t.dispatched[0].functions.items():
            prim_func = v
        shape = [[int(x) for x in prim_func.buffer_map[b].shape] for b in prim_func.params]
        assert t.task_name in expected
        expected_weight, expected_shape = expected[t.task_name]
        assert expected_weight == t.weight, t.task_name
        assert expected_shape == shape, t.task_name
@pytest.mark.skip("Integration tests")
@pytest.mark.skipif(
    platform.machine() == "aarch64",
    reason="Currently torch.jit.trace fails on AArch64",
)
@tvm.testing.requires_package("torch")
def test_meta_schedule_integration_extract_from_resnet_with_filter_func():
    """Same as the plain ResNet-18 extraction test, but with a custom TIR
    converter that drops purely spatial subgraphs (no reduction op), so the
    two layout_transform tasks must be absent from the result."""

    @register_func("relay.backend.tir_converter.remove_purely_spatial", override=True)
    def filter_func(args, _) -> bool:
        # Returns None (task dropped) when the TE subgraph contains no
        # reduction; otherwise converts it to a PrimFunc as usual.
        from tvm.te import create_prim_func  # pylint: disable=import-outside-toplevel

        has_complex_op = False
        visited = set()

        def traverse(t):
            # DFS over the TE tensor DAG looking for any Reduce node.
            nonlocal has_complex_op
            assert t.handle is not None
            if t.handle.value in visited:
                return
            if isinstance(t.op, te.PlaceholderOp):
                pass
            elif isinstance(t.op, te.ComputeOp):
                has_complex_op = has_complex_op or any(isinstance(e, tir.Reduce) for e in t.op.body)
                for x in t.op.input_tensors:
                    traverse(x)
            visited.add(t.handle.value)

        for t in args:
            traverse(t)
        if not has_complex_op:
            return None
        return create_prim_func(args)

    mod, params, _ = get_network(name="resnet_18", input_shape=[1, 3, 224, 224])
    extracted_tasks = ms.relay_integration.extract_tasks(
        mod,
        target="llvm",
        params=params,
        pass_config={
            "relay.backend.use_meta_schedule": True,
            "relay.backend.tir_converter": "remove_purely_spatial",
        },
    )
    expected_task_names = [
        "fused_" + s
        for s in [
            "nn_max_pool2d",
            "nn_adaptive_avg_pool2d",
            "nn_dense_add",
            "nn_conv2d_add",
            "nn_conv2d_add_1",
            "nn_conv2d_add_2",
            "nn_conv2d_add_add_nn_relu",
            "nn_conv2d_add_add_nn_relu_1",
            "nn_conv2d_add_nn_relu",
            "nn_conv2d_add_nn_relu_1",
            "nn_conv2d_add_nn_relu_2",
            "nn_conv2d_add_nn_relu_3",
            "nn_conv2d_add_nn_relu_4",
            "nn_conv2d_add_nn_relu_5",
            "nn_contrib_conv2d_winograd_without_weight_transform_add_add_nn_relu",
            "nn_contrib_conv2d_winograd_without_weight_transform_add_add_nn_relu_1",
            "nn_contrib_conv2d_winograd_without_weight_transform_add_nn_relu",
            "nn_contrib_conv2d_winograd_without_weight_transform_add_nn_relu_1",
        ]
    ]

    assert len(extracted_tasks) == len(expected_task_names)
    for t in extracted_tasks:
        assert t.task_name in expected_task_names, t.task_name
def extract_task_qbert(target, sch_rule_tag):
    """Extract tasks from quantized BERT-base and verify every int8
    dense/batch_matmul task carries the expected schedule-rule tag."""
    mod, params, _ = load_quantized_bert_base(batch_size=1, seq_len=128)
    extracted = ms.relay_integration.extract_tasks(mod, target, params)
    tune_tasks = [
        task
        for task in extracted
        if "dense" in task.task_name or "batch_matmul" in task.task_name
    ]
    # three int8 dense, two int8 bmm, and one fp32 dense
    assert len(tune_tasks) == 6
    for task in tune_tasks:
        relay_func = list(task.mod.functions.values())[0]
        if relay_func.body.checked_type.dtype == "float32":
            # The single fp32 dense is not covered by the int8 schedule rule.
            continue
        sch = tvm.tir.Schedule(_normalize_mod(task.dispatched[0]))
        annotations = sch.get(sch.get_block("compute")).annotations
        assert "schedule_rule" in annotations
        assert sch_rule_tag in annotations["schedule_rule"]
@pytest.mark.skip("Too slow on CI")
def extract_task_qbert_vnni():
    # Cascade Lake: int8 dense/bmm tasks must be tagged with the VNNI rule.
    extract_task_qbert("llvm -mcpu=cascadelake", "vnni")
@pytest.mark.skip("Too slow on CI")
def extract_task_qbert_avx512():
    # Skylake-AVX512: int8 dense/bmm tasks must be tagged with the AVX512 rule.
    extract_task_qbert("llvm -mcpu=skylake-avx512", "avx512")
@pytest.mark.skip("Integration tests")
@tvm.testing.skip_if_32bit(reason="Apparently the LLVM version on i386 image is too old")
def test_extract_task_arm_conv2d_nchwc():
    """The int8 conv2d task extracted for an ARM NEON target must come out
    converted to the blocked NCHWc layout."""
    in_shape = (1, 64, 128, 128)
    kernel_shape = (32, 64, 1, 1)
    bias_dims = (kernel_shape[0],)

    data_var = relay.var("data", shape=in_shape, dtype="int8")
    weight_var = relay.var("weight", shape=kernel_shape, dtype="int8")
    bias_var = relay.var("bias", shape=bias_dims, dtype="int32")
    conv = relay.nn.conv2d(
        data=data_var,
        weight=weight_var,
        kernel_size=kernel_shape[2:],
        channels=kernel_shape[0],
        padding=(1, 1),
        strides=(1, 1),
        out_dtype="int32",
    )
    relay_mod = tvm.IRModule.from_expr(relay.nn.bias_add(conv, bias_var))

    params = {
        "weight": np.random.uniform(1, 10, size=kernel_shape).astype("int8"),
        "bias": np.random.uniform(1, 10, size=bias_dims).astype("int32"),
    }

    target = "llvm -device arm_cpu -mtriple aarch64-linux-gnu -mattr=+neon"
    tasks = ms.relay_integration.extract_tasks(relay_mod, target, params)
    conv_tasks = [task for task in tasks if "conv2d" in task.task_name]
    assert len(conv_tasks) == 1

    relay_func = list(conv_tasks[0].mod.functions.values())[0]
    # Check that the output is in NCHWc layout
    assert list(relay_func.body.checked_type.shape) == [1, 8, 130, 130, 4]
@pytest.mark.skip("Integration tests")
def test_meta_schedule_te2primfunc_argument_order_and_lowering():
# pylint: disable=invalid-name,no-member,line-too-long,too-many-nested-blocks,no-self-argument
# fmt: off
@tvm.script.ir_module
class _fused_layout_transform:
@T.prim_func
def main( # type: ignore
placeholder: T.Buffer((T.int64(1), T.int64(3), T.int64(16), T.int64(16)), "float32"), # type: ignore
T_layout_trans: T.Buffer((T.int64(1), T.int64(1), T.int64(16), T.int64(16), T.int64(3)), "float32"), # type: ignore
) -> None: # type: ignore
# function attr dict
T.func_attr({"global_symbol": "main", "tir.noalias": True})
# body
# with T.block("root")
for i0, i1, i2, i3, i4 in T.grid(T.int64(1), T.int64(1), T.int64(16), T.int64(16), T.int64(3)):
with T.block("T_layout_trans"):
ax0, ax1, ax2, ax3, ax4 = T.axis.remap("SSSSS", [i0, i1, i2, i3, i4])
T.reads(placeholder[ax0, ax1 * T.int64(3) + ax4, ax2, ax3])
T.writes(T_layout_trans[ax0, ax1, ax2, ax3, ax4])
T.block_attr({"dst_layout": "NCHW3c", "input_shape": [1, 3, 16, 16], "schedule_rule": "None", "src_layout": "NCHW"})
T_layout_trans[ax0, ax1, ax2, ax3, ax4] = T.if_then_else(
ax0 < T.int64(1) and ax1 * T.int64(3) + ax4 < T.int64(3) and ax2 < T.int64(16) and ax3 < T.int64(16), # type: ignore
placeholder[ax0, ax1 * T.int64(3) + ax4, ax2, ax3],
T.float32(0),
dtype="float32",
)
@tvm.script.ir_module
class _fused_layout_transform_1:
@T.prim_func
def main(placeholder: T.Buffer((T.int64(1), T.int64(2), T.int64(16), T.int64(16), T.int64(4)), "float32"), T_layout_trans: T.Buffer((T.int64(1), T.int64(8), T.int64(16), T.int64(16)), "float32")) -> None: # type: ignore
# function attr dict
T.func_attr({"global_symbol": "main", "tir.noalias": True})
# body
# with T.block("root")
for i0, i1, i2, i3 in T.grid(T.int64(1), T.int64(8), T.int64(16), T.int64(16)):
with T.block("T_layout_trans"):
ax0, ax1, ax2, ax3 = T.axis.remap("SSSS", [i0, i1, i2, i3])
T.reads(placeholder[ax0, ax1 // T.int64(4), ax2, ax3, ax1 % T.int64(4)]) # type: ignore
T.writes(T_layout_trans[ax0, ax1, ax2, ax3])
T.block_attr({"dst_layout": "NCHW", "input_shape": [1, 2, 16, 16, 4], "schedule_rule": "None", "src_layout": "NCHW4c"})
T_layout_trans[ax0, ax1, ax2, ax3] = T.if_then_else(ax0 < T.int64(1) and ax1 < T.int64(8) and ax2 < T.int64(16) and ax3 < T.int64(16), placeholder[ax0, ax1 // T.int64(4), ax2, ax3, ax1 % T.int64(4)], T.float32(0), dtype="float32") # type: ignore
@tvm.script.ir_module
class _fused_nn_contrib_conv2d_NCHWc:
@T.prim_func
def main(placeholder: T.Buffer((T.int64(1), T.int64(1), T.int64(16), T.int64(16), T.int64(3)), "float32"), placeholder_1: T.Buffer((T.int64(2), T.int64(1), T.int64(5), T.int64(5), T.int64(3), T.int64(4)), "float32"), conv2d_NCHWc: T.Buffer((T.int64(1), T.int64(2), T.int64(16), T.int64(16), T.int64(4)), "float32")) -> None: # type: ignore
# function attr dict
T.func_attr({"global_symbol": "main", "tir.noalias": True})
# body
# with T.block("root")
data_pad = T.alloc_buffer([T.int64(1), T.int64(1), T.int64(20), T.int64(20), T.int64(3)], dtype="float32")
for i0, i1, i2, i3, i4 in T.grid(T.int64(1), T.int64(1), T.int64(20), T.int64(20), T.int64(3)):
with T.block("data_pad"):
i0_1, i1_1, i2_1, i3_1, i4_1 = T.axis.remap("SSSSS", [i0, i1, i2, i3, i4])
T.reads(placeholder[i0_1, i1_1, i2_1 - T.int64(2), i3_1 - T.int64(2), i4_1])
T.writes(data_pad[i0_1, i1_1, i2_1, i3_1, i4_1])
data_pad[i0_1, i1_1, i2_1, i3_1, i4_1] = T.if_then_else(T.int64(2) <= i2_1 and i2_1 < T.int64(18) and T.int64(2) <= i3_1 and i3_1 < T.int64(18), placeholder[i0_1, i1_1, i2_1 - T.int64(2), i3_1 - T.int64(2), i4_1], T.float32(0), dtype="float32") # type: ignore # pylint: disable=R1716
for i0, i1, i2, i3, i4, i5, i6, i7 in T.grid(T.int64(1), T.int64(2), T.int64(16), T.int64(16), T.int64(4), T.int64(3), T.int64(5), T.int64(5)):
with T.block("conv2d_NCHWc"):
n, oc_chunk, oh, ow, oc_block, ic, kh, kw = T.axis.remap("SSSSSRRR", [i0, i1, i2, i3, i4, i5, i6, i7])
T.reads(data_pad[n, ic // T.int64(3), oh + kh, ow + kw, ic % T.int64(3)], placeholder_1[oc_chunk, ic // T.int64(3), kh, kw, ic % T.int64(3), oc_block]) # type: ignore
T.writes(conv2d_NCHWc[n, oc_chunk, oh, ow, oc_block])
with T.init():
conv2d_NCHWc[n, oc_chunk, oh, ow, oc_block] = T.float32(0)
conv2d_NCHWc[n, oc_chunk, oh, ow, oc_block] = conv2d_NCHWc[n, oc_chunk, oh, ow, oc_block] + data_pad[n, ic // T.int64(3), oh + kh, ow + kw, ic % T.int64(3)] * placeholder_1[oc_chunk, ic // T.int64(3), kh, kw, ic % T.int64(3), oc_block] # type: ignore
# fmt: on
# pylint: enable=invalid-name,no-member,line-too-long,too-many-nested-blocks,no-self-argument
def _create_verification_database():
@ms.derived_object
class VerificationDatabase(ms.database.PyDatabase):
def __init__(self):
super().__init__()
self.tuning_records_: List[TuningRecord] = []
self.workloads_: List[Workload] = []
def has_workload(self, mod: IRModule) -> bool:
for workload in self.workloads_:
if tvm.ir.structural_equal(mod, workload.mod):
return True
# Note: The database has already put in all correct workloads
# This is where we can check if the workload is correct
raise ValueError(
"The workload searched for is not in given database!"
+ " Incorrect TIR was generated from TE subgraph."
)
def commit_workload(self, mod: IRModule) -> ms.database.Workload:
# No need to deduplicate workload because they are specified
workload = ms.database.Workload(mod)
self.workloads_.append(workload)
return workload
def commit_tuning_record(self, record: TuningRecord) -> None:
self.tuning_records_.append(record)
def get_all_tuning_records(self) -> List[TuningRecord]:
return self.tuning_records_
def get_top_k(self, workload: ms.database.Workload, top_k: int) -> List[TuningRecord]:
return sorted(
list(
filter(
lambda x: tvm.ir.structural_equal(workload.mod, x.workload.mod),
self.tuning_records_,
)
),
key=lambda x: sum(x.run_secs) / len(x.run_secs) if x.run_secs else 1e9,
)[:top_k]
def __len__(self) -> int:
return len(self.tuning_records_)
database = VerificationDatabase()
def _commit(mod):
workload = database.commit_workload(mod)
database.commit_tuning_record(
ms.database.TuningRecord(
tir.schedule.Trace([], {}),
workload=workload,
run_secs=[0.1],
)
)
_commit(_fused_layout_transform)
_commit(_fused_layout_transform_1)
_commit(_fused_nn_contrib_conv2d_NCHWc)
return database
    data_shape = (1, 3, 16, 16)
    weight_shape = (8, 3, 5, 5)
    def _create_relay_mod():
        # A single conv2d in NCHW/OIHW layout; relay lowers it into the fused
        # subgraphs whose TIR is pinned by the verification database.
        data = relay.var("data", relay.TensorType(data_shape, "float32"))
        weight = relay.var("weight", relay.TensorType(weight_shape, "float32"))
        y = relay.nn.conv2d(
            data,
            weight,
            padding=(2, 2),
            kernel_size=(5, 5),
            kernel_layout="OIHW",
            out_dtype="float32",
        )
        f = relay.Function([data, weight], y)
        mod = tvm.IRModule.from_expr(f)
        # Run type inference so the module is ready for relay.build.
        mod = relay.transform.InferType()(mod)
        return mod
    mod = _create_relay_mod()
    dev = tvm.cpu()
    target = Target("llvm --num-cores=16")
    # Random constant weight and input data for the numerical comparison.
    params = {
        "weight": np.random.rand(*weight_shape).astype("float32"),
    }
    data = tvm.nd.array(
        np.random.rand(*data_shape).astype("float32"),
        dev,
    )
    # Build WITH meta-schedule dispatch, driven by the pinned verification
    # database entered as a context manager.
    with (
        target
    ), _create_verification_database(), PassContext(  # pylint: disable=not-context-manager
        opt_level=3,
        config={
            "relay.backend.use_meta_schedule": True,
            "relay.backend.use_meta_schedule_dispatch": 7,
            "relay.backend.tir_converter": "default",
        },
    ):
        rt_mod1 = relay.build(mod, target=target, params=params)
    # Compile without meta-schedule for correctness check
    with tvm.transform.PassContext(opt_level=0):
        rt_mod2 = relay.build(mod, target=target, params=params)
def get_output(data, lib):
module = graph_executor.GraphModule(lib["default"](dev))
module.set_input("data", data)
module.run()
return module.get_output(0).numpy()
# Check correctness
actual_output = get_output(data, rt_mod1)
expected_output = get_output(data, rt_mod2)
assert np.allclose(actual_output, expected_output, rtol=1e-4, atol=2e-4)
@pytest.mark.skip("Integration tests")
def test_rewrite_layout_link_params():
    """Tune + compile an NHWC conv2d-plus-bias with link-params enabled and
    check the compiled output against an untuned graph-executor reference,
    once per search strategy ("replay-trace" and "evolutionary").
    """
    I, O, H, W = 64, 64, 56, 56
    kH = kW = 3
    strides = (1, 1)
    padding = (1, 1)
    data_shape = (1, H, W, I)
    w_shape = (kH, kW, I, O)
    bias_shape = (1, 1, 1, O)
    data = relay.var("data", shape=data_shape, dtype="float32")
    weight = relay.var("weight1", shape=w_shape, dtype="float32")
    bias = relay.var("bias", shape=bias_shape, dtype="float32")
    conv = relay.nn.conv2d(
        data=data,
        weight=weight,
        kernel_size=(kH, kW),
        channels=O,
        padding=padding,
        strides=strides,
        data_layout="NHWC",
        kernel_layout="HWIO",
        out_dtype="float32",
    )
    mod = tvm.IRModule.from_expr(conv + bias)
    weight_np = np.random.randn(*w_shape).astype("float32")
    bias_np = np.random.randn(*bias_shape).astype("float32")
    params = {"weight1": weight_np, "bias": bias_np}
    data_np = np.random.randn(*data_shape).astype("float32")
    # Reference result computed before link-params is attached.
    ref = (
        relay.create_executor("graph", mod=mod, device=tvm.cpu(0), target="llvm")
        .evaluate()(*[data_np, weight_np, bias_np])
        .numpy()
    )
    link_params = True
    target = "llvm --num-cores=4"
    executor = relay.backend.Executor("graph", {"link-params": link_params})
    mod = mod.with_attr("executor", executor)
    for strategy in ["replay-trace", "evolutionary"]:
        with tempfile.TemporaryDirectory() as work_dir:
            database = ms.relay_integration.tune_relay(
                mod=mod,
                target=target,
                params=params,
                work_dir=work_dir,
                max_trials_global=4,
                strategy=strategy,
            )
            lib = ms.relay_integration.compile_relay(
                database=database,
                mod=mod,
                target=target,
                params=params,
            )
        dev = tvm.device(target, 0)
        runtime = tvm.contrib.graph_executor.GraphModule(lib["default"](dev))
        runtime.set_input("data", data_np)
        runtime.run()
        out = runtime.get_output(0).numpy()
        np.testing.assert_allclose(ref, out, rtol=1e-4, atol=1e-4)
@pytest.mark.skip("Integration tests")
def test_module_equality_ignore_ndarray():
    """With link-params, two dense layers with different embedded weights
    produce two structurally distinct tasks; the "ignore-ndarray" module
    equality must deduplicate them into one task, which is then tuned and
    the compiled result checked numerically.
    """
    target = "llvm --num-cores=4"
    data_shape = (128, 128)
    weight_shape1 = (128, 128)
    weight_shape2 = (128, 128)
    data = relay.var("data", shape=data_shape, dtype="float32")
    weight1 = relay.var("weight1", shape=weight_shape1, dtype="float32")
    weight2 = relay.var("weight2", shape=weight_shape2, dtype="float32")
    dense1 = relay.nn.dense(data, weight1)
    dense2 = relay.nn.dense(dense1, weight2)
    mod = tvm.IRModule.from_expr(dense2)
    weight1_np = np.random.randn(*weight_shape1).astype("float32")
    weight2_np = np.random.randn(*weight_shape2).astype("float32")
    params = {"weight1": weight1_np, "weight2": weight2_np}
    executor = relay.backend.Executor("graph", {"link-params": True})
    mod = mod.with_attr("executor", executor)
    # Without using ignore-ndarray for module equality, we get duplicated tasks
    assert len(ms.relay_integration.extract_tasks(mod, target, params)) == 2
    # (fixed local-name typo: was "module_eqality")
    module_equality = "ignore-ndarray"
    extracted_tasks = ms.relay_integration.extract_tasks(
        mod, target, params, module_equality=module_equality
    )
    assert len(extracted_tasks) == 1
    with tempfile.TemporaryDirectory() as work_dir:
        tasks, task_weights = ms.relay_integration.extracted_tasks_to_tune_contexts(
            extracted_tasks, work_dir, strategy="replay-trace"
        )
        database = ms.tune.tune_tasks(
            tasks=tasks,
            task_weights=task_weights,
            work_dir=work_dir,
            max_trials_global=4,
            module_equality=module_equality,
        )
        lib = ms.relay_integration.compile_relay(database, mod, target, params)
    dev = tvm.device(target, 0)
    runtime = tvm.contrib.graph_executor.GraphModule(lib["default"](dev))
    data_np = np.random.randn(*data_shape).astype("float32")
    runtime.set_input("data", data_np)
    runtime.run()
    out = runtime.get_output(0).numpy()
    # relay.nn.dense computes x @ w.T, hence the transposed reference chain.
    ref = np.dot(np.dot(data_np, weight1_np.transpose()), weight2_np.transpose())
    np.testing.assert_allclose(ref, out, rtol=1e-4, atol=1e-4)
def _test_anchor_tuning(target, space):
    """Shared driver: tune a two-dense graph with anchor-block module equality
    (which must collapse everything to a single task), compile from the tuned
    database, and check the output against a graph-executor reference.
    """
    data_shape = (128, 128)
    weight_shape1 = (128, 128)
    weight_shape2 = (128, 128)
    data = relay.var("data", shape=data_shape, dtype="float32")
    weight1 = relay.var("weight1", shape=weight_shape1, dtype="float32")
    weight2 = relay.var("weight2", shape=weight_shape2, dtype="float32")
    dense1 = relay.nn.dense(data, weight1)
    # Elementwise additions around the matmuls exercise anchor-block fusion.
    dense2 = relay.nn.dense(dense1 + relay.const(1.0, dtype="float32"), weight2)
    mod = tvm.IRModule.from_expr(dense2 - data + relay.const(1.0, dtype="float32"))
    weight1_np = np.random.randn(*weight_shape1).astype("float32")
    weight2_np = np.random.randn(*weight_shape2).astype("float32")
    data_np = np.random.randn(*data_shape).astype("float32")
    params = {"weight1": weight1_np, "weight2": weight2_np}
    module_equality = "anchor-block"
    extracted_tasks = ms.relay_integration.extract_tasks(
        mod, target, params, module_equality=module_equality
    )
    # Anchor-block equality deduplicates both dense subgraphs into one task.
    assert len(extracted_tasks) == 1
    with tempfile.TemporaryDirectory() as work_dir:
        database = ms.relay_integration.tune_relay(
            mod=mod,
            target=target,
            params=params,
            work_dir=work_dir,
            space=space,
            max_trials_global=4,
            strategy="replay-trace",
            module_equality=module_equality,
            num_tuning_cores=4,
        )
    lib = ms.relay_integration.compile_relay(database, mod, target, params)
    dev = tvm.device(target, 0)
    runtime = tvm.contrib.graph_executor.GraphModule(lib["default"](dev))
    runtime.set_input("data", data_np)
    runtime.run()
    out = runtime.get_output(0).numpy()
    ref = (
        relay.create_executor("graph", mod=mod, device=tvm.cpu(0), target="llvm")
        .evaluate()(*[data_np, weight1_np, weight2_np])
        .numpy()
    )
    np.testing.assert_allclose(ref, out, atol=1e-3)
@pytest.mark.skip("Integration tests")
@pytest.mark.parametrize(
    "space",
    [
        ms.space_generator.PostOrderApply(),
        ms.space_generator.PostOrderApply(sch_rules=[], postprocs=[], mutator_probs={}),
    ],
)
def test_anchor_tuning_cpu(space):
    """Run anchor-block tuning on CPU with both a default and an empty design space."""
    _test_anchor_tuning("llvm --num-cores=4", space)
@pytest.mark.skip("Integration tests")
def test_anchor_tuning_cpu_link_params():
    """Anchor-block tuning combined with link-params: tune, compile from the
    database, and check the runtime output against a graph-executor reference.
    """
    data_shape = (128, 128)
    weight_shape1 = (128, 128)
    weight_shape2 = (128, 128)
    data = relay.var("data", shape=data_shape, dtype="float32")
    weight1 = relay.var("weight1", shape=weight_shape1, dtype="float32")
    weight2 = relay.var("weight2", shape=weight_shape2, dtype="float32")
    dense1 = relay.nn.dense(data, weight1)
    dense2 = relay.nn.dense(dense1, weight2)
    mod = tvm.IRModule.from_expr(dense2 + relay.const(1.0, dtype="float32"))
    weight1_np = np.random.randn(*weight_shape1).astype("float32")
    weight2_np = np.random.randn(*weight_shape2).astype("float32")
    data_np = np.random.randn(*data_shape).astype("float32")
    params = {"weight1": weight1_np, "weight2": weight2_np}
    module_equality = "anchor-block"
    target = "llvm --num-cores=4"
    # link-params embeds the constant weights into the compiled module.
    executor = relay.backend.Executor("graph", {"link-params": True})
    mod = mod.with_attr("executor", executor)
    with tempfile.TemporaryDirectory() as work_dir:
        database = ms.relay_integration.tune_relay(
            mod=mod,
            target=target,
            params=params,
            work_dir=work_dir,
            max_trials_global=4,
            strategy="replay-trace",
            module_equality=module_equality,
        )
        lib = ms.relay_integration.compile_relay(database, mod, target, params)
    dev = tvm.device(target, 0)
    runtime = tvm.contrib.graph_executor.GraphModule(lib["default"](dev))
    runtime.set_input("data", data_np)
    runtime.run()
    out = runtime.get_output(0).numpy()
    ref = (
        relay.create_executor("graph", mod=mod, device=tvm.cpu(0), target="llvm")
        .evaluate()(*[data_np, weight1_np, weight2_np])
        .numpy()
    )
    np.testing.assert_allclose(ref, out, atol=1e-3)
@pytest.mark.skip("Integration tests")
@pytest.mark.xfail(raises=tvm.error.TVMError)
def test_disabled_pass_param():
    """
    Check 'disabled_pass' parameter in tune_relay. Should throw exception in
    case of correct work.
    """
    data_shape = [1, 4, 16, 16]
    weight_shape = [32, 4, 2, 2]
    data = relay.var("data", shape=data_shape, dtype="uint8")
    weight = relay.var("weight", shape=weight_shape, dtype="int8")
    op = relay.qnn.op.conv2d(
        data,
        weight,
        input_zero_point=relay.const(0),
        kernel_zero_point=relay.const(0),
        input_scale=relay.const(0.7),
        kernel_scale=relay.const(0.3),
        kernel_size=[2, 2],
        channels=32,
    )
    mod = tvm.IRModule.from_expr(op)
    weight_np = np.random.randint(-10, 10, size=weight_shape).astype("int8")
    params = {"weight": weight_np}
    executor = relay.backend.Executor("graph", {"link-params": True})
    mod = mod.with_attr("executor", executor)
    with tempfile.TemporaryDirectory() as work_dir:
        # Disabling qnn.Legalize is expected to make tuning raise TVMError
        # (captured by the xfail marker above).  The returned database is
        # irrelevant here, so the previously unused assignment was dropped.
        ms.relay_integration.tune_relay(
            mod=mod,
            target="llvm --num-cores=4",
            params=params,
            work_dir=work_dir,
            max_trials_global=4,
            strategy="replay-trace",
            disabled_pass=["qnn.Legalize"],
        )
        # Test failed, otherwise we can not reach this point.
        pytest.fail("'disabled_pass' argument does not work")
@pytest.mark.skip("Integration tests")
def test_rewrite_layout_link_params_1x1_conv2d():
    """Same link-params layout-rewrite check as above, but for a 1x1 conv2d
    (a pointwise convolution exercises a different layout-rewrite path).
    """
    I, O, H, W = 32, 16, 256, 256
    kH = kW = 1
    strides = (1, 1)
    padding = (0, 0)
    data_shape = (1, H, W, I)
    w_shape = (kH, kW, I, O)
    data = relay.var("data", shape=data_shape, dtype="float32")
    weight = relay.var("weight", shape=w_shape, dtype="float32")
    conv = relay.nn.conv2d(
        data=data,
        weight=weight,
        kernel_size=(kH, kW),
        channels=O,
        padding=padding,
        strides=strides,
        data_layout="NHWC",
        kernel_layout="HWIO",
        out_dtype="float32",
    )
    mod = tvm.IRModule.from_expr(conv)
    weight_np = np.random.randn(*w_shape).astype("float32")
    params = {"weight": weight_np}
    data_np = np.random.randn(*data_shape).astype("float32")
    # Reference computed before link-params is attached.
    ref = (
        relay.create_executor("graph", mod=mod, device=tvm.cpu(0), target="llvm")
        .evaluate()(*[data_np, weight_np])
        .numpy()
    )
    link_params = True
    target = "llvm --num-cores=4"
    executor = relay.backend.Executor("graph", {"link-params": link_params})
    mod = mod.with_attr("executor", executor)
    with tempfile.TemporaryDirectory() as work_dir:
        database = ms.relay_integration.tune_relay(
            mod=mod,
            target=target,
            params=params,
            work_dir=work_dir,
            max_trials_global=8,
            strategy="replay-trace",
        )
        lib = ms.relay_integration.compile_relay(
            database=database,
            mod=mod,
            target=target,
            params=params,
        )
    dev = tvm.device(target, 0)
    runtime = tvm.contrib.graph_executor.GraphModule(lib["default"](dev))
    runtime.set_input("data", data_np)
    runtime.run()
    out = runtime.get_output(0).numpy()
    np.testing.assert_allclose(ref, out, rtol=1e-4, atol=1e-4)
# Allow running this test file directly as a script.
if __name__ == "__main__":
    tvm.testing.main()
| 36,817 | 35.707876 | 347 | py |
tvm | tvm-main/tests/python/unittest/test_meta_schedule_cost_model.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-docstring
import os
import re
import shutil
import tempfile
import unittest
from functools import partial
from typing import List
import numpy as np
import tvm
import tvm.testing
from tvm.meta_schedule.cost_model import PyCostModel, RandomModel, XGBModel
from tvm.meta_schedule.cost_model.xgb_model import PackSum, _get_custom_call_back
from tvm.meta_schedule.feature_extractor import RandomFeatureExtractor
from tvm.meta_schedule.runner import RunnerResult
from tvm.meta_schedule.search_strategy import MeasureCandidate
from tvm.meta_schedule.tune_context import TuneContext
from tvm.meta_schedule.utils import derived_object
from tvm.script import tir as T
from tvm.tir.schedule.schedule import Schedule
# pylint: disable=invalid-name,no-member,line-too-long,too-many-nested-blocks,missing-docstring
@tvm.script.ir_module
class Matmul:
    # A 1024x1024x1024 fp32 matmul in TVMScript, used as the dummy workload
    # wrapped in MeasureCandidate throughout the cost-model tests below.
    @T.prim_func
    def main(a: T.handle, b: T.handle, c: T.handle) -> None:  # pylint: disable=no-self-argument
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        A = T.match_buffer(a, (1024, 1024), "float32")
        B = T.match_buffer(b, (1024, 1024), "float32")
        C = T.match_buffer(c, (1024, 1024), "float32")
        for i, j, k in T.grid(1024, 1024, 1024):
            with T.block("matmul"):
                vi, vj, vk = T.axis.remap("SSR", [i, j, k])
                with T.init():
                    C[vi, vj] = 0.0
                C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vk, vj]
@tvm.script.ir_module
class FullModule:
    # A trivial "fill with ones" kernel: no loops worth scheduling, so feature
    # extraction yields no features (see test_meta_schedule_xgb_model_no_feature).
    @T.prim_func
    def main(T_full: T.Buffer((T.int64(2), T.int64(3)), "float32")):
        T.func_attr({"global_symbol": "main", "tir.noalias": True})
        for ax0, ax1 in T.grid(T.int64(2), T.int64(3)):
            with T.block("T_full"):
                v_ax0, v_ax1 = T.axis.remap("SS", [ax0, ax1])
                T.reads()
                T.writes(T_full[v_ax0, v_ax1])
                T_full[v_ax0, v_ax1] = T.float32(1)
# pylint: enable=invalid-name,no-member,line-too-long,too-many-nested-blocks,disable=unused-argument
def test_meta_schedule_cost_model():
    """A PyCostModel subclass created via @derived_object must be usable through
    the CostModel interface: no-op save/load/update, and predict returning one
    score per candidate.
    """
    @derived_object
    class FancyCostModel(PyCostModel):
        def load(self, path: str) -> None:
            pass
        def save(self, path: str) -> None:
            pass
        def update(
            self,
            context: TuneContext,
            candidates: List[MeasureCandidate],
            results: List[RunnerResult],
        ) -> None:
            pass
        def predict(self, context: TuneContext, candidates: List[MeasureCandidate]) -> np.ndarray:
            # One random score per (assumed 10) candidates.
            return np.random.rand(10)
    model = FancyCostModel()
    model.save("fancy_test_location")
    model.load("fancy_test_location")
    model.update(TuneContext(), [], [])
    results = model.predict(
        TuneContext(), [MeasureCandidate(Schedule(mod=Matmul), []) for _ in range(10)]
    )
    assert results.shape == (10,)
def test_meta_schedule_cost_model_as_string():
    """str() of a derived PyCostModel renders as
    ``meta_schedule.<ClassName>(0x<address>)``.
    """
    @derived_object
    class NotSoFancyCostModel(PyCostModel):
        def load(self, path: str) -> None:
            pass
        def save(self, path: str) -> None:
            pass
        def update(
            self,
            context: TuneContext,
            candidates: List[MeasureCandidate],
            results: List[RunnerResult],
        ) -> None:
            pass
        def predict(self, context: TuneContext, candidates: List[MeasureCandidate]) -> np.ndarray:
            return np.random.rand(10)
    cost_model = NotSoFancyCostModel()
    pattern = re.compile(r"meta_schedule.NotSoFancyCostModel\(0x[a-f|0-9]*\)")
    assert pattern.match(str(cost_model))
def test_meta_schedule_random_model():
    """A fresh RandomModel yields one in-range score per candidate."""
    model = RandomModel()
    model.update(TuneContext(), [], [])
    candidates = [MeasureCandidate(Schedule(Matmul), []) for _ in range(10)]
    scores = model.predict(TuneContext(), candidates)
    assert len(scores) == 10
    assert min(scores) >= 0
    assert max(scores) <= model.max_range
def test_meta_schedule_random_model_reseed():
    """Two RandomModels constructed with the same seed predict identically."""
    first = RandomModel(seed=100)
    res_a = first.predict(
        TuneContext(), [MeasureCandidate(Schedule(Matmul), []) for _ in range(20)]
    )
    second = RandomModel(seed=100)
    res_b = second.predict(
        TuneContext(), [MeasureCandidate(Schedule(Matmul), []) for _ in range(20)]
    )
    assert (res_a == res_b).all()
def test_meta_schedule_random_model_reload():
    """save() then load() must restore the RNG state so that identical
    predictions are produced after the reload.

    Fixed: the original used ``tempfile.mkdtemp()`` with a manual
    ``shutil.rmtree`` at the end, leaking the temp directory whenever any
    intermediate call raised; ``TemporaryDirectory`` guarantees cleanup.
    """
    model = RandomModel(seed=25973)
    # Advance the RNG state so the saved state is not the initial one.
    model.predict(TuneContext(), [MeasureCandidate(Schedule(Matmul), []) for _ in range(30)])
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = os.path.join(tmp_dir, "test_output_meta_schedule_random_model.npy")
        model.save(path)
        res1 = model.predict(
            TuneContext(), [MeasureCandidate(Schedule(Matmul), []) for _ in range(70)]
        )
        model.load(path)
        res2 = model.predict(
            TuneContext(), [MeasureCandidate(Schedule(Matmul), []) for _ in range(70)]
        )
    assert (res1 == res2).all()
def _dummy_candidate():
    """Fabricate a measure candidate: an untouched schedule of Matmul."""
    sch = Schedule(Matmul)
    return MeasureCandidate(sch, [])
def _dummy_result(num_samples: int = 4, max_run_sec: int = 10):
    """Fabricate a RunnerResult carrying ``num_samples`` positive run times."""
    run_secs = np.random.rand(num_samples) * max_run_sec + 1e-6
    return RunnerResult(list(run_secs), None)
def test_meta_schedule_xgb_model():
    """Smoke-test XGBModel: one update with random features, then predict."""
    model = XGBModel(extractor=RandomFeatureExtractor(), num_warmup_samples=2)
    num_updates = 10
    num_predictions = 100
    model.update(
        TuneContext(),
        [_dummy_candidate() for _ in range(num_updates)],
        [_dummy_result() for _ in range(num_updates)],
    )
    model.predict(TuneContext(), [_dummy_candidate() for _ in range(num_predictions)])
def test_meta_schedule_xgb_model_no_feature():
    """XGBModel must tolerate a workload (FullModule) whose feature extraction
    yields no feature vectors, both in update() and predict().
    """
    model = XGBModel(num_warmup_samples=0)
    tune_ctx = TuneContext(
        FullModule,
        target="llvm --num-cores 16",
        space_generator="post-order-apply",
        search_strategy="evolutionary",
    )
    candidate = MeasureCandidate(Schedule(FullModule), [])
    model.update(tune_ctx, [candidate], [_dummy_result()])
    model.predict(tune_ctx, [candidate])
def test_meta_schedule_xgb_model_reload():
    """save() then load() of a trained XGBModel must restore both the booster
    and the cached training data, so that predictions and grouped data match.
    """
    extractor = RandomFeatureExtractor()
    model = XGBModel(extractor=extractor, num_warmup_samples=10)
    update_sample_count = 20
    predict_sample_count = 30
    model.update(
        TuneContext(),
        [_dummy_candidate() for i in range(update_sample_count)],
        [_dummy_result() for i in range(update_sample_count)],
    )
    model.predict(TuneContext(), [_dummy_candidate() for i in range(predict_sample_count)])
    with tempfile.NamedTemporaryFile() as path:
        # Backup
        random_state = model.extractor.random_state  # save feature extractor's random state
        old_data = model.data
        old_data_size = model.data_size
        model.save(path.name)
        res1 = model.predict(
            TuneContext(), [_dummy_candidate() for i in range(predict_sample_count)]
        )
        # Load: restore the extractor state so the same random features are
        # produced again, then reload the model from disk.
        model.extractor.random_state = random_state  # load feature extractor's random state
        model.load(path.name)
        new_data = model.data
        new_data_size = model.data_size
        res2 = model.predict(
            TuneContext(), [_dummy_candidate() for i in range(predict_sample_count)]
        )
        assert (res1 == res2).all()
        assert old_data_size == new_data_size
        assert len(old_data) == len(new_data)
        # Each feature group must round-trip: same hash key, costs, features.
        for (k1, g1), (k2, g2) in zip(  # pylint: disable=invalid-name
            old_data.items(), new_data.items()
        ):
            assert k1 == k2
            assert k1 == g1.group_hash
            assert k2 == g2.group_hash
            assert (g1.costs == g2.costs).all()
            assert len(g1.features) == len(g2.features)
            for f1, f2 in zip(g1.features, g2.features):  # pylint: disable=invalid-name
                assert (f1 == f2).all()
def test_meta_schedule_xgb_model_reupdate():
    """Repeated update() calls must retrain the booster incrementally without
    error, and predict() must still work afterwards.

    The original body repeated the identical update call three times verbatim;
    a loop states the intent (multiple incremental retrainings) explicitly.
    """
    extractor = RandomFeatureExtractor()
    model = XGBModel(extractor=extractor, num_warmup_samples=2)
    update_sample_count = 60
    predict_sample_count = 100
    for _ in range(3):
        model.update(
            TuneContext(),
            [_dummy_candidate() for _ in range(update_sample_count)],
            [_dummy_result() for _ in range(update_sample_count)],
        )
    model.predict(TuneContext(), [_dummy_candidate() for _ in range(predict_sample_count)])
def xgb_version_check():
    """Return True when the installed XGBoost is version 1.6.0 or newer."""
    # pylint: disable=import-outside-toplevel
    import xgboost as xgb
    from packaging import version
    # pylint: enable=import-outside-toplevel
    installed = version.parse(xgb.__version__)
    return installed >= version.parse("1.6.0")
@unittest.skipIf(xgb_version_check(), "test not supported for xgboost version after 1.6.0")
def test_meta_schedule_xgb_model_callback_as_function():
    """Training with the deprecated function-style xgboost callback must yield
    the same predictions as the TrainingCallback-API training done by
    XGBModel.update() (pre-1.6.0 xgboost only).
    """
    # pylint: disable=import-outside-toplevel
    from itertools import chain as itertools_chain
    import xgboost as xgb
    # pylint: enable=import-outside-toplevel
    extractor = RandomFeatureExtractor()
    model = XGBModel(extractor=extractor, num_warmup_samples=10)
    update_sample_count = 20
    predict_sample_count = 30
    model.update(
        TuneContext(),
        [_dummy_candidate() for i in range(update_sample_count)],
        [_dummy_result() for i in range(update_sample_count)],
    )
    model.predict(TuneContext(), [_dummy_candidate() for i in range(predict_sample_count)])
    with tempfile.NamedTemporaryFile() as path:
        # Backup and train on new TrainingCallBack api
        random_state = model.extractor.random_state  # save feature extractor's random state
        model.save(path.name)
        old_booster = model.booster
        xs = [  # pylint: disable=invalid-name
            x.numpy().astype("float32")
            for x in extractor.extract_from(
                TuneContext(),
                [_dummy_candidate() for i in range(predict_sample_count)],
            )
        ]
        d_test = PackSum(xs=xs, ys=None)
        pred1 = old_booster.predict(d_test.dmatrix)
        # Load and train on deprecated TrainingCallBack api
        model.extractor.random_state = random_state  # load feature extractor's random state
        model.load(path.name)
        # Rebuild the training pack from the model's cached feature groups;
        # labels are normalized min-cost ratios.
        d_train = PackSum(
            xs=list(itertools_chain.from_iterable([g.features for g in model.data.values()])),
            ys=np.concatenate(
                [g.min_cost / g.costs for g in model.data.values()],
                axis=0,
            ),
        )
        def obj(ys_pred: np.ndarray, d_train1: "xgb.DMatrix"):  # type: ignore # pylint: disable = unused-argument
            return d_train.obj_square_error(ys_pred)
        def rmse(ys_pred: np.ndarray, d_train1: "xgb.DMatrix"):  # type: ignore # pylint: disable = unused-argument
            return d_train.rmse(ys_pred)
        def avg_peak_score(ys_pred: np.ndarray, d_train1: "xgb.DMatrix"):  # type: ignore # pylint: disable = unused-argument
            return d_train.average_peak_score(ys_pred, model.average_peak_n)
        new_booster = xgb.train(
            model.config.to_dict(),
            d_train.dmatrix,
            num_boost_round=10000,
            obj=obj,
            callbacks=[
                partial(
                    _get_custom_call_back(
                        early_stopping_rounds=model.early_stopping_rounds,
                        verbose_eval=model.verbose_eval,
                        fevals=[rmse, avg_peak_score],
                        evals=[(d_train.dmatrix, "tr")],
                        cvfolds=None,
                    )
                )
            ],
        )
        # Re-extract the same features (extractor state was restored above) and
        # compare the two boosters' predictions.
        xs = [  # pylint: disable=invalid-name
            x.numpy().astype("float32")
            for x in extractor.extract_from(
                TuneContext(),
                [_dummy_candidate() for i in range(predict_sample_count)],
            )
        ]
        d_test = PackSum(xs=xs, ys=None)
        pred2 = new_booster.predict(d_test.dmatrix)
        assert np.allclose(pred1, pred2, rtol=1e-3, atol=1e-3)
# Allow running this test file directly as a script.
if __name__ == "__main__":
    tvm.testing.main()
| 13,087 | 35.355556 | 125 | py |
tvm | tvm-main/tests/python/unittest/test_custom_datatypes.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Unit tests for the Bring Your Own Datatype framework.
TODO(@gussmith23 @hypercubestart) link to documentation"""
import numpy as np
import pytest
import tvm
import tvm.topi.testing
import tvm.testing
from tvm import relay
from tvm.relay.testing.layers import batch_norm_infer
from tvm.target.datatype import (
create_lower_func,
create_min_lower_func,
lower_call_pure_extern,
lower_ite,
register,
register_min_func,
register_op,
)
from tvm.tir.op import call_pure_extern
from tvm.script import tir as T
# note: we can't use relay.testing models because params are randomly initialized,
# which lead the output to have the same values
# get mobilenet model from Gluon CV
# because: https://discuss.tvm.apache.org/t/mobilenet-intermediate-values-are-0/7812
def get_mobilenet():
    """Load pretrained mobilenet0.25 from the MXNet Gluon model zoo as Relay."""
    from mxnet.gluon.model_zoo.vision import get_model
    net = get_model("mobilenet0.25", pretrained=True)
    # Single NCHW image input.
    return relay.frontend.from_mxnet(net, {"data": (1, 3, 224, 224)})
# use real image instead of random data for end-to-end model training
# or else output would all be around the same value
def get_cat_image(dimensions):
    """Download the standard cat test image, resized to ``dimensions``, as a
    float32 batch-of-one in BGR channel order with NCHW layout."""
    from PIL import Image
    from tvm.contrib.download import download_testdata
    url = "https://gist.githubusercontent.com/zhreshold/bcda4716699ac97ea44f791c24310193/raw/fa7ef0e9c9a5daea686d6473a62aacd1a5885849/cat.png"
    path = download_testdata(url, "cat.png", module="data")
    image = Image.open(path).resize(dimensions)
    # CoreML's standard model image format is BGR: flip RGB -> BGR, then
    # HWC -> CHW with a leading batch axis.
    bgr = np.array(image)[:, :, ::-1]
    nchw = np.transpose(bgr, (2, 0, 1))[np.newaxis, :]
    return np.asarray(nchw, dtype="float32")
# Seed NumPy's global RNG so the randomly generated inputs below are
# reproducible, guaranteeing stable tests.
np.random.seed(0)
def convert_ndarray(dst_dtype, array):
    """Cast ``array`` to ``dst_dtype`` by evaluating a one-op Relay cast."""
    inp = relay.var("x", shape=array.shape, dtype=str(array.dtype))
    cast_fn = relay.Function([inp], inp.astype(dst_dtype))
    # Vectorization is not implemented for custom datatypes.
    with tvm.transform.PassContext(config={"tir.disable_vectorize": True}):
        return relay.create_executor("graph").evaluate(cast_fn)(array)
def change_dtype(src, dst, module, params):
    """Convert constants and functions in ``module`` from ``src`` type to
    ``dst`` type; return the changed module and the converted params.
    """
    converted = relay.frontend.ChangeDatatype(src, dst)(module)
    converted = relay.transform.InferType()(converted)
    new_params = {name: convert_ndarray(dst, value) for name, value in params.items()}
    return converted, new_params
def compare(module, input, src_dtype, dst_dtype, rtol, atol, params=None, target="llvm"):
    """Evaluate ``module`` in ``src_dtype``, then again after converting it to
    ``dst_dtype``, and assert the single outputs agree within rtol/atol.

    ``input`` is a tuple of ndarrays fed positionally; ``params`` maps
    parameter names to ndarrays.  Fixed: the ``params`` default was a shared
    mutable dict (``params={}``), a classic Python pitfall — callers passing a
    dict are unaffected.
    """
    if params is None:
        params = {}
    module = relay.transform.InferType()(module)
    module = relay.transform.SimplifyInference()(module)
    correct = relay.create_executor("graph", mod=module).evaluate()(*input, **params)
    module, converted_params = change_dtype(src_dtype, dst_dtype, module, params)
    # converts all inputs to dst_dtype
    x_converted = [convert_ndarray(dst_dtype, arr) for arr in input]
    # Vectorization is not implemented with custom datatypes
    with tvm.transform.PassContext(config={"tir.disable_vectorize": True}):
        maybe_correct = relay.create_executor("graph", mod=module, target=target).evaluate()(
            *x_converted, **converted_params
        )
    # currently this only works for comparing single output
    maybe_correct_converted = convert_ndarray(src_dtype, maybe_correct)
    np.testing.assert_allclose(
        maybe_correct_converted.numpy(), correct.numpy(), rtol=rtol, atol=atol
    )
def setup_myfloat():
    """Set up tests for myfloat (a custom datatype that under the hood is float)
    Currently, this registers some custom datatypes using the Bring Your
    Own Datatypes framework.  Registration is process-global, so the inner
    helper is wrapped in a try/except that tolerates re-registration from a
    previous test run.
    """
    def _setup_myfloat_inner():
        # To use datatype operations in an external library, you should first load
        # the library containing the datatype implementation:
        # CDLL("libposit.so", RTLD_GLOBAL)
        # In this case, the datatype library we are using is built right into TVM,
        # so we do not need to explicitly load any library.
        # You can pick a code for your datatype arbitrarily, as long as it is
        # greater than 128 and has not already been chosen.
        register("myfloat", 131)
        register_op(
            create_lower_func({(32, 32): "FloatToCustom32"}), "Cast", "llvm", "float", "myfloat"
        )
        register_op(
            create_lower_func({(32, 32): "Custom32ToFloat"}), "Cast", "llvm", "myfloat", "float"
        )
        register_op(create_lower_func({32: "Custom32Add"}), "Add", "llvm", "myfloat")
        register_op(
            create_lower_func(
                {
                    32: "Custom32Sub",
                }
            ),
            "Sub",
            "llvm",
            "myfloat",
        )
        register_op(create_lower_func({32: "Custom32Mul"}), "Mul", "llvm", "myfloat")
        register_op(
            create_lower_func(
                {
                    32: "FloatToCustom32",
                }
            ),
            "FloatImm",
            "llvm",
            "myfloat",
        )
        register_op(
            create_lower_func(
                {
                    32: "Custom32Div",
                }
            ),
            "Div",
            "llvm",
            "myfloat",
        )
        register_op(create_lower_func({32: "Custom32Max"}), "Max", "llvm", "myfloat")
        register_op(
            create_lower_func({32: "Custom32Sqrt"}),
            "Call",
            "llvm",
            "myfloat",
            intrinsic_name="tir.sqrt",
        )
        register_op(
            create_lower_func({32: "Custom32Exp"}),
            "Call",
            "llvm",
            "myfloat",
            intrinsic_name="tir.exp",
        )
        register_op(
            create_lower_func({32: "Custom32Log"}),
            "Call",
            "llvm",
            "myfloat",
            intrinsic_name="tir.log",
        )
        register_op(
            create_lower_func({32: "Custom32Sigmoid"}),
            "Call",
            "llvm",
            "myfloat",
            intrinsic_name="tir.sigmoid",
        )
        register_op(
            create_lower_func({32: "Custom32Tanh"}),
            "Call",
            "llvm",
            "myfloat",
            intrinsic_name="tir.tanh",
        )
        register_op(lower_ite, "Call", "llvm", "myfloat", intrinsic_name="tir.if_then_else")
        register_op(
            lower_call_pure_extern, "Call", "llvm", "myfloat", intrinsic_name="tir.call_pure_extern"
        )
        register_min_func(create_min_lower_func({32: "MinCustom32"}, "myfloat"), "myfloat")
    try:
        _setup_myfloat_inner()
    except tvm._ffi.base.TVMError as e:
        # Ignore this specific error which can happen if another test
        # that uses "myfloat" has already run.
        if "float is already registered" not in str(e):
            raise e
def setup_posites2():
    """Set up tests for posites2
    Currently, this registers some custom datatypes using the Bring Your
    Own Datatypes framework.  Each register_op maps a TIR op (Cast, Add, ...)
    to the external lowering function for the 8/16/32-bit posit widths.
    """
    # To use datatype operations in an external library, you should first load
    # the library containing the datatype implementation:
    # CDLL("libposit.so", RTLD_GLOBAL)
    # In this case, the datatype library we are using is built right into TVM,
    # so we do not need to explicitly load any library.
    # You can pick a code for your datatype arbitrarily, as long as it is
    # greater than 128 and has not already been chosen.
    register("posites2", 132)
    register_op(
        create_lower_func(
            {
                (32, 32): "FloatToPosit32es2",
                (32, 16): "FloatToPosit16es2",
                (32, 8): "FloatToPosit8es2",
            }
        ),
        "Cast",
        "llvm",
        "float",
        "posites2",
    )
    register_op(
        create_lower_func(
            {
                (32, 32): "Posit32es2ToFloat",
                (16, 32): "Posit16es2ToFloat",
                (8, 32): "Posit8es2ToFloat",
            }
        ),
        "Cast",
        "llvm",
        "posites2",
        "float",
    )
    register_op(
        create_lower_func({32: "Posit32es2Add", 16: "Posit16es2Add", 8: "Posit8es2Add"}),
        "Add",
        "llvm",
        "posites2",
    )
    register_op(
        create_lower_func({32: "Posit32es2Sub", 16: "Posit16es2Sub", 8: "Posit8es2Sub"}),
        "Sub",
        "llvm",
        "posites2",
    )
    register_op(
        create_lower_func(
            {32: "FloatToPosit32es2", 16: "FloatToPosit16es2", 8: "FloatToPosit8es2"}
        ),
        "FloatImm",
        "llvm",
        "posites2",
    )
    register_op(
        create_lower_func({32: "Posit32es2Mul", 16: "Posit16es2Mul", 8: "Posit8es2Mul"}),
        "Mul",
        "llvm",
        "posites2",
    )
    register_op(
        create_lower_func({32: "Posit32es2Div", 16: "Posit16es2Div", 8: "Posit8es2Div"}),
        "Div",
        "llvm",
        "posites2",
    )
    register_op(
        create_lower_func({32: "Posit32es2Max", 16: "Posit16es2Max", 8: "Posit8es2Max"}),
        "Max",
        "llvm",
        "posites2",
    )
    register_op(
        create_lower_func({32: "Posit32es2Sqrt", 16: "Posit16es2Sqrt", 8: "Posit8es2Sqrt"}),
        "Call",
        "llvm",
        "posites2",
        intrinsic_name="tir.sqrt",
    )
    register_op(lower_ite, "Call", "llvm", "posites2", intrinsic_name="tir.if_then_else")
    register_op(
        lower_call_pure_extern, "Call", "llvm", "posites2", intrinsic_name="tir.call_pure_extern"
    )
    register_op(
        create_lower_func({32: "Posit32es2Exp", 16: "Posit16es2Exp", 8: "Posit8es2Exp"}),
        "Call",
        "llvm",
        "posites2",
        intrinsic_name="tir.exp",
    )
    register_op(
        create_lower_func({32: "Posit32es2Log", 16: "Posit16es2Log", 8: "Posit8es2Log"}),
        "Call",
        "llvm",
        "posites2",
        intrinsic_name="tir.log",
    )
    register_op(
        create_lower_func(
            {32: "Posit32es2Sigmoid", 16: "Posit16es2Sigmoid", 8: "Posit8es2Sigmoid"}
        ),
        "Call",
        "llvm",
        "posites2",
        intrinsic_name="tir.sigmoid",
    )
    register_op(
        create_lower_func({32: "Posit32es2Tanh", 16: "Posit16es2Tanh", 8: "Posit8es2Tanh"}),
        "Call",
        "llvm",
        "posites2",
        intrinsic_name="tir.tanh",
    )
    register_min_func(
        create_min_lower_func(
            {32: "MinPosit32es2", 16: "MinPosit16es2", 8: "MinPosit8es2"}, "posites2"
        ),
        "posites2",
    )
def run_ops(src_dtype, dst_dtype, rtol=1e-7, atol=1e-7):
    """Run a suite of ops in ``src_dtype`` and ``dst_dtype`` and compare results."""
    # shape used by unary ops and as the LHS of binary ops
    unary_shape = (5, 10, 5)
    # RHS shape of binary ops (broadcast against the LHS)
    rhs_shape = (5,)

    def check_unary_op(op, src_dtype, dst_dtype, shape):
        # Build op(x) over `shape` and compare execution in both datatypes.
        tensor_type = relay.TensorType(shape, src_dtype)
        arg = relay.var("x", tensor_type)
        func = relay.Function([arg], op(arg))
        arg_data = np.random.rand(*shape).astype(tensor_type.dtype)
        compare(tvm.IRModule.from_expr(func), (arg_data,), src_dtype, dst_dtype, rtol, atol)

    # element-wise / activation / flatten ops over 3-D data
    for op in (
        relay.nn.softmax,
        tvm.relay.log,
        tvm.relay.exp,
        tvm.relay.sqrt,
        tvm.relay.rsqrt,
        tvm.relay.sigmoid,
        tvm.relay.tanh,
        relay.nn.relu,
        relay.nn.batch_flatten,
    ):
        check_unary_op(op, src_dtype, dst_dtype, unary_shape)

    # pooling ops require 4-D (NCHW) data
    pool_shape = (3, 32, 32, 32)
    for op in (relay.nn.max_pool2d, relay.nn.avg_pool2d, relay.nn.global_avg_pool2d):
        check_unary_op(op, src_dtype, dst_dtype, pool_shape)

    def check_binary_op(opfunc, src_dtype, dst_dtype):
        # Build opfunc(x, y) with broadcasting and compare both datatypes.
        lhs_type = relay.TensorType(unary_shape, src_dtype)
        rhs_type = relay.TensorType(rhs_shape, src_dtype)
        lhs = relay.var("x", lhs_type)
        rhs = relay.var("y", rhs_type)
        func = relay.Function([lhs, rhs], opfunc(lhs, rhs))
        lhs_data = np.random.rand(*unary_shape).astype(lhs_type.dtype)
        rhs_data = np.random.rand(*rhs_shape).astype(rhs_type.dtype)
        compare(
            tvm.IRModule.from_expr(func), (lhs_data, rhs_data), src_dtype, dst_dtype, rtol, atol
        )

    for op in (relay.add, relay.subtract, relay.divide, relay.multiply):
        check_binary_op(op, src_dtype, dst_dtype)
    # tvm_if_then_else is not unit tested here: Relay.IfNode is not lowered to
    # that intrinsic, so it is covered by the mobile_net model test instead.
def run_model(get_workload, input, src_dtype, dst_dtype, rtol=1e-4, atol=1e-4):
    """Compare a whole workload executed in ``src_dtype`` versus ``dst_dtype``."""
    workload_mod, workload_params = get_workload()
    # The caller supplies ``input``: random data would cluster all outputs
    # around the same value and weaken the comparison.
    compare(workload_mod, input, src_dtype, dst_dtype, rtol, atol, workload_params)
def run_conv2d(src_dtype, dst_dtype, rtol=1e-7, atol=1e-4):
    """Run several conv2d configurations in both datatypes and compare results."""

    def run_test_conv2d(
        src_dtype,
        dst_dtype,
        scale,
        dshape,
        kshape,
        padding=(1, 1),
        groups=1,
        dilation=(1, 1),
        **attrs,
    ):
        # Build conv2d(x, w) with the given layout parameters and compare
        # src_dtype vs dst_dtype execution on uniform random data in
        # [-scale, scale).
        x = relay.var("x", shape=dshape, dtype=src_dtype)
        w = relay.var("w", shape=kshape, dtype=src_dtype)
        y = relay.nn.conv2d(x, w, padding=padding, dilation=dilation, groups=groups, **attrs)
        module = tvm.IRModule.from_expr(relay.Function([x, w], y))
        data = np.random.uniform(-scale, scale, size=dshape).astype(src_dtype)
        kernel = np.random.uniform(-scale, scale, size=kshape).astype(src_dtype)
        compare(module, (data, kernel), src_dtype, dst_dtype, rtol, atol)
    # depthwise conv2d
    dshape = (1, 32, 18, 18)
    kshape = (32, 1, 3, 3)
    run_test_conv2d(
        src_dtype,
        dst_dtype,
        1,
        dshape,
        kshape,
        padding=(1, 1),
        channels=32,
        groups=32,
        kernel_size=(3, 3),
    )
    # CUDA is disabled for 'direct' schedule:
    # https://github.com/dmlc/tvm/pull/3070#issuecomment-486597553
    # group conv2d
    dshape = (1, 32, 18, 18)
    kshape = (32, 4, 3, 3)
    run_test_conv2d(
        src_dtype,
        dst_dtype,
        1,
        dshape,
        kshape,
        padding=(1, 1),
        channels=32,
        groups=8,
        kernel_size=(3, 3),
    )
    # also group conv2d
    dshape = (1, 32, 18, 18)
    kshape = (64, 1, 3, 3)
    run_test_conv2d(
        src_dtype,
        dst_dtype,
        1,
        dshape,
        kshape,
        padding=(1, 1),
        channels=64,
        groups=32,
        kernel_size=(3, 3),
    )
    # normal conv2d
    dshape = (1, 3, 224, 224)
    kshape = (10, 3, 3, 3)
    run_test_conv2d(
        src_dtype, dst_dtype, 1, dshape, kshape, padding=(1, 1), channels=10, kernel_size=(3, 3)
    )
    # dilated conv2d
    dshape = (1, 3, 18, 18)
    kshape = (10, 3, 3, 3)
    run_test_conv2d(
        src_dtype,
        dst_dtype,
        1,
        dshape,
        kshape,
        padding=(1, 1),
        channels=10,
        kernel_size=(3, 3),
        dilation=(3, 3),
    )
def run_batchnorm(src_dtype, dst_dtype, rtol=1e-6, atol=1e-6):
    """Compare batch_norm inference executed in ``src_dtype`` vs ``dst_dtype``."""
    data_shape = (3, 32, 32)
    data_type = relay.TensorType(data_shape, src_dtype)
    data_var = relay.var("x", data_type)
    bn = batch_norm_infer(data=data_var, epsilon=2e-5, scale=False, name="bn_x")
    func = relay.Function(relay.analysis.free_vars(bn), bn)
    module = tvm.IRModule.from_expr(func)
    data = np.random.rand(*data_shape).astype(data_type.dtype)
    # The four auxiliary batch-norm inputs are all zero-initialized.
    zeros = np.zeros((32), "float32")
    compare(
        module,
        (data, zeros, zeros, zeros, zeros),
        src_dtype,
        dst_dtype,
        rtol,
        atol,
    )
def test_myfloat():
    """End-to-end checks for the 'myfloat' custom datatype (ops, conv2d, batchnorm)."""
    setup_myfloat()
    for runner in (run_ops, run_conv2d, run_batchnorm):
        runner("float32", "custom[myfloat]32", rtol=1e-6, atol=1e-6)
    # mxnet python package not available
    # run_model(get_mobilenet, (get_cat_image((224, 224)), ),
    #           'float32',
    #           'custom[myfloat]32')
class TestMyfloatLowering(tvm.testing.CompareBeforeAfter):
    """LowerCustomDatatypes rewrites 'myfloat' ops into uint32 + extern calls.

    ``expected`` shows the lowered form: storage becomes uint32 and the
    arithmetic is wrapped in Custom32ToFloat/FloatToCustom32 extern calls.
    """

    # Runs as a side effect at class-creation (import) time so the "myfloat"
    # dtype is registered before the TVMScript below is parsed.
    setup_myfloat()
    transform = tvm.tir.transform.LowerCustomDatatypes()

    def before(self):
        """TIR that loads, increments and stores values of the custom dtype."""
        dtype = "custom[myfloat]32"

        @T.prim_func
        def func(A_data: T.handle(dtype)):
            T.func_attr({"target": T.target("llvm")})
            A = T.Buffer(16, dtype=dtype, data=A_data)
            B_data = T.allocate([16], dtype=dtype)
            B = T.Buffer(16, dtype=dtype, data=B_data)
            for i in range(16):
                B[i] = A[i] + 1.0

        return func

    def expected(self):
        """The same computation after lowering: uint32 storage + extern conversions."""
        dtype = "custom[myfloat]32"

        @T.prim_func
        def func(A_data: T.handle(dtype)):
            T.func_attr({"target": T.target("llvm")})
            A_uint32 = T.Buffer(16, "uint32", data=A_data)
            B_data = T.allocate([16], dtype="uint32")
            B_uint32 = T.Buffer(16, "uint32", data=B_data)
            for i in range(16):
                B_uint32[i] = T.call_pure_extern(
                    "uint32",
                    "FloatToCustom32",
                    T.call_pure_extern("float32", "Custom32ToFloat", A_uint32[i]) + T.float32(1),
                )

        return func
class TestMyfloatLoweringDeclBuffer(tvm.testing.CompareBeforeAfter):
    """Like TestMyfloatLowering, but using DeclBuffer"""

    # Registers the "myfloat" dtype at class-creation (import) time so the
    # TVMScript below parses.
    setup_myfloat()
    transform = tvm.tir.transform.LowerCustomDatatypes()

    def before(self):
        """Custom-dtype load/increment/store written with T.decl_buffer."""
        dtype = "custom[myfloat]32"

        @T.prim_func
        def func(A_data: T.handle(dtype)):
            T.func_attr({"target": T.target("llvm")})
            A = T.decl_buffer(16, dtype=dtype, data=A_data)
            B = T.decl_buffer(16, dtype=dtype)
            for i in range(16):
                B[i] = A[i] + 1.0

        return func

    def expected(self):
        """Lowered form: uint32 decl_buffers plus extern conversion calls."""
        dtype = "custom[myfloat]32"

        @T.prim_func
        def func(A_data: T.handle(dtype)):
            T.func_attr({"target": T.target("llvm")})
            A_uint32 = T.decl_buffer(16, "uint32", data=A_data)
            B_uint32 = T.decl_buffer(16, dtype="uint32")
            for i in range(16):
                B_uint32[i] = T.call_pure_extern(
                    "uint32",
                    "FloatToCustom32",
                    T.call_pure_extern("float32", "Custom32ToFloat", A_uint32[i]) + T.float32(1),
                )

        return func
def _has_posit():
    """Return True when TVM was built with the USE_BYODT_POSIT flag enabled."""
    build_flags = tvm.support.libinfo()
    return build_flags["USE_BYODT_POSIT"] == "ON"
@pytest.mark.skipif(not _has_posit(), reason="compiled with USE_BYODT_POSIT flag OFF")
def test_posites2():
    """Exercise ops, conv2d and batch_norm for 8/16/32-bit posits (es=2).

    Narrow posits are coarse, so tolerances loosen as the width shrinks;
    the 32-bit conv2d case relies on run_conv2d's defaults.
    """
    setup_posites2()
    tolerance = {
        run_ops: {
            8: {"rtol": 1, "atol": 1},
            16: {"rtol": 0.01, "atol": 1},
            32: {"rtol": 1e-6, "atol": 1e-6},
        },
        run_conv2d: {
            8: {"rtol": 1, "atol": 1},
            16: {"rtol": 0.01, "atol": 1},
            32: {},
        },
        run_batchnorm: {
            8: {"rtol": 1, "atol": 1},
            16: {"rtol": 0.01, "atol": 1},
            32: {"rtol": 1e-4, "atol": 1e-4},
        },
    }
    for runner in (run_ops, run_conv2d, run_batchnorm):
        for bits in (8, 16, 32):
            runner("float32", "custom[posites2]%d" % bits, **tolerance[runner][bits])
    # Expected posit8 might be faster, but it's not.
    # run_model(get_mobilenet, (get_cat_image((224, 224)), ), 'float32', 'custom[posit8]8')
    # run_model(get_mobilenet, (get_cat_image((224, 224)), ), 'float32', 'custom[posit32]32')
    # run_model(get_inception, (get_cat_image((229, 229)), ), 'float32', 'custom[posit32]32')
    # run_model(get_resnet, (get_cat_image((224, 224)), ), 'float32', 'custom[posit32]32')
    # can't run cifar-10 sizes because dimensions
    # don't match pretrained weights
    # runs on the order of minutes...
    # run_model(get_inception, (get_cat_image((229, 229)), ),
    #           'float32',
    #           'custom[posites2]32')
    # run_model(get_resnet, (get_cat_image((224, 224)), ),
    #           'float32',
    #           'custom[posites2]32')
if __name__ == "__main__":
    # Dispatch through pytest so skipif markers (e.g. the posit check) are honored.
    tvm.testing.main()
| 21,123 | 31.006061 | 142 | py |
tvm | tvm-main/tests/python/unittest/test_runtime_module_based_interface.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import os
from tvm import relay, runtime
from tvm.relay import testing
import tvm
from tvm.contrib import graph_executor
from tvm.contrib.debugger import debug_executor
from tvm.contrib.cuda_graph import cuda_graph_executor
import tvm.testing
def input_shape(mod):
    """Return the shape of the first argument of ``mod["main"]`` as a list of ints."""
    first_arg_type = mod["main"].checked_type.arg_types[0]
    return list(map(int, first_arg_type.shape))
def verify(data):
    """Run the synthetic workload on ``data`` through the legacy build flow.

    Returns the output array, or None when the llvm backend is unavailable.
    """
    if not tvm.runtime.enabled("llvm"):
        print("Skip because llvm is not enabled")
        return
    mod, params = relay.testing.synthetic.get_workload()
    with relay.build_config(opt_level=3):
        graph_json, built_lib, built_params = relay.build_module.build(
            mod, "llvm", params=params
        )
    executor = graph_executor.create(graph_json, built_lib, tvm.cpu())
    executor.set_input("data", data)
    executor.set_input(**built_params)
    executor.run()
    return executor.get_output(0).numpy()
@tvm.testing.requires_llvm
def test_legacy_compatibility():
    """The legacy graph_executor.create(graph, lib, dev) flow still works."""
    mod, params = relay.testing.synthetic.get_workload()
    with relay.build_config(opt_level=3):
        graph_json, built_lib, built_params = relay.build_module.build(
            mod, "llvm", params=params
        )
    data = np.random.uniform(-1, 1, size=input_shape(mod)).astype("float32")
    executor = graph_executor.create(graph_json, built_lib, tvm.cpu())
    executor.set_input("data", data)
    executor.set_input(**built_params)
    executor.run()
    tvm.testing.assert_allclose(executor.get_output(0).numpy(), verify(data), atol=1e-5)
@tvm.testing.requires_llvm
def test_cpu():
    """Run the factory module on CPU via the raw PackedFunc API and the wrapper."""
    mod, params = relay.testing.synthetic.get_workload()
    with relay.build_config(opt_level=3):
        factory = relay.build_module.build(mod, "llvm", params=params)
    data = np.random.uniform(-1, 1, size=input_shape(mod)).astype("float32")
    dev = tvm.cpu()

    # raw PackedFunc API
    gmod = factory["default"](dev)
    gmod["set_input"]("data", tvm.nd.array(data))
    gmod["run"]()
    out = gmod["get_output"](0).numpy()
    tvm.testing.assert_allclose(out, verify(data), atol=1e-5)

    # GraphModule wrapper
    wrapped = graph_executor.GraphModule(factory["default"](dev))
    wrapped.set_input("data", data)
    wrapped.run()
    tvm.testing.assert_allclose(wrapped.get_output(0).numpy(), verify(data), atol=1e-5)
@tvm.testing.requires_llvm
def test_cpu_get_graph_json():
    """An exported and reloaded module can return its graph JSON string."""
    mod, params = relay.testing.synthetic.get_workload()
    with relay.build_config(opt_level=3):
        complied_graph_lib = relay.build_module.build(mod, "llvm", params=params)
    from tvm.contrib import utils

    temp = utils.tempdir()
    file_name = "deploy_lib.so"
    path_lib = temp.relpath(file_name)
    complied_graph_lib.export_library(path_lib)
    loaded_lib = tvm.runtime.load_module(path_lib)
    # Renamed from ``json`` so the local does not shadow the stdlib module name.
    graph_json = loaded_lib["get_graph_json"]()
    assert isinstance(graph_json, str)
    # The fused softmax+add node of the synthetic workload must appear in it.
    assert graph_json.find("tvmgen_default_fused_nn_softmax_add") > -1
@tvm.testing.requires_llvm
def test_cpu_get_graph_params_run():
    """Parameters recovered via get_graph_params can be fed back into a run."""
    mod, params = relay.testing.synthetic.get_workload()
    with tvm.transform.PassContext(opt_level=3):
        factory = relay.build_module.build(mod, "llvm", params=params)
    data = np.random.uniform(-1, 1, size=input_shape(mod)).astype("float32")
    dev = tvm.cpu()
    from tvm.contrib import utils

    temp = utils.tempdir()
    path_lib = temp.relpath("deploy_lib.so")
    factory.export_library(path_lib)
    loaded_lib = tvm.runtime.load_module(path_lib)
    loaded_params = loaded_lib["get_graph_params"]()

    executor = graph_executor.GraphModule(loaded_lib["default"](dev))
    executor.set_input(key="data", value=data, **loaded_params)
    executor.run()
    tvm.testing.assert_allclose(executor.get_output(0).numpy(), verify(data), atol=1e-5)
@tvm.testing.requires_llvm
def test_cpu_get_graph_params_compare():
    """Weights returned by get_graph_params match the weights used at build time."""
    # Create sample net
    from tvm.relay.testing.init import create_workload, Constant

    inp_shape = (1, 3, 24, 12)
    dtype = "float32"
    data = relay.var("data", shape=inp_shape, dtype=dtype)
    conv_shape = [inp_shape[1], inp_shape[1], 3, 3]
    conv = relay.nn.conv2d(
        data,
        relay.var("conv_weight", shape=conv_shape, dtype=dtype),
        padding=1,
        kernel_size=3,
    )
    func = relay.Function(relay.analysis.free_vars(conv), conv)
    mod, params = create_workload(func, initializer=Constant())

    with tvm.transform.PassContext(opt_level=3):
        factory = relay.build_module.build(mod, "llvm", params=params)
    from tvm.contrib import utils

    temp = utils.tempdir()
    path_lib = temp.relpath("deploy_lib.so")
    factory.export_library(path_lib)
    loaded_params = tvm.runtime.load_module(path_lib)["get_graph_params"]()
    # NOTE(review): the compiled parameter "p0" appears to be a packed layout of
    # the original weight; the [0][0] indexing recovers it — confirm if layouts change.
    tvm.testing.assert_allclose(
        params["conv_weight"].numpy(), loaded_params["p0"].numpy()[0][0], atol=1e-5
    )
@tvm.testing.requires_cuda
@tvm.testing.requires_gpu
def test_gpu():
    """Run the factory module on CUDA via the raw PackedFunc API and the wrapper."""
    mod, params = relay.testing.synthetic.get_workload()
    with relay.build_config(opt_level=3):
        factory = relay.build_module.build(mod, "cuda", params=params)
    data = np.random.uniform(-1, 1, size=input_shape(mod)).astype("float32")
    dev = tvm.cuda()

    # raw PackedFunc API
    gmod = factory["default"](dev)
    gmod["set_input"]("data", tvm.nd.array(data))
    gmod["run"]()
    out = gmod["get_output"](0).numpy()
    tvm.testing.assert_allclose(out, verify(data), atol=1e-5)

    # GraphModule wrapper
    wrapped = graph_executor.GraphModule(factory["default"](dev))
    wrapped.set_input("data", data)
    wrapped.run()
    tvm.testing.assert_allclose(wrapped.get_output(0).numpy(), verify(data), atol=1e-5)
@tvm.testing.uses_gpu
def test_mod_export():
    """Export the factory library (.so and .tar), reload it locally and over RPC,
    and check results against ``verify`` on both CPU and (when enabled) CUDA."""

    # Export to disk, reload locally, run on CPU via raw API and GraphModule.
    def verify_cpu_export(obj_format):
        mod, params = relay.testing.synthetic.get_workload()
        with relay.build_config(opt_level=3):
            complied_graph_lib = relay.build_module.build(mod, "llvm", params=params)
        from tvm.contrib import utils
        temp = utils.tempdir()
        if obj_format == ".so":
            file_name = "deploy_lib.so"
        else:
            assert obj_format == ".tar"
            file_name = "deploy_lib.tar"
        path_lib = temp.relpath(file_name)
        complied_graph_lib.export_library(path_lib)
        # run the setup in a separate function, so the load_lib
        # can get destructed right away
        # test the robustness wrt to parent module destruction
        def setup_gmod():
            loaded_lib = tvm.runtime.load_module(path_lib)
            dev = tvm.cpu(0)
            return loaded_lib["default"](dev)
        gmod = setup_gmod()
        # raw api
        set_input = gmod["set_input"]
        run = gmod["run"]
        get_output = gmod["get_output"]
        data = np.random.uniform(-1, 1, size=input_shape(mod)).astype("float32")
        set_input("data", tvm.nd.array(data))
        run()
        out = get_output(0).numpy()
        tvm.testing.assert_allclose(out, verify(data), atol=1e-5)
        # graph executor wrapper
        gmod = graph_executor.GraphModule(setup_gmod())
        gmod.set_input("data", data)
        gmod.run()
        out = gmod.get_output(0).numpy()
        tvm.testing.assert_allclose(out, verify(data), atol=1e-5)

    # Same as verify_cpu_export but built for and run on the CUDA device.
    def verify_gpu_export(obj_format):
        if not tvm.testing.device_enabled("cuda"):
            print("Skip because cuda is not enabled")
            return
        mod, params = relay.testing.synthetic.get_workload()
        with relay.build_config(opt_level=3):
            complied_graph_lib = relay.build_module.build(mod, "cuda", params=params)
        from tvm.contrib import utils
        temp = utils.tempdir()
        if obj_format == ".so":
            file_name = "deploy_lib.so"
        else:
            assert obj_format == ".tar"
            file_name = "deploy_lib.tar"
        path_lib = temp.relpath(file_name)
        complied_graph_lib.export_library(path_lib)
        data = np.random.uniform(-1, 1, size=input_shape(mod)).astype("float32")
        # run the setup in a separate function, so the load_lib
        # can get destructed right away
        # test the robustness wrt to parent module destruction
        def setup_gmod():
            loaded_lib = tvm.runtime.load_module(path_lib)
            dev = tvm.cuda()
            return loaded_lib["default"](dev)
        gmod = setup_gmod()
        # raw api
        set_input = gmod["set_input"]
        run = gmod["run"]
        get_output = gmod["get_output"]
        set_input("data", tvm.nd.array(data))
        run()
        out = get_output(0).numpy()
        tvm.testing.assert_allclose(out, verify(data), atol=1e-5)
        # graph executor wrapper
        gmod = graph_executor.GraphModule(setup_gmod())
        gmod.set_input("data", data)
        gmod.run()
        out = gmod.get_output(0).numpy()
        tvm.testing.assert_allclose(out, verify(data), atol=1e-5)

    # Upload the exported library through a local RPC session and run on CPU.
    @tvm.testing.requires_llvm
    def verify_rpc_cpu_export(obj_format):
        mod, params = relay.testing.synthetic.get_workload()
        with relay.build_config(opt_level=3):
            complied_graph_lib = relay.build_module.build(mod, "llvm", params=params)
        from tvm.contrib import utils
        temp = utils.tempdir()
        if obj_format == ".so":
            file_name = "deploy_lib.so"
        else:
            assert obj_format == ".tar"
            file_name = "deploy_lib.tar"
        path_lib = temp.relpath(file_name)
        complied_graph_lib.export_library(path_lib)
        from tvm import rpc
        remote = rpc.LocalSession()
        remote.upload(path_lib)
        loaded_lib = remote.load_module(path_lib)
        data = np.random.uniform(-1, 1, size=input_shape(mod)).astype("float32")
        dev = remote.cpu()
        # raw api
        gmod = loaded_lib["default"](dev)
        set_input = gmod["set_input"]
        run = gmod["run"]
        get_output = gmod["get_output"]
        set_input("data", tvm.nd.array(data, device=dev))
        run()
        out = get_output(0).numpy()
        tvm.testing.assert_allclose(out, verify(data), atol=1e-5)
        # graph executor wrapper
        gmod = graph_executor.GraphModule(loaded_lib["default"](dev))
        gmod.set_input("data", data)
        gmod.run()
        out = gmod.get_output(0).numpy()
        tvm.testing.assert_allclose(out, verify(data), atol=1e-5)

    # Upload through a freshly started RPC server and run on the remote CUDA device.
    def verify_rpc_gpu_export(obj_format):
        if not tvm.testing.device_enabled("cuda"):
            print("Skip because cuda is not enabled")
            return
        mod, params = relay.testing.synthetic.get_workload()
        with relay.build_config(opt_level=3):
            complied_graph_lib = relay.build_module.build(mod, "cuda", params=params)
        from tvm.contrib import utils
        temp = utils.tempdir()
        if obj_format == ".so":
            file_name = "deploy_lib.so"
        else:
            assert obj_format == ".tar"
            file_name = "deploy_lib.tar"
        path_lib = temp.relpath(file_name)
        complied_graph_lib.export_library(path_lib)
        from tvm import rpc
        def check_remote(server):
            remote = rpc.connect(server.host, server.port)
            remote.upload(path_lib)
            loaded_lib = remote.load_module(path_lib)
            data = np.random.uniform(-1, 1, size=input_shape(mod)).astype("float32")
            dev = remote.cuda()
            # raw api
            gmod = loaded_lib["default"](dev)
            set_input = gmod["set_input"]
            run = gmod["run"]
            get_output = gmod["get_output"]
            set_input("data", tvm.nd.array(data, device=dev))
            run()
            out = get_output(0).numpy()
            tvm.testing.assert_allclose(out, verify(data), atol=1e-5)
            # graph executor wrapper
            gmod = graph_executor.GraphModule(loaded_lib["default"](dev))
            gmod.set_input("data", data)
            gmod.run()
            out = gmod.get_output(0).numpy()
            tvm.testing.assert_allclose(out, verify(data), atol=1e-5)
        check_remote(rpc.Server("127.0.0.1"))

    # Every scenario is exercised for both shared-object and tar exports.
    for obj_format in [".so", ".tar"]:
        verify_cpu_export(obj_format)
        verify_gpu_export(obj_format)
        verify_rpc_cpu_export(obj_format)
        verify_rpc_gpu_export(obj_format)
@tvm.testing.requires_llvm
@tvm.testing.uses_gpu
def test_remove_package_params():
    """Export the library with parameters stripped (via "remove_params"),
    save them separately with save_param_dict, and reload both locally and
    over RPC on CPU/CUDA, feeding the params back through "load_params"."""

    # Strip params, export, reload locally and run on CPU (raw API + wrapper).
    def verify_cpu_remove_package_params(obj_format):
        mod, params = relay.testing.synthetic.get_workload()
        with relay.build_config(opt_level=3):
            complied_graph_lib = relay.build_module.build(mod, "llvm", params=params)
        from tvm.contrib import utils
        temp = utils.tempdir()
        if obj_format == ".so":
            file_name = "deploy_lib.so"
        else:
            assert obj_format == ".tar"
            file_name = "deploy_lib.tar"
        path_lib = temp.relpath(file_name)
        complied_graph_lib_no_params = complied_graph_lib["remove_params"]()
        complied_graph_lib_no_params.export_library(path_lib)
        with open(temp.relpath("deploy_param.params"), "wb") as fo:
            fo.write(runtime.save_param_dict(complied_graph_lib.get_params()))
        loaded_lib = tvm.runtime.load_module(path_lib)
        data = np.random.uniform(-1, 1, size=input_shape(mod)).astype("float32")
        dev = tvm.cpu(0)
        # raw api
        gmod = loaded_lib["default"](dev)
        set_input = gmod["set_input"]
        run = gmod["run"]
        get_output = gmod["get_output"]
        load_params = gmod["load_params"]
        loaded_params = bytearray(open(temp.relpath("deploy_param.params"), "rb").read())
        set_input("data", tvm.nd.array(data))
        load_params(loaded_params)
        run()
        out = get_output(0).numpy()
        tvm.testing.assert_allclose(out, verify(data), atol=1e-5)
        # graph executor wrapper
        gmod = graph_executor.GraphModule(loaded_lib["default"](dev))
        loaded_params = bytearray(open(temp.relpath("deploy_param.params"), "rb").read())
        gmod.set_input("data", data)
        gmod.load_params(loaded_params)
        gmod.run()
        out = gmod.get_output(0).numpy()
        tvm.testing.assert_allclose(out, verify(data), atol=1e-5)

    # Same flow built for and run on the CUDA device.
    def verify_gpu_remove_package_params(obj_format):
        if not tvm.testing.device_enabled("cuda"):
            print("Skip because cuda is not enabled")
            return
        mod, params = relay.testing.synthetic.get_workload()
        with relay.build_config(opt_level=3):
            complied_graph_lib = relay.build_module.build(mod, "cuda", params=params)
        from tvm.contrib import utils
        temp = utils.tempdir()
        if obj_format == ".so":
            file_name = "deploy_lib.so"
        else:
            assert obj_format == ".tar"
            file_name = "deploy_lib.tar"
        path_lib = temp.relpath(file_name)
        complied_graph_lib_no_params = complied_graph_lib["remove_params"]()
        complied_graph_lib_no_params.export_library(path_lib)
        with open(temp.relpath("deploy_param.params"), "wb") as fo:
            fo.write(runtime.save_param_dict(complied_graph_lib.get_params()))
        loaded_lib = tvm.runtime.load_module(path_lib)
        data = np.random.uniform(-1, 1, size=input_shape(mod)).astype("float32")
        dev = tvm.cuda(0)
        # raw api
        gmod = loaded_lib["default"](dev)
        set_input = gmod["set_input"]
        run = gmod["run"]
        get_output = gmod["get_output"]
        load_params = gmod["load_params"]
        loaded_params = bytearray(open(temp.relpath("deploy_param.params"), "rb").read())
        set_input("data", tvm.nd.array(data))
        load_params(loaded_params)
        run()
        out = get_output(0).numpy()
        tvm.testing.assert_allclose(out, verify(data), atol=1e-5)
        # graph executor wrapper
        gmod = graph_executor.GraphModule(loaded_lib["default"](dev))
        loaded_params = bytearray(open(temp.relpath("deploy_param.params"), "rb").read())
        gmod.set_input("data", data)
        gmod.load_params(loaded_params)
        gmod.run()
        out = gmod.get_output(0).numpy()
        tvm.testing.assert_allclose(out, verify(data), atol=1e-5)

    # Stripped-params flow through a local RPC session on CPU.
    @tvm.testing.requires_llvm
    def verify_rpc_cpu_remove_package_params(obj_format):
        mod, params = relay.testing.synthetic.get_workload()
        with relay.build_config(opt_level=3):
            complied_graph_lib = relay.build_module.build(mod, "llvm", params=params)
        from tvm.contrib import utils
        temp = utils.tempdir()
        if obj_format == ".so":
            file_name = "deploy_lib.so"
        else:
            assert obj_format == ".tar"
            file_name = "deploy_lib.tar"
        path_lib = temp.relpath(file_name)
        complied_graph_lib_no_params = complied_graph_lib["remove_params"]()
        complied_graph_lib_no_params.export_library(path_lib)
        path_params = temp.relpath("deploy_param.params")
        with open(path_params, "wb") as fo:
            fo.write(runtime.save_param_dict(complied_graph_lib.get_params()))
        from tvm import rpc
        remote = rpc.LocalSession()
        remote.upload(path_lib)
        loaded_lib = remote.load_module(path_lib)
        data = np.random.uniform(-1, 1, size=input_shape(mod)).astype("float32")
        dev = remote.cpu()
        # raw api
        gmod = loaded_lib["default"](dev)
        set_input = gmod["set_input"]
        run = gmod["run"]
        get_output = gmod["get_output"]
        load_params = gmod["load_params"]
        loaded_params = bytearray(open(path_params, "rb").read())
        set_input("data", tvm.nd.array(data, device=dev))
        load_params(loaded_params)
        run()
        out = get_output(0).numpy()
        tvm.testing.assert_allclose(out, verify(data), atol=1e-5)
        # graph executor wrapper
        gmod = graph_executor.GraphModule(loaded_lib["default"](dev))
        loaded_params = bytearray(open(path_params, "rb").read())
        gmod.set_input("data", data)
        gmod.load_params(loaded_params)
        gmod.run()
        out = gmod.get_output(0).numpy()
        tvm.testing.assert_allclose(out, verify(data), atol=1e-5)

    # Stripped-params flow through a local RPC session on CUDA.
    def verify_rpc_gpu_remove_package_params(obj_format):
        if not tvm.testing.device_enabled("cuda"):
            print("Skip because cuda is not enabled")
            return
        mod, params = relay.testing.synthetic.get_workload()
        with relay.build_config(opt_level=3):
            complied_graph_lib = relay.build_module.build(mod, "cuda", params=params)
        from tvm.contrib import utils
        temp = utils.tempdir()
        if obj_format == ".so":
            file_name = "deploy_lib.so"
        else:
            assert obj_format == ".tar"
            file_name = "deploy_lib.tar"
        path_lib = temp.relpath(file_name)
        complied_graph_lib_no_params = complied_graph_lib["remove_params"]()
        complied_graph_lib_no_params.export_library(path_lib)
        path_params = temp.relpath("deploy_param.params")
        with open(path_params, "wb") as fo:
            fo.write(runtime.save_param_dict(complied_graph_lib.get_params()))
        from tvm import rpc
        remote = rpc.LocalSession()
        remote.upload(path_lib)
        loaded_lib = remote.load_module(path_lib)
        data = np.random.uniform(-1, 1, size=input_shape(mod)).astype("float32")
        dev = remote.cuda()
        # raw api
        gmod = loaded_lib["default"](dev)
        set_input = gmod["set_input"]
        run = gmod["run"]
        get_output = gmod["get_output"]
        load_params = gmod["load_params"]
        loaded_params = bytearray(open(path_params, "rb").read())
        set_input("data", tvm.nd.array(data, device=dev))
        load_params(loaded_params)
        run()
        out = get_output(0).numpy()
        tvm.testing.assert_allclose(out, verify(data), atol=1e-5)
        # graph executor wrapper
        gmod = graph_executor.GraphModule(loaded_lib["default"](dev))
        loaded_params = bytearray(open(path_params, "rb").read())
        gmod.set_input("data", data)
        gmod.load_params(loaded_params)
        gmod.run()
        out = gmod.get_output(0).numpy()
        tvm.testing.assert_allclose(out, verify(data), atol=1e-5)

    # Every scenario is exercised for both shared-object and tar exports.
    for obj_format in [".so", ".tar"]:
        verify_cpu_remove_package_params(obj_format)
        verify_gpu_remove_package_params(obj_format)
        verify_rpc_cpu_remove_package_params(obj_format)
        verify_rpc_gpu_remove_package_params(obj_format)
@tvm.testing.requires_llvm
def test_debug_graph_executor():
    """Run the module through the debug graph executor (raw API and wrapper)."""
    mod, params = relay.testing.synthetic.get_workload()
    with relay.build_config(opt_level=3):
        compiled_graph_lib = relay.build_module.build(mod, "llvm", params=params)
    data = np.random.uniform(-1, 1, size=input_shape(mod)).astype("float32")
    # raw api
    dev = tvm.cpu()
    try:
        gmod = compiled_graph_lib["debug_create"]("default", dev)
    # Narrowed from a bare ``except:`` which also swallowed SystemExit and
    # KeyboardInterrupt; the debug executor is an optional build component.
    except Exception:
        print("Skip because debug graph_executor not enabled")
        return
    set_input = gmod["set_input"]
    run = gmod["run"]
    get_output = gmod["get_output"]
    set_input("data", tvm.nd.array(data))
    run()
    out = get_output(0).numpy()
    tvm.testing.assert_allclose(out, verify(data), atol=1e-5)
    # debug graph executor wrapper
    debug_g_mod = debug_executor.GraphModuleDebug(
        compiled_graph_lib["debug_create"]("default", dev),
        [dev],
        compiled_graph_lib.get_graph_json(),
        None,
    )
    debug_g_mod.set_input("data", data)
    debug_g_mod.run()
    out = debug_g_mod.get_output(0).numpy()
    tvm.testing.assert_allclose(out, verify(data), atol=1e-5)
@tvm.testing.requires_cudagraph
def test_cuda_graph_executor():
    """Run the module through the CUDA-graph executor (raw API and wrapper)."""
    mod, params = relay.testing.synthetic.get_workload()
    with tvm.transform.PassContext(opt_level=3):
        compiled_graph_lib = relay.build_module.build(mod, "cuda", params=params)
    data = np.random.uniform(-1, 1, size=input_shape(mod)).astype("float32")
    dev = tvm.cuda()
    try:
        gmod = compiled_graph_lib["cuda_graph_create"](dev)
    # Narrowed from a bare ``except:`` which also swallowed SystemExit and
    # KeyboardInterrupt; cuda_graph support is an optional build component.
    except Exception:
        print("Skip because cuda_graph not enabled")
        return
    set_input = gmod["set_input"]
    run = gmod["run"]
    get_output = gmod["get_output"]
    set_input("data", tvm.nd.array(data))
    run()
    out = get_output(0).numpy()
    tvm.testing.assert_allclose(out, verify(data), atol=1e-5)
    # cuda graph executor wrapper
    cu_gmod = cuda_graph_executor.GraphModuleCudaGraph(gmod)
    cu_gmod.set_input("data", data)
    cu_gmod.run()
    out = cu_gmod.get_output(0).numpy()
    tvm.testing.assert_allclose(out, verify(data), atol=1e-5)
def test_multiple_imported_modules():
    """Functions from imported modules are reachable via query_imports=True."""

    def make_func(symbol):
        # A trivial PrimFunc named *symbol*: shifts buffer values one slot right,
        # adding 1 along the way.
        length = tvm.te.size_var("n")
        buf = tvm.tir.decl_buffer((length,), dtype="float32")
        idx = tvm.te.var("i")
        body = tvm.tir.For(
            idx,
            0,
            length - 1,
            tvm.tir.ForKind.SERIAL,
            tvm.tir.BufferStore(buf, tvm.tir.BufferLoad(buf, [idx]) + 1, [idx + 1]),
        )
        return tvm.tir.PrimFunc([buf], body).with_attr("global_symbol", symbol)

    def make_module(funcs):
        return tvm.driver.build(tvm.IRModule(funcs), target="llvm")

    module_main = make_module({"main": make_func("main")})
    for name in ("func_a", "func_b"):
        module_main.import_module(make_module({name: make_func(name)}))
    for name in ("func_a", "func_b"):
        module_main.get_function(name, query_imports=True)
def test_num_threads():
    """num_threads() must reflect TVM_NUM_THREADS/OMP_NUM_THREADS or the CPU count."""
    reported = tvm.runtime.num_threads()
    env_threads = os.getenv("TVM_NUM_THREADS")
    omp_env_threads = os.getenv("OMP_NUM_THREADS")
    if env_threads is not None:
        # os.getenv returns a string; the previous ``reported == env_threads``
        # compared int to str and was always False when the variable was set.
        assert reported == int(env_threads)
    elif omp_env_threads is not None:
        assert reported == int(omp_env_threads)
    else:
        hardware_threads = os.cpu_count()
        # Depending on the build, TVM may count physical cores (half the
        # hardware threads with SMT) or all hardware threads.
        assert reported == hardware_threads or reported == hardware_threads // 2
@tvm.testing.requires_llvm
@tvm.testing.requires_package("torch")
def test_graph_module_zero_copy():
    """set_input_zero_copy / set_output_zero_copy alias torch tensors via DLPack,
    so results land in the caller's buffers without copies.

    Statement order matters here: the zero-copy bindings must be installed
    before run() so the executor reads/writes the aliased torch storage.
    """
    mod = tvm.IRModule()
    params = {}
    dev = tvm.cpu()
    x = relay.var("x", shape=(1, 10))
    y = relay.var("y", shape=(1, 10))
    z = relay.add(x, y)
    mod["main"] = relay.Function([x, y], z)
    # need torch to do the from_dlpack trick
    import torch
    compiled_graph_lib = relay.build(mod, target="llvm", params=params)
    gm = graph_executor.GraphModule(compiled_graph_lib["default"](dev))
    x_data = torch.rand((1, 10))
    y_data = torch.rand((1, 10))
    z_data = torch.rand((1, 10))
    z_torch = x_data + y_data
    # zero copy run
    # Sanity check: z_data starts out different from the expected sum, so a
    # later match proves the executor actually wrote into z_data's storage.
    assert not np.allclose(z_data.numpy(), z_torch.numpy())
    gm.set_input_zero_copy("x", tvm.nd.from_dlpack(x_data))
    gm.set_input_zero_copy("y", tvm.nd.from_dlpack(y_data))
    gm.set_output_zero_copy(0, tvm.nd.from_dlpack(z_data))
    gm.run()
    tvm.testing.assert_allclose(z_data.numpy(), z_torch.numpy())
    # zero input copy with params
    gm = graph_executor.GraphModule(compiled_graph_lib["default"](dev))
    gm.set_input_zero_copy(x=tvm.nd.from_dlpack(x_data), y=tvm.nd.from_dlpack(y_data))
    gm.run()
    tvm.testing.assert_allclose(gm.get_output(0).numpy(), z_torch.numpy())
if __name__ == "__main__":
    # Dispatch through pytest instead of calling tests by hand: the manual
    # list omitted test_cuda_graph_executor, test_num_threads and
    # test_graph_module_zero_copy, and direct calls bypass the
    # @tvm.testing.requires_* markers (e.g. running GPU tests on CPU-only
    # machines). tvm.testing.main() honors the markers and finds every test.
    tvm.testing.main()
| 26,827 | 35.205128 | 89 | py |
tvm | tvm-main/tests/python/unittest/test_autotvm_xgboost_model.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import time
import multiprocessing
import numpy as np
import tvm
from tvm import te
from tvm import autotvm
from tvm.autotvm import MeasureInput, MeasureResult
from tvm.autotvm.tuner.xgboost_cost_model import XGBoostCostModel
from tvm.testing.autotvm import get_sample_task, get_sample_records
def test_fit():
    """Fit an XGBoost cost model from logged records, then transfer-learn from it."""
    task, _target = get_sample_task()

    def _make_model():
        # Both models must share feature/loss configuration for transfer to work.
        return XGBoostCostModel(task, feature_type="itervar", loss_type="reg")

    base_model = _make_model()
    base_model.fit_log(get_sample_records(n=500), plan_size=32)

    upper_model = _make_model()
    upper_model.load_basemodel(base_model)

    xs = np.arange(10)
    ys = np.arange(10)
    upper_model.fit(xs, ys, plan_size=32)

    # Feature lengths are not guaranteed to always be the same.
    for feature_len in (12, 8):
        upper_model.predict(np.ones(feature_len))
def fit_spawn():
    # Sanity-check that this process was launched with the "spawn" start
    # method before running the fit test.
    # NOTE(review): nothing in this file calls fit_spawn — test_fit_spawn
    # below uses test_fit as its Process target, so this assertion never
    # runs; confirm whether fit_spawn was the intended target.
    assert multiprocessing.get_start_method(False) == "spawn"
    test_fit()
def test_fit_spawn():
    """Run test_fit inside a subprocess created with the "spawn" method.

    Subprocesses inherit the spawn method of their parents.
    """
    spawn_ctx = multiprocessing.get_context("spawn")
    child = spawn_ctx.Process(target=test_fit)
    child.start()
    child.join()
def test_tuner():
    """load_history builds a base model only when enough seed records exist."""
    task, _target = get_sample_task()
    records = get_sample_records(n=10)

    # Exactly at the threshold: a base model should be loaded.
    tuner = autotvm.tuner.XGBTuner(task)
    tuner.load_history(records, min_seed_records=10)
    assert tuner.cost_model.base_model is not None

    # One record short of `min_seed_records`: no base model is loaded.
    tuner = autotvm.tuner.XGBTuner(task)
    tuner.load_history(records, min_seed_records=11)
    assert tuner.cost_model.base_model is None
def test_update():
    """tuner.update records every (input, result) pair and marks it visited."""
    task, _target = get_sample_task()
    tuner = autotvm.tuner.XGBTuner(task)

    n_records = 5
    records = get_sample_records(n=n_records)
    inputs, results = zip(*records)
    tuner.update(list(inputs), list(results))

    for collected in (tuner.xs, tuner.ys, tuner.visited):
        assert len(collected) == n_records
    assert all(x in tuner.visited for x in tuner.xs)
if __name__ == "__main__":
test_fit()
test_fit_spawn()
test_tuner()
test_update()
| 3,059 | 29.909091 | 81 | py |
tvm | tvm-main/tests/python/unittest/test_runtime_dlpack.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import tvm.testing
from tvm import te
import numpy as np
@tvm.testing.requires_package("torch")
def test_from_dlpack_shape_one():
# A test case for the issue https://github.com/pytorch/pytorch/issues/99803
import torch
from torch.utils.dlpack import to_dlpack
tgt = tvm.target.Target(target="llvm", host="llvm")
rows = 1
a = tvm.runtime.ndarray.from_dlpack(to_dlpack(torch.randn(rows, 16)))
A = te.placeholder((rows, 16), name="A")
B = te.placeholder((rows, 16), name="B")
C = te.compute(A.shape, lambda i, j: A[i, j] + B[i, j], name="C")
s = te.create_schedule(C.op)
fadd = tvm.build(s, [A, B, C], tgt)
dev = tvm.device(tgt.kind.name, 0)
b = tvm.nd.array(np.random.uniform(size=(rows, 16)).astype(B.dtype), dev)
c = tvm.nd.array(np.zeros((rows, 16), dtype=C.dtype), dev)
fadd(a, b, c)
tvm.testing.assert_allclose(c.numpy(), a.numpy() + b.numpy())
@tvm.testing.requires_package("torch")
def test_from_dlpack_strided():
import torch
from torch.utils.dlpack import to_dlpack
rows = 1
inp = torch.randn(rows, 16)
a = tvm.runtime.ndarray.from_dlpack(to_dlpack(inp))
view = a._create_view((2, 8))
np.testing.assert_equal(inp.numpy().reshape(2, 8), view.numpy())
if __name__ == "__main__":
tvm.testing.main()
| 2,111 | 31 | 79 | py |
tvm | tvm-main/tests/python/relay/test_op_level5.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Support level5 operator test cases.
"""
import math
import platform
import sys
import numpy as np
import pytest
import tvm
import tvm.testing
import tvm.topi.testing
from tvm import relay, te
from tvm.relay.testing import run_infer_type
# Executor backends exercised by every parametrized runtime test below.
executor_kind = tvm.testing.parameter("graph", "vm")
def test_resize1d_infer_type():
    """Type inference of image.resize1d for symbolic and static target widths."""
    n, c, w = te.size_var("n"), te.size_var("c"), te.size_var("w")

    # Symbolic target width propagates into the inferred type.
    data = relay.var("x", relay.TensorType((n, c, w), "int8"))
    tw = te.var("tw")
    inferred = run_infer_type(relay.image.resize1d(data, (tw,)))
    assert inferred.checked_type == relay.TensorType((n, c, tw), "int8")

    # Static target width with explicit layout/method/coordinate mode.
    data = relay.var("x", relay.TensorType((n, c, w), "int8"))
    call = relay.image.resize1d(data, (200,), None, "NCW", "linear", "align_corners")
    assert "size=" in call.astext()
    inferred = run_infer_type(call)
    assert inferred.checked_type == relay.TensorType((n, c, 200), "int8")
class TestResize1D:
    """Parametrized end-to-end checks for relay.image.resize1d against the
    topi Python reference implementation."""

    interpolate_method = tvm.testing.parameter("nearest_neighbor", "linear", "cubic")
    coord_trans = tvm.testing.parameter("asymmetric", "align_corners", "half_pixel")
    layout = tvm.testing.parameter("NWC", "NCW")

    # NOTE(review): ((2, 8, 17), 3) appears twice — possibly a copy/paste
    # duplicate; confirm whether a different shape/scale pair was intended.
    dshape, scale = tvm.testing.parameters(
        ((1, 4, 4), 2),
        ((2, 8, 17), 3),
        ((2, 8, 17), 3),
        ((3, 4, 5), 5),
    )

    def test_resize(
        self, target, dev, executor_kind, dshape, scale, interpolate_method, layout, coord_trans
    ):
        """Compile resize1d and compare its output with resize1d_python."""
        target_kind = tvm.target.Target(target).kind.name
        # One known-bad parameter combination on Vulkan is expected to fail.
        if (
            target_kind == "vulkan"
            and dshape == (3, 4, 5)
            and scale == 5
            and interpolate_method == "nearest_neighbor"
            and coord_trans == "align_corners"
        ):
            pytest.xfail("Known failing case for these parameters")

        # The spatial axis is index 1 for NWC and index 2 for NCW.
        if layout == "NWC":
            size = (dshape[1] * scale,)
        else:
            size = (dshape[2] * scale,)

        x_data = np.random.uniform(size=dshape).astype("float32")
        ref_res = tvm.topi.testing.resize1d_python(
            x_data, (scale,), layout, interpolate_method, coord_trans
        )
        x = relay.var("x", relay.TensorType(dshape, "float32"))
        z = relay.image.resize1d(
            x, size, None, layout, interpolate_method, coordinate_transformation_mode=coord_trans
        )
        assert "size=" in z.astext()
        zz = run_infer_type(z)
        assert zz.checked_type == relay.TensorType(ref_res.shape, "float32")
        func = relay.Function([x], z)

        op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
            x_data
        )
        tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-3, atol=1e-4)
def test_resize2d_infer_type():
    """Type inference of image.resize2d for symbolic and static target sizes."""
    n, c, h, w = te.size_var("n"), te.size_var("c"), te.size_var("h"), te.size_var("w")

    # Symbolic target height/width propagate into the inferred type.
    data = relay.var("x", relay.TensorType((n, c, h, w), "int8"))
    th, tw = te.var("th"), te.var("tw")
    inferred = run_infer_type(relay.image.resize2d(data, (th, tw)))
    assert inferred.checked_type == relay.TensorType((n, c, th, tw), "int8")

    # Static target size with explicit layout/method/coordinate mode.
    data = relay.var("x", relay.TensorType((n, c, h, w), "int8"))
    call = relay.image.resize2d(data, (100, 200), None, "NCHW", "linear", "align_corners")
    assert "size=" in call.astext()
    inferred = run_infer_type(call)
    assert inferred.checked_type == relay.TensorType((n, c, 100, 200), "int8")
class TestResize2D:
    """Parametrized end-to-end checks for relay.image.resize2d against the
    topi Python reference implementation."""

    interpolate_method = tvm.testing.parameter("nearest_neighbor", "linear", "cubic")
    coord_trans = tvm.testing.parameter("asymmetric", "align_corners", "half_pixel")
    layout = tvm.testing.parameter("NHWC", "NCHW")

    # NOTE(review): ((2, 8, 17, 20), 3) appears twice — possibly a copy/paste
    # duplicate; confirm whether a different shape/scale pair was intended.
    dshape, scale = tvm.testing.parameters(
        ((1, 4, 4, 4), 2),
        ((2, 8, 17, 20), 3),
        ((2, 8, 17, 20), 3),
        ((3, 4, 5, 6), 5),
    )

    def test_resize(
        self, target, dev, executor_kind, dshape, scale, interpolate_method, layout, coord_trans
    ):
        """Compile resize2d and compare its output with resize2d_python."""
        target_kind = tvm.target.Target(target).kind.name
        # One known-bad parameter combination on Vulkan is expected to fail.
        if (
            target_kind == "vulkan"
            and dshape == (3, 4, 5, 6)
            and scale == 5
            and interpolate_method == "nearest_neighbor"
            and coord_trans == "align_corners"
        ):
            pytest.xfail("Known failing case for these parameters")

        # Spatial axes are (1, 2) for NHWC and (2, 3) for NCHW.
        if layout == "NHWC":
            size = (dshape[1] * scale, dshape[2] * scale)
        else:
            size = (dshape[2] * scale, dshape[3] * scale)

        x_data = np.random.uniform(size=dshape).astype("float32")
        ref_res = tvm.topi.testing.resize2d_python(
            x_data, (scale, scale), layout, interpolate_method, coord_trans
        )
        x = relay.var("x", relay.TensorType(dshape, "float32"))
        z = relay.image.resize2d(
            x, size, None, layout, interpolate_method, coordinate_transformation_mode=coord_trans
        )
        assert "size=" in z.astext()
        zz = run_infer_type(z)
        assert zz.checked_type == relay.TensorType(ref_res.shape, "float32")
        func = relay.Function([x], z)

        op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
            x_data
        )
        tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-3, atol=1e-4)
def test_resize3d_infer_type():
    """Type inference of image.resize3d for symbolic and static target sizes."""
    n, c, d, h, w = (
        te.size_var("n"),
        te.size_var("c"),
        te.size_var("d"),
        te.size_var("h"),
        te.size_var("w"),
    )

    # Symbolic target depth/height/width propagate into the inferred type.
    data = relay.var("x", relay.TensorType((n, c, d, h, w), "int8"))
    td, th, tw = te.var("td"), te.var("th"), te.var("tw")
    inferred = run_infer_type(relay.image.resize3d(data, (td, th, tw)))
    assert inferred.checked_type == relay.TensorType((n, c, td, th, tw), "int8")

    # Static target size with explicit layout/method/coordinate mode.
    data = relay.var("x", relay.TensorType((n, c, d, h, w), "int8"))
    call = relay.image.resize3d(data, (10, 10, 20), None, "NCDHW", "linear", "align_corners")
    assert "size=" in call.astext()
    inferred = run_infer_type(call)
    assert inferred.checked_type == relay.TensorType((n, c, 10, 10, 20), "int8")
class TestResize3D:
    """Parametrized end-to-end checks for relay.image.resize3d against the
    topi Python reference implementation."""

    interpolate_method = tvm.testing.parameter("nearest_neighbor", "linear", "cubic")
    coord_trans = tvm.testing.parameter("asymmetric", "align_corners", "half_pixel")
    layout = tvm.testing.parameter("NDHWC", "NCDHW")

    dshape, scale = tvm.testing.parameters(
        ((1, 4, 4, 4, 4), 2),
    )

    def test_resize(
        self, target, dev, executor_kind, dshape, scale, interpolate_method, layout, coord_trans
    ):
        """Compile resize3d and compare its output with resize3d_python."""
        # Spatial axes are (1, 2, 3) for NDHWC and (2, 3, 4) for NCDHW.
        if layout == "NDHWC":
            size = (dshape[1] * scale, dshape[2] * scale, dshape[3] * scale)
        else:
            size = (dshape[2] * scale, dshape[3] * scale, dshape[4] * scale)

        x_data = np.random.uniform(size=dshape).astype("float32")
        ref_res = tvm.topi.testing.resize3d_python(
            x_data, (scale, scale, scale), layout, interpolate_method, coord_trans
        )
        x = relay.var("x", relay.TensorType(dshape, "float32"))
        z = relay.image.resize3d(x, size, None, layout, interpolate_method, coord_trans)
        assert "size=" in z.astext()
        zz = run_infer_type(z)
        assert zz.checked_type == relay.TensorType(ref_res.shape, "float32")
        func = relay.Function([x], z)

        op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
            x_data
        )
        tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-4, atol=1e-6)
class TestCropAndResize:
    """Parametrized end-to-end checks for relay.image.crop_and_resize against
    the topi Python reference implementation."""

    interpolate_method = tvm.testing.parameter("bilinear", "nearest_neighbor")
    layout = tvm.testing.parameter("NHWC", "NCHW")

    @pytest.mark.skipif(
        platform.machine() == "aarch64",
        reason="Currently failing on AArch64 - see https://github.com/apache/tvm/issues/10673",
    )
    def test_crop_and_resize(self, target, dev, executor_kind, layout, interpolate_method):
        """Compile crop_and_resize and compare with crop_and_resize_python."""
        target_kind = tvm.target.Target(target).kind.name
        # One known-bad parameter combination on Vulkan is expected to fail.
        if (
            target_kind == "vulkan"
            and layout == "NHWC"
            and interpolate_method == "nearest_neighbor"
        ):
            pytest.xfail("Known failing case for these parameters")

        extrapolation_value = 0.0

        # Fixed seed so boxes/box_indices are reproducible across runs.
        np.random.seed(0)
        eps = 1e-4

        # Each layout gets its own image shape, boxes, and crop size.
        if layout == "NHWC":
            img_shape = (10, 224, 224, 3)
            boxes = np.random.uniform(size=(2, 4)).astype("float32")
            box_indices = np.array([1, 0]).astype("int32")
            crop_size = np.array([20, 30]).astype("int32")
        elif layout == "NCHW":
            img_shape = (5, 3, 255, 255)
            boxes = np.random.uniform(size=(2, 4)).astype("float32")
            box_indices = np.array([0, 1]).astype("int32")
            crop_size = np.array([30, 30]).astype("int32")
        else:
            raise ValueError(f"Unknown layout: {layout}")

        image_data = np.random.uniform(size=img_shape).astype("float32")

        ref_res = tvm.topi.testing.crop_and_resize_python(
            image_data,
            boxes,
            box_indices,
            crop_size,
            layout,
            interpolate_method,
            extrapolation_value,
        )

        img = relay.var("img", relay.TensorType(img_shape, "float32"))
        bx = relay.var("bx", relay.TensorType(boxes.shape, "float32"))
        bx_idx = relay.var("bx_idx", relay.TensorType(box_indices.shape, "int32"))

        z = relay.image.crop_and_resize(
            img, bx, bx_idx, list(crop_size), layout, interpolate_method, extrapolation_value
        )
        zz = run_infer_type(z)
        assert zz.checked_type == relay.TensorType(ref_res.shape, "float32")
        func = relay.Function([img, bx, bx_idx], z)

        op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
            image_data, boxes, box_indices
        )
        tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-3, atol=1e-04)
@tvm.testing.uses_gpu
def test_multibox_prior(executor_kind):
    """End-to-end and type-only checks for relay.vision.multibox_prior.

    Fix: the symbolic-batch variable ``y`` was previously created but never
    used — the ``check_type_only=True`` calls passed ``x`` instead, so the
    dynamic-batch type-inference path was never exercised.  They now pass
    ``y``; the expected output type depends only on H/W (the output batch
    is fixed at 1), so the assertion is unchanged.
    """

    def get_ref_result(
        dshape, sizes=(1.0,), ratios=(1.0,), steps=(-1.0, -1.0), offsets=(0.5, 0.5), clip=True
    ):
        """NumPy reference for multibox_prior. Output shape is
        (1, H * W * (num_sizes + num_ratios - 1), 4)."""
        in_height = dshape[2]
        in_width = dshape[3]
        num_sizes = len(sizes)
        num_ratios = len(ratios)
        size_ratio_concat = sizes + ratios
        # Non-positive steps default to one cell per pixel.
        steps_h = steps[0] if steps[0] > 0 else 1.0 / in_height
        steps_w = steps[1] if steps[1] > 0 else 1.0 / in_width
        offset_h = offsets[0]
        offset_w = offsets[1]
        oshape = (1, in_height * in_width * (num_sizes + num_ratios - 1), 4)
        dtype = "float32"
        np_out = np.zeros(oshape).astype(dtype)
        for i in range(in_height):
            center_h = (i + offset_h) * steps_h
            for j in range(in_width):
                center_w = (j + offset_w) * steps_w
                for k in range(num_sizes + num_ratios - 1):
                    # First num_sizes anchors use sizes[k] with ratio 1;
                    # remaining anchors use sizes[0] with ratios[k+1-num_sizes].
                    w = (
                        size_ratio_concat[k] * in_height / in_width / 2.0
                        if k < num_sizes
                        else size_ratio_concat[0]
                        * in_height
                        / in_width
                        * math.sqrt(size_ratio_concat[k + 1])
                        / 2.0
                    )
                    h = (
                        size_ratio_concat[k] / 2.0
                        if k < num_sizes
                        else size_ratio_concat[0] / math.sqrt(size_ratio_concat[k + 1]) / 2.0
                    )
                    count = (
                        i * in_width * (num_sizes + num_ratios - 1)
                        + j * (num_sizes + num_ratios - 1)
                        + k
                    )
                    # Anchor stored as (xmin, ymin, xmax, ymax).
                    np_out[0][count][0] = center_w - w
                    np_out[0][count][1] = center_h - h
                    np_out[0][count][2] = center_w + w
                    np_out[0][count][3] = center_h + h
        if clip:
            np_out = np.clip(np_out, 0, 1)
        return np_out

    def verify_multibox_prior(
        x,
        dshape,
        ref_res,
        sizes=(1.0,),
        ratios=(1.0,),
        steps=(-1.0, -1.0),
        offsets=(0.5, 0.5),
        clip=True,
        check_size=False,
        check_type_only=False,
    ):
        """Check the inferred type and, unless check_type_only, the values."""
        z = relay.vision.multibox_prior(x, sizes, ratios, steps, offsets, clip)
        zz = run_infer_type(z)
        if check_size:
            assert "sizes=" in z.astext()
        # Output batch is always 1; only H/W and anchor count matter.
        assert zz.checked_type == relay.TensorType(
            (1, dshape[2] * dshape[3] * (len(sizes) + len(ratios) - 1), 4), "float32"
        )
        if check_type_only:
            return
        data = np.random.uniform(low=-1, high=1, size=dshape).astype("float32")
        func = relay.Function([x], z)
        func = run_infer_type(func)
        for target, dev in tvm.testing.enabled_targets():
            op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
                data
            )
            tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)

    sizes = (0.3, 1.5, 0.7)
    ratios = (1.3, 2.4)
    steps = (2.0, 1.5)
    offsets = (0.2, 0.3)
    dshape = (1, 3, 56, 56)
    ref_res = get_ref_result(dshape, sizes, ratios, steps, offsets)

    x = relay.var("x", relay.TensorType(dshape, "float32"))
    verify_multibox_prior(x, dshape, ref_res, sizes, ratios, steps, offsets, check_size=True)
    # Type-only check with a symbolic batch dimension.
    y = relay.var("y", relay.TensorType((te.size_var("n"), 3, 56, 56), "float32"))
    verify_multibox_prior(
        y, dshape, ref_res, sizes, ratios, steps, offsets, check_size=True, check_type_only=True
    )

    dshape = (1, 24, 32, 32)
    ref_res = get_ref_result(dshape, clip=False)
    x = relay.var("x", relay.TensorType(dshape, "float32"))
    verify_multibox_prior(x, dshape, ref_res, clip=False)
    # Type-only check with a symbolic batch dimension.
    y = relay.var("y", relay.TensorType((te.size_var("n"), 24, 32, 32), "float32"))
    verify_multibox_prior(y, dshape, ref_res, clip=False, check_type_only=True)
@tvm.testing.uses_gpu
def test_get_valid_counts():
    """End-to-end checks for relay.vision.get_valid_counts against a NumPy
    reference that compacts boxes passing the score/id filter."""

    def verify_get_valid_counts(dshape, score_threshold, id_index, score_index):
        dtype = "float32"
        batch_size, num_anchor, elem_length = dshape
        np_data = np.random.uniform(low=-2, high=2, size=dshape).astype(dtype)
        # np_out1: valid count per batch; np_out2: compacted boxes;
        # np_out3: original indices of the kept boxes.
        np_out1 = np.zeros(shape=(batch_size,))
        np_out2 = np.zeros(shape=dshape).astype(dtype)
        np_out3 = np.zeros(shape=(batch_size, num_anchor))
        for i in range(batch_size):
            np_out1[i] = 0
            inter_idx = 0
            for j in range(num_anchor):
                score = np_data[i, j, score_index]
                # A box is valid if its score exceeds the threshold and
                # (when id_index >= 0) its class id is non-negative.
                if score > score_threshold and (id_index < 0 or np_data[i, j, id_index] >= 0):
                    for k in range(elem_length):
                        np_out2[i, inter_idx, k] = np_data[i, j, k]
                    np_out1[i] += 1
                    np_out3[i, inter_idx] = j
                    inter_idx += 1
                # Slots at/after the running valid count are padded with -1.
                if j >= np_out1[i]:
                    for k in range(elem_length):
                        np_out2[i, j, k] = -1.0
                    np_out3[i, j] = -1

        x = relay.var("x", relay.ty.TensorType(dshape, dtype))
        z = relay.vision.get_valid_counts(x, score_threshold, id_index, score_index)
        assert "score_threshold" in z.astext()
        func = relay.Function([x], z.astuple())
        func = run_infer_type(func)
        for target, dev in tvm.testing.enabled_targets():
            out = relay.create_executor("vm", device=dev, target=target).evaluate(func)(np_data)
            tvm.testing.assert_allclose(out[0].numpy(), np_out1, rtol=1e-3, atol=1e-04)
            tvm.testing.assert_allclose(out[1].numpy(), np_out2, rtol=1e-3, atol=1e-04)
            tvm.testing.assert_allclose(out[2].numpy(), np_out3, rtol=1e-3, atol=1e-04)

    verify_get_valid_counts((1, 2500, 6), 0, 0, 1)
    verify_get_valid_counts((1, 2500, 5), -1, -1, 0)
    verify_get_valid_counts((3, 1000, 6), 0.55, 1, 0)
    verify_get_valid_counts((16, 500, 5), 0.95, -1, 0)
@tvm.testing.uses_gpu
def test_non_max_suppression(executor_kind):
    """End-to-end checks for relay.vision.non_max_suppression in both its
    return_indices=False (filtered boxes) and return_indices=True modes,
    against hard-coded expected outputs."""

    def verify_nms(
        x0_data,
        x1_data,
        x2_data,
        x3_data,
        dshape,
        ref_res,
        ref_indices_res,
        iou_threshold=0.5,
        force_suppress=False,
        top_k=-1,
        check_type_only=False,
    ):
        # x0: box data, x1: per-batch valid counts, x2: box indices,
        # x3: scalar max_output_size.
        x0 = relay.var("x0", relay.ty.TensorType(dshape, "float32"))
        x1 = relay.var("x1", relay.ty.TensorType((dshape[0],), "int32"))
        x2 = relay.var("x2", relay.ty.TensorType((dshape[0], dshape[1]), "int32"))
        x3 = relay.var("x3", relay.ty.TensorType((), "int32"))
        # Variant 1: NMS returning the filtered boxes themselves.
        z = relay.vision.non_max_suppression(
            x0,
            x1,
            x2,
            x3,
            iou_threshold=iou_threshold,
            force_suppress=force_suppress,
            top_k=top_k,
            return_indices=False,
        )
        # Variant 2: NMS returning (indices, count) instead of boxes.
        z_indices = relay.vision.non_max_suppression(
            x0,
            x1,
            x2,
            x3,
            iou_threshold=iou_threshold,
            force_suppress=force_suppress,
            top_k=top_k,
            return_indices=True,
        )
        if isinstance(z_indices, relay.expr.TupleWrapper):
            z_indices = z_indices.astuple()
        zz = run_infer_type(z)
        zz_indices = run_infer_type(z_indices)
        assert zz.checked_type == relay.ty.TensorType(dshape, "float32")
        assert zz_indices.checked_type == relay.ty.TupleType(
            [
                relay.ty.TensorType((dshape[0], dshape[1]), "int32"),
                relay.ty.TensorType((dshape[0], 1), "int32"),
            ]
        )

        if check_type_only:
            return

        func = relay.Function([x0, x1, x2, x3], z)
        func = run_infer_type(func)
        func_indices = relay.Function([x0, x1, x2, x3], z_indices)
        func_indices = run_infer_type(func_indices)
        for target, dev in tvm.testing.enabled_targets():
            op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
                x0_data, x1_data, x2_data, x3_data
            )
            tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5)
            op_indices_res = relay.create_executor(
                executor_kind, device=dev, target=target
            ).evaluate(func_indices)(x0_data, x1_data, x2_data, x3_data)
            tvm.testing.assert_allclose(op_indices_res[0].numpy(), ref_indices_res, rtol=1e-5)

    # Case 1: rows are (class_id, score, xmin, ymin, xmax, ymax).
    np_data = np.array(
        [
            [
                [0, 0.8, 1, 20, 25, 45],
                [1, 0.7, 30, 60, 50, 80],
                [0, 0.4, 4, 21, 19, 40],
                [2, 0.9, 35, 61, 52, 79],
                [1, 0.5, 100, 60, 70, 110],
            ]
        ]
    ).astype("float32")
    np_valid_count = np.array([4]).astype("int32")
    np_indices = np.array([[0, 1, 3, 4, -1]]).astype("int32")
    np_max_output_size = -1
    np_result = np.array(
        [
            [
                [2, 0.9, 35, 61, 52, 79],
                [0, 0.8, 1, 20, 25, 45],
                [-1, -1, -1, -1, -1, -1],
                [-1, -1, -1, -1, -1, -1],
                [-1, -1, -1, -1, -1, -1],
            ]
        ]
    )
    np_indices_result = np.array([[4, 0, -1, -1, -1]])
    num_anchors = 5

    # Symbolic batch: type-only check.
    dshape = (te.size_var("n"), num_anchors, 6)
    verify_nms(
        np_data,
        np_valid_count,
        np_indices,
        np_max_output_size,
        dshape,
        np_result,
        np_indices_result,
        force_suppress=True,
        top_k=2,
        check_type_only=True,
    )
    dshape = (1, num_anchors, 6)
    verify_nms(
        np_data,
        np_valid_count,
        np_indices,
        np_max_output_size,
        dshape,
        np_result,
        np_indices_result,
        force_suppress=True,
        top_k=2,
        check_type_only=False,
    )

    # Case 2: same boxes, but capped by max_output_size=2.
    np_result = np.array(
        [
            [
                [2, 0.9, 35, 61, 52, 79],
                [0, 0.8, 1, 20, 25, 45],
                [-1, -1, -1, -1, -1, -1],
                [-1, -1, -1, -1, -1, -1],
                [-1, -1, -1, -1, -1, -1],
            ]
        ]
    )
    np_indices_result = np.array([[4, 0, -1, -1, -1]])
    np_max_output_size = 2
    dshape = (te.size_var("n"), num_anchors, 6)
    verify_nms(
        np_data,
        np_valid_count,
        np_indices,
        np_max_output_size,
        dshape,
        np_result,
        np_indices_result,
        check_type_only=True,
    )
    dshape = (1, num_anchors, 6)
    verify_nms(
        np_data,
        np_valid_count,
        np_indices,
        np_max_output_size,
        dshape,
        np_result,
        np_indices_result,
        top_k=2,
    )

    # Case 3: rows carry 4 extra trailing features that must be preserved.
    np_data = np.array(
        [
            [
                [0, 0.8, 1, 20, 25, 45, 1, 2, 3, 4],
                [1, 0.7, 30, 60, 50, 80, 5, 6, 7, 8],
                [0, 0.4, 4, 21, 19, 40, 9, 10, 11, 12],
                [2, 0.9, 35, 61, 52, 79, 13, 14, 15, 16],
                [1, 0.5, 100, 60, 70, 110, 17, 18, 19, 20],
            ]
        ]
    ).astype("float32")
    np_result = np.array(
        [
            [
                [2, 0.9, 35, 61, 52, 79, 13, 14, 15, 16],
                [0, 0.8, 1, 20, 25, 45, 1, 2, 3, 4],
                [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
                [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
                [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1],
            ]
        ]
    )
    dshape = (1, 5, 10)
    verify_nms(
        np_data,
        np_valid_count,
        np_indices,
        np_max_output_size,
        dshape,
        np_result,
        np_indices_result,
        force_suppress=True,
        top_k=2,
        check_type_only=False,
    )
@tvm.testing.uses_gpu
def test_multibox_transform_loc(executor_kind):
    """Checks for relay.vision.multibox_transform_loc: one end-to-end run with
    default attributes, and a type-only check with a custom threshold and
    variances under a symbolic batch."""

    def test_default_value():
        num_anchors = 3
        num_classes = 3

        np_cls_prob = np.array([[[0.2, 0.5, 0.3], [0.25, 0.3, 0.45], [0.7, 0.1, 0.2]]]).astype(
            "float32"
        )
        np_loc_preds = np.array(
            [[0.1, -0.2, 0.3, 0.2, 0.2, 0.4, 0.5, -0.3, 0.7, -0.2, -0.4, -0.8]]
        ).astype("float32")
        np_anchors = np.array(
            [[[-0.1, -0.1, 0.1, 0.1], [-0.2, -0.2, 0.2, 0.2], [1.2, 1.2, 1.5, 1.5]]]
        ).astype("float32")

        # Expected output after multibox_transform_loc followed by NMS.
        expected_np_out = np.array(
            [
                [
                    [1, 0.69999999, 0, 0, 0.10818365, 0.10008108],
                    [0, 0.44999999, 1, 1, 1, 1],
                    [0, 0.30000001, 0, 0, 0.22903419, 0.20435292],
                ]
            ]
        )

        cls_prob = relay.var(
            "cls_prob", relay.ty.TensorType((1, num_anchors, num_classes), "float32")
        )
        loc_pred = relay.var("loc_pred", relay.ty.TensorType((1, num_anchors * 4), "float32"))
        anchors = relay.var("anchors", relay.ty.TensorType((1, num_anchors, 4), "float32"))

        mtl = relay.vision.multibox_transform_loc(
            cls_prob=cls_prob, loc_pred=loc_pred, anchor=anchors
        )
        # The op returns a (transformed boxes, valid count) tuple.
        ret = run_infer_type(mtl.astuple())
        ref_type = relay.ty.TupleType(
            tvm.runtime.convert(
                [
                    relay.ty.TensorType((1, num_anchors, 6), "float32"),
                    relay.ty.TensorType((1,), "int"),
                ]
            )
        )

        assert ret.checked_type == ref_type

        nms = relay.vision.non_max_suppression(mtl[0], mtl[1], mtl[0], return_indices=False)
        func = relay.Function([cls_prob, loc_pred, anchors], nms)
        func = run_infer_type(func)
        for target, dev in tvm.testing.enabled_targets():
            op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
                np_cls_prob, np_loc_preds, np_anchors
            )
            tvm.testing.assert_allclose(op_res.numpy(), expected_np_out, rtol=1e-5)

    def test_threshold():
        num_anchors = 5
        num_classes = 5
        n = te.size_var("n")
        cls_prob = relay.var(
            "cls_prob", relay.ty.TensorType((n, num_anchors, num_classes), "float32")
        )
        loc_pred = relay.var("loc_pred", relay.ty.TensorType((n, num_anchors * 4), "float32"))
        anchors = relay.var("anchors", relay.ty.TensorType((1, num_anchors, 4), "float32"))
        threshold = 0.02
        variances = (0.2, 0.2, 0.3, 0.3)

        ret = relay.vision.multibox_transform_loc(
            cls_prob=cls_prob,
            loc_pred=loc_pred,
            anchor=anchors,
            threshold=threshold,
            variances=variances,
        )
        # Type-only check: the symbolic batch n must flow to both outputs.
        ret = run_infer_type(ret.astuple())
        ref_type = relay.ty.TupleType(
            tvm.runtime.convert(
                [
                    relay.ty.TensorType((n, num_anchors, 6), "float32"),
                    relay.ty.TensorType((n,), "int"),
                ]
            )
        )
        assert ret.checked_type == ref_type

    test_default_value()
    test_threshold()
@tvm.testing.uses_gpu
def test_roi_align(executor_kind):
    """End-to-end checks for relay.vision.roi_align in both NCHW and NHWC
    layouts, avg and max modes, against the topi Python references."""

    def verify_roi_align(
        data_shape,
        rois_shape,
        channel,
        in_size,
        pooled_size,
        spatial_scale,
        sample_ratio,
        mode,
        layout,
        ref_func,
    ):
        data = relay.var("data", relay.ty.TensorType(data_shape, "float32"))
        rois = relay.var("rois", relay.ty.TensorType(rois_shape, "float32"))
        z = relay.vision.roi_align(
            data,
            rois,
            pooled_size=(pooled_size, pooled_size),
            spatial_scale=spatial_scale,
            sample_ratio=sample_ratio,
            mode=mode,
            layout=layout,
        )
        zz = run_infer_type(z)

        # Output layout mirrors the input layout with per-ROI batching.
        num_roi = rois_shape[0]
        if layout == "NCHW":
            assert zz.checked_type == relay.ty.TensorType(
                (num_roi, channel, pooled_size, pooled_size), "float32"
            )
        else:
            assert zz.checked_type == relay.ty.TensorType(
                (num_roi, pooled_size, pooled_size, channel), "float32"
            )

        func = relay.Function([data, rois], z)
        func = run_infer_type(func)
        np_data = np.random.uniform(size=data_shape).astype("float32")
        np_rois = np.random.uniform(size=rois_shape).astype("float32") * in_size
        # First ROI column is the batch index the ROI refers to.
        np_rois[:, 0] = np.random.randint(low=0, high=data_shape[0], size=num_roi)
        ref_res = ref_func(
            np_data,
            np_rois,
            pooled_size=pooled_size,
            spatial_scale=spatial_scale,
            sample_ratio=sample_ratio,
            mode=mode,
        )
        for target, dev in tvm.testing.enabled_targets():
            op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
                np_data, np_rois
            )
            tvm.testing.assert_allclose(op_res.numpy(), ref_res, atol=1e-6, rtol=1e-3)

    def verify_roi_align_nchw(
        data_shape, rois_shape, pooled_size, spatial_scale, sample_ratio, mode
    ):
        # NCHW: channel is axis 1, spatial size is axis 2.
        _, channel, in_size, _ = data_shape
        return verify_roi_align(
            data_shape,
            rois_shape,
            channel,
            in_size,
            pooled_size,
            spatial_scale,
            sample_ratio,
            mode,
            "NCHW",
            tvm.topi.testing.roi_align_nchw_python,
        )

    def verify_roi_align_nhwc(
        data_shape, rois_shape, pooled_size, spatial_scale, sample_ratio, mode
    ):
        # NHWC: spatial size is axis 1, channel is the last axis.
        _, in_size, _, channel = data_shape
        return verify_roi_align(
            data_shape,
            rois_shape,
            channel,
            in_size,
            pooled_size,
            spatial_scale,
            sample_ratio,
            mode,
            "NHWC",
            tvm.topi.testing.roi_align_nhwc_python,
        )

    verify_roi_align_nchw(
        (1, 4, 16, 16), (32, 5), pooled_size=7, spatial_scale=1.0, sample_ratio=-1, mode="avg"
    )
    verify_roi_align_nchw(
        (4, 4, 16, 16), (32, 5), pooled_size=7, spatial_scale=0.5, sample_ratio=2, mode="avg"
    )
    verify_roi_align_nchw(
        (1, 4, 16, 16), (32, 5), pooled_size=7, spatial_scale=1.0, sample_ratio=-1, mode="max"
    )
    verify_roi_align_nchw(
        (4, 4, 16, 16), (32, 5), pooled_size=7, spatial_scale=0.5, sample_ratio=2, mode="max"
    )
    verify_roi_align_nhwc(
        (1, 16, 16, 4), (32, 5), pooled_size=7, spatial_scale=1.0, sample_ratio=-1, mode="avg"
    )
    verify_roi_align_nhwc(
        (4, 16, 16, 4), (32, 5), pooled_size=7, spatial_scale=0.5, sample_ratio=2, mode="avg"
    )
    verify_roi_align_nhwc(
        (1, 16, 16, 4), (32, 5), pooled_size=7, spatial_scale=1.0, sample_ratio=-1, mode="max"
    )
    verify_roi_align_nhwc(
        (4, 16, 16, 4), (32, 5), pooled_size=7, spatial_scale=0.5, sample_ratio=2, mode="max"
    )
@tvm.testing.uses_gpu
def test_roi_pool(executor_kind):
    """roi_pool matches the topi Python reference for NCHW inputs."""

    def verify_roi_pool(data_shape, rois_shape, pooled_size, spatial_scale):
        batch, channel, in_size, _ = data_shape
        num_roi = rois_shape[0]

        data = relay.var("data", relay.ty.TensorType(data_shape, "float32"))
        rois = relay.var("rois", relay.ty.TensorType(rois_shape, "float32"))
        call = relay.vision.roi_pool(
            data,
            rois,
            pooled_size=(pooled_size, pooled_size),
            spatial_scale=spatial_scale,
            layout="NCHW",
        )
        inferred = run_infer_type(call)
        # One pooled feature map per ROI, channels preserved.
        expected_ty = relay.ty.TensorType(
            (num_roi, channel, pooled_size, pooled_size), "float32"
        )
        assert inferred.checked_type == expected_ty

        func = run_infer_type(relay.Function([data, rois], call))

        np_data = np.random.uniform(size=data_shape).astype("float32")
        np_rois = np.random.uniform(size=rois_shape).astype("float32") * in_size
        # First ROI column is the batch index the ROI refers to.
        np_rois[:, 0] = np.random.randint(low=0, high=batch, size=num_roi).astype("float32")
        ref_res = tvm.topi.testing.roi_pool_nchw_python(
            np_data, np_rois, pooled_size=pooled_size, spatial_scale=spatial_scale
        )
        for target, dev in tvm.testing.enabled_targets():
            executor = relay.create_executor(executor_kind, device=dev, target=target)
            op_res = executor.evaluate(func)(np_data, np_rois)
            tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-4)

    verify_roi_pool((1, 4, 16, 16), (32, 5), pooled_size=7, spatial_scale=1.0)
    verify_roi_pool((4, 4, 16, 16), (32, 5), pooled_size=7, spatial_scale=0.5)
@tvm.testing.uses_gpu
def test_proposal(executor_kind):
    """End-to-end checks for relay.vision.proposal against hard-coded expected
    outputs, with and without IoU loss."""

    def verify_proposal(np_cls_prob, np_bbox_pred, np_im_info, np_out, attrs):
        cls_prob = relay.var("cls_prob", relay.ty.TensorType(np_cls_prob.shape, "float32"))
        bbox_pred = relay.var("bbox_pred", relay.ty.TensorType(np_bbox_pred.shape, "float32"))
        im_info = relay.var("im_info", relay.ty.TensorType(np_im_info.shape, "float32"))
        z = relay.vision.proposal(cls_prob, bbox_pred, im_info, **attrs)
        zz = run_infer_type(z)

        assert zz.checked_type == relay.ty.TensorType(np_out.shape, "float32")

        func = relay.Function([cls_prob, bbox_pred, im_info], z)
        func = run_infer_type(func)
        # The proposal op only has llvm and cuda implementations here.
        for target in ["llvm", "cuda"]:
            if not tvm.testing.device_enabled(target):
                print("Skip test because %s is not enabled." % target)
                continue
            dev = tvm.device(target, 0)
            op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
                np_cls_prob, np_bbox_pred, np_im_info
            )
            tvm.testing.assert_allclose(op_res.numpy(), np_out, rtol=1e-4)

    attrs = {
        "scales": (0.5,),
        "ratios": (0.5,),
        "feature_stride": 16,
        "iou_loss": False,
        "rpn_min_size": 16,
        "threshold": 0.7,
        "rpn_pre_nms_top_n": 200,
        "rpn_post_nms_top_n": 4,
    }

    np_cls_prob = np.array(
        [
            [
                [[0.3, 0.6, 0.2], [0.4, 0.7, 0.5], [0.1, 0.4, 0.3]],
                [[0.7, 0.5, 0.3], [0.6, 0.4, 0.8], [0.9, 0.2, 0.5]],
            ]
        ],
        dtype="float32",
    )
    np_bbox_pred = np.array(
        [
            [
                [[0.5, 1.0, 0.6], [0.8, 1.2, 2.0], [0.9, 1.0, 0.8]],
                [[0.5, 1.0, 0.7], [0.8, 1.2, 1.6], [2.1, 1.5, 0.7]],
                [[1.0, 0.5, 0.7], [1.5, 0.9, 1.6], [1.4, 1.5, 0.8]],
                [[1.0, 0.5, 0.6], [1.5, 0.9, 2.0], [1.8, 1.0, 0.9]],
            ]
        ],
        dtype="float32",
    )
    np_im_info = np.array([[48.0, 48.0, 1.0]], dtype="float32")

    # Expected proposals with the default (smooth-L1) bbox transform.
    np_out = np.array(
        [
            [0.0, 0.0, 2.8451548, 28.38012, 18.154846],
            [0.0, 0.0, 15.354933, 41.96971, 41.245064],
            [0.0, 18.019852, 1.0538368, 51.98015, 25.946163],
            [0.0, 27.320923, -1.266357, 55.0, 24.666357],
        ],
        dtype="float32",
    )

    verify_proposal(np_cls_prob, np_bbox_pred, np_im_info, np_out, attrs)

    # Same inputs but with iou_loss enabled produce different proposals.
    np_out = np.array(
        [
            [0.0, -5.25, -2.5, 21.75, 19.0],
            [0.0, 11.25, -2.0, 37.25, 18.5],
            [0.0, 26.849998, -2.3000002, 53.45, 18.6],
            [0.0, -4.95, 13.799999, 22.25, 35.5],
        ],
        dtype="float32",
    )
    attrs["iou_loss"] = True
    verify_proposal(np_cls_prob, np_bbox_pred, np_im_info, np_out, attrs)
def test_yolo_reorg_infer_shape():
    """Type inference of vision.yolo_reorg for static and symbolic shapes."""

    def check(shape, stride, expected_shape):
        data = relay.var("x", relay.TensorType(shape, "float32"))
        call = relay.vision.yolo_reorg(data, stride=stride)
        inferred = run_infer_type(call)
        assert "stride=" in call.astext()
        assert inferred.checked_type == relay.ty.TensorType(expected_shape, "float32")

    n, c, h, w = te.size_var("n"), te.size_var("c"), te.size_var("h"), te.size_var("w")
    idxd = tvm.tir.indexdiv
    # Channels grow by stride**2 while spatial dims shrink by stride.
    check((n, c, 20, 20), 10, (n, c * 10 * 10, 2, 2))
    check((n, c, h, w), 2, (n, c * 2 * 2, idxd(h, 2), idxd(w, 2)))
@tvm.testing.uses_gpu
def test_yolo_reorg(executor_kind):
    """Compiled yolo_reorg output matches the topi Python reference."""

    def check(shape, stride):
        np_input = np.random.uniform(low=-1, high=1, size=shape).astype("float32")
        expected = tvm.topi.testing.reorg_python(np_input, stride)

        data = relay.var("x", relay.TensorType(shape, "float32"))
        call = relay.vision.yolo_reorg(data, stride=stride)
        inferred = run_infer_type(call)
        assert "stride=" in call.astext()
        assert inferred.checked_type == relay.ty.TensorType(expected.shape, "float32")

        func = relay.Function([data], call)
        for target, dev in tvm.testing.enabled_targets():
            executor = relay.create_executor(executor_kind, device=dev, target=target)
            result = executor.evaluate(func)(np_input)
            tvm.testing.assert_allclose(result.numpy(), expected, rtol=1e-5)

    check((1, 100, 20, 20), 10)
    check((1, 4, 6, 6), 2)
class TestDeformableConv2D:
    """Type-inference and numerical tests for relay.nn.deformable_conv2d.

    The tvm.testing.parameter(s) declarations fan the tests out over
    batch/channel/size, kernel size, groups, layout and dtype combinations;
    the fixtures below derive the tensor shapes from those parameters, so the
    fixture argument names must exactly match the parameter names.
    """

    batch, in_channel, size, out_channel, deformable_groups = tvm.testing.parameters(
        (1, 4, 16, 4, 4),
        (2, 4, 16, 4, 1),
    )
    kernel_size = tvm.testing.parameter((3, 3))
    groups = tvm.testing.parameter(1, 2)
    layout = tvm.testing.parameter("NCHW", "NHWC")
    dtype = tvm.testing.parameter("float32")
    @tvm.testing.fixture
    def data_shape(self, layout, batch, in_channel, size):
        """Input tensor shape for the given layout (square spatial dims of `size`)."""
        if layout == "NCHW":
            return (batch, in_channel, size, size)
        elif layout == "NHWC":
            return (batch, size, size, in_channel)
    @tvm.testing.fixture
    def kernel_shape(self, layout, in_channel, out_channel, groups, kernel_size):
        """Weight shape matching the kernel layout (OIHW for NCHW, HWIO for NHWC)."""
        if layout == "NCHW":
            return (out_channel, in_channel // groups, kernel_size[0], kernel_size[1])
        elif layout == "NHWC":
            return (kernel_size[0], kernel_size[1], in_channel // groups, out_channel)
    @tvm.testing.fixture
    def out_shape(self, layout, batch, out_channel, size):
        """Expected output shape; spatial dims are unchanged (stride 1, padding 1, 3x3)."""
        if layout == "NCHW":
            return (batch, out_channel, size, size)
        elif layout == "NHWC":
            return (batch, size, size, out_channel)
    @tvm.testing.fixture
    def offset_shape(self, layout, batch, kernel_size, deformable_groups, out_shape):
        """Offset tensor shape: 2 offsets (x, y) per kernel tap per deformable group."""
        if layout == "NCHW":
            return (
                batch,
                2 * kernel_size[0] * kernel_size[1] * deformable_groups,
                out_shape[2],
                out_shape[3],
            )
        elif layout == "NHWC":
            return (
                batch,
                out_shape[1],
                out_shape[2],
                2 * kernel_size[0] * kernel_size[1] * deformable_groups,
            )
    @tvm.testing.fixture
    def kernel_layout(self, layout):
        """Kernel layout string paired with the data layout."""
        return {"NCHW": "OIHW", "NHWC": "HWIO"}[layout]
    @tvm.testing.fixture
    def relay_setup(
        self,
        dtype,
        data_shape,
        layout,
        kernel_layout,
        kernel_size,
        deformable_groups,
        groups,
        out_channel,
    ):
        """Build the deformable_conv2d call and wrapping function.

        Only `data` carries an explicit shape; offset/kernel shapes are left
        for type inference to solve, which test_infer_type verifies.
        """
        data = relay.var("data", shape=data_shape, dtype=dtype)
        offset = relay.var("offset", dtype=dtype)
        kernel = relay.var("kernel", dtype=dtype)
        expr = relay.nn.deformable_conv2d(
            data,
            offset,
            kernel,
            strides=(1, 1),
            padding=(1, 1),
            dilation=(1, 1),
            data_layout=layout,
            kernel_layout=kernel_layout,
            kernel_size=kernel_size,
            deformable_groups=deformable_groups,
            groups=groups,
            channels=out_channel,
        )
        func = relay.Function([data, offset, kernel], expr)
        return expr, func
    def test_infer_type(self, relay_setup, out_shape, offset_shape, kernel_shape):
        """Check inferred output, offset and kernel types against the fixtures."""
        expr, func = relay_setup
        yy = run_infer_type(expr)
        assert yy.checked_type == relay.TensorType(out_shape), yy.checked_type
        assert yy.args[1].checked_type == relay.TensorType(offset_shape), yy.args[1].checked_type
        assert yy.args[2].checked_type == relay.TensorType(kernel_shape), yy.args[2].checked_type
    # The reference python implementation only supports groups==1.
    @pytest.mark.parametrize("groups", [1])
    def test_run(
        self,
        target,
        dev,
        dtype,
        executor_kind,
        data_shape,
        offset_shape,
        kernel_shape,
        relay_setup,
        deformable_groups,
        groups,
        layout,
    ):
        """Execute the op on each target and compare with the topi python reference."""
        target = tvm.target.Target(target)
        if layout == "NHWC" and target.kind.name != "llvm":
            pytest.xfail("Can only run NHWC layout on llvm")
        expr, func = relay_setup
        data = np.random.uniform(size=data_shape).astype(dtype)
        offset = np.random.uniform(size=offset_shape).astype(dtype)
        kernel = np.random.uniform(size=kernel_shape).astype(dtype)
        if layout == "NCHW":
            ref_res = tvm.topi.testing.deformable_conv2d_nchw_python(
                data,
                offset,
                kernel,
                stride=(1, 1),
                padding=(1, 1),
                dilation=(1, 1),
                deformable_groups=deformable_groups,
                groups=groups,
            )
        else:
            ref_res = tvm.topi.testing.deformable_conv2d_nhwc_python(
                data,
                offset,
                kernel,
                stride=(1, 1),
                padding=(1, 1),
                dilation=(1, 1),
                deformable_groups=deformable_groups,
                groups=groups,
            )
        op_res1 = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
            data, offset, kernel
        )
        tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=1e-5, atol=1e-5)
@tvm.testing.uses_gpu
def test_depth_to_space(executor_kind):
    """Compare relay.nn.depth_to_space against the topi python reference.

    The reference implementation works in NCHW only, so NHWC inputs and
    outputs are transposed around the reference call.

    Fix: the original computed an ``out_shape`` list (using float division,
    yielding float dimensions) that was never used; the dead computation has
    been removed — the inferred type is checked against ``ref_res.shape``.
    """

    def verify_depth_to_space(dshape, block_size, layout, mode):
        x_data = np.random.uniform(size=dshape).astype("float32")
        if layout == "NHWC":
            x_data = np.transpose(x_data, axes=[0, 3, 1, 2])
        ref_res = tvm.topi.testing.depth_to_space_python(x_data, block_size, mode=mode)
        if layout == "NHWC":
            x_data = np.transpose(x_data, axes=[0, 2, 3, 1])
            ref_res = np.transpose(ref_res, axes=[0, 2, 3, 1])

        x = relay.var("x", relay.TensorType(dshape, "float32"))
        z = relay.nn.depth_to_space(x, block_size, layout, mode)
        assert "block_size=" in z.astext()
        zz = run_infer_type(z)
        assert zz.checked_type == relay.TensorType(ref_res.shape, "float32")

        func = relay.Function([x], z)
        for target, dev in tvm.testing.enabled_targets():
            op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
                x_data
            )
            tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-4)

    for layout in ["NHWC", "NCHW"]:
        for mode in ["DCR", "CDR"]:
            verify_depth_to_space((1, 4, 4, 4), 2, layout, mode)
@tvm.testing.uses_gpu
def test_space_to_depth(executor_kind):
    """Compare relay.nn.space_to_depth against the topi python reference.

    The reference implementation works in NCHW only, so NHWC inputs and
    outputs are transposed around the reference call.

    Fix: the original computed an ``out_shape`` list (using float division,
    yielding float dimensions) that was never used; the dead computation has
    been removed — the inferred type is checked against ``ref_res.shape``.
    """

    def verify_space_to_depth(dshape, block_size, layout):
        x_data = np.random.uniform(size=dshape).astype("float32")
        if layout == "NHWC":
            x_data = np.transpose(x_data, axes=[0, 3, 1, 2])
        ref_res = tvm.topi.testing.space_to_depth_python(x_data, block_size)
        if layout == "NHWC":
            x_data = np.transpose(x_data, axes=[0, 2, 3, 1])
            ref_res = np.transpose(ref_res, axes=[0, 2, 3, 1])

        x = relay.var("x", relay.TensorType(dshape, "float32"))
        z = relay.nn.space_to_depth(x, block_size, layout)
        assert "block_size=" in z.astext()
        zz = run_infer_type(z)
        assert zz.checked_type == relay.TensorType(ref_res.shape, "float32")

        func = relay.Function([x], z)
        for target, dev in tvm.testing.enabled_targets():
            op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
                x_data
            )
            tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-4)

    for layout in ["NHWC", "NCHW"]:
        verify_space_to_depth((1, 4, 4, 4), 2, layout)
def test_dilation2d_infer_type():
    """Type inference for relay.image.dilation2d with a symbolic batch dimension.

    Fix: the relay weight variable used the Python name ``w``, shadowing the
    width local ``w`` defined just above; renamed to ``kern`` (the relay-level
    name string "w" is unchanged). Also removed a commented-out argument.
    """
    # symbolic in batch dimension
    n, h, w, c = te.var("n"), 224, 224, 10
    x = relay.var("x", relay.ty.TensorType((n, c, h, w), "float32"))
    kc, kh, kw = 10, 8, 8
    kern = relay.var("w", relay.ty.TensorType((kc, kw, kh), "float32"))
    y = relay.image.dilation2d(
        x,
        kern,
        strides=[1, 1, 1, 1],
        dilations=[1, 1, 1, 1],
        padding=[0, 0, 0, 0],
    )
    yy = run_infer_type(y)
    # 224 - 8 + 1 == 217 in both spatial dimensions (no padding, unit stride).
    assert yy.checked_type == relay.TensorType((n, 10, 217, 217), "float32")
class TestDilation2DRun:
    """End-to-end tests for relay.image.dilation2d against hand-written outputs.

    Each ``config`` entry holds an image, a kernel, the expected output, and
    optional strides/padding/dilations overrides. The literals are written in
    NHWC/HWI form (they pass through untransposed for that layout); the
    ``test_case`` fixture transposes them when running the NCHW/IHW combination.
    """

    data_layout, kernel_layout = tvm.testing.parameters(("NCHW", "IHW"), ("NHWC", "HWI"))
    dtype = tvm.testing.parameter("float32")
    config = tvm.testing.parameter(
        dict(
            image=[[[[0.1], [0.2]], [[0.3], [0.4]]]],
            kernel=[[[0.4], [0.3]], [[0.1], [0.0]]],
            out=[[[[0.5]]]],
        ),
        dict(
            image=[[[[0.1], [0.2]], [[0.3], [0.4]]]],
            kernel=[[[0.4], [0.3]], [[0.1], [0.0]]],
            out=[[[[0.5], [0.6]], [[0.7], [0.8]]]],
            padding=[0, 0, 1, 1],
        ),
        dict(
            image=[[[[0.1, 0.2, 0.0], [0.2, 0.3, 0.1]], [[0.3, 0.4, 0.2], [0.4, 0.5, 0.3]]]],
            kernel=[[[0.4, 0.5, 0.3], [0.3, 0.4, 0.2]], [[0.1, 0.2, 0.0], [0.0, 0.1, -0.1]]],
            out=[[[[0.5, 0.7, 0.3], [0.6, 0.8, 0.4]], [[0.7, 0.9, 0.5], [0.8, 1.0, 0.6]]]],
            padding=[0, 0, 1, 1],
        ),
        dict(
            image=[[[[0.1], [0.2]], [[0.3], [0.4]]], [[[0.2], [0.3]], [[0.4], [0.5]]]],
            kernel=[[[0.4], [0.3]], [[0.1], [0.0]]],
            out=[[[[0.5], [0.6]], [[0.7], [0.8]]], [[[0.6], [0.7]], [[0.8], [0.9]]]],
            padding=[0, 0, 1, 1],
        ),
        dict(
            image=[[[[0.1], [0.2]], [[0.3], [0.4]]]],
            kernel=[[[0.4], [0.3]]],
            out=[[[[0.5]], [[0.7]]]],
        ),
        dict(
            image=[[[[0.1], [0.2], [0.3]], [[0.4], [0.5], [0.6]], [[0.7], [0.8], [0.9]]]],
            kernel=[[[0.4], [0.3]], [[0.1], [0.2]]],
            out=[[[[0.7], [0.8], [0.6]], [[1.0], [1.1], [0.9]], [[0.8], [0.9], [0.9]]]],
            padding=[1, 1],
            dilations=[2, 2],
        ),
        dict(
            image=[
                [
                    [[0.1], [0.2], [0.3], [0.4]],
                    [[0.5], [0.6], [0.7], [0.8]],
                    [[0.9], [1.0], [1.1], [1.2]],
                ]
            ],
            kernel=[[[0.4], [0.3]], [[0.1], [0.2]]],
            out=[[[[0.8], [1.0]], [[1.2], [1.4]]]],
            strides=[1, 2],
        ),
    )
    @tvm.testing.fixture
    def test_case(self, config, data_layout, dtype):
        """Return (input, kernel, expected) arrays transposed to `data_layout`."""
        indata = np.array(config["image"], dtype=dtype)
        kernel = np.array(config["kernel"], dtype=dtype)
        out = np.array(config["out"], dtype=dtype)
        if data_layout == "NHWC":
            pass
        elif data_layout == "NCHW":
            # Config literals are NHWC/HWI; convert to NCHW/IHW.
            indata = indata.transpose([0, 3, 1, 2])
            kernel = kernel.transpose([2, 0, 1])
            out = out.transpose([0, 3, 1, 2])
        else:
            raise ValueError(f"Unsupported layout '{data_layout}'")
        return indata, kernel, out
    @tvm.testing.parametrize_targets("llvm")
    def test_dilation2d(
        self,
        target,
        dev,
        test_case,
        dtype,
        config,
        data_layout,
        kernel_layout,
    ):
        """Run dilation2d on the graph executor and compare with the expected output."""
        strides = config.get("strides", [1, 1])
        padding = config.get("padding", [0, 0])
        dilations = config.get("dilations", [1, 1])
        indata, kernel, out = test_case
        x = relay.var("x", shape=indata.shape, dtype=dtype)
        w = relay.var("w", shape=kernel.shape, dtype=dtype)
        y = relay.image.dilation2d(
            x,
            w,
            strides=strides,
            dilations=dilations,
            padding=padding,
            data_layout=data_layout,
            kernel_layout=kernel_layout,
        )
        func = relay.Function([x, w], y)
        op_res = relay.create_executor("graph", device=dev, target=target).evaluate(func)(
            indata, kernel
        )
        tvm.testing.assert_allclose(op_res.numpy(), out, rtol=1e-5, atol=1e-5)
@tvm.testing.uses_gpu
def test_affine_grid(executor_kind):
    """Check affine_grid type inference and values against the python reference."""

    def _check(num_batch, target_shape):
        dtype = "float32"
        theta_shape = (num_batch, 2, 3)
        theta = relay.var("data", relay.ty.TensorType(theta_shape, dtype))
        grid = relay.image.affine_grid(theta, target_shape)
        inferred = run_infer_type(grid)
        expected_shape = (num_batch, len(target_shape), *target_shape)
        assert inferred.checked_type == relay.ty.TensorType(expected_shape, dtype)

        func = relay.Function([theta], grid)
        theta_np = np.random.uniform(size=theta_shape).astype(dtype)
        expected = tvm.topi.testing.affine_grid_python(theta_np, target_shape)
        for target, dev in tvm.testing.enabled_targets():
            executor = relay.create_executor(executor_kind, device=dev, target=target)
            result = executor.evaluate(func)(theta_np)
            tvm.testing.assert_allclose(result.numpy(), expected, rtol=1e-5, atol=1e-5)

    _check(1, (16, 32))
    _check(4, (16, 32))
@tvm.testing.uses_gpu
def test_grid_sample(executor_kind):
    """Validate relay.image.grid_sample (2D and 3D) against the topi reference."""

    def _check(data_shape, grid_shape, method="bilinear", padding_mode="zeros", align_corners=True):
        dtype = "float32"
        data = relay.var("data", relay.ty.TensorType(data_shape, dtype))
        grid = relay.var("grid", relay.ty.TensorType(grid_shape, dtype))

        if len(data_shape) == 4:
            layout = "NCHW"
            batch, channel = data_shape[0], data_shape[1]
            out_height, out_width = grid_shape[2], grid_shape[3]
            expected_type = relay.TensorType((batch, channel, out_height, out_width), dtype)
        else:  # 5-D input -> volumetric sampling
            layout = "NCDHW"
            batch, channel = data_shape[0], data_shape[1]
            out_depth, out_height, out_width = grid_shape[2], grid_shape[3], grid_shape[4]
            expected_type = relay.TensorType(
                (batch, channel, out_depth, out_height, out_width), dtype
            )

        y = relay.image.grid_sample(
            data,
            grid,
            method=method,
            layout=layout,
            padding_mode=padding_mode,
            align_corners=align_corners,
        )
        yy = run_infer_type(y)
        assert yy.checked_type == expected_type

        func = relay.Function([data, grid], y)
        data_np = np.random.uniform(size=data_shape).astype(dtype)
        grid_np = np.random.uniform(size=grid_shape, low=-1.5, high=1.5).astype(dtype)
        ref_res = tvm.topi.testing.grid_sample_python(
            data_np, grid_np, method, layout, padding_mode, align_corners
        )
        for target, dev in tvm.testing.enabled_targets():
            evaluator = relay.create_executor(executor_kind, device=dev, target=target)
            op_res = evaluator.evaluate(func)(data_np, grid_np)
            tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5, atol=1e-5)

    methods = ["nearest", "bilinear", "bicubic"]
    padding_modes = ["zeros", "border", "reflection"]
    align_corners = [True, False]
    data_2D_shape = (4, 4, 8, 8)
    grid_2D_shape = (4, 2, 16, 16)
    # choosing smaller sizes to be testable on weaker GPUs
    data_3D_shape = (4, 4, 4, 4, 4)
    grid_3D_shape = (4, 3, 8, 8, 8)
    for _method in methods:
        for _padding in padding_modes:
            for _align in align_corners:
                _check(data_2D_shape, grid_2D_shape, _method, _padding, _align)
                # 3D "bicubic"(tricubic) is not supported in pytorch
                if _method != "bicubic":
                    _check(data_3D_shape, grid_3D_shape, _method, _padding, _align)
@tvm.testing.uses_gpu
def test_space_to_batch_nd(executor_kind):
    """Compare relay.nn.space_to_batch_nd with the topi python reference."""

    def _check(dshape, block_shape, paddings):
        x_np = np.random.uniform(size=dshape).astype("float32")
        pad_before = [p[0] for p in paddings]
        pad_after = [p[1] for p in paddings]
        expected = tvm.topi.testing.space_to_batch_nd_python(
            x_np, block_shape, pad_before, pad_after
        )

        x = relay.var("x", relay.TensorType(dshape, "float32"))
        call = relay.nn.space_to_batch_nd(x, block_shape, paddings)
        printed = call.astext()
        assert "block_shape=" in printed
        assert "paddings=" in printed
        typed = run_infer_type(call)
        assert typed.checked_type == relay.TensorType(expected.shape, "float32")

        func = relay.Function([x], call)
        for target, dev in tvm.testing.enabled_targets():
            result = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
                x_np
            )
            tvm.testing.assert_allclose(result.numpy(), expected, rtol=1e-4)

    _check([3, 3, 2, 1], [3], [[0, 0]])
    _check([2, 2, 4, 1], [2, 2], [[0, 0], [2, 0]])
@tvm.testing.uses_gpu
def test_batch_to_space_nd(executor_kind):
    """Compare relay.nn.batch_to_space_nd with the topi python reference."""

    def _check(dshape, block_shape, crops):
        x_np = np.random.uniform(size=dshape).astype("float32")
        crop_begin = [c[0] for c in crops]
        crop_end = [c[1] for c in crops]
        expected = tvm.topi.testing.batch_to_space_nd_python(
            x_np, block_shape, crop_begin, crop_end
        )

        x = relay.var("x", relay.TensorType(dshape, "float32"))
        call = relay.nn.batch_to_space_nd(x, block_shape, crops)
        printed = call.astext()
        assert "block_shape=" in printed
        assert "crops=" in printed
        typed = run_infer_type(call)
        assert typed.checked_type == relay.TensorType(expected.shape, "float32")

        func = relay.Function([x], call)
        for target, dev in tvm.testing.enabled_targets():
            result = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(
                x_np
            )
            tvm.testing.assert_allclose(result.numpy(), expected, rtol=1e-4)

    _check([4, 1, 1, 3], [2, 2], [[0, 0], [0, 0]])
    _check([8, 1, 3, 1], [2, 2], [[0, 0], [2, 0]])
@tvm.testing.uses_gpu
def test_all_class_non_max_suppression(executor_kind):
    """Check all_class_non_max_suppression against hand-computed expected indices."""

    def _run_and_check(
        boxes_np,
        scores_np,
        max_output_boxes_per_class,
        iou_threshold,
        score_threshold,
        expected_indices,
    ):
        boxes = relay.var("boxes", relay.ty.TensorType(boxes_np.shape, "float32"))
        scores = relay.var("scores", relay.ty.TensorType(scores_np.shape, "float32"))
        nms = relay.vision.all_class_non_max_suppression(
            boxes,
            scores,
            max_output_boxes_per_class,
            iou_threshold,
            score_threshold,
        )
        func = run_infer_type(relay.Function([boxes, scores], nms.astuple()))
        for target, dev in tvm.testing.enabled_targets():
            evaluator = relay.create_executor(executor_kind, device=dev, target=target)
            selected_indices, num_detections = evaluator.evaluate(func)(boxes_np, scores_np)
            valid_count = num_detections.numpy()[0]
            tvm_res = selected_indices.numpy()[:valid_count]
            np.testing.assert_equal(tvm_res, expected_indices)

    # Case 1: two batches, two classes, overlapping boxes.
    boxes = np.array(
        [
            [
                [0.0, 0.0, 0.3, 0.3],
                [0.0, 0.0, 0.4, 0.4],
                [0.0, 0.0, 0.5, 0.5],
                [0.5, 0.5, 0.9, 0.9],
                [0.5, 0.5, 1.0, 1.0],
            ],
            [
                [0.0, 0.0, 0.3, 0.3],
                [0.0, 0.0, 0.4, 0.4],
                [0.5, 0.5, 0.95, 0.95],
                [0.5, 0.5, 0.96, 0.96],
                [0.5, 0.5, 1.0, 1.0],
            ],
        ]
    ).astype("float32")
    scores = np.array(
        [
            [[0.1, 0.2, 0.6, 0.3, 0.9], [0.1, 0.2, 0.6, 0.3, 0.9]],
            [[0.1, 0.2, 0.6, 0.3, 0.9], [0.1, 0.2, 0.6, 0.3, 0.9]],
        ]
    ).astype("float32")
    expected = np.array(
        [[0, 0, 4], [0, 0, 2], [0, 1, 4], [0, 1, 2], [1, 0, 4], [1, 0, 1], [1, 1, 4], [1, 1, 1]]
    )
    _run_and_check(boxes, scores, 2, 0.8, 0.0, expected)

    # Case 2: single batch and class with a score threshold filtering candidates.
    boxes = np.array(
        [
            [
                [0.0, 0.0, 1.0, 1.0],
                [0.0, 0.1, 1.0, 1.1],
                [0.0, -0.1, 1.0, 0.9],
                [0.0, 10.0, 1.0, 11.0],
                [0.0, 10.1, 1.0, 11.1],
                [0.0, 100.0, 1.0, 101.0],
            ]
        ]
    ).astype(np.float32)
    scores = np.array([[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]).astype(np.float32)
    expected = np.array([[0, 0, 3], [0, 0, 0]])
    _run_and_check(boxes, scores, 3, 0.5, 0.4, expected)
# Allow running this test file directly; tvm.testing.main() forwards to pytest.
if __name__ == "__main__":
    tvm.testing.main()
| 56,875 | 34.793581 | 100 | py |
tvm | tvm-main/tests/python/relay/collage/menangerie.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""A collection of Relay models for exercising Collage."""
import tvm
import onnx
import numpy as np
import logging
import tvm.contrib.target.onnx
# Directory containing the pre-downloaded ONNX model files referenced below.
# NOTE(review): hard-coded to a specific developer's home directory — must be
# adjusted to run elsewhere.
MODEL_PREFIX = "/home/mbs/gauntlet/models/"
# Each descriptor below records a model's ONNX filename plus the input
# shapes/dtypes needed to import and run it.
MNIST = {
    "name": "mnist",
    "filename": "mnist-8.onnx",
    "input_shapes": {"Input3": [1, 1, 28, 28]},
    "input_dtypes": {"Input3": "float32"},
    "main_dtype": "float32",
}
GPT2 = {
    "name": "gpt2",
    "filename": "gpt2.onnx",
    "input_shapes": {"input1": [1, 50, 32]},
    "input_dtypes": {"input1": "int64"},
    "main_dtype": "float32",
}
RESNET50V2 = {
    "name": "resnet50",
    "filename": "resnet50-v2-7.onnx",
    "input_shapes": {"data": [1, 3, 224, 224]},
    "input_dtypes": {"data": "float32"},
    "main_dtype": "float32",
}
MOBILENETV2 = {
    "name": "mobilenet",
    "filename": "mobilenetv2-1.0.onnx",
    "input_shapes": {"data": [1, 3, 224, 224]},
    "input_dtypes": {"data": "float32"},
    "main_dtype": "float32",
}
# Note that resnext50_32_4d below was extracted directly from the pytorch model and not from any onnx file.
RESNEXT50_32_4d = {
    "name": "resnext50_32_4d",
    "filename": "resnext50_32x4d.onnx",
    "input_shapes": {"x": [1, 64, 56, 56]},
    "input_dtypes": {"x": "float32"},
    "main_dtype": "float32",
}
def make_const(dtype, shape):
    """Return a Relay constant of the given shape filled with random values of `dtype`."""
    values = np.random.rand(*shape)
    return tvm.relay.const(values.astype(dtype))
def make_consts(dtype, shapes):
    """Build one random Relay constant per shape in `shapes`."""
    consts = []
    for shape in shapes:
        consts.append(make_const(dtype, shape))
    return consts
def mnist_consts(dtype):
    """Random constants whose shapes match the MNIST model's parameters.

    Indices correspond to the meta[relay.Constant][i] references in mnist().
    """
    shapes = [
        (8, 1, 5, 5),  # [0] first conv2d weight
        (8, 1, 1),  # [1] add after first conv2d
        (16, 8, 5, 5),  # [2] second conv2d weight
        (16, 1, 1),  # [3] add after second conv2d
        (10, 256),  # [4] dense weight
        (1, 10),  # [5] final add
    ]
    return make_consts(dtype, shapes)
def mnist():
    """Parse the MNIST Relay program with freshly randomized constants.

    Returns a descriptor dict (name, input shapes/dtypes, module, params).
    """
    meta_table = {"relay.Constant": mnist_consts("float32")}
    module = tvm.relay.parse(
        """
        #[version = "0.0.5"]
        def @main(%x: Tensor[(1, 1, 28, 28), float32]) -> Tensor[(1, 10), float32] {
          %0 = nn.pad(%x, 0f, pad_width=[[0, 0], [0, 0], [2, 2], [2, 2]]);
          %1 = nn.conv2d(%0, meta[relay.Constant][0], padding=[0, 0, 0, 0], channels=8, kernel_size=[5, 5]);
          %2 = add(%1, meta[relay.Constant][1]);
          %3 = nn.relu(%2);
          %4 = nn.max_pool2d(%3, pool_size=[2, 2], strides=[2, 2], padding=[0, 0, 0, 0]);
          %5 = nn.pad(%4, 0f, pad_width=[[0, 0], [0, 0], [2, 2], [2, 2]]);
          %6 = nn.conv2d(%5, meta[relay.Constant][2], padding=[0, 0, 0, 0], channels=16, kernel_size=[5, 5]);
          %7 = add(%6, meta[relay.Constant][3]);
          %8 = nn.relu(%7);
          %9 = nn.max_pool2d(%8, pool_size=[3, 3], strides=[3, 3], padding=[0, 0, 0, 0]);
          %10 = reshape(%9, newshape=[1, 256]);
          %11 = nn.dense(%10, meta[relay.Constant][4], units=None, out_dtype="float32");
          add(%11, meta[relay.Constant][5])
        }
        """,
        "from_string",
        None,
        meta_table,
    )
    return {
        "name": "mnist",
        "input_shapes": {"x": [1, 1, 28, 28]},
        "input_dtypes": {"x": "float32"},
        "mod": module,
        "params": None,
        "main_dtype": "float32",
    }
def gpt2_consts(dtype):
    """Random constants matching the parameter shapes of the 12-layer GPT-2 model.

    The first four entries are the embeddings and initial layer-norm
    parameters; the remaining entries repeat the same 14 per-transformer-block
    shapes twelve times (172 constants total), matching the
    meta[relay.Constant][i] references in gpt2().
    """
    shapes = [
        (50257, 768),  # token embedding table
        (1, 32, 768),  # position embedding
        (768,),  # layer-norm scale
        (768,),  # layer-norm shift
    ]
    per_block = [
        (2304, 768),  # qkv dense weight (units=2304, split into q/k/v)
        (2304,),  # qkv dense bias
        (1, 1, 32, 32),  # attention mask (multiplied in)
        (1, 1, 32, 32),  # attention mask (subtracted out)
        (768, 768),  # attention output dense weight
        (768,),  # attention output dense bias
        (768,),  # layer-norm scale
        (768,),  # layer-norm shift
        (3072, 768),  # MLP expansion dense weight
        (3072,),  # MLP expansion dense bias
        (768, 3072),  # MLP contraction dense weight
        (768,),  # MLP contraction dense bias
        (768,),  # layer-norm scale
        (768,),  # layer-norm shift
    ]
    num_layers = 12
    return make_consts(dtype, shapes + per_block * num_layers)
def gpt2():
metatable = {"relay.Constant": gpt2_consts("float32")}
mod = tvm.relay.parse(
"""
#[version = "0.0.5"]
def @main(%x: Tensor[(1, 50, 32), int64]) -> (Tensor[(1, 50, 32, 768), float32],
Tensor[(2, 50, 12, 32, 64), float32],
Tensor[(2, 50, 12, 32, 64), float32],
Tensor[(2, 50, 12, 32, 64), float32],
Tensor[(2, 50, 12, 32, 64), float32],
Tensor[(2, 50, 12, 32, 64), float32],
Tensor[(2, 50, 12, 32, 64), float32],
Tensor[(2, 50, 12, 32, 64), float32],
Tensor[(2, 50, 12, 32, 64), float32],
Tensor[(2, 50, 12, 32, 64), float32],
Tensor[(2, 50, 12, 32, 64), float32],
Tensor[(2, 50, 12, 32, 64), float32],
Tensor[(2, 50, 12, 32, 64), float32]) {
%0 = reshape(%x, newshape=[-1, 32]);
%1 = less(%0, 0i64);
%2 = add(%0, 50257i64);
%3 = where(%1, %2, %0);
%4 = take(meta[relay.Constant][0], %3, axis=0);
%5 = add(%4, meta[relay.Constant][1]);
%6 = mean(%5, axis=[-1], keepdims=True);
%7 = subtract(%5, %6);
%8 = power(%7, 2f);
%9 = mean(%8, axis=[-1], keepdims=True);
%10 = add(%9, 1e-05f);
%11 = sqrt(%10);
%12 = divide(%7, %11);
%13 = multiply(%12, meta[relay.Constant][2]);
%14 = add(%13, meta[relay.Constant][3]);
%15 = reshape(%14, newshape=[-1, 768]);
%16 = nn.dense(%15, meta[relay.Constant][4], units=2304);
%17 = add(%16, meta[relay.Constant][5]);
%18 = reshape(%17, newshape=[50, 32, 2304]);
%19 = split(%18, indices_or_sections=[768, 1536], axis=2);
%20 = %19.0;
%21 = reshape(%20, newshape=[50, 32, 12, 64]);
%22 = transpose(%21, axes=[0, 2, 1, 3]);
%23 = %19.1;
%24 = reshape(%23, newshape=[50, 32, 12, 64]);
%25 = transpose(%24, axes=[0, 2, 3, 1]);
%26 = reshape(%25, newshape=[-1, 64, 32]);
%27 = reshape(%22, newshape=[-1, 32, 64]);
%28 = transpose(%26, axes=[0, 2, 1]);
%29 = nn.batch_matmul(%27, %28, out_dtype="float32", transpose_b=True);
%30 = reshape(%29, newshape=[50, 12, 32, 32]);
%31 = divide(%30, 8f);
%32 = multiply(%31, meta[relay.Constant][6]);
%33 = subtract(%32, meta[relay.Constant][7]);
%34 = nn.softmax(%33, axis=3);
%35 = %19.2;
%36 = reshape(%35, newshape=[50, 32, 12, 64]);
%37 = transpose(%36, axes=[0, 2, 1, 3]);
%38 = reshape(%37, newshape=[-1, 32, 64]);
%39 = reshape(%34, newshape=[-1, 32, 32]);
%40 = transpose(%38, axes=[0, 2, 1]);
%41 = nn.batch_matmul(%39, %40, out_dtype="float32", transpose_b=True);
%42 = reshape(%41, newshape=[50, 12, 32, 64]);
%43 = transpose(%42, axes=[0, 2, 1, 3]);
%44 = reshape(%43, newshape=[50, 32, 768]);
%45 = reshape(%44, newshape=[-1, 768]);
%46 = nn.dense(%45, meta[relay.Constant][8], units=768);
%47 = add(%46, meta[relay.Constant][9]);
%48 = reshape(%47, newshape=[50, 32, 768]);
%49 = add(%5, %48);
%50 = mean(%49, axis=[-1], keepdims=True);
%51 = subtract(%49, %50);
%52 = power(%51, 2f);
%53 = mean(%52, axis=[-1], keepdims=True);
%54 = add(%53, 1e-05f);
%55 = sqrt(%54);
%56 = divide(%51, %55);
%57 = multiply(%56, meta[relay.Constant][10]);
%58 = add(%57, meta[relay.Constant][11]);
%59 = reshape(%58, newshape=[-1, 768]);
%60 = nn.dense(%59, meta[relay.Constant][12], units=3072);
%61 = add(%60, meta[relay.Constant][13]);
%62 = reshape(%61, newshape=[50, 32, 3072]);
%63 = power(%62, 3f);
%64 = multiply(%63, 0.044715f);
%65 = add(%62, %64);
%66 = multiply(%65, 0.797885f);
%67 = tanh(%66);
%68 = multiply(%62, 0.5f);
%69 = add(%67, 1f);
%70 = multiply(%68, %69);
%71 = reshape(%70, newshape=[-1, 3072]);
%72 = nn.dense(%71, meta[relay.Constant][14], units=768);
%73 = add(%72, meta[relay.Constant][15]);
%74 = reshape(%73, newshape=[50, 32, 768]);
%75 = add(%49, %74);
%76 = mean(%75, axis=[-1], keepdims=True);
%77 = subtract(%75, %76);
%78 = power(%77, 2f);
%79 = mean(%78, axis=[-1], keepdims=True);
%80 = add(%79, 1e-05f);
%81 = sqrt(%80);
%82 = divide(%77, %81);
%83 = multiply(%82, meta[relay.Constant][16]);
%84 = add(%83, meta[relay.Constant][17]);
%85 = reshape(%84, newshape=[-1, 768]);
%86 = nn.dense(%85, meta[relay.Constant][18], units=2304);
%87 = add(%86, meta[relay.Constant][19]);
%88 = reshape(%87, newshape=[50, 32, 2304]);
%89 = split(%88, indices_or_sections=[768, 1536], axis=2);
%90 = %89.0;
%91 = reshape(%90, newshape=[50, 32, 12, 64]);
%92 = transpose(%91, axes=[0, 2, 1, 3]);
%93 = %89.1;
%94 = reshape(%93, newshape=[50, 32, 12, 64]);
%95 = transpose(%94, axes=[0, 2, 3, 1]);
%96 = reshape(%95, newshape=[-1, 64, 32]);
%97 = reshape(%92, newshape=[-1, 32, 64]);
%98 = transpose(%96, axes=[0, 2, 1]);
%99 = nn.batch_matmul(%97, %98, out_dtype="float32", transpose_b=True);
%100 = reshape(%99, newshape=[50, 12, 32, 32]);
%101 = divide(%100, 8f);
%102 = multiply(%101, meta[relay.Constant][20]);
%103 = subtract(%102, meta[relay.Constant][21]);
%104 = nn.softmax(%103, axis=3);
%105 = %89.2;
%106 = reshape(%105, newshape=[50, 32, 12, 64]);
%107 = transpose(%106, axes=[0, 2, 1, 3]);
%108 = reshape(%107, newshape=[-1, 32, 64]);
%109 = reshape(%104, newshape=[-1, 32, 32]);
%110 = transpose(%108, axes=[0, 2, 1]);
%111 = nn.batch_matmul(%109, %110, out_dtype="float32", transpose_b=True);
%112 = reshape(%111, newshape=[50, 12, 32, 64]);
%113 = transpose(%112, axes=[0, 2, 1, 3]);
%114 = reshape(%113, newshape=[50, 32, 768]);
%115 = reshape(%114, newshape=[-1, 768]);
%116 = nn.dense(%115, meta[relay.Constant][22], units=768);
%117 = add(%116, meta[relay.Constant][23]);
%118 = reshape(%117, newshape=[50, 32, 768]);
%119 = add(%75, %118);
%120 = mean(%119, axis=[-1], keepdims=True);
%121 = subtract(%119, %120);
%122 = power(%121, 2f);
%123 = mean(%122, axis=[-1], keepdims=True);
%124 = add(%123, 1e-05f);
%125 = sqrt(%124);
%126 = divide(%121, %125);
%127 = multiply(%126, meta[relay.Constant][24]);
%128 = add(%127, meta[relay.Constant][25]);
%129 = reshape(%128, newshape=[-1, 768]);
%130 = nn.dense(%129, meta[relay.Constant][26], units=3072);
%131 = add(%130, meta[relay.Constant][27]);
%132 = reshape(%131, newshape=[50, 32, 3072]);
%133 = power(%132, 3f);
%134 = multiply(%133, 0.044715f);
%135 = add(%132, %134);
%136 = multiply(%135, 0.797885f);
%137 = tanh(%136);
%138 = multiply(%132, 0.5f);
%139 = add(%137, 1f);
%140 = multiply(%138, %139);
%141 = reshape(%140, newshape=[-1, 3072]);
%142 = nn.dense(%141, meta[relay.Constant][28], units=768);
%143 = add(%142, meta[relay.Constant][29]);
%144 = reshape(%143, newshape=[50, 32, 768]);
%145 = add(%119, %144);
%146 = mean(%145, axis=[-1], keepdims=True);
%147 = subtract(%145, %146);
%148 = power(%147, 2f);
%149 = mean(%148, axis=[-1], keepdims=True);
%150 = add(%149, 1e-05f);
%151 = sqrt(%150);
%152 = divide(%147, %151);
%153 = multiply(%152, meta[relay.Constant][30]);
%154 = add(%153, meta[relay.Constant][31]);
%155 = reshape(%154, newshape=[-1, 768]);
%156 = nn.dense(%155, meta[relay.Constant][32], units=2304);
%157 = add(%156, meta[relay.Constant][33]);
%158 = reshape(%157, newshape=[50, 32, 2304]);
%159 = split(%158, indices_or_sections=[768, 1536], axis=2);
%160 = %159.0;
%161 = reshape(%160, newshape=[50, 32, 12, 64]);
%162 = transpose(%161, axes=[0, 2, 1, 3]);
%163 = %159.1;
%164 = reshape(%163, newshape=[50, 32, 12, 64]);
%165 = transpose(%164, axes=[0, 2, 3, 1]);
%166 = reshape(%165, newshape=[-1, 64, 32]);
%167 = reshape(%162, newshape=[-1, 32, 64]);
%168 = transpose(%166, axes=[0, 2, 1]);
%169 = nn.batch_matmul(%167, %168, out_dtype="float32", transpose_b=True);
%170 = reshape(%169, newshape=[50, 12, 32, 32]);
%171 = divide(%170, 8f);
%172 = multiply(%171, meta[relay.Constant][34]);
%173 = subtract(%172, meta[relay.Constant][35]);
%174 = nn.softmax(%173, axis=3);
%175 = %159.2;
%176 = reshape(%175, newshape=[50, 32, 12, 64]);
%177 = transpose(%176, axes=[0, 2, 1, 3]);
%178 = reshape(%177, newshape=[-1, 32, 64]);
%179 = reshape(%174, newshape=[-1, 32, 32]);
%180 = transpose(%178, axes=[0, 2, 1]);
%181 = nn.batch_matmul(%179, %180, out_dtype="float32", transpose_b=True);
%182 = reshape(%181, newshape=[50, 12, 32, 64]);
%183 = transpose(%182, axes=[0, 2, 1, 3]);
%184 = reshape(%183, newshape=[50, 32, 768]);
%185 = reshape(%184, newshape=[-1, 768]);
%186 = nn.dense(%185, meta[relay.Constant][36], units=768);
%187 = add(%186, meta[relay.Constant][37]);
%188 = reshape(%187, newshape=[50, 32, 768]);
%189 = add(%145, %188);
%190 = mean(%189, axis=[-1], keepdims=True);
%191 = subtract(%189, %190);
%192 = power(%191, 2f);
%193 = mean(%192, axis=[-1], keepdims=True);
%194 = add(%193, 1e-05f);
%195 = sqrt(%194);
%196 = divide(%191, %195);
%197 = multiply(%196, meta[relay.Constant][38]);
%198 = add(%197, meta[relay.Constant][39]);
%199 = reshape(%198, newshape=[-1, 768]);
%200 = nn.dense(%199, meta[relay.Constant][40], units=3072);
%201 = add(%200, meta[relay.Constant][41]);
%202 = reshape(%201, newshape=[50, 32, 3072]);
%203 = power(%202, 3f);
%204 = multiply(%203, 0.044715f);
%205 = add(%202, %204);
%206 = multiply(%205, 0.797885f);
%207 = tanh(%206);
%208 = multiply(%202, 0.5f);
%209 = add(%207, 1f);
%210 = multiply(%208, %209);
%211 = reshape(%210, newshape=[-1, 3072]);
%212 = nn.dense(%211, meta[relay.Constant][42], units=768);
%213 = add(%212, meta[relay.Constant][43]);
%214 = reshape(%213, newshape=[50, 32, 768]);
%215 = add(%189, %214);
%216 = mean(%215, axis=[-1], keepdims=True);
%217 = subtract(%215, %216);
%218 = power(%217, 2f);
%219 = mean(%218, axis=[-1], keepdims=True);
%220 = add(%219, 1e-05f);
%221 = sqrt(%220);
%222 = divide(%217, %221);
%223 = multiply(%222, meta[relay.Constant][44]);
%224 = add(%223, meta[relay.Constant][45]);
%225 = reshape(%224, newshape=[-1, 768]);
%226 = nn.dense(%225, meta[relay.Constant][46], units=2304);
%227 = add(%226, meta[relay.Constant][47]);
%228 = reshape(%227, newshape=[50, 32, 2304]);
%229 = split(%228, indices_or_sections=[768, 1536], axis=2);
%230 = %229.0;
%231 = reshape(%230, newshape=[50, 32, 12, 64]);
%232 = transpose(%231, axes=[0, 2, 1, 3]);
%233 = %229.1;
%234 = reshape(%233, newshape=[50, 32, 12, 64]);
%235 = transpose(%234, axes=[0, 2, 3, 1]);
%236 = reshape(%235, newshape=[-1, 64, 32]);
%237 = reshape(%232, newshape=[-1, 32, 64]);
%238 = transpose(%236, axes=[0, 2, 1]);
%239 = nn.batch_matmul(%237, %238, out_dtype="float32", transpose_b=True);
%240 = reshape(%239, newshape=[50, 12, 32, 32]);
%241 = divide(%240, 8f);
%242 = multiply(%241, meta[relay.Constant][48]);
%243 = subtract(%242, meta[relay.Constant][49]);
%244 = nn.softmax(%243, axis=3);
%245 = %229.2;
%246 = reshape(%245, newshape=[50, 32, 12, 64]);
%247 = transpose(%246, axes=[0, 2, 1, 3]);
%248 = reshape(%247, newshape=[-1, 32, 64]);
%249 = reshape(%244, newshape=[-1, 32, 32]);
%250 = transpose(%248, axes=[0, 2, 1]);
%251 = nn.batch_matmul(%249, %250, out_dtype="float32", transpose_b=True);
%252 = reshape(%251, newshape=[50, 12, 32, 64]);
%253 = transpose(%252, axes=[0, 2, 1, 3]);
%254 = reshape(%253, newshape=[50, 32, 768]);
%255 = reshape(%254, newshape=[-1, 768]);
%256 = nn.dense(%255, meta[relay.Constant][50], units=768);
%257 = add(%256, meta[relay.Constant][51]);
%258 = reshape(%257, newshape=[50, 32, 768]);
%259 = add(%215, %258);
%260 = mean(%259, axis=[-1], keepdims=True);
%261 = subtract(%259, %260);
%262 = power(%261, 2f);
%263 = mean(%262, axis=[-1], keepdims=True);
%264 = add(%263, 1e-05f);
%265 = sqrt(%264);
%266 = divide(%261, %265);
%267 = multiply(%266, meta[relay.Constant][52]);
%268 = add(%267, meta[relay.Constant][53]);
%269 = reshape(%268, newshape=[-1, 768]);
%270 = nn.dense(%269, meta[relay.Constant][54], units=3072);
%271 = add(%270, meta[relay.Constant][55]);
%272 = reshape(%271, newshape=[50, 32, 3072]);
%273 = power(%272, 3f);
%274 = multiply(%273, 0.044715f);
%275 = add(%272, %274);
%276 = multiply(%275, 0.797885f);
%277 = tanh(%276);
%278 = multiply(%272, 0.5f);
%279 = add(%277, 1f);
%280 = multiply(%278, %279);
%281 = reshape(%280, newshape=[-1, 3072]);
%282 = nn.dense(%281, meta[relay.Constant][56], units=768);
%283 = add(%282, meta[relay.Constant][57]);
%284 = reshape(%283, newshape=[50, 32, 768]);
%285 = add(%259, %284);
%286 = mean(%285, axis=[-1], keepdims=True);
%287 = subtract(%285, %286);
%288 = power(%287, 2f);
%289 = mean(%288, axis=[-1], keepdims=True);
%290 = add(%289, 1e-05f);
%291 = sqrt(%290);
%292 = divide(%287, %291);
%293 = multiply(%292, meta[relay.Constant][58]);
%294 = add(%293, meta[relay.Constant][59]);
%295 = reshape(%294, newshape=[-1, 768]);
%296 = nn.dense(%295, meta[relay.Constant][60], units=2304);
%297 = add(%296, meta[relay.Constant][61]);
%298 = reshape(%297, newshape=[50, 32, 2304]);
%299 = split(%298, indices_or_sections=[768, 1536], axis=2);
%300 = %299.0;
%301 = reshape(%300, newshape=[50, 32, 12, 64]);
%302 = transpose(%301, axes=[0, 2, 1, 3]);
%303 = %299.1;
%304 = reshape(%303, newshape=[50, 32, 12, 64]);
%305 = transpose(%304, axes=[0, 2, 3, 1]);
%306 = reshape(%305, newshape=[-1, 64, 32]);
%307 = reshape(%302, newshape=[-1, 32, 64]);
%308 = transpose(%306, axes=[0, 2, 1]);
%309 = nn.batch_matmul(%307, %308, out_dtype="float32", transpose_b=True);
%310 = reshape(%309, newshape=[50, 12, 32, 32]);
%311 = divide(%310, 8f);
%312 = multiply(%311, meta[relay.Constant][62]);
%313 = subtract(%312, meta[relay.Constant][63]);
%314 = nn.softmax(%313, axis=3);
%315 = %299.2;
%316 = reshape(%315, newshape=[50, 32, 12, 64]);
%317 = transpose(%316, axes=[0, 2, 1, 3]);
%318 = reshape(%317, newshape=[-1, 32, 64]);
%319 = reshape(%314, newshape=[-1, 32, 32]);
%320 = transpose(%318, axes=[0, 2, 1]);
%321 = nn.batch_matmul(%319, %320, out_dtype="float32", transpose_b=True);
%322 = reshape(%321, newshape=[50, 12, 32, 64]);
%323 = transpose(%322, axes=[0, 2, 1, 3]);
%324 = reshape(%323, newshape=[50, 32, 768]);
%325 = reshape(%324, newshape=[-1, 768]);
%326 = nn.dense(%325, meta[relay.Constant][64], units=768);
%327 = add(%326, meta[relay.Constant][65]);
%328 = reshape(%327, newshape=[50, 32, 768]);
%329 = add(%285, %328);
%330 = mean(%329, axis=[-1], keepdims=True);
%331 = subtract(%329, %330);
%332 = power(%331, 2f);
%333 = mean(%332, axis=[-1], keepdims=True);
%334 = add(%333, 1e-05f);
%335 = sqrt(%334);
%336 = divide(%331, %335);
%337 = multiply(%336, meta[relay.Constant][66]);
%338 = add(%337, meta[relay.Constant][67]);
%339 = reshape(%338, newshape=[-1, 768]);
%340 = nn.dense(%339, meta[relay.Constant][68], units=3072);
%341 = add(%340, meta[relay.Constant][69]);
%342 = reshape(%341, newshape=[50, 32, 3072]);
%343 = power(%342, 3f);
%344 = multiply(%343, 0.044715f);
%345 = add(%342, %344);
%346 = multiply(%345, 0.797885f);
%347 = tanh(%346);
%348 = multiply(%342, 0.5f);
%349 = add(%347, 1f);
%350 = multiply(%348, %349);
%351 = reshape(%350, newshape=[-1, 3072]);
%352 = nn.dense(%351, meta[relay.Constant][70], units=768);
%353 = add(%352, meta[relay.Constant][71]);
%354 = reshape(%353, newshape=[50, 32, 768]);
%355 = add(%329, %354);
%356 = mean(%355, axis=[-1], keepdims=True);
%357 = subtract(%355, %356);
%358 = power(%357, 2f);
%359 = mean(%358, axis=[-1], keepdims=True);
%360 = add(%359, 1e-05f);
%361 = sqrt(%360);
%362 = divide(%357, %361);
%363 = multiply(%362, meta[relay.Constant][72]);
%364 = add(%363, meta[relay.Constant][73]);
%365 = reshape(%364, newshape=[-1, 768]);
%366 = nn.dense(%365, meta[relay.Constant][74], units=2304);
%367 = add(%366, meta[relay.Constant][75]);
%368 = reshape(%367, newshape=[50, 32, 2304]);
%369 = split(%368, indices_or_sections=[768, 1536], axis=2);
%370 = %369.0;
%371 = reshape(%370, newshape=[50, 32, 12, 64]);
%372 = transpose(%371, axes=[0, 2, 1, 3]);
%373 = %369.1;
%374 = reshape(%373, newshape=[50, 32, 12, 64]);
%375 = transpose(%374, axes=[0, 2, 3, 1]);
%376 = reshape(%375, newshape=[-1, 64, 32]);
%377 = reshape(%372, newshape=[-1, 32, 64]);
%378 = transpose(%376, axes=[0, 2, 1]);
%379 = nn.batch_matmul(%377, %378, out_dtype="float32", transpose_b=True);
%380 = reshape(%379, newshape=[50, 12, 32, 32]);
%381 = divide(%380, 8f);
%382 = multiply(%381, meta[relay.Constant][76]);
%383 = subtract(%382, meta[relay.Constant][77]);
%384 = nn.softmax(%383, axis=3);
%385 = %369.2;
%386 = reshape(%385, newshape=[50, 32, 12, 64]);
%387 = transpose(%386, axes=[0, 2, 1, 3]);
%388 = reshape(%387, newshape=[-1, 32, 64]);
%389 = reshape(%384, newshape=[-1, 32, 32]);
%390 = transpose(%388, axes=[0, 2, 1]);
%391 = nn.batch_matmul(%389, %390, out_dtype="float32", transpose_b=True);
%392 = reshape(%391, newshape=[50, 12, 32, 64]);
%393 = transpose(%392, axes=[0, 2, 1, 3]);
%394 = reshape(%393, newshape=[50, 32, 768]);
%395 = reshape(%394, newshape=[-1, 768]);
%396 = nn.dense(%395, meta[relay.Constant][78], units=768);
%397 = add(%396, meta[relay.Constant][79]);
%398 = reshape(%397, newshape=[50, 32, 768]);
%399 = add(%355, %398);
%400 = mean(%399, axis=[-1], keepdims=True);
%401 = subtract(%399, %400);
%402 = power(%401, 2f);
%403 = mean(%402, axis=[-1], keepdims=True);
%404 = add(%403, 1e-05f);
%405 = sqrt(%404);
%406 = divide(%401, %405);
%407 = multiply(%406, meta[relay.Constant][80]);
%408 = add(%407, meta[relay.Constant][81]);
%409 = reshape(%408, newshape=[-1, 768]);
%410 = nn.dense(%409, meta[relay.Constant][82], units=3072);
%411 = add(%410, meta[relay.Constant][83]);
%412 = reshape(%411, newshape=[50, 32, 3072]);
%413 = power(%412, 3f);
%414 = multiply(%413, 0.044715f);
%415 = add(%412, %414);
%416 = multiply(%415, 0.797885f);
%417 = tanh(%416);
%418 = multiply(%412, 0.5f);
%419 = add(%417, 1f);
%420 = multiply(%418, %419);
%421 = reshape(%420, newshape=[-1, 3072]);
%422 = nn.dense(%421, meta[relay.Constant][84], units=768);
%423 = add(%422, meta[relay.Constant][85]);
%424 = reshape(%423, newshape=[50, 32, 768]);
%425 = add(%399, %424);
%426 = mean(%425, axis=[-1], keepdims=True);
%427 = subtract(%425, %426);
%428 = power(%427, 2f);
%429 = mean(%428, axis=[-1], keepdims=True);
%430 = add(%429, 1e-05f);
%431 = sqrt(%430);
%432 = divide(%427, %431);
%433 = multiply(%432, meta[relay.Constant][86]);
%434 = add(%433, meta[relay.Constant][87]);
%435 = reshape(%434, newshape=[-1, 768]);
%436 = nn.dense(%435, meta[relay.Constant][88], units=2304);
%437 = add(%436, meta[relay.Constant][89]);
%438 = reshape(%437, newshape=[50, 32, 2304]);
%439 = split(%438, indices_or_sections=[768, 1536], axis=2);
%440 = %439.0;
%441 = reshape(%440, newshape=[50, 32, 12, 64]);
%442 = transpose(%441, axes=[0, 2, 1, 3]);
%443 = %439.1;
%444 = reshape(%443, newshape=[50, 32, 12, 64]);
%445 = transpose(%444, axes=[0, 2, 3, 1]);
%446 = reshape(%445, newshape=[-1, 64, 32]);
%447 = reshape(%442, newshape=[-1, 32, 64]);
%448 = transpose(%446, axes=[0, 2, 1]);
%449 = nn.batch_matmul(%447, %448, out_dtype="float32", transpose_b=True);
%450 = reshape(%449, newshape=[50, 12, 32, 32]);
%451 = divide(%450, 8f);
%452 = multiply(%451, meta[relay.Constant][90]);
%453 = subtract(%452, meta[relay.Constant][91]);
%454 = nn.softmax(%453, axis=3);
%455 = %439.2;
%456 = reshape(%455, newshape=[50, 32, 12, 64]);
%457 = transpose(%456, axes=[0, 2, 1, 3]);
%458 = reshape(%457, newshape=[-1, 32, 64]);
%459 = reshape(%454, newshape=[-1, 32, 32]);
%460 = transpose(%458, axes=[0, 2, 1]);
%461 = nn.batch_matmul(%459, %460, out_dtype="float32", transpose_b=True);
%462 = reshape(%461, newshape=[50, 12, 32, 64]);
%463 = transpose(%462, axes=[0, 2, 1, 3]);
%464 = reshape(%463, newshape=[50, 32, 768]);
%465 = reshape(%464, newshape=[-1, 768]);
%466 = nn.dense(%465, meta[relay.Constant][92], units=768);
%467 = add(%466, meta[relay.Constant][93]);
%468 = reshape(%467, newshape=[50, 32, 768]);
%469 = add(%425, %468);
%470 = mean(%469, axis=[-1], keepdims=True);
%471 = subtract(%469, %470);
%472 = power(%471, 2f);
%473 = mean(%472, axis=[-1], keepdims=True);
%474 = add(%473, 1e-05f);
%475 = sqrt(%474);
%476 = divide(%471, %475);
%477 = multiply(%476, meta[relay.Constant][94]);
%478 = add(%477, meta[relay.Constant][95]);
%479 = reshape(%478, newshape=[-1, 768]);
%480 = nn.dense(%479, meta[relay.Constant][96], units=3072);
%481 = add(%480, meta[relay.Constant][97]);
%482 = reshape(%481, newshape=[50, 32, 3072]);
%483 = power(%482, 3f);
%484 = multiply(%483, 0.044715f);
%485 = add(%482, %484);
%486 = multiply(%485, 0.797885f);
%487 = tanh(%486);
%488 = multiply(%482, 0.5f);
%489 = add(%487, 1f);
%490 = multiply(%488, %489);
%491 = reshape(%490, newshape=[-1, 3072]);
%492 = nn.dense(%491, meta[relay.Constant][98], units=768);
%493 = add(%492, meta[relay.Constant][99]);
%494 = reshape(%493, newshape=[50, 32, 768]);
%495 = add(%469, %494);
%496 = mean(%495, axis=[-1], keepdims=True);
%497 = subtract(%495, %496);
%498 = power(%497, 2f);
%499 = mean(%498, axis=[-1], keepdims=True);
%500 = add(%499, 1e-05f);
%501 = sqrt(%500);
%502 = divide(%497, %501);
%503 = multiply(%502, meta[relay.Constant][100]);
%504 = add(%503, meta[relay.Constant][101]);
%505 = reshape(%504, newshape=[-1, 768]);
%506 = nn.dense(%505, meta[relay.Constant][102], units=2304);
%507 = add(%506, meta[relay.Constant][103]);
%508 = reshape(%507, newshape=[50, 32, 2304]);
%509 = split(%508, indices_or_sections=[768, 1536], axis=2);
%510 = %509.0;
%511 = reshape(%510, newshape=[50, 32, 12, 64]);
%512 = transpose(%511, axes=[0, 2, 1, 3]);
%513 = %509.1;
%514 = reshape(%513, newshape=[50, 32, 12, 64]);
%515 = transpose(%514, axes=[0, 2, 3, 1]);
%516 = reshape(%515, newshape=[-1, 64, 32]);
%517 = reshape(%512, newshape=[-1, 32, 64]);
%518 = transpose(%516, axes=[0, 2, 1]);
%519 = nn.batch_matmul(%517, %518, out_dtype="float32", transpose_b=True);
%520 = reshape(%519, newshape=[50, 12, 32, 32]);
%521 = divide(%520, 8f);
%522 = multiply(%521, meta[relay.Constant][104]);
%523 = subtract(%522, meta[relay.Constant][105]);
%524 = nn.softmax(%523, axis=3);
%525 = %509.2;
%526 = reshape(%525, newshape=[50, 32, 12, 64]);
%527 = transpose(%526, axes=[0, 2, 1, 3]);
%528 = reshape(%527, newshape=[-1, 32, 64]);
%529 = reshape(%524, newshape=[-1, 32, 32]);
%530 = transpose(%528, axes=[0, 2, 1]);
%531 = nn.batch_matmul(%529, %530, out_dtype="float32", transpose_b=True);
%532 = reshape(%531, newshape=[50, 12, 32, 64]);
%533 = transpose(%532, axes=[0, 2, 1, 3]);
%534 = reshape(%533, newshape=[50, 32, 768]);
%535 = reshape(%534, newshape=[-1, 768]);
%536 = nn.dense(%535, meta[relay.Constant][106], units=768);
%537 = add(%536, meta[relay.Constant][107]);
%538 = reshape(%537, newshape=[50, 32, 768]);
%539 = add(%495, %538);
%540 = mean(%539, axis=[-1], keepdims=True);
%541 = subtract(%539, %540);
%542 = power(%541, 2f);
%543 = mean(%542, axis=[-1], keepdims=True);
%544 = add(%543, 1e-05f);
%545 = sqrt(%544);
%546 = divide(%541, %545);
%547 = multiply(%546, meta[relay.Constant][108]);
%548 = add(%547, meta[relay.Constant][109]);
%549 = reshape(%548, newshape=[-1, 768]);
%550 = nn.dense(%549, meta[relay.Constant][110], units=3072);
%551 = add(%550, meta[relay.Constant][111]);
%552 = reshape(%551, newshape=[50, 32, 3072]);
%553 = power(%552, 3f);
%554 = multiply(%553, 0.044715f);
%555 = add(%552, %554);
%556 = multiply(%555, 0.797885f);
%557 = tanh(%556);
%558 = multiply(%552, 0.5f);
%559 = add(%557, 1f);
%560 = multiply(%558, %559);
%561 = reshape(%560, newshape=[-1, 3072]);
%562 = nn.dense(%561, meta[relay.Constant][112], units=768);
%563 = add(%562, meta[relay.Constant][113]);
%564 = reshape(%563, newshape=[50, 32, 768]);
%565 = add(%539, %564);
%566 = mean(%565, axis=[-1], keepdims=True);
%567 = subtract(%565, %566);
%568 = power(%567, 2f);
%569 = mean(%568, axis=[-1], keepdims=True);
%570 = add(%569, 1e-05f);
%571 = sqrt(%570);
%572 = divide(%567, %571);
%573 = multiply(%572, meta[relay.Constant][114]);
%574 = add(%573, meta[relay.Constant][115]);
%575 = reshape(%574, newshape=[-1, 768]);
%576 = nn.dense(%575, meta[relay.Constant][116], units=2304);
%577 = add(%576, meta[relay.Constant][117]);
%578 = reshape(%577, newshape=[50, 32, 2304]);
%579 = split(%578, indices_or_sections=[768, 1536], axis=2);
%580 = %579.0;
%581 = reshape(%580, newshape=[50, 32, 12, 64]);
%582 = transpose(%581, axes=[0, 2, 1, 3]);
%583 = %579.1;
%584 = reshape(%583, newshape=[50, 32, 12, 64]);
%585 = transpose(%584, axes=[0, 2, 3, 1]);
%586 = reshape(%585, newshape=[-1, 64, 32]);
%587 = reshape(%582, newshape=[-1, 32, 64]);
%588 = transpose(%586, axes=[0, 2, 1]);
%589 = nn.batch_matmul(%587, %588, out_dtype="float32", transpose_b=True);
%590 = reshape(%589, newshape=[50, 12, 32, 32]);
%591 = divide(%590, 8f);
%592 = multiply(%591, meta[relay.Constant][118]);
%593 = subtract(%592, meta[relay.Constant][119]);
%594 = nn.softmax(%593, axis=3);
%595 = %579.2;
%596 = reshape(%595, newshape=[50, 32, 12, 64]);
%597 = transpose(%596, axes=[0, 2, 1, 3]);
%598 = reshape(%597, newshape=[-1, 32, 64]);
%599 = reshape(%594, newshape=[-1, 32, 32]);
%600 = transpose(%598, axes=[0, 2, 1]);
%601 = nn.batch_matmul(%599, %600, out_dtype="float32", transpose_b=True);
%602 = reshape(%601, newshape=[50, 12, 32, 64]);
%603 = transpose(%602, axes=[0, 2, 1, 3]);
%604 = reshape(%603, newshape=[50, 32, 768]);
%605 = reshape(%604, newshape=[-1, 768]);
%606 = nn.dense(%605, meta[relay.Constant][120], units=768);
%607 = add(%606, meta[relay.Constant][121]);
%608 = reshape(%607, newshape=[50, 32, 768]);
%609 = add(%565, %608);
%610 = mean(%609, axis=[-1], keepdims=True);
%611 = subtract(%609, %610);
%612 = power(%611, 2f);
%613 = mean(%612, axis=[-1], keepdims=True);
%614 = add(%613, 1e-05f);
%615 = sqrt(%614);
%616 = divide(%611, %615);
%617 = multiply(%616, meta[relay.Constant][122]);
%618 = add(%617, meta[relay.Constant][123]);
%619 = reshape(%618, newshape=[-1, 768]);
%620 = nn.dense(%619, meta[relay.Constant][124], units=3072);
%621 = add(%620, meta[relay.Constant][125]);
%622 = reshape(%621, newshape=[50, 32, 3072]);
%623 = power(%622, 3f);
%624 = multiply(%623, 0.044715f);
%625 = add(%622, %624);
%626 = multiply(%625, 0.797885f);
%627 = tanh(%626);
%628 = multiply(%622, 0.5f);
%629 = add(%627, 1f);
%630 = multiply(%628, %629);
%631 = reshape(%630, newshape=[-1, 3072]);
%632 = nn.dense(%631, meta[relay.Constant][126], units=768);
%633 = add(%632, meta[relay.Constant][127]);
%634 = reshape(%633, newshape=[50, 32, 768]);
%635 = add(%609, %634);
%636 = mean(%635, axis=[-1], keepdims=True);
%637 = subtract(%635, %636);
%638 = power(%637, 2f);
%639 = mean(%638, axis=[-1], keepdims=True);
%640 = add(%639, 1e-05f);
%641 = sqrt(%640);
%642 = divide(%637, %641);
%643 = multiply(%642, meta[relay.Constant][128]);
%644 = add(%643, meta[relay.Constant][129]);
%645 = reshape(%644, newshape=[-1, 768]);
%646 = nn.dense(%645, meta[relay.Constant][130], units=2304);
%647 = add(%646, meta[relay.Constant][131]);
%648 = reshape(%647, newshape=[50, 32, 2304]);
%649 = split(%648, indices_or_sections=[768, 1536], axis=2);
%650 = %649.0;
%651 = reshape(%650, newshape=[50, 32, 12, 64]);
%652 = transpose(%651, axes=[0, 2, 1, 3]);
%653 = %649.1;
%654 = reshape(%653, newshape=[50, 32, 12, 64]);
%655 = transpose(%654, axes=[0, 2, 3, 1]);
%656 = reshape(%655, newshape=[-1, 64, 32]);
%657 = reshape(%652, newshape=[-1, 32, 64]);
%658 = transpose(%656, axes=[0, 2, 1]);
%659 = nn.batch_matmul(%657, %658, out_dtype="float32", transpose_b=True);
%660 = reshape(%659, newshape=[50, 12, 32, 32]);
%661 = divide(%660, 8f);
%662 = multiply(%661, meta[relay.Constant][132]);
%663 = subtract(%662, meta[relay.Constant][133]);
%664 = nn.softmax(%663, axis=3);
%665 = %649.2;
%666 = reshape(%665, newshape=[50, 32, 12, 64]);
%667 = transpose(%666, axes=[0, 2, 1, 3]);
%668 = reshape(%667, newshape=[-1, 32, 64]);
%669 = reshape(%664, newshape=[-1, 32, 32]);
%670 = transpose(%668, axes=[0, 2, 1]);
%671 = nn.batch_matmul(%669, %670, out_dtype="float32", transpose_b=True);
%672 = reshape(%671, newshape=[50, 12, 32, 64]);
%673 = transpose(%672, axes=[0, 2, 1, 3]);
%674 = reshape(%673, newshape=[50, 32, 768]);
%675 = reshape(%674, newshape=[-1, 768]);
%676 = nn.dense(%675, meta[relay.Constant][134], units=768);
%677 = add(%676, meta[relay.Constant][135]);
%678 = reshape(%677, newshape=[50, 32, 768]);
%679 = add(%635, %678);
%680 = mean(%679, axis=[-1], keepdims=True);
%681 = subtract(%679, %680);
%682 = power(%681, 2f);
%683 = mean(%682, axis=[-1], keepdims=True);
%684 = add(%683, 1e-05f);
%685 = sqrt(%684);
%686 = divide(%681, %685);
%687 = multiply(%686, meta[relay.Constant][136]);
%688 = add(%687, meta[relay.Constant][137]);
%689 = reshape(%688, newshape=[-1, 768]);
%690 = nn.dense(%689, meta[relay.Constant][138], units=3072);
%691 = add(%690, meta[relay.Constant][139]);
%692 = reshape(%691, newshape=[50, 32, 3072]);
%693 = power(%692, 3f);
%694 = multiply(%693, 0.044715f);
%695 = add(%692, %694);
%696 = multiply(%695, 0.797885f);
%697 = tanh(%696);
%698 = multiply(%692, 0.5f);
%699 = add(%697, 1f);
%700 = multiply(%698, %699);
%701 = reshape(%700, newshape=[-1, 3072]);
%702 = nn.dense(%701, meta[relay.Constant][140], units=768);
%703 = add(%702, meta[relay.Constant][141]);
%704 = reshape(%703, newshape=[50, 32, 768]);
%705 = add(%679, %704);
%706 = mean(%705, axis=[-1], keepdims=True);
%707 = subtract(%705, %706);
%708 = power(%707, 2f);
%709 = mean(%708, axis=[-1], keepdims=True);
%710 = add(%709, 1e-05f);
%711 = sqrt(%710);
%712 = divide(%707, %711);
%713 = multiply(%712, meta[relay.Constant][142]);
%714 = add(%713, meta[relay.Constant][143]);
%715 = reshape(%714, newshape=[-1, 768]);
%716 = nn.dense(%715, meta[relay.Constant][144], units=2304);
%717 = add(%716, meta[relay.Constant][145]);
%718 = reshape(%717, newshape=[50, 32, 2304]);
%719 = split(%718, indices_or_sections=[768, 1536], axis=2);
%720 = %719.0;
%721 = reshape(%720, newshape=[50, 32, 12, 64]);
%722 = transpose(%721, axes=[0, 2, 1, 3]);
%723 = %719.1;
%724 = reshape(%723, newshape=[50, 32, 12, 64]);
%725 = transpose(%724, axes=[0, 2, 3, 1]);
%726 = reshape(%725, newshape=[-1, 64, 32]);
%727 = reshape(%722, newshape=[-1, 32, 64]);
%728 = transpose(%726, axes=[0, 2, 1]);
%729 = nn.batch_matmul(%727, %728, out_dtype="float32", transpose_b=True);
%730 = reshape(%729, newshape=[50, 12, 32, 32]);
%731 = divide(%730, 8f);
%732 = multiply(%731, meta[relay.Constant][146]);
%733 = subtract(%732, meta[relay.Constant][147]);
%734 = nn.softmax(%733, axis=3);
%735 = %719.2;
%736 = reshape(%735, newshape=[50, 32, 12, 64]);
%737 = transpose(%736, axes=[0, 2, 1, 3]);
%738 = reshape(%737, newshape=[-1, 32, 64]);
%739 = reshape(%734, newshape=[-1, 32, 32]);
%740 = transpose(%738, axes=[0, 2, 1]);
%741 = nn.batch_matmul(%739, %740, out_dtype="float32", transpose_b=True);
%742 = reshape(%741, newshape=[50, 12, 32, 64]);
%743 = transpose(%742, axes=[0, 2, 1, 3]);
%744 = reshape(%743, newshape=[50, 32, 768]);
%745 = reshape(%744, newshape=[-1, 768]);
%746 = nn.dense(%745, meta[relay.Constant][148], units=768);
%747 = add(%746, meta[relay.Constant][149]);
%748 = reshape(%747, newshape=[50, 32, 768]);
%749 = add(%705, %748);
%750 = mean(%749, axis=[-1], keepdims=True);
%751 = subtract(%749, %750);
%752 = power(%751, 2f);
%753 = mean(%752, axis=[-1], keepdims=True);
%754 = add(%753, 1e-05f);
%755 = sqrt(%754);
%756 = divide(%751, %755);
%757 = multiply(%756, meta[relay.Constant][150]);
%758 = add(%757, meta[relay.Constant][151]);
%759 = reshape(%758, newshape=[-1, 768]);
%760 = nn.dense(%759, meta[relay.Constant][152], units=3072);
%761 = add(%760, meta[relay.Constant][153]);
%762 = reshape(%761, newshape=[50, 32, 3072]);
%763 = power(%762, 3f);
%764 = multiply(%763, 0.044715f);
%765 = add(%762, %764);
%766 = multiply(%765, 0.797885f);
%767 = tanh(%766);
%768 = multiply(%762, 0.5f);
%769 = add(%767, 1f);
%770 = multiply(%768, %769);
%771 = reshape(%770, newshape=[-1, 3072]);
%772 = nn.dense(%771, meta[relay.Constant][154], units=768);
%773 = add(%772, meta[relay.Constant][155]);
%774 = reshape(%773, newshape=[50, 32, 768]);
%775 = add(%749, %774);
%776 = mean(%775, axis=[-1], keepdims=True);
%777 = subtract(%775, %776);
%778 = power(%777, 2f);
%779 = mean(%778, axis=[-1], keepdims=True);
%780 = add(%779, 1e-05f);
%781 = sqrt(%780);
%782 = divide(%777, %781);
%783 = multiply(%782, meta[relay.Constant][156]);
%784 = add(%783, meta[relay.Constant][157]);
%785 = reshape(%784, newshape=[-1, 768]);
%786 = nn.dense(%785, meta[relay.Constant][158], units=2304);
%787 = add(%786, meta[relay.Constant][159]);
%788 = reshape(%787, newshape=[50, 32, 2304]);
%789 = split(%788, indices_or_sections=[768, 1536], axis=2);
%790 = %789.0;
%791 = reshape(%790, newshape=[50, 32, 12, 64]);
%792 = transpose(%791, axes=[0, 2, 1, 3]);
%793 = %789.1;
%794 = reshape(%793, newshape=[50, 32, 12, 64]);
%795 = transpose(%794, axes=[0, 2, 3, 1]);
%796 = reshape(%795, newshape=[-1, 64, 32]);
%797 = reshape(%792, newshape=[-1, 32, 64]);
%798 = transpose(%796, axes=[0, 2, 1]);
%799 = nn.batch_matmul(%797, %798, out_dtype="float32", transpose_b=True);
%800 = reshape(%799, newshape=[50, 12, 32, 32]);
%801 = divide(%800, 8f);
%802 = multiply(%801, meta[relay.Constant][160]);
%803 = subtract(%802, meta[relay.Constant][161]);
%804 = nn.softmax(%803, axis=3);
%805 = %789.2;
%806 = reshape(%805, newshape=[50, 32, 12, 64]);
%807 = transpose(%806, axes=[0, 2, 1, 3]);
%808 = reshape(%807, newshape=[-1, 32, 64]);
%809 = reshape(%804, newshape=[-1, 32, 32]);
%810 = transpose(%808, axes=[0, 2, 1]);
%811 = nn.batch_matmul(%809, %810, out_dtype="float32", transpose_b=True);
%812 = reshape(%811, newshape=[50, 12, 32, 64]);
%813 = transpose(%812, axes=[0, 2, 1, 3]);
%814 = reshape(%813, newshape=[50, 32, 768]);
%815 = reshape(%814, newshape=[-1, 768]);
%816 = nn.dense(%815, meta[relay.Constant][162], units=768);
%817 = add(%816, meta[relay.Constant][163]);
%818 = reshape(%817, newshape=[50, 32, 768]);
%819 = add(%775, %818);
%820 = mean(%819, axis=[-1], keepdims=True);
%821 = subtract(%819, %820);
%822 = power(%821, 2f);
%823 = mean(%822, axis=[-1], keepdims=True);
%824 = add(%823, 1e-05f);
%825 = sqrt(%824);
%826 = divide(%821, %825);
%827 = multiply(%826, meta[relay.Constant][164]);
%828 = add(%827, meta[relay.Constant][165]);
%829 = reshape(%828, newshape=[-1, 768]);
%830 = nn.dense(%829, meta[relay.Constant][166], units=3072);
%831 = add(%830, meta[relay.Constant][167]);
%832 = reshape(%831, newshape=[50, 32, 3072]);
%833 = power(%832, 3f);
%834 = multiply(%833, 0.044715f);
%835 = add(%832, %834);
%836 = multiply(%835, 0.797885f);
%837 = tanh(%836);
%838 = multiply(%832, 0.5f);
%839 = add(%837, 1f);
%840 = multiply(%838, %839);
%841 = reshape(%840, newshape=[-1, 3072]);
%842 = nn.dense(%841, meta[relay.Constant][168], units=768);
%843 = add(%842, meta[relay.Constant][169]);
%844 = reshape(%843, newshape=[50, 32, 768]);
%845 = add(%819, %844);
%846 = mean(%845, axis=[-1], keepdims=True);
%847 = subtract(%845, %846);
%848 = power(%847, 2f);
%849 = mean(%848, axis=[-1], keepdims=True);
%850 = add(%849, 1e-05f);
%851 = sqrt(%850);
%852 = divide(%847, %851);
%853 = multiply(%852, meta[relay.Constant][170]);
%854 = add(%853, meta[relay.Constant][171]);
%855 = transpose(%24, axes=[0, 2, 1, 3]);
%856 = expand_dims(%855, axis=0);
%857 = expand_dims(%37, axis=0);
%858 = (%856, %857);
%859 = transpose(%94, axes=[0, 2, 1, 3]);
%860 = expand_dims(%859, axis=0);
%861 = expand_dims(%107, axis=0);
%862 = (%860, %861);
%863 = transpose(%164, axes=[0, 2, 1, 3]);
%864 = expand_dims(%863, axis=0);
%865 = expand_dims(%177, axis=0);
%866 = (%864, %865);
%867 = transpose(%234, axes=[0, 2, 1, 3]);
%868 = expand_dims(%867, axis=0);
%869 = expand_dims(%247, axis=0);
%870 = (%868, %869);
%871 = transpose(%304, axes=[0, 2, 1, 3]);
%872 = expand_dims(%871, axis=0);
%873 = expand_dims(%317, axis=0);
%874 = (%872, %873);
%875 = transpose(%374, axes=[0, 2, 1, 3]);
%876 = expand_dims(%875, axis=0);
%877 = expand_dims(%387, axis=0);
%878 = (%876, %877);
%879 = transpose(%444, axes=[0, 2, 1, 3]);
%880 = expand_dims(%879, axis=0);
%881 = expand_dims(%457, axis=0);
%882 = (%880, %881);
%883 = transpose(%514, axes=[0, 2, 1, 3]);
%884 = expand_dims(%883, axis=0);
%885 = expand_dims(%527, axis=0);
%886 = (%884, %885);
%887 = transpose(%584, axes=[0, 2, 1, 3]);
%888 = expand_dims(%887, axis=0);
%889 = expand_dims(%597, axis=0);
%890 = (%888, %889);
%891 = transpose(%654, axes=[0, 2, 1, 3]);
%892 = expand_dims(%891, axis=0);
%893 = expand_dims(%667, axis=0);
%894 = (%892, %893);
%895 = transpose(%724, axes=[0, 2, 1, 3]);
%896 = expand_dims(%895, axis=0);
%897 = expand_dims(%737, axis=0);
%898 = (%896, %897);
%899 = transpose(%794, axes=[0, 2, 1, 3]);
%900 = expand_dims(%899, axis=0);
%901 = expand_dims(%807, axis=0);
%902 = (%900, %901);
%903 = reshape(%854, newshape=[1, 50, 32, 768]);
%904 = concatenate(%858);
%905 = concatenate(%862);
%906 = concatenate(%866);
%907 = concatenate(%870);
%908 = concatenate(%874);
%909 = concatenate(%878);
%910 = concatenate(%882);
%911 = concatenate(%886);
%912 = concatenate(%890);
%913 = concatenate(%894);
%914 = concatenate(%898);
%915 = concatenate(%902);
(%903, %904, %905, %906, %907, %908, %909, %910, %911, %912, %913, %914, %915)
}
""",
"from_string",
None,
metatable,
)
return {
"name": "gpt2",
"input_shapes": {"x": [1, 50, 32]},
"input_dtypes": {"x": "int64"},
"mod": mod,
"params": None,
"main_dtype": "float32",
}
def gpt2_16():
metatable = {"relay.Constant": gpt2_consts("float16")}
mod = tvm.relay.parse(
"""
#[version = "0.0.5"]
def @main(%x: Tensor[(1, 50, 32), int64]) -> (Tensor[(1, 50, 32, 768), float16],
Tensor[(2, 50, 12, 32, 64), float16],
Tensor[(2, 50, 12, 32, 64), float16],
Tensor[(2, 50, 12, 32, 64), float16],
Tensor[(2, 50, 12, 32, 64), float16],
Tensor[(2, 50, 12, 32, 64), float16],
Tensor[(2, 50, 12, 32, 64), float16],
Tensor[(2, 50, 12, 32, 64), float16],
Tensor[(2, 50, 12, 32, 64), float16],
Tensor[(2, 50, 12, 32, 64), float16],
Tensor[(2, 50, 12, 32, 64), float16],
Tensor[(2, 50, 12, 32, 64), float16],
Tensor[(2, 50, 12, 32, 64), float16]) {
%0 = reshape(%x, newshape=[-1, 32]);
%1 = less(%0, 0i64);
%2 = add(%0, 50257i64);
%3 = where(%1, %2, %0);
%4 = take(meta[relay.Constant][0], %3, axis=0);
%5 = add(%4, meta[relay.Constant][1]);
%6 = mean(%5, axis=[-1], keepdims=True);
%7 = subtract(%5, %6);
%8 = power(%7, 2f16);
%9 = mean(%8, axis=[-1], keepdims=True);
%10 = add(%9, 1e-05f16);
%11 = sqrt(%10);
%12 = divide(%7, %11);
%13 = multiply(%12, meta[relay.Constant][2]);
%14 = add(%13, meta[relay.Constant][3]);
%15 = reshape(%14, newshape=[-1, 768]);
%16 = nn.dense(%15, meta[relay.Constant][4], units=2304);
%17 = add(%16, meta[relay.Constant][5]);
%18 = reshape(%17, newshape=[50, 32, 2304]);
%19 = split(%18, indices_or_sections=[768, 1536], axis=2);
%20 = %19.0;
%21 = reshape(%20, newshape=[50, 32, 12, 64]);
%22 = transpose(%21, axes=[0, 2, 1, 3]);
%23 = %19.1;
%24 = reshape(%23, newshape=[50, 32, 12, 64]);
%25 = transpose(%24, axes=[0, 2, 3, 1]);
%26 = reshape(%25, newshape=[-1, 64, 32]);
%27 = reshape(%22, newshape=[-1, 32, 64]);
%28 = transpose(%26, axes=[0, 2, 1]);
%29 = nn.batch_matmul(%27, %28, out_dtype="float16", transpose_b=True);
%30 = reshape(%29, newshape=[50, 12, 32, 32]);
%31 = divide(%30, 8f16);
%32 = multiply(%31, meta[relay.Constant][6]);
%33 = subtract(%32, meta[relay.Constant][7]);
%34 = nn.softmax(%33, axis=3);
%35 = %19.2;
%36 = reshape(%35, newshape=[50, 32, 12, 64]);
%37 = transpose(%36, axes=[0, 2, 1, 3]);
%38 = reshape(%37, newshape=[-1, 32, 64]);
%39 = reshape(%34, newshape=[-1, 32, 32]);
%40 = transpose(%38, axes=[0, 2, 1]);
%41 = nn.batch_matmul(%39, %40, out_dtype="float16", transpose_b=True);
%42 = reshape(%41, newshape=[50, 12, 32, 64]);
%43 = transpose(%42, axes=[0, 2, 1, 3]);
%44 = reshape(%43, newshape=[50, 32, 768]);
%45 = reshape(%44, newshape=[-1, 768]);
%46 = nn.dense(%45, meta[relay.Constant][8], units=768);
%47 = add(%46, meta[relay.Constant][9]);
%48 = reshape(%47, newshape=[50, 32, 768]);
%49 = add(%5, %48);
%50 = mean(%49, axis=[-1], keepdims=True);
%51 = subtract(%49, %50);
%52 = power(%51, 2f16);
%53 = mean(%52, axis=[-1], keepdims=True);
%54 = add(%53, 1e-05f16);
%55 = sqrt(%54);
%56 = divide(%51, %55);
%57 = multiply(%56, meta[relay.Constant][10]);
%58 = add(%57, meta[relay.Constant][11]);
%59 = reshape(%58, newshape=[-1, 768]);
%60 = nn.dense(%59, meta[relay.Constant][12], units=3072);
%61 = add(%60, meta[relay.Constant][13]);
%62 = reshape(%61, newshape=[50, 32, 3072]);
%63 = power(%62, 3f16);
%64 = multiply(%63, 0.044715f16);
%65 = add(%62, %64);
%66 = multiply(%65, 0.797885f16);
%67 = tanh(%66);
%68 = multiply(%62, 0.5f16);
%69 = add(%67, 1f16);
%70 = multiply(%68, %69);
%71 = reshape(%70, newshape=[-1, 3072]);
%72 = nn.dense(%71, meta[relay.Constant][14], units=768);
%73 = add(%72, meta[relay.Constant][15]);
%74 = reshape(%73, newshape=[50, 32, 768]);
%75 = add(%49, %74);
%76 = mean(%75, axis=[-1], keepdims=True);
%77 = subtract(%75, %76);
%78 = power(%77, 2f16);
%79 = mean(%78, axis=[-1], keepdims=True);
%80 = add(%79, 1e-05f16);
%81 = sqrt(%80);
%82 = divide(%77, %81);
%83 = multiply(%82, meta[relay.Constant][16]);
%84 = add(%83, meta[relay.Constant][17]);
%85 = reshape(%84, newshape=[-1, 768]);
%86 = nn.dense(%85, meta[relay.Constant][18], units=2304);
%87 = add(%86, meta[relay.Constant][19]);
%88 = reshape(%87, newshape=[50, 32, 2304]);
%89 = split(%88, indices_or_sections=[768, 1536], axis=2);
%90 = %89.0;
%91 = reshape(%90, newshape=[50, 32, 12, 64]);
%92 = transpose(%91, axes=[0, 2, 1, 3]);
%93 = %89.1;
%94 = reshape(%93, newshape=[50, 32, 12, 64]);
%95 = transpose(%94, axes=[0, 2, 3, 1]);
%96 = reshape(%95, newshape=[-1, 64, 32]);
%97 = reshape(%92, newshape=[-1, 32, 64]);
%98 = transpose(%96, axes=[0, 2, 1]);
%99 = nn.batch_matmul(%97, %98, out_dtype="float16", transpose_b=True);
%100 = reshape(%99, newshape=[50, 12, 32, 32]);
%101 = divide(%100, 8f16);
%102 = multiply(%101, meta[relay.Constant][20]);
%103 = subtract(%102, meta[relay.Constant][21]);
%104 = nn.softmax(%103, axis=3);
%105 = %89.2;
%106 = reshape(%105, newshape=[50, 32, 12, 64]);
%107 = transpose(%106, axes=[0, 2, 1, 3]);
%108 = reshape(%107, newshape=[-1, 32, 64]);
%109 = reshape(%104, newshape=[-1, 32, 32]);
%110 = transpose(%108, axes=[0, 2, 1]);
%111 = nn.batch_matmul(%109, %110, out_dtype="float16", transpose_b=True);
%112 = reshape(%111, newshape=[50, 12, 32, 64]);
%113 = transpose(%112, axes=[0, 2, 1, 3]);
%114 = reshape(%113, newshape=[50, 32, 768]);
%115 = reshape(%114, newshape=[-1, 768]);
%116 = nn.dense(%115, meta[relay.Constant][22], units=768);
%117 = add(%116, meta[relay.Constant][23]);
%118 = reshape(%117, newshape=[50, 32, 768]);
%119 = add(%75, %118);
%120 = mean(%119, axis=[-1], keepdims=True);
%121 = subtract(%119, %120);
%122 = power(%121, 2f16);
%123 = mean(%122, axis=[-1], keepdims=True);
%124 = add(%123, 1e-05f16);
%125 = sqrt(%124);
%126 = divide(%121, %125);
%127 = multiply(%126, meta[relay.Constant][24]);
%128 = add(%127, meta[relay.Constant][25]);
%129 = reshape(%128, newshape=[-1, 768]);
%130 = nn.dense(%129, meta[relay.Constant][26], units=3072);
%131 = add(%130, meta[relay.Constant][27]);
%132 = reshape(%131, newshape=[50, 32, 3072]);
%133 = power(%132, 3f16);
%134 = multiply(%133, 0.044715f16);
%135 = add(%132, %134);
%136 = multiply(%135, 0.797885f16);
%137 = tanh(%136);
%138 = multiply(%132, 0.5f16);
%139 = add(%137, 1f16);
%140 = multiply(%138, %139);
%141 = reshape(%140, newshape=[-1, 3072]);
%142 = nn.dense(%141, meta[relay.Constant][28], units=768);
%143 = add(%142, meta[relay.Constant][29]);
%144 = reshape(%143, newshape=[50, 32, 768]);
%145 = add(%119, %144);
%146 = mean(%145, axis=[-1], keepdims=True);
%147 = subtract(%145, %146);
%148 = power(%147, 2f16);
%149 = mean(%148, axis=[-1], keepdims=True);
%150 = add(%149, 1e-05f16);
%151 = sqrt(%150);
%152 = divide(%147, %151);
%153 = multiply(%152, meta[relay.Constant][30]);
%154 = add(%153, meta[relay.Constant][31]);
%155 = reshape(%154, newshape=[-1, 768]);
%156 = nn.dense(%155, meta[relay.Constant][32], units=2304);
%157 = add(%156, meta[relay.Constant][33]);
%158 = reshape(%157, newshape=[50, 32, 2304]);
%159 = split(%158, indices_or_sections=[768, 1536], axis=2);
%160 = %159.0;
%161 = reshape(%160, newshape=[50, 32, 12, 64]);
%162 = transpose(%161, axes=[0, 2, 1, 3]);
%163 = %159.1;
%164 = reshape(%163, newshape=[50, 32, 12, 64]);
%165 = transpose(%164, axes=[0, 2, 3, 1]);
%166 = reshape(%165, newshape=[-1, 64, 32]);
%167 = reshape(%162, newshape=[-1, 32, 64]);
%168 = transpose(%166, axes=[0, 2, 1]);
%169 = nn.batch_matmul(%167, %168, out_dtype="float16", transpose_b=True);
%170 = reshape(%169, newshape=[50, 12, 32, 32]);
%171 = divide(%170, 8f16);
%172 = multiply(%171, meta[relay.Constant][34]);
%173 = subtract(%172, meta[relay.Constant][35]);
%174 = nn.softmax(%173, axis=3);
%175 = %159.2;
%176 = reshape(%175, newshape=[50, 32, 12, 64]);
%177 = transpose(%176, axes=[0, 2, 1, 3]);
%178 = reshape(%177, newshape=[-1, 32, 64]);
%179 = reshape(%174, newshape=[-1, 32, 32]);
%180 = transpose(%178, axes=[0, 2, 1]);
%181 = nn.batch_matmul(%179, %180, out_dtype="float16", transpose_b=True);
%182 = reshape(%181, newshape=[50, 12, 32, 64]);
%183 = transpose(%182, axes=[0, 2, 1, 3]);
%184 = reshape(%183, newshape=[50, 32, 768]);
%185 = reshape(%184, newshape=[-1, 768]);
%186 = nn.dense(%185, meta[relay.Constant][36], units=768);
%187 = add(%186, meta[relay.Constant][37]);
%188 = reshape(%187, newshape=[50, 32, 768]);
%189 = add(%145, %188);
%190 = mean(%189, axis=[-1], keepdims=True);
%191 = subtract(%189, %190);
%192 = power(%191, 2f16);
%193 = mean(%192, axis=[-1], keepdims=True);
%194 = add(%193, 1e-05f16);
%195 = sqrt(%194);
%196 = divide(%191, %195);
%197 = multiply(%196, meta[relay.Constant][38]);
%198 = add(%197, meta[relay.Constant][39]);
%199 = reshape(%198, newshape=[-1, 768]);
%200 = nn.dense(%199, meta[relay.Constant][40], units=3072);
%201 = add(%200, meta[relay.Constant][41]);
%202 = reshape(%201, newshape=[50, 32, 3072]);
%203 = power(%202, 3f16);
%204 = multiply(%203, 0.044715f16);
%205 = add(%202, %204);
%206 = multiply(%205, 0.797885f16);
%207 = tanh(%206);
%208 = multiply(%202, 0.5f16);
%209 = add(%207, 1f16);
%210 = multiply(%208, %209);
%211 = reshape(%210, newshape=[-1, 3072]);
%212 = nn.dense(%211, meta[relay.Constant][42], units=768);
%213 = add(%212, meta[relay.Constant][43]);
%214 = reshape(%213, newshape=[50, 32, 768]);
%215 = add(%189, %214);
%216 = mean(%215, axis=[-1], keepdims=True);
%217 = subtract(%215, %216);
%218 = power(%217, 2f16);
%219 = mean(%218, axis=[-1], keepdims=True);
%220 = add(%219, 1e-05f16);
%221 = sqrt(%220);
%222 = divide(%217, %221);
%223 = multiply(%222, meta[relay.Constant][44]);
%224 = add(%223, meta[relay.Constant][45]);
%225 = reshape(%224, newshape=[-1, 768]);
%226 = nn.dense(%225, meta[relay.Constant][46], units=2304);
%227 = add(%226, meta[relay.Constant][47]);
%228 = reshape(%227, newshape=[50, 32, 2304]);
%229 = split(%228, indices_or_sections=[768, 1536], axis=2);
%230 = %229.0;
%231 = reshape(%230, newshape=[50, 32, 12, 64]);
%232 = transpose(%231, axes=[0, 2, 1, 3]);
%233 = %229.1;
%234 = reshape(%233, newshape=[50, 32, 12, 64]);
%235 = transpose(%234, axes=[0, 2, 3, 1]);
%236 = reshape(%235, newshape=[-1, 64, 32]);
%237 = reshape(%232, newshape=[-1, 32, 64]);
%238 = transpose(%236, axes=[0, 2, 1]);
%239 = nn.batch_matmul(%237, %238, out_dtype="float16", transpose_b=True);
%240 = reshape(%239, newshape=[50, 12, 32, 32]);
%241 = divide(%240, 8f16);
%242 = multiply(%241, meta[relay.Constant][48]);
%243 = subtract(%242, meta[relay.Constant][49]);
%244 = nn.softmax(%243, axis=3);
%245 = %229.2;
%246 = reshape(%245, newshape=[50, 32, 12, 64]);
%247 = transpose(%246, axes=[0, 2, 1, 3]);
%248 = reshape(%247, newshape=[-1, 32, 64]);
%249 = reshape(%244, newshape=[-1, 32, 32]);
%250 = transpose(%248, axes=[0, 2, 1]);
%251 = nn.batch_matmul(%249, %250, out_dtype="float16", transpose_b=True);
%252 = reshape(%251, newshape=[50, 12, 32, 64]);
%253 = transpose(%252, axes=[0, 2, 1, 3]);
%254 = reshape(%253, newshape=[50, 32, 768]);
%255 = reshape(%254, newshape=[-1, 768]);
%256 = nn.dense(%255, meta[relay.Constant][50], units=768);
%257 = add(%256, meta[relay.Constant][51]);
%258 = reshape(%257, newshape=[50, 32, 768]);
%259 = add(%215, %258);
%260 = mean(%259, axis=[-1], keepdims=True);
%261 = subtract(%259, %260);
%262 = power(%261, 2f16);
%263 = mean(%262, axis=[-1], keepdims=True);
%264 = add(%263, 1e-05f16);
%265 = sqrt(%264);
%266 = divide(%261, %265);
%267 = multiply(%266, meta[relay.Constant][52]);
%268 = add(%267, meta[relay.Constant][53]);
%269 = reshape(%268, newshape=[-1, 768]);
%270 = nn.dense(%269, meta[relay.Constant][54], units=3072);
%271 = add(%270, meta[relay.Constant][55]);
%272 = reshape(%271, newshape=[50, 32, 3072]);
%273 = power(%272, 3f16);
%274 = multiply(%273, 0.044715f16);
%275 = add(%272, %274);
%276 = multiply(%275, 0.797885f16);
%277 = tanh(%276);
%278 = multiply(%272, 0.5f16);
%279 = add(%277, 1f16);
%280 = multiply(%278, %279);
%281 = reshape(%280, newshape=[-1, 3072]);
%282 = nn.dense(%281, meta[relay.Constant][56], units=768);
%283 = add(%282, meta[relay.Constant][57]);
%284 = reshape(%283, newshape=[50, 32, 768]);
%285 = add(%259, %284);
%286 = mean(%285, axis=[-1], keepdims=True);
%287 = subtract(%285, %286);
%288 = power(%287, 2f16);
%289 = mean(%288, axis=[-1], keepdims=True);
%290 = add(%289, 1e-05f16);
%291 = sqrt(%290);
%292 = divide(%287, %291);
%293 = multiply(%292, meta[relay.Constant][58]);
%294 = add(%293, meta[relay.Constant][59]);
%295 = reshape(%294, newshape=[-1, 768]);
%296 = nn.dense(%295, meta[relay.Constant][60], units=2304);
%297 = add(%296, meta[relay.Constant][61]);
%298 = reshape(%297, newshape=[50, 32, 2304]);
%299 = split(%298, indices_or_sections=[768, 1536], axis=2);
%300 = %299.0;
%301 = reshape(%300, newshape=[50, 32, 12, 64]);
%302 = transpose(%301, axes=[0, 2, 1, 3]);
%303 = %299.1;
%304 = reshape(%303, newshape=[50, 32, 12, 64]);
%305 = transpose(%304, axes=[0, 2, 3, 1]);
%306 = reshape(%305, newshape=[-1, 64, 32]);
%307 = reshape(%302, newshape=[-1, 32, 64]);
%308 = transpose(%306, axes=[0, 2, 1]);
%309 = nn.batch_matmul(%307, %308, out_dtype="float16", transpose_b=True);
%310 = reshape(%309, newshape=[50, 12, 32, 32]);
%311 = divide(%310, 8f16);
%312 = multiply(%311, meta[relay.Constant][62]);
%313 = subtract(%312, meta[relay.Constant][63]);
%314 = nn.softmax(%313, axis=3);
%315 = %299.2;
%316 = reshape(%315, newshape=[50, 32, 12, 64]);
%317 = transpose(%316, axes=[0, 2, 1, 3]);
%318 = reshape(%317, newshape=[-1, 32, 64]);
%319 = reshape(%314, newshape=[-1, 32, 32]);
%320 = transpose(%318, axes=[0, 2, 1]);
%321 = nn.batch_matmul(%319, %320, out_dtype="float16", transpose_b=True);
%322 = reshape(%321, newshape=[50, 12, 32, 64]);
%323 = transpose(%322, axes=[0, 2, 1, 3]);
%324 = reshape(%323, newshape=[50, 32, 768]);
%325 = reshape(%324, newshape=[-1, 768]);
%326 = nn.dense(%325, meta[relay.Constant][64], units=768);
%327 = add(%326, meta[relay.Constant][65]);
%328 = reshape(%327, newshape=[50, 32, 768]);
%329 = add(%285, %328);
%330 = mean(%329, axis=[-1], keepdims=True);
%331 = subtract(%329, %330);
%332 = power(%331, 2f16);
%333 = mean(%332, axis=[-1], keepdims=True);
%334 = add(%333, 1e-05f16);
%335 = sqrt(%334);
%336 = divide(%331, %335);
%337 = multiply(%336, meta[relay.Constant][66]);
%338 = add(%337, meta[relay.Constant][67]);
%339 = reshape(%338, newshape=[-1, 768]);
%340 = nn.dense(%339, meta[relay.Constant][68], units=3072);
%341 = add(%340, meta[relay.Constant][69]);
%342 = reshape(%341, newshape=[50, 32, 3072]);
%343 = power(%342, 3f16);
%344 = multiply(%343, 0.044715f16);
%345 = add(%342, %344);
%346 = multiply(%345, 0.797885f16);
%347 = tanh(%346);
%348 = multiply(%342, 0.5f16);
%349 = add(%347, 1f16);
%350 = multiply(%348, %349);
%351 = reshape(%350, newshape=[-1, 3072]);
%352 = nn.dense(%351, meta[relay.Constant][70], units=768);
%353 = add(%352, meta[relay.Constant][71]);
%354 = reshape(%353, newshape=[50, 32, 768]);
%355 = add(%329, %354);
%356 = mean(%355, axis=[-1], keepdims=True);
%357 = subtract(%355, %356);
%358 = power(%357, 2f16);
%359 = mean(%358, axis=[-1], keepdims=True);
%360 = add(%359, 1e-05f16);
%361 = sqrt(%360);
%362 = divide(%357, %361);
%363 = multiply(%362, meta[relay.Constant][72]);
%364 = add(%363, meta[relay.Constant][73]);
%365 = reshape(%364, newshape=[-1, 768]);
%366 = nn.dense(%365, meta[relay.Constant][74], units=2304);
%367 = add(%366, meta[relay.Constant][75]);
%368 = reshape(%367, newshape=[50, 32, 2304]);
%369 = split(%368, indices_or_sections=[768, 1536], axis=2);
%370 = %369.0;
%371 = reshape(%370, newshape=[50, 32, 12, 64]);
%372 = transpose(%371, axes=[0, 2, 1, 3]);
%373 = %369.1;
%374 = reshape(%373, newshape=[50, 32, 12, 64]);
%375 = transpose(%374, axes=[0, 2, 3, 1]);
%376 = reshape(%375, newshape=[-1, 64, 32]);
%377 = reshape(%372, newshape=[-1, 32, 64]);
%378 = transpose(%376, axes=[0, 2, 1]);
%379 = nn.batch_matmul(%377, %378, out_dtype="float16", transpose_b=True);
%380 = reshape(%379, newshape=[50, 12, 32, 32]);
%381 = divide(%380, 8f16);
%382 = multiply(%381, meta[relay.Constant][76]);
%383 = subtract(%382, meta[relay.Constant][77]);
%384 = nn.softmax(%383, axis=3);
%385 = %369.2;
%386 = reshape(%385, newshape=[50, 32, 12, 64]);
%387 = transpose(%386, axes=[0, 2, 1, 3]);
%388 = reshape(%387, newshape=[-1, 32, 64]);
%389 = reshape(%384, newshape=[-1, 32, 32]);
%390 = transpose(%388, axes=[0, 2, 1]);
%391 = nn.batch_matmul(%389, %390, out_dtype="float16", transpose_b=True);
%392 = reshape(%391, newshape=[50, 12, 32, 64]);
%393 = transpose(%392, axes=[0, 2, 1, 3]);
%394 = reshape(%393, newshape=[50, 32, 768]);
%395 = reshape(%394, newshape=[-1, 768]);
%396 = nn.dense(%395, meta[relay.Constant][78], units=768);
%397 = add(%396, meta[relay.Constant][79]);
%398 = reshape(%397, newshape=[50, 32, 768]);
%399 = add(%355, %398);
%400 = mean(%399, axis=[-1], keepdims=True);
%401 = subtract(%399, %400);
%402 = power(%401, 2f16);
%403 = mean(%402, axis=[-1], keepdims=True);
%404 = add(%403, 1e-05f16);
%405 = sqrt(%404);
%406 = divide(%401, %405);
%407 = multiply(%406, meta[relay.Constant][80]);
%408 = add(%407, meta[relay.Constant][81]);
%409 = reshape(%408, newshape=[-1, 768]);
%410 = nn.dense(%409, meta[relay.Constant][82], units=3072);
%411 = add(%410, meta[relay.Constant][83]);
%412 = reshape(%411, newshape=[50, 32, 3072]);
%413 = power(%412, 3f16);
%414 = multiply(%413, 0.044715f16);
%415 = add(%412, %414);
%416 = multiply(%415, 0.797885f16);
%417 = tanh(%416);
%418 = multiply(%412, 0.5f16);
%419 = add(%417, 1f16);
%420 = multiply(%418, %419);
%421 = reshape(%420, newshape=[-1, 3072]);
%422 = nn.dense(%421, meta[relay.Constant][84], units=768);
%423 = add(%422, meta[relay.Constant][85]);
%424 = reshape(%423, newshape=[50, 32, 768]);
%425 = add(%399, %424);
%426 = mean(%425, axis=[-1], keepdims=True);
%427 = subtract(%425, %426);
%428 = power(%427, 2f16);
%429 = mean(%428, axis=[-1], keepdims=True);
%430 = add(%429, 1e-05f16);
%431 = sqrt(%430);
%432 = divide(%427, %431);
%433 = multiply(%432, meta[relay.Constant][86]);
%434 = add(%433, meta[relay.Constant][87]);
%435 = reshape(%434, newshape=[-1, 768]);
%436 = nn.dense(%435, meta[relay.Constant][88], units=2304);
%437 = add(%436, meta[relay.Constant][89]);
%438 = reshape(%437, newshape=[50, 32, 2304]);
%439 = split(%438, indices_or_sections=[768, 1536], axis=2);
%440 = %439.0;
%441 = reshape(%440, newshape=[50, 32, 12, 64]);
%442 = transpose(%441, axes=[0, 2, 1, 3]);
%443 = %439.1;
%444 = reshape(%443, newshape=[50, 32, 12, 64]);
%445 = transpose(%444, axes=[0, 2, 3, 1]);
%446 = reshape(%445, newshape=[-1, 64, 32]);
%447 = reshape(%442, newshape=[-1, 32, 64]);
%448 = transpose(%446, axes=[0, 2, 1]);
%449 = nn.batch_matmul(%447, %448, out_dtype="float16", transpose_b=True);
%450 = reshape(%449, newshape=[50, 12, 32, 32]);
%451 = divide(%450, 8f16);
%452 = multiply(%451, meta[relay.Constant][90]);
%453 = subtract(%452, meta[relay.Constant][91]);
%454 = nn.softmax(%453, axis=3);
%455 = %439.2;
%456 = reshape(%455, newshape=[50, 32, 12, 64]);
%457 = transpose(%456, axes=[0, 2, 1, 3]);
%458 = reshape(%457, newshape=[-1, 32, 64]);
%459 = reshape(%454, newshape=[-1, 32, 32]);
%460 = transpose(%458, axes=[0, 2, 1]);
%461 = nn.batch_matmul(%459, %460, out_dtype="float16", transpose_b=True);
%462 = reshape(%461, newshape=[50, 12, 32, 64]);
%463 = transpose(%462, axes=[0, 2, 1, 3]);
%464 = reshape(%463, newshape=[50, 32, 768]);
%465 = reshape(%464, newshape=[-1, 768]);
%466 = nn.dense(%465, meta[relay.Constant][92], units=768);
%467 = add(%466, meta[relay.Constant][93]);
%468 = reshape(%467, newshape=[50, 32, 768]);
%469 = add(%425, %468);
%470 = mean(%469, axis=[-1], keepdims=True);
%471 = subtract(%469, %470);
%472 = power(%471, 2f16);
%473 = mean(%472, axis=[-1], keepdims=True);
%474 = add(%473, 1e-05f16);
%475 = sqrt(%474);
%476 = divide(%471, %475);
%477 = multiply(%476, meta[relay.Constant][94]);
%478 = add(%477, meta[relay.Constant][95]);
%479 = reshape(%478, newshape=[-1, 768]);
%480 = nn.dense(%479, meta[relay.Constant][96], units=3072);
%481 = add(%480, meta[relay.Constant][97]);
%482 = reshape(%481, newshape=[50, 32, 3072]);
%483 = power(%482, 3f16);
%484 = multiply(%483, 0.044715f16);
%485 = add(%482, %484);
%486 = multiply(%485, 0.797885f16);
%487 = tanh(%486);
%488 = multiply(%482, 0.5f16);
%489 = add(%487, 1f16);
%490 = multiply(%488, %489);
%491 = reshape(%490, newshape=[-1, 3072]);
%492 = nn.dense(%491, meta[relay.Constant][98], units=768);
%493 = add(%492, meta[relay.Constant][99]);
%494 = reshape(%493, newshape=[50, 32, 768]);
%495 = add(%469, %494);
%496 = mean(%495, axis=[-1], keepdims=True);
%497 = subtract(%495, %496);
%498 = power(%497, 2f16);
%499 = mean(%498, axis=[-1], keepdims=True);
%500 = add(%499, 1e-05f16);
%501 = sqrt(%500);
%502 = divide(%497, %501);
%503 = multiply(%502, meta[relay.Constant][100]);
%504 = add(%503, meta[relay.Constant][101]);
%505 = reshape(%504, newshape=[-1, 768]);
%506 = nn.dense(%505, meta[relay.Constant][102], units=2304);
%507 = add(%506, meta[relay.Constant][103]);
%508 = reshape(%507, newshape=[50, 32, 2304]);
%509 = split(%508, indices_or_sections=[768, 1536], axis=2);
%510 = %509.0;
%511 = reshape(%510, newshape=[50, 32, 12, 64]);
%512 = transpose(%511, axes=[0, 2, 1, 3]);
%513 = %509.1;
%514 = reshape(%513, newshape=[50, 32, 12, 64]);
%515 = transpose(%514, axes=[0, 2, 3, 1]);
%516 = reshape(%515, newshape=[-1, 64, 32]);
%517 = reshape(%512, newshape=[-1, 32, 64]);
%518 = transpose(%516, axes=[0, 2, 1]);
%519 = nn.batch_matmul(%517, %518, out_dtype="float16", transpose_b=True);
%520 = reshape(%519, newshape=[50, 12, 32, 32]);
%521 = divide(%520, 8f16);
%522 = multiply(%521, meta[relay.Constant][104]);
%523 = subtract(%522, meta[relay.Constant][105]);
%524 = nn.softmax(%523, axis=3);
%525 = %509.2;
%526 = reshape(%525, newshape=[50, 32, 12, 64]);
%527 = transpose(%526, axes=[0, 2, 1, 3]);
%528 = reshape(%527, newshape=[-1, 32, 64]);
%529 = reshape(%524, newshape=[-1, 32, 32]);
%530 = transpose(%528, axes=[0, 2, 1]);
%531 = nn.batch_matmul(%529, %530, out_dtype="float16", transpose_b=True);
%532 = reshape(%531, newshape=[50, 12, 32, 64]);
%533 = transpose(%532, axes=[0, 2, 1, 3]);
%534 = reshape(%533, newshape=[50, 32, 768]);
%535 = reshape(%534, newshape=[-1, 768]);
%536 = nn.dense(%535, meta[relay.Constant][106], units=768);
%537 = add(%536, meta[relay.Constant][107]);
%538 = reshape(%537, newshape=[50, 32, 768]);
%539 = add(%495, %538);
%540 = mean(%539, axis=[-1], keepdims=True);
%541 = subtract(%539, %540);
%542 = power(%541, 2f16);
%543 = mean(%542, axis=[-1], keepdims=True);
%544 = add(%543, 1e-05f16);
%545 = sqrt(%544);
%546 = divide(%541, %545);
%547 = multiply(%546, meta[relay.Constant][108]);
%548 = add(%547, meta[relay.Constant][109]);
%549 = reshape(%548, newshape=[-1, 768]);
%550 = nn.dense(%549, meta[relay.Constant][110], units=3072);
%551 = add(%550, meta[relay.Constant][111]);
%552 = reshape(%551, newshape=[50, 32, 3072]);
%553 = power(%552, 3f16);
%554 = multiply(%553, 0.044715f16);
%555 = add(%552, %554);
%556 = multiply(%555, 0.797885f16);
%557 = tanh(%556);
%558 = multiply(%552, 0.5f16);
%559 = add(%557, 1f16);
%560 = multiply(%558, %559);
%561 = reshape(%560, newshape=[-1, 3072]);
%562 = nn.dense(%561, meta[relay.Constant][112], units=768);
%563 = add(%562, meta[relay.Constant][113]);
%564 = reshape(%563, newshape=[50, 32, 768]);
%565 = add(%539, %564);
%566 = mean(%565, axis=[-1], keepdims=True);
%567 = subtract(%565, %566);
%568 = power(%567, 2f16);
%569 = mean(%568, axis=[-1], keepdims=True);
%570 = add(%569, 1e-05f16);
%571 = sqrt(%570);
%572 = divide(%567, %571);
%573 = multiply(%572, meta[relay.Constant][114]);
%574 = add(%573, meta[relay.Constant][115]);
%575 = reshape(%574, newshape=[-1, 768]);
%576 = nn.dense(%575, meta[relay.Constant][116], units=2304);
%577 = add(%576, meta[relay.Constant][117]);
%578 = reshape(%577, newshape=[50, 32, 2304]);
%579 = split(%578, indices_or_sections=[768, 1536], axis=2);
%580 = %579.0;
%581 = reshape(%580, newshape=[50, 32, 12, 64]);
%582 = transpose(%581, axes=[0, 2, 1, 3]);
%583 = %579.1;
%584 = reshape(%583, newshape=[50, 32, 12, 64]);
%585 = transpose(%584, axes=[0, 2, 3, 1]);
%586 = reshape(%585, newshape=[-1, 64, 32]);
%587 = reshape(%582, newshape=[-1, 32, 64]);
%588 = transpose(%586, axes=[0, 2, 1]);
%589 = nn.batch_matmul(%587, %588, out_dtype="float16", transpose_b=True);
%590 = reshape(%589, newshape=[50, 12, 32, 32]);
%591 = divide(%590, 8f16);
%592 = multiply(%591, meta[relay.Constant][118]);
%593 = subtract(%592, meta[relay.Constant][119]);
%594 = nn.softmax(%593, axis=3);
%595 = %579.2;
%596 = reshape(%595, newshape=[50, 32, 12, 64]);
%597 = transpose(%596, axes=[0, 2, 1, 3]);
%598 = reshape(%597, newshape=[-1, 32, 64]);
%599 = reshape(%594, newshape=[-1, 32, 32]);
%600 = transpose(%598, axes=[0, 2, 1]);
%601 = nn.batch_matmul(%599, %600, out_dtype="float16", transpose_b=True);
%602 = reshape(%601, newshape=[50, 12, 32, 64]);
%603 = transpose(%602, axes=[0, 2, 1, 3]);
%604 = reshape(%603, newshape=[50, 32, 768]);
%605 = reshape(%604, newshape=[-1, 768]);
%606 = nn.dense(%605, meta[relay.Constant][120], units=768);
%607 = add(%606, meta[relay.Constant][121]);
%608 = reshape(%607, newshape=[50, 32, 768]);
%609 = add(%565, %608);
%610 = mean(%609, axis=[-1], keepdims=True);
%611 = subtract(%609, %610);
%612 = power(%611, 2f16);
%613 = mean(%612, axis=[-1], keepdims=True);
%614 = add(%613, 1e-05f16);
%615 = sqrt(%614);
%616 = divide(%611, %615);
%617 = multiply(%616, meta[relay.Constant][122]);
%618 = add(%617, meta[relay.Constant][123]);
%619 = reshape(%618, newshape=[-1, 768]);
%620 = nn.dense(%619, meta[relay.Constant][124], units=3072);
%621 = add(%620, meta[relay.Constant][125]);
%622 = reshape(%621, newshape=[50, 32, 3072]);
%623 = power(%622, 3f16);
%624 = multiply(%623, 0.044715f16);
%625 = add(%622, %624);
%626 = multiply(%625, 0.797885f16);
%627 = tanh(%626);
%628 = multiply(%622, 0.5f16);
%629 = add(%627, 1f16);
%630 = multiply(%628, %629);
%631 = reshape(%630, newshape=[-1, 3072]);
%632 = nn.dense(%631, meta[relay.Constant][126], units=768);
%633 = add(%632, meta[relay.Constant][127]);
%634 = reshape(%633, newshape=[50, 32, 768]);
%635 = add(%609, %634);
%636 = mean(%635, axis=[-1], keepdims=True);
%637 = subtract(%635, %636);
%638 = power(%637, 2f16);
%639 = mean(%638, axis=[-1], keepdims=True);
%640 = add(%639, 1e-05f16);
%641 = sqrt(%640);
%642 = divide(%637, %641);
%643 = multiply(%642, meta[relay.Constant][128]);
%644 = add(%643, meta[relay.Constant][129]);
%645 = reshape(%644, newshape=[-1, 768]);
%646 = nn.dense(%645, meta[relay.Constant][130], units=2304);
%647 = add(%646, meta[relay.Constant][131]);
%648 = reshape(%647, newshape=[50, 32, 2304]);
%649 = split(%648, indices_or_sections=[768, 1536], axis=2);
%650 = %649.0;
%651 = reshape(%650, newshape=[50, 32, 12, 64]);
%652 = transpose(%651, axes=[0, 2, 1, 3]);
%653 = %649.1;
%654 = reshape(%653, newshape=[50, 32, 12, 64]);
%655 = transpose(%654, axes=[0, 2, 3, 1]);
%656 = reshape(%655, newshape=[-1, 64, 32]);
%657 = reshape(%652, newshape=[-1, 32, 64]);
%658 = transpose(%656, axes=[0, 2, 1]);
%659 = nn.batch_matmul(%657, %658, out_dtype="float16", transpose_b=True);
%660 = reshape(%659, newshape=[50, 12, 32, 32]);
%661 = divide(%660, 8f16);
%662 = multiply(%661, meta[relay.Constant][132]);
%663 = subtract(%662, meta[relay.Constant][133]);
%664 = nn.softmax(%663, axis=3);
%665 = %649.2;
%666 = reshape(%665, newshape=[50, 32, 12, 64]);
%667 = transpose(%666, axes=[0, 2, 1, 3]);
%668 = reshape(%667, newshape=[-1, 32, 64]);
%669 = reshape(%664, newshape=[-1, 32, 32]);
%670 = transpose(%668, axes=[0, 2, 1]);
%671 = nn.batch_matmul(%669, %670, out_dtype="float16", transpose_b=True);
%672 = reshape(%671, newshape=[50, 12, 32, 64]);
%673 = transpose(%672, axes=[0, 2, 1, 3]);
%674 = reshape(%673, newshape=[50, 32, 768]);
%675 = reshape(%674, newshape=[-1, 768]);
%676 = nn.dense(%675, meta[relay.Constant][134], units=768);
%677 = add(%676, meta[relay.Constant][135]);
%678 = reshape(%677, newshape=[50, 32, 768]);
%679 = add(%635, %678);
%680 = mean(%679, axis=[-1], keepdims=True);
%681 = subtract(%679, %680);
%682 = power(%681, 2f16);
%683 = mean(%682, axis=[-1], keepdims=True);
%684 = add(%683, 1e-05f16);
%685 = sqrt(%684);
%686 = divide(%681, %685);
%687 = multiply(%686, meta[relay.Constant][136]);
%688 = add(%687, meta[relay.Constant][137]);
%689 = reshape(%688, newshape=[-1, 768]);
%690 = nn.dense(%689, meta[relay.Constant][138], units=3072);
%691 = add(%690, meta[relay.Constant][139]);
%692 = reshape(%691, newshape=[50, 32, 3072]);
%693 = power(%692, 3f16);
%694 = multiply(%693, 0.044715f16);
%695 = add(%692, %694);
%696 = multiply(%695, 0.797885f16);
%697 = tanh(%696);
%698 = multiply(%692, 0.5f16);
%699 = add(%697, 1f16);
%700 = multiply(%698, %699);
%701 = reshape(%700, newshape=[-1, 3072]);
%702 = nn.dense(%701, meta[relay.Constant][140], units=768);
%703 = add(%702, meta[relay.Constant][141]);
%704 = reshape(%703, newshape=[50, 32, 768]);
%705 = add(%679, %704);
%706 = mean(%705, axis=[-1], keepdims=True);
%707 = subtract(%705, %706);
%708 = power(%707, 2f16);
%709 = mean(%708, axis=[-1], keepdims=True);
%710 = add(%709, 1e-05f16);
%711 = sqrt(%710);
%712 = divide(%707, %711);
%713 = multiply(%712, meta[relay.Constant][142]);
%714 = add(%713, meta[relay.Constant][143]);
%715 = reshape(%714, newshape=[-1, 768]);
%716 = nn.dense(%715, meta[relay.Constant][144], units=2304);
%717 = add(%716, meta[relay.Constant][145]);
%718 = reshape(%717, newshape=[50, 32, 2304]);
%719 = split(%718, indices_or_sections=[768, 1536], axis=2);
%720 = %719.0;
%721 = reshape(%720, newshape=[50, 32, 12, 64]);
%722 = transpose(%721, axes=[0, 2, 1, 3]);
%723 = %719.1;
%724 = reshape(%723, newshape=[50, 32, 12, 64]);
%725 = transpose(%724, axes=[0, 2, 3, 1]);
%726 = reshape(%725, newshape=[-1, 64, 32]);
%727 = reshape(%722, newshape=[-1, 32, 64]);
%728 = transpose(%726, axes=[0, 2, 1]);
%729 = nn.batch_matmul(%727, %728, out_dtype="float16", transpose_b=True);
%730 = reshape(%729, newshape=[50, 12, 32, 32]);
%731 = divide(%730, 8f16);
%732 = multiply(%731, meta[relay.Constant][146]);
%733 = subtract(%732, meta[relay.Constant][147]);
%734 = nn.softmax(%733, axis=3);
%735 = %719.2;
%736 = reshape(%735, newshape=[50, 32, 12, 64]);
%737 = transpose(%736, axes=[0, 2, 1, 3]);
%738 = reshape(%737, newshape=[-1, 32, 64]);
%739 = reshape(%734, newshape=[-1, 32, 32]);
%740 = transpose(%738, axes=[0, 2, 1]);
%741 = nn.batch_matmul(%739, %740, out_dtype="float16", transpose_b=True);
%742 = reshape(%741, newshape=[50, 12, 32, 64]);
%743 = transpose(%742, axes=[0, 2, 1, 3]);
%744 = reshape(%743, newshape=[50, 32, 768]);
%745 = reshape(%744, newshape=[-1, 768]);
%746 = nn.dense(%745, meta[relay.Constant][148], units=768);
%747 = add(%746, meta[relay.Constant][149]);
%748 = reshape(%747, newshape=[50, 32, 768]);
%749 = add(%705, %748);
%750 = mean(%749, axis=[-1], keepdims=True);
%751 = subtract(%749, %750);
%752 = power(%751, 2f16);
%753 = mean(%752, axis=[-1], keepdims=True);
%754 = add(%753, 1e-05f16);
%755 = sqrt(%754);
%756 = divide(%751, %755);
%757 = multiply(%756, meta[relay.Constant][150]);
%758 = add(%757, meta[relay.Constant][151]);
%759 = reshape(%758, newshape=[-1, 768]);
%760 = nn.dense(%759, meta[relay.Constant][152], units=3072);
%761 = add(%760, meta[relay.Constant][153]);
%762 = reshape(%761, newshape=[50, 32, 3072]);
%763 = power(%762, 3f16);
%764 = multiply(%763, 0.044715f16);
%765 = add(%762, %764);
%766 = multiply(%765, 0.797885f16);
%767 = tanh(%766);
%768 = multiply(%762, 0.5f16);
%769 = add(%767, 1f16);
%770 = multiply(%768, %769);
%771 = reshape(%770, newshape=[-1, 3072]);
%772 = nn.dense(%771, meta[relay.Constant][154], units=768);
%773 = add(%772, meta[relay.Constant][155]);
%774 = reshape(%773, newshape=[50, 32, 768]);
%775 = add(%749, %774);
%776 = mean(%775, axis=[-1], keepdims=True);
%777 = subtract(%775, %776);
%778 = power(%777, 2f16);
%779 = mean(%778, axis=[-1], keepdims=True);
%780 = add(%779, 1e-05f16);
%781 = sqrt(%780);
%782 = divide(%777, %781);
%783 = multiply(%782, meta[relay.Constant][156]);
%784 = add(%783, meta[relay.Constant][157]);
%785 = reshape(%784, newshape=[-1, 768]);
%786 = nn.dense(%785, meta[relay.Constant][158], units=2304);
%787 = add(%786, meta[relay.Constant][159]);
%788 = reshape(%787, newshape=[50, 32, 2304]);
%789 = split(%788, indices_or_sections=[768, 1536], axis=2);
%790 = %789.0;
%791 = reshape(%790, newshape=[50, 32, 12, 64]);
%792 = transpose(%791, axes=[0, 2, 1, 3]);
%793 = %789.1;
%794 = reshape(%793, newshape=[50, 32, 12, 64]);
%795 = transpose(%794, axes=[0, 2, 3, 1]);
%796 = reshape(%795, newshape=[-1, 64, 32]);
%797 = reshape(%792, newshape=[-1, 32, 64]);
%798 = transpose(%796, axes=[0, 2, 1]);
%799 = nn.batch_matmul(%797, %798, out_dtype="float16", transpose_b=True);
%800 = reshape(%799, newshape=[50, 12, 32, 32]);
%801 = divide(%800, 8f16);
%802 = multiply(%801, meta[relay.Constant][160]);
%803 = subtract(%802, meta[relay.Constant][161]);
%804 = nn.softmax(%803, axis=3);
%805 = %789.2;
%806 = reshape(%805, newshape=[50, 32, 12, 64]);
%807 = transpose(%806, axes=[0, 2, 1, 3]);
%808 = reshape(%807, newshape=[-1, 32, 64]);
%809 = reshape(%804, newshape=[-1, 32, 32]);
%810 = transpose(%808, axes=[0, 2, 1]);
%811 = nn.batch_matmul(%809, %810, out_dtype="float16", transpose_b=True);
%812 = reshape(%811, newshape=[50, 12, 32, 64]);
%813 = transpose(%812, axes=[0, 2, 1, 3]);
%814 = reshape(%813, newshape=[50, 32, 768]);
%815 = reshape(%814, newshape=[-1, 768]);
%816 = nn.dense(%815, meta[relay.Constant][162], units=768);
%817 = add(%816, meta[relay.Constant][163]);
%818 = reshape(%817, newshape=[50, 32, 768]);
%819 = add(%775, %818);
%820 = mean(%819, axis=[-1], keepdims=True);
%821 = subtract(%819, %820);
%822 = power(%821, 2f16);
%823 = mean(%822, axis=[-1], keepdims=True);
%824 = add(%823, 1e-05f16);
%825 = sqrt(%824);
%826 = divide(%821, %825);
%827 = multiply(%826, meta[relay.Constant][164]);
%828 = add(%827, meta[relay.Constant][165]);
%829 = reshape(%828, newshape=[-1, 768]);
%830 = nn.dense(%829, meta[relay.Constant][166], units=3072);
%831 = add(%830, meta[relay.Constant][167]);
%832 = reshape(%831, newshape=[50, 32, 3072]);
%833 = power(%832, 3f16);
%834 = multiply(%833, 0.044715f16);
%835 = add(%832, %834);
%836 = multiply(%835, 0.797885f16);
%837 = tanh(%836);
%838 = multiply(%832, 0.5f16);
%839 = add(%837, 1f16);
%840 = multiply(%838, %839);
%841 = reshape(%840, newshape=[-1, 3072]);
%842 = nn.dense(%841, meta[relay.Constant][168], units=768);
%843 = add(%842, meta[relay.Constant][169]);
%844 = reshape(%843, newshape=[50, 32, 768]);
%845 = add(%819, %844);
%846 = mean(%845, axis=[-1], keepdims=True);
%847 = subtract(%845, %846);
%848 = power(%847, 2f16);
%849 = mean(%848, axis=[-1], keepdims=True);
%850 = add(%849, 1e-05f16);
%851 = sqrt(%850);
%852 = divide(%847, %851);
%853 = multiply(%852, meta[relay.Constant][170]);
%854 = add(%853, meta[relay.Constant][171]);
%855 = transpose(%24, axes=[0, 2, 1, 3]);
%856 = expand_dims(%855, axis=0);
%857 = expand_dims(%37, axis=0);
%858 = (%856, %857);
%859 = transpose(%94, axes=[0, 2, 1, 3]);
%860 = expand_dims(%859, axis=0);
%861 = expand_dims(%107, axis=0);
%862 = (%860, %861);
%863 = transpose(%164, axes=[0, 2, 1, 3]);
%864 = expand_dims(%863, axis=0);
%865 = expand_dims(%177, axis=0);
%866 = (%864, %865);
%867 = transpose(%234, axes=[0, 2, 1, 3]);
%868 = expand_dims(%867, axis=0);
%869 = expand_dims(%247, axis=0);
%870 = (%868, %869);
%871 = transpose(%304, axes=[0, 2, 1, 3]);
%872 = expand_dims(%871, axis=0);
%873 = expand_dims(%317, axis=0);
%874 = (%872, %873);
%875 = transpose(%374, axes=[0, 2, 1, 3]);
%876 = expand_dims(%875, axis=0);
%877 = expand_dims(%387, axis=0);
%878 = (%876, %877);
%879 = transpose(%444, axes=[0, 2, 1, 3]);
%880 = expand_dims(%879, axis=0);
%881 = expand_dims(%457, axis=0);
%882 = (%880, %881);
%883 = transpose(%514, axes=[0, 2, 1, 3]);
%884 = expand_dims(%883, axis=0);
%885 = expand_dims(%527, axis=0);
%886 = (%884, %885);
%887 = transpose(%584, axes=[0, 2, 1, 3]);
%888 = expand_dims(%887, axis=0);
%889 = expand_dims(%597, axis=0);
%890 = (%888, %889);
%891 = transpose(%654, axes=[0, 2, 1, 3]);
%892 = expand_dims(%891, axis=0);
%893 = expand_dims(%667, axis=0);
%894 = (%892, %893);
%895 = transpose(%724, axes=[0, 2, 1, 3]);
%896 = expand_dims(%895, axis=0);
%897 = expand_dims(%737, axis=0);
%898 = (%896, %897);
%899 = transpose(%794, axes=[0, 2, 1, 3]);
%900 = expand_dims(%899, axis=0);
%901 = expand_dims(%807, axis=0);
%902 = (%900, %901);
%903 = reshape(%854, newshape=[1, 50, 32, 768]);
%904 = concatenate(%858);
%905 = concatenate(%862);
%906 = concatenate(%866);
%907 = concatenate(%870);
%908 = concatenate(%874);
%909 = concatenate(%878);
%910 = concatenate(%882);
%911 = concatenate(%886);
%912 = concatenate(%890);
%913 = concatenate(%894);
%914 = concatenate(%898);
%915 = concatenate(%902);
(%903, %904, %905, %906, %907, %908, %909, %910, %911, %912, %913, %914, %915)
}
""",
"from_string",
None,
metatable,
)
return {
"name": "gpt2_16",
"input_shapes": {"x": [1, 50, 32]},
"input_dtypes": {"x": "int64"},
"mod": mod,
"params": None,
"main_dtype": "float16",
}
def gpt2_extract_consts(dtype):
    """Build the constant tensors used by the gpt2_extract workloads.

    The shapes below line up with the meta[relay.Constant][i] references in
    gpt2_extract/gpt2_extract_16: dense weights/biases, the scale/shift
    applied after normalization, and one (1, 32, 768) additive input.
    """
    shapes = [
        (768, 768),  # 0: dense weight
        (768,),  # 1: dense bias
        (768,),  # 2: post-norm scale
        (768,),  # 3: post-norm shift
        (3072, 768),  # 4: dense weight
        (3072,),  # 5: dense bias
        (1, 32, 768),  # 6: additive input term
    ]
    return make_consts(dtype, shapes)
def gpt2_extract():
    """Return the test config for a float32 sub-graph extracted from GPT2.

    The workload is dense -> bias-add -> normalization -> dense followed by a
    tanh-based GELU-style activation (the 0.044715 / 0.797885 constants).
    Constants come from gpt2_extract_consts("float32").
    """
    metatable = {"relay.Constant": gpt2_extract_consts("float32")}
    mod = tvm.relay.parse(
        """
        #[version = "0.0.5"]
        def @main(%x: Tensor[(1600, 768), float32]) -> Tensor[(50, 32, 3072), float32] {
          %46 = nn.dense(%x, meta[relay.Constant][0], units=768);
          %47 = add(%46, meta[relay.Constant][1]);
          %48 = reshape(%47, newshape=[50, 32, 768]);
          %49 = add(meta[relay.Constant][6], %48);
          %50 = mean(%49, axis=[-1], keepdims=True);
          %51 = subtract(%49, %50);
          %52 = power(%51, 2f);
          %53 = mean(%52, axis=[-1], keepdims=True);
          %54 = add(%53, 1e-05f);
          %55 = sqrt(%54);
          %56 = divide(%51, %55);
          %57 = multiply(%56, meta[relay.Constant][2]);
          %58 = add(%57, meta[relay.Constant][3]);
          %59 = reshape(%58, newshape=[-1, 768]);
          %60 = nn.dense(%59, meta[relay.Constant][4], units=3072);
          %61 = add(%60, meta[relay.Constant][5]);
          %62 = reshape(%61, newshape=[50, 32, 3072]);
          %63 = power(%62, 3f);
          %64 = multiply(%63, 0.044715f);
          %65 = add(%62, %64);
          %66 = multiply(%65, 0.797885f);
          %67 = tanh(%66);
          %68 = multiply(%62, 0.5f);
          %69 = add(%67, 1f);
          %70 = multiply(%68, %69);
          %70
        }
        """,
        "from_string",
        None,
        metatable,
    )
    return {
        # FIX: the "name" key was missing here while every sibling builder
        # (gpt2_extract_16, resnet50, ...) provides one; add it for
        # consistency with the rest of the model menagerie.
        "name": "gpt2_extract",
        "input_shapes": {"x": [1600, 768]},
        "input_dtypes": {"x": "float32"},
        "mod": mod,
        "params": None,
        "main_dtype": "float32",
    }
def gpt2_extract_16():
    """Return the test config for the GPT2 extract sub-graph in float16.

    Same dense -> bias-add -> normalization -> dense -> tanh-based activation
    extract as gpt2_extract, but with all tensors and literals in float16.
    Constants come from gpt2_extract_consts("float16").
    """
    metatable = {"relay.Constant": gpt2_extract_consts("float16")}
    mod = tvm.relay.parse(
        """
        #[version = "0.0.5"]
        def @main(%x: Tensor[(1600, 768), float16]) -> Tensor[(50, 32, 3072), float16] {
          %46 = nn.dense(%x, meta[relay.Constant][0], units=768);
          %47 = add(%46, meta[relay.Constant][1]);
          %48 = reshape(%47, newshape=[50, 32, 768]);
          %49 = add(meta[relay.Constant][6], %48);
          %50 = mean(%49, axis=[-1], keepdims=True);
          %51 = subtract(%49, %50);
          %52 = power(%51, 2f16);
          %53 = mean(%52, axis=[-1], keepdims=True);
          %54 = add(%53, 1e-05f16);
          %55 = sqrt(%54);
          %56 = divide(%51, %55);
          %57 = multiply(%56, meta[relay.Constant][2]);
          %58 = add(%57, meta[relay.Constant][3]);
          %59 = reshape(%58, newshape=[-1, 768]);
          %60 = nn.dense(%59, meta[relay.Constant][4], units=3072);
          %61 = add(%60, meta[relay.Constant][5]);
          %62 = reshape(%61, newshape=[50, 32, 3072]);
          %63 = power(%62, 3f16);
          %64 = multiply(%63, 0.044715f16);
          %65 = add(%62, %64);
          %66 = multiply(%65, 0.797885f16);
          %67 = tanh(%66);
          %68 = multiply(%62, 0.5f16);
          %69 = add(%67, 1f16);
          %70 = multiply(%68, %69);
          %70
        }
        """,
        "from_string",
        None,
        metatable,
    )
    return {
        "name": "gpt2_extract_16",
        "input_shapes": {"x": [1600, 768]},
        "input_dtypes": {"x": "float16"},
        "mod": mod,
        "params": None,
        "main_dtype": "float16",
    }
def gpt2_16_for_cutlass_extract_consts(dtype):
    """Build the constant tensors for gpt2_16_for_cutlass_extract.

    Shapes line up with the meta[relay.Constant][i] references in that
    workload: a dense weight/bias pair and two batch_matmul/concatenate
    operands.
    """
    # FIX: previously this hard-coded "float16" and silently ignored its
    # dtype argument, unlike every other *_consts helper. The only visible
    # caller passes "float16", so honoring the argument preserves behavior.
    return make_consts(
        dtype,
        [
            (2304, 768),  # 0
            (2304,),  # 1
            (600, 32, 64),  # 2
            (600, 32, 32),  # 3
        ],
    )
def gpt2_16_for_cutlass_extract():
    """Return the test config for a small float16 extract of GPT2.

    Contains a dense + bias-add pair and a batch_matmul + concatenate pair
    — presumably the operator combinations of interest for CUTLASS
    offloading, per the name (TODO confirm against the consumer of these
    configs).
    """
    metatable = {"relay.Constant": gpt2_16_for_cutlass_extract_consts("float16")}
    mod = tvm.relay.parse(
        """
        #[version = "0.0.5"]
        def @main(%x0: Tensor[(1600, 768), float16],
                  %x3: Tensor[(600, 32, 64), float16])
            -> (Tensor[(1600, 2304), float16], Tensor[(1200, 32, 32), float16]) {
          %0 = nn.dense(%x0, meta[relay.Constant][0], units=2304);
          %1 = add(%0, meta[relay.Constant][1]);
          %2 = nn.batch_matmul(%x3, meta[relay.Constant][2], out_dtype="float16", transpose_b=True);
          %3 = (%2, meta[relay.Constant][3]);
          %4 = concatenate(%3);
          (%1, %4)
        }
        """,
        "from_string",
        None,
        metatable,
    )
    return {
        "name": "gpt2_16_for_cutlass_extract",
        # FIX: use lists for the shapes, matching the "input_shapes"
        # convention of every other workload builder in this file (the
        # originals here were the only tuples).
        "input_shapes": {"x0": [1600, 768], "x3": [600, 32, 64]},
        "input_dtypes": {"x0": "float16", "x3": "float16"},
        "mod": mod,
        "params": None,
        "main_dtype": "float16",
    }
def resnet50_consts(dtype):
    """Build the constant tensors used by the resnet50 workloads.

    The 259 shapes below line up, by index, with the
    meta[relay.Constant][i] references in resnet50()/resnet50_16():
    batch_norm parameter vectors, conv2d kernels, and the final dense
    weight (1000, 2048) and bias (1000,).
    """
    return make_consts(
        dtype,
        [
            (3,),  # 0
            (3,),  # 1
            (3,),  # 2
            (3,),  # 3
            (64, 3, 7, 7),  # 4
            (64,),  # 5
            (64,),  # 6
            (64,),  # 7
            (64,),  # 8
            (64,),  # 9
            (64,),  # 10
            (64,),  # 11
            (64,),  # 12
            (64, 64, 1, 1),  # 13
            (64,),  # 14
            (64,),  # 15
            (64,),  # 16
            (64,),  # 17
            (64, 64, 3, 3),  # 18
            (64,),  # 19
            (64,),  # 20
            (64,),  # 21
            (64,),  # 22
            (256, 64, 1, 1),  # 23
            (256, 64, 1, 1),  # 24
            (256,),  # 25
            (256,),  # 26
            (256,),  # 27
            (256,),  # 28
            (64, 256, 1, 1),  # 29
            (64,),  # 30
            (64,),  # 31
            (64,),  # 32
            (64,),  # 33
            (64, 64, 3, 3),  # 34
            (64,),  # 35
            (64,),  # 36
            (64,),  # 37
            (64,),  # 38
            (256, 64, 1, 1),  # 39
            (256,),  # 40
            (256,),  # 41
            (256,),  # 42
            (256,),  # 43
            (64, 256, 1, 1),  # 44
            (64,),  # 45
            (64,),  # 46
            (64,),  # 47
            (64,),  # 48
            (64, 64, 3, 3),  # 49
            (64,),  # 50
            (64,),  # 51
            (64,),  # 52
            (64,),  # 53
            (256, 64, 1, 1),  # 54
            (256,),  # 55
            (256,),  # 56
            (256,),  # 57
            (256,),  # 58
            (128, 256, 1, 1),  # 59
            (128,),  # 60
            (128,),  # 61
            (128,),  # 62
            (128,),  # 63
            (128, 128, 3, 3),  # 64
            (128,),  # 65
            (128,),  # 66
            (128,),  # 67
            (128,),  # 68
            (512, 128, 1, 1),  # 69
            (512, 256, 1, 1),  # 70
            (512,),  # 71
            (512,),  # 72
            (512,),  # 73
            (512,),  # 74
            (128, 512, 1, 1),  # 75
            (128,),  # 76
            (128,),  # 77
            (128,),  # 78
            (128,),  # 79
            (128, 128, 3, 3),  # 80
            (128,),  # 81
            (128,),  # 82
            (128,),  # 83
            (128,),  # 84
            (512, 128, 1, 1),  # 85
            (512,),  # 86
            (512,),  # 87
            (512,),  # 88
            (512,),  # 89
            (128, 512, 1, 1),  # 90
            (128,),  # 91
            (128,),  # 92
            (128,),  # 93
            (128,),  # 94
            (128, 128, 3, 3),  # 95
            (128,),  # 96
            (128,),  # 97
            (128,),  # 98
            (128,),  # 99
            (512, 128, 1, 1),  # 100
            (512,),  # 101
            (512,),  # 102
            (512,),  # 103
            (512,),  # 104
            (128, 512, 1, 1),  # 105
            (128,),  # 106
            (128,),  # 107
            (128,),  # 108
            (128,),  # 109
            (128, 128, 3, 3),  # 110
            (128,),  # 111
            (128,),  # 112
            (128,),  # 113
            (128,),  # 114
            (512, 128, 1, 1),  # 115
            (512,),  # 116
            (512,),  # 117
            (512,),  # 118
            (512,),  # 119
            (256, 512, 1, 1),  # 120
            (256,),  # 121
            (256,),  # 122
            (256,),  # 123
            (256,),  # 124
            (256, 256, 3, 3),  # 125
            (256,),  # 126
            (256,),  # 127
            (256,),  # 128
            (256,),  # 129
            (1024, 256, 1, 1),  # 130
            (1024, 512, 1, 1),  # 131
            (1024,),  # 132
            (1024,),  # 133
            (1024,),  # 134
            (1024,),  # 135
            (256, 1024, 1, 1),  # 136
            (256,),  # 137
            (256,),  # 138
            (256,),  # 139
            (256,),  # 140
            (256, 256, 3, 3),  # 141
            (256,),  # 142
            (256,),  # 143
            (256,),  # 144
            (256,),  # 145
            (1024, 256, 1, 1),  # 146
            (1024,),  # 147
            (1024,),  # 148
            (1024,),  # 149
            (1024,),  # 150
            (256, 1024, 1, 1),  # 151
            (256,),  # 152
            (256,),  # 153
            (256,),  # 154
            (256,),  # 155
            (256, 256, 3, 3),  # 156
            (256,),  # 157
            (256,),  # 158
            (256,),  # 159
            (256,),  # 160
            (1024, 256, 1, 1),  # 161
            (1024,),  # 162
            (1024,),  # 163
            (1024,),  # 164
            (1024,),  # 165
            (256, 1024, 1, 1),  # 166
            (256,),  # 167
            (256,),  # 168
            (256,),  # 169
            (256,),  # 170
            (256, 256, 3, 3),  # 171
            (256,),  # 172
            (256,),  # 173
            (256,),  # 174
            (256,),  # 175
            (1024, 256, 1, 1),  # 176
            (1024,),  # 177
            (1024,),  # 178
            (1024,),  # 179
            (1024,),  # 180
            (256, 1024, 1, 1),  # 181
            (256,),  # 182
            (256,),  # 183
            (256,),  # 184
            (256,),  # 185
            (256, 256, 3, 3),  # 186
            (256,),  # 187
            (256,),  # 188
            (256,),  # 189
            (256,),  # 190
            (1024, 256, 1, 1),  # 191
            (1024,),  # 192
            (1024,),  # 193
            (1024,),  # 194
            (1024,),  # 195
            (256, 1024, 1, 1),  # 196
            (256,),  # 197
            (256,),  # 198
            (256,),  # 199
            (256,),  # 200
            (256, 256, 3, 3),  # 201
            (256,),  # 202
            (256,),  # 203
            (256,),  # 204
            (256,),  # 205
            (1024, 256, 1, 1),  # 206
            (1024,),  # 207
            (1024,),  # 208
            (1024,),  # 209
            (1024,),  # 210
            (512, 1024, 1, 1),  # 211
            (512,),  # 212
            (512,),  # 213
            (512,),  # 214
            (512,),  # 215
            (512, 512, 3, 3),  # 216
            (512,),  # 217
            (512,),  # 218
            (512,),  # 219
            (512,),  # 220
            (2048, 512, 1, 1),  # 221
            (2048, 1024, 1, 1),  # 222
            (2048,),  # 223
            (2048,),  # 224
            (2048,),  # 225
            (2048,),  # 226
            (512, 2048, 1, 1),  # 227
            (512,),  # 228
            (512,),  # 229
            (512,),  # 230
            (512,),  # 231
            (512, 512, 3, 3),  # 232
            (512,),  # 233
            (512,),  # 234
            (512,),  # 235
            (512,),  # 236
            (2048, 512, 1, 1),  # 237
            (2048,),  # 238
            (2048,),  # 239
            (2048,),  # 240
            (2048,),  # 241
            (512, 2048, 1, 1),  # 242
            (512,),  # 243
            (512,),  # 244
            (512,),  # 245
            (512,),  # 246
            (512, 512, 3, 3),  # 247
            (512,),  # 248
            (512,),  # 249
            (512,),  # 250
            (512,),  # 251
            (2048, 512, 1, 1),  # 252
            (2048,),  # 253
            (2048,),  # 254
            (2048,),  # 255
            (2048,),  # 256
            (1000, 2048),  # 257
            (1000,),  # 258
        ],
    )
def resnet50():
    """Return the test config for a ResNet50-shaped model in float32.

    The Relay text below is a bottleneck-block network: repeated
    conv2d/batch_norm/relu triples with additive skip connections, strided
    1x1 convolutions on the downsampling paths, and a final
    global_avg_pool2d -> reshape -> dense -> bias-add head producing
    (1, 1000) logits. All weights/parameters are random constants from
    resnet50_consts("float32").
    """
    metatable = {"relay.Constant": resnet50_consts("float32")}
    mod = tvm.relay.parse(
        """
        #[version = "0.0.5"]
        def @main(%data: Tensor[(1, 3, 224, 224), float32]) -> Tensor[(1, 1000), float32] {
          %0 = nn.batch_norm(%data, meta[relay.Constant][0], meta[relay.Constant][1], meta[relay.Constant][2], meta[relay.Constant][3]);
          %1 = %0.0;
          %2 = nn.conv2d(%1, meta[relay.Constant][4], strides=[2, 2], padding=[3, 3, 3, 3], channels=64, kernel_size=[7, 7]);
          %3 = nn.batch_norm(%2, meta[relay.Constant][5], meta[relay.Constant][6], meta[relay.Constant][7], meta[relay.Constant][8]);
          %4 = %3.0;
          %5 = nn.relu(%4);
          %6 = nn.max_pool2d(%5, pool_size=[3, 3], strides=[2, 2], padding=[1, 1, 1, 1]);
          %7 = nn.batch_norm(%6, meta[relay.Constant][9], meta[relay.Constant][10], meta[relay.Constant][11], meta[relay.Constant][12]);
          %8 = %7.0;
          %9 = nn.relu(%8);
          %10 = nn.conv2d(%9, meta[relay.Constant][13], padding=[0, 0, 0, 0], channels=64, kernel_size=[1, 1]);
          %11 = nn.batch_norm(%10, meta[relay.Constant][14], meta[relay.Constant][15], meta[relay.Constant][16], meta[relay.Constant][17]);
          %12 = %11.0;
          %13 = nn.relu(%12);
          %14 = nn.conv2d(%13, meta[relay.Constant][18], padding=[1, 1, 1, 1], channels=64, kernel_size=[3, 3]);
          %15 = nn.batch_norm(%14, meta[relay.Constant][19], meta[relay.Constant][20], meta[relay.Constant][21], meta[relay.Constant][22]);
          %16 = %15.0;
          %17 = nn.relu(%16);
          %18 = nn.conv2d(%17, meta[relay.Constant][23], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
          %19 = nn.conv2d(%9, meta[relay.Constant][24], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
          %20 = add(%18, %19);
          %21 = nn.batch_norm(%20, meta[relay.Constant][25], meta[relay.Constant][26], meta[relay.Constant][27], meta[relay.Constant][28]);
          %22 = %21.0;
          %23 = nn.relu(%22);
          %24 = nn.conv2d(%23, meta[relay.Constant][29], padding=[0, 0, 0, 0], channels=64, kernel_size=[1, 1]);
          %25 = nn.batch_norm(%24, meta[relay.Constant][30], meta[relay.Constant][31], meta[relay.Constant][32], meta[relay.Constant][33]);
          %26 = %25.0;
          %27 = nn.relu(%26);
          %28 = nn.conv2d(%27, meta[relay.Constant][34], padding=[1, 1, 1, 1], channels=64, kernel_size=[3, 3]);
          %29 = nn.batch_norm(%28, meta[relay.Constant][35], meta[relay.Constant][36], meta[relay.Constant][37], meta[relay.Constant][38]);
          %30 = %29.0;
          %31 = nn.relu(%30);
          %32 = nn.conv2d(%31, meta[relay.Constant][39], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
          %33 = add(%32, %20);
          %34 = nn.batch_norm(%33, meta[relay.Constant][40], meta[relay.Constant][41], meta[relay.Constant][42], meta[relay.Constant][43]);
          %35 = %34.0;
          %36 = nn.relu(%35);
          %37 = nn.conv2d(%36, meta[relay.Constant][44], padding=[0, 0, 0, 0], channels=64, kernel_size=[1, 1]);
          %38 = nn.batch_norm(%37, meta[relay.Constant][45], meta[relay.Constant][46], meta[relay.Constant][47], meta[relay.Constant][48]);
          %39 = %38.0;
          %40 = nn.relu(%39);
          %41 = nn.conv2d(%40, meta[relay.Constant][49], padding=[1, 1, 1, 1], channels=64, kernel_size=[3, 3]);
          %42 = nn.batch_norm(%41, meta[relay.Constant][50], meta[relay.Constant][51], meta[relay.Constant][52], meta[relay.Constant][53]);
          %43 = %42.0;
          %44 = nn.relu(%43);
          %45 = nn.conv2d(%44, meta[relay.Constant][54], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
          %46 = add(%45, %33);
          %47 = nn.batch_norm(%46, meta[relay.Constant][55], meta[relay.Constant][56], meta[relay.Constant][57], meta[relay.Constant][58]);
          %48 = %47.0;
          %49 = nn.relu(%48);
          %50 = nn.conv2d(%49, meta[relay.Constant][59], padding=[0, 0, 0, 0], channels=128, kernel_size=[1, 1]);
          %51 = nn.batch_norm(%50, meta[relay.Constant][60], meta[relay.Constant][61], meta[relay.Constant][62], meta[relay.Constant][63]);
          %52 = %51.0;
          %53 = nn.relu(%52);
          %54 = nn.conv2d(%53, meta[relay.Constant][64], strides=[2, 2], padding=[1, 1, 1, 1], channels=128, kernel_size=[3, 3]);
          %55 = nn.batch_norm(%54, meta[relay.Constant][65], meta[relay.Constant][66], meta[relay.Constant][67], meta[relay.Constant][68]);
          %56 = %55.0;
          %57 = nn.relu(%56);
          %58 = nn.conv2d(%57, meta[relay.Constant][69], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
          %59 = nn.conv2d(%49, meta[relay.Constant][70], strides=[2, 2], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
          %60 = add(%58, %59);
          %61 = nn.batch_norm(%60, meta[relay.Constant][71], meta[relay.Constant][72], meta[relay.Constant][73], meta[relay.Constant][74]);
          %62 = %61.0;
          %63 = nn.relu(%62);
          %64 = nn.conv2d(%63, meta[relay.Constant][75], padding=[0, 0, 0, 0], channels=128, kernel_size=[1, 1]);
          %65 = nn.batch_norm(%64, meta[relay.Constant][76], meta[relay.Constant][77], meta[relay.Constant][78], meta[relay.Constant][79]);
          %66 = %65.0;
          %67 = nn.relu(%66);
          %68 = nn.conv2d(%67, meta[relay.Constant][80], padding=[1, 1, 1, 1], channels=128, kernel_size=[3, 3]);
          %69 = nn.batch_norm(%68, meta[relay.Constant][81], meta[relay.Constant][82], meta[relay.Constant][83], meta[relay.Constant][84]);
          %70 = %69.0;
          %71 = nn.relu(%70);
          %72 = nn.conv2d(%71, meta[relay.Constant][85], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
          %73 = add(%72, %60);
          %74 = nn.batch_norm(%73, meta[relay.Constant][86], meta[relay.Constant][87], meta[relay.Constant][88], meta[relay.Constant][89]);
          %75 = %74.0;
          %76 = nn.relu(%75);
          %77 = nn.conv2d(%76, meta[relay.Constant][90], padding=[0, 0, 0, 0], channels=128, kernel_size=[1, 1]);
          %78 = nn.batch_norm(%77, meta[relay.Constant][91], meta[relay.Constant][92], meta[relay.Constant][93], meta[relay.Constant][94]);
          %79 = %78.0;
          %80 = nn.relu(%79);
          %81 = nn.conv2d(%80, meta[relay.Constant][95], padding=[1, 1, 1, 1], channels=128, kernel_size=[3, 3]);
          %82 = nn.batch_norm(%81, meta[relay.Constant][96], meta[relay.Constant][97], meta[relay.Constant][98], meta[relay.Constant][99]);
          %83 = %82.0;
          %84 = nn.relu(%83);
          %85 = nn.conv2d(%84, meta[relay.Constant][100], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
          %86 = add(%85, %73);
          %87 = nn.batch_norm(%86, meta[relay.Constant][101], meta[relay.Constant][102], meta[relay.Constant][103], meta[relay.Constant][104]);
          %88 = %87.0;
          %89 = nn.relu(%88);
          %90 = nn.conv2d(%89, meta[relay.Constant][105], padding=[0, 0, 0, 0], channels=128, kernel_size=[1, 1]);
          %91 = nn.batch_norm(%90, meta[relay.Constant][106], meta[relay.Constant][107], meta[relay.Constant][108], meta[relay.Constant][109]);
          %92 = %91.0;
          %93 = nn.relu(%92);
          %94 = nn.conv2d(%93, meta[relay.Constant][110], padding=[1, 1, 1, 1], channels=128, kernel_size=[3, 3]);
          %95 = nn.batch_norm(%94, meta[relay.Constant][111], meta[relay.Constant][112], meta[relay.Constant][113], meta[relay.Constant][114]);
          %96 = %95.0;
          %97 = nn.relu(%96);
          %98 = nn.conv2d(%97, meta[relay.Constant][115], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
          %99 = add(%98, %86);
          %100 = nn.batch_norm(%99, meta[relay.Constant][116], meta[relay.Constant][117], meta[relay.Constant][118], meta[relay.Constant][119]);
          %101 = %100.0;
          %102 = nn.relu(%101);
          %103 = nn.conv2d(%102, meta[relay.Constant][120], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
          %104 = nn.batch_norm(%103, meta[relay.Constant][121], meta[relay.Constant][122], meta[relay.Constant][123], meta[relay.Constant][124]);
          %105 = %104.0;
          %106 = nn.relu(%105);
          %107 = nn.conv2d(%106, meta[relay.Constant][125], strides=[2, 2], padding=[1, 1, 1, 1], channels=256, kernel_size=[3, 3]);
          %108 = nn.batch_norm(%107, meta[relay.Constant][126], meta[relay.Constant][127], meta[relay.Constant][128], meta[relay.Constant][129]);
          %109 = %108.0;
          %110 = nn.relu(%109);
          %111 = nn.conv2d(%110, meta[relay.Constant][130], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
          %112 = nn.conv2d(%102, meta[relay.Constant][131], strides=[2, 2], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
          %113 = add(%111, %112);
          %114 = nn.batch_norm(%113, meta[relay.Constant][132], meta[relay.Constant][133], meta[relay.Constant][134], meta[relay.Constant][135]);
          %115 = %114.0;
          %116 = nn.relu(%115);
          %117 = nn.conv2d(%116, meta[relay.Constant][136], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
          %118 = nn.batch_norm(%117, meta[relay.Constant][137], meta[relay.Constant][138], meta[relay.Constant][139], meta[relay.Constant][140]);
          %119 = %118.0;
          %120 = nn.relu(%119);
          %121 = nn.conv2d(%120, meta[relay.Constant][141], padding=[1, 1, 1, 1], channels=256, kernel_size=[3, 3]);
          %122 = nn.batch_norm(%121, meta[relay.Constant][142], meta[relay.Constant][143], meta[relay.Constant][144], meta[relay.Constant][145]);
          %123 = %122.0;
          %124 = nn.relu(%123);
          %125 = nn.conv2d(%124, meta[relay.Constant][146], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
          %126 = add(%125, %113);
          %127 = nn.batch_norm(%126, meta[relay.Constant][147], meta[relay.Constant][148], meta[relay.Constant][149], meta[relay.Constant][150]);
          %128 = %127.0;
          %129 = nn.relu(%128);
          %130 = nn.conv2d(%129, meta[relay.Constant][151], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
          %131 = nn.batch_norm(%130, meta[relay.Constant][152], meta[relay.Constant][153], meta[relay.Constant][154], meta[relay.Constant][155]);
          %132 = %131.0;
          %133 = nn.relu(%132);
          %134 = nn.conv2d(%133, meta[relay.Constant][156], padding=[1, 1, 1, 1], channels=256, kernel_size=[3, 3]);
          %135 = nn.batch_norm(%134, meta[relay.Constant][157], meta[relay.Constant][158], meta[relay.Constant][159], meta[relay.Constant][160]);
          %136 = %135.0;
          %137 = nn.relu(%136);
          %138 = nn.conv2d(%137, meta[relay.Constant][161], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
          %139 = add(%138, %126);
          %140 = nn.batch_norm(%139, meta[relay.Constant][162], meta[relay.Constant][163], meta[relay.Constant][164], meta[relay.Constant][165]);
          %141 = %140.0;
          %142 = nn.relu(%141);
          %143 = nn.conv2d(%142, meta[relay.Constant][166], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
          %144 = nn.batch_norm(%143, meta[relay.Constant][167], meta[relay.Constant][168], meta[relay.Constant][169], meta[relay.Constant][170]);
          %145 = %144.0;
          %146 = nn.relu(%145);
          %147 = nn.conv2d(%146, meta[relay.Constant][171], padding=[1, 1, 1, 1], channels=256, kernel_size=[3, 3]);
          %148 = nn.batch_norm(%147, meta[relay.Constant][172], meta[relay.Constant][173], meta[relay.Constant][174], meta[relay.Constant][175]);
          %149 = %148.0;
          %150 = nn.relu(%149);
          %151 = nn.conv2d(%150, meta[relay.Constant][176], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
          %152 = add(%151, %139);
          %153 = nn.batch_norm(%152, meta[relay.Constant][177], meta[relay.Constant][178], meta[relay.Constant][179], meta[relay.Constant][180]);
          %154 = %153.0;
          %155 = nn.relu(%154);
          %156 = nn.conv2d(%155, meta[relay.Constant][181], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
          %157 = nn.batch_norm(%156, meta[relay.Constant][182], meta[relay.Constant][183], meta[relay.Constant][184], meta[relay.Constant][185]);
          %158 = %157.0;
          %159 = nn.relu(%158);
          %160 = nn.conv2d(%159, meta[relay.Constant][186], padding=[1, 1, 1, 1], channels=256, kernel_size=[3, 3]);
          %161 = nn.batch_norm(%160, meta[relay.Constant][187], meta[relay.Constant][188], meta[relay.Constant][189], meta[relay.Constant][190]);
          %162 = %161.0;
          %163 = nn.relu(%162);
          %164 = nn.conv2d(%163, meta[relay.Constant][191], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
          %165 = add(%164, %152);
          %166 = nn.batch_norm(%165, meta[relay.Constant][192], meta[relay.Constant][193], meta[relay.Constant][194], meta[relay.Constant][195]);
          %167 = %166.0;
          %168 = nn.relu(%167);
          %169 = nn.conv2d(%168, meta[relay.Constant][196], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
          %170 = nn.batch_norm(%169, meta[relay.Constant][197], meta[relay.Constant][198], meta[relay.Constant][199], meta[relay.Constant][200]);
          %171 = %170.0;
          %172 = nn.relu(%171);
          %173 = nn.conv2d(%172, meta[relay.Constant][201], padding=[1, 1, 1, 1], channels=256, kernel_size=[3, 3]);
          %174 = nn.batch_norm(%173, meta[relay.Constant][202], meta[relay.Constant][203], meta[relay.Constant][204], meta[relay.Constant][205]);
          %175 = %174.0;
          %176 = nn.relu(%175);
          %177 = nn.conv2d(%176, meta[relay.Constant][206], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
          %178 = add(%177, %165);
          %179 = nn.batch_norm(%178, meta[relay.Constant][207], meta[relay.Constant][208], meta[relay.Constant][209], meta[relay.Constant][210]);
          %180 = %179.0;
          %181 = nn.relu(%180);
          %182 = nn.conv2d(%181, meta[relay.Constant][211], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
          %183 = nn.batch_norm(%182, meta[relay.Constant][212], meta[relay.Constant][213], meta[relay.Constant][214], meta[relay.Constant][215]);
          %184 = %183.0;
          %185 = nn.relu(%184);
          %186 = nn.conv2d(%185, meta[relay.Constant][216], strides=[2, 2], padding=[1, 1, 1, 1], channels=512, kernel_size=[3, 3]);
          %187 = nn.batch_norm(%186, meta[relay.Constant][217], meta[relay.Constant][218], meta[relay.Constant][219], meta[relay.Constant][220]);
          %188 = %187.0;
          %189 = nn.relu(%188);
          %190 = nn.conv2d(%189, meta[relay.Constant][221], padding=[0, 0, 0, 0], channels=2048, kernel_size=[1, 1]);
          %191 = nn.conv2d(%181, meta[relay.Constant][222], strides=[2, 2], padding=[0, 0, 0, 0], channels=2048, kernel_size=[1, 1]);
          %192 = add(%190, %191);
          %193 = nn.batch_norm(%192, meta[relay.Constant][223], meta[relay.Constant][224], meta[relay.Constant][225], meta[relay.Constant][226]);
          %194 = %193.0;
          %195 = nn.relu(%194);
          %196 = nn.conv2d(%195, meta[relay.Constant][227], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
          %197 = nn.batch_norm(%196, meta[relay.Constant][228], meta[relay.Constant][229], meta[relay.Constant][230], meta[relay.Constant][231]);
          %198 = %197.0;
          %199 = nn.relu(%198);
          %200 = nn.conv2d(%199, meta[relay.Constant][232], padding=[1, 1, 1, 1], channels=512, kernel_size=[3, 3]);
          %201 = nn.batch_norm(%200, meta[relay.Constant][233], meta[relay.Constant][234], meta[relay.Constant][235], meta[relay.Constant][236]);
          %202 = %201.0;
          %203 = nn.relu(%202);
          %204 = nn.conv2d(%203, meta[relay.Constant][237], padding=[0, 0, 0, 0], channels=2048, kernel_size=[1, 1]);
          %205 = add(%204, %192);
          %206 = nn.batch_norm(%205, meta[relay.Constant][238], meta[relay.Constant][239], meta[relay.Constant][240], meta[relay.Constant][241]);
          %207 = %206.0;
          %208 = nn.relu(%207);
          %209 = nn.conv2d(%208, meta[relay.Constant][242], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
          %210 = nn.batch_norm(%209, meta[relay.Constant][243], meta[relay.Constant][244], meta[relay.Constant][245], meta[relay.Constant][246]);
          %211 = %210.0;
          %212 = nn.relu(%211);
          %213 = nn.conv2d(%212, meta[relay.Constant][247], padding=[1, 1, 1, 1], channels=512, kernel_size=[3, 3]);
          %214 = nn.batch_norm(%213, meta[relay.Constant][248], meta[relay.Constant][249], meta[relay.Constant][250], meta[relay.Constant][251]);
          %215 = %214.0;
          %216 = nn.relu(%215);
          %217 = nn.conv2d(%216, meta[relay.Constant][252], padding=[0, 0, 0, 0], channels=2048, kernel_size=[1, 1]);
          %218 = add(%217, %205);
          %219 = nn.batch_norm(%218, meta[relay.Constant][253], meta[relay.Constant][254], meta[relay.Constant][255], meta[relay.Constant][256]);
          %220 = %219.0;
          %221 = nn.relu(%220);
          %222 = nn.global_avg_pool2d(%221);
          %223 = reshape(%222, newshape=[0, -1]);
          %224 = nn.dense(%223, meta[relay.Constant][257], units=1000);
          add(%224, meta[relay.Constant][258])
        }
        """,
        "from_string",
        None,
        metatable,
    )
    return {
        "name": "resnet50",
        "input_shapes": {"data": [1, 3, 224, 224]},
        "input_dtypes": {"data": "float32"},
        "mod": mod,
        "params": None,
        "main_dtype": "float32",
    }
def resnet50_16():
    """Return a model descriptor for ResNet-50 in float16, expressed in Relay text form.

    The graph is parsed from an embedded Relay text-format string. All weights
    and batch-norm parameters are bound via the ``metatable`` to constants
    produced by ``resnet50_consts("float16")`` (random-valued, so the model is
    suitable for compilation/benchmarking, not accuracy checks).

    Returns:
        dict: descriptor with keys:
            - "name": model name ("resnet50_16"),
            - "input_shapes"/"input_dtypes": single NCHW input "data",
            - "mod": the parsed ``tvm.IRModule``,
            - "params": None (weights are embedded as Relay constants),
            - "main_dtype": "float16".
    """
    metatable = {"relay.Constant": resnet50_consts("float16")}
    mod = tvm.relay.parse(
        """
        #[version = "0.0.5"]
        def @main(%data: Tensor[(1, 3, 224, 224), float16]) -> Tensor[(1, 1000), float16] {
          %0 = nn.batch_norm(%data, meta[relay.Constant][0], meta[relay.Constant][1], meta[relay.Constant][2], meta[relay.Constant][3]);
          %1 = %0.0;
          %2 = nn.conv2d(%1, meta[relay.Constant][4], strides=[2, 2], padding=[3, 3, 3, 3], channels=64, kernel_size=[7, 7]);
          %3 = nn.batch_norm(%2, meta[relay.Constant][5], meta[relay.Constant][6], meta[relay.Constant][7], meta[relay.Constant][8]);
          %4 = %3.0;
          %5 = nn.relu(%4);
          %6 = nn.max_pool2d(%5, pool_size=[3, 3], strides=[2, 2], padding=[1, 1, 1, 1]);
          %7 = nn.batch_norm(%6, meta[relay.Constant][9], meta[relay.Constant][10], meta[relay.Constant][11], meta[relay.Constant][12]);
          %8 = %7.0;
          %9 = nn.relu(%8);
          %10 = nn.conv2d(%9, meta[relay.Constant][13], padding=[0, 0, 0, 0], channels=64, kernel_size=[1, 1]);
          %11 = nn.batch_norm(%10, meta[relay.Constant][14], meta[relay.Constant][15], meta[relay.Constant][16], meta[relay.Constant][17]);
          %12 = %11.0;
          %13 = nn.relu(%12);
          %14 = nn.conv2d(%13, meta[relay.Constant][18], padding=[1, 1, 1, 1], channels=64, kernel_size=[3, 3]);
          %15 = nn.batch_norm(%14, meta[relay.Constant][19], meta[relay.Constant][20], meta[relay.Constant][21], meta[relay.Constant][22]);
          %16 = %15.0;
          %17 = nn.relu(%16);
          %18 = nn.conv2d(%17, meta[relay.Constant][23], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
          %19 = nn.conv2d(%9, meta[relay.Constant][24], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
          %20 = add(%18, %19);
          %21 = nn.batch_norm(%20, meta[relay.Constant][25], meta[relay.Constant][26], meta[relay.Constant][27], meta[relay.Constant][28]);
          %22 = %21.0;
          %23 = nn.relu(%22);
          %24 = nn.conv2d(%23, meta[relay.Constant][29], padding=[0, 0, 0, 0], channels=64, kernel_size=[1, 1]);
          %25 = nn.batch_norm(%24, meta[relay.Constant][30], meta[relay.Constant][31], meta[relay.Constant][32], meta[relay.Constant][33]);
          %26 = %25.0;
          %27 = nn.relu(%26);
          %28 = nn.conv2d(%27, meta[relay.Constant][34], padding=[1, 1, 1, 1], channels=64, kernel_size=[3, 3]);
          %29 = nn.batch_norm(%28, meta[relay.Constant][35], meta[relay.Constant][36], meta[relay.Constant][37], meta[relay.Constant][38]);
          %30 = %29.0;
          %31 = nn.relu(%30);
          %32 = nn.conv2d(%31, meta[relay.Constant][39], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
          %33 = add(%32, %20);
          %34 = nn.batch_norm(%33, meta[relay.Constant][40], meta[relay.Constant][41], meta[relay.Constant][42], meta[relay.Constant][43]);
          %35 = %34.0;
          %36 = nn.relu(%35);
          %37 = nn.conv2d(%36, meta[relay.Constant][44], padding=[0, 0, 0, 0], channels=64, kernel_size=[1, 1]);
          %38 = nn.batch_norm(%37, meta[relay.Constant][45], meta[relay.Constant][46], meta[relay.Constant][47], meta[relay.Constant][48]);
          %39 = %38.0;
          %40 = nn.relu(%39);
          %41 = nn.conv2d(%40, meta[relay.Constant][49], padding=[1, 1, 1, 1], channels=64, kernel_size=[3, 3]);
          %42 = nn.batch_norm(%41, meta[relay.Constant][50], meta[relay.Constant][51], meta[relay.Constant][52], meta[relay.Constant][53]);
          %43 = %42.0;
          %44 = nn.relu(%43);
          %45 = nn.conv2d(%44, meta[relay.Constant][54], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
          %46 = add(%45, %33);
          %47 = nn.batch_norm(%46, meta[relay.Constant][55], meta[relay.Constant][56], meta[relay.Constant][57], meta[relay.Constant][58]);
          %48 = %47.0;
          %49 = nn.relu(%48);
          %50 = nn.conv2d(%49, meta[relay.Constant][59], padding=[0, 0, 0, 0], channels=128, kernel_size=[1, 1]);
          %51 = nn.batch_norm(%50, meta[relay.Constant][60], meta[relay.Constant][61], meta[relay.Constant][62], meta[relay.Constant][63]);
          %52 = %51.0;
          %53 = nn.relu(%52);
          %54 = nn.conv2d(%53, meta[relay.Constant][64], strides=[2, 2], padding=[1, 1, 1, 1], channels=128, kernel_size=[3, 3]);
          %55 = nn.batch_norm(%54, meta[relay.Constant][65], meta[relay.Constant][66], meta[relay.Constant][67], meta[relay.Constant][68]);
          %56 = %55.0;
          %57 = nn.relu(%56);
          %58 = nn.conv2d(%57, meta[relay.Constant][69], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
          %59 = nn.conv2d(%49, meta[relay.Constant][70], strides=[2, 2], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
          %60 = add(%58, %59);
          %61 = nn.batch_norm(%60, meta[relay.Constant][71], meta[relay.Constant][72], meta[relay.Constant][73], meta[relay.Constant][74]);
          %62 = %61.0;
          %63 = nn.relu(%62);
          %64 = nn.conv2d(%63, meta[relay.Constant][75], padding=[0, 0, 0, 0], channels=128, kernel_size=[1, 1]);
          %65 = nn.batch_norm(%64, meta[relay.Constant][76], meta[relay.Constant][77], meta[relay.Constant][78], meta[relay.Constant][79]);
          %66 = %65.0;
          %67 = nn.relu(%66);
          %68 = nn.conv2d(%67, meta[relay.Constant][80], padding=[1, 1, 1, 1], channels=128, kernel_size=[3, 3]);
          %69 = nn.batch_norm(%68, meta[relay.Constant][81], meta[relay.Constant][82], meta[relay.Constant][83], meta[relay.Constant][84]);
          %70 = %69.0;
          %71 = nn.relu(%70);
          %72 = nn.conv2d(%71, meta[relay.Constant][85], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
          %73 = add(%72, %60);
          %74 = nn.batch_norm(%73, meta[relay.Constant][86], meta[relay.Constant][87], meta[relay.Constant][88], meta[relay.Constant][89]);
          %75 = %74.0;
          %76 = nn.relu(%75);
          %77 = nn.conv2d(%76, meta[relay.Constant][90], padding=[0, 0, 0, 0], channels=128, kernel_size=[1, 1]);
          %78 = nn.batch_norm(%77, meta[relay.Constant][91], meta[relay.Constant][92], meta[relay.Constant][93], meta[relay.Constant][94]);
          %79 = %78.0;
          %80 = nn.relu(%79);
          %81 = nn.conv2d(%80, meta[relay.Constant][95], padding=[1, 1, 1, 1], channels=128, kernel_size=[3, 3]);
          %82 = nn.batch_norm(%81, meta[relay.Constant][96], meta[relay.Constant][97], meta[relay.Constant][98], meta[relay.Constant][99]);
          %83 = %82.0;
          %84 = nn.relu(%83);
          %85 = nn.conv2d(%84, meta[relay.Constant][100], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
          %86 = add(%85, %73);
          %87 = nn.batch_norm(%86, meta[relay.Constant][101], meta[relay.Constant][102], meta[relay.Constant][103], meta[relay.Constant][104]);
          %88 = %87.0;
          %89 = nn.relu(%88);
          %90 = nn.conv2d(%89, meta[relay.Constant][105], padding=[0, 0, 0, 0], channels=128, kernel_size=[1, 1]);
          %91 = nn.batch_norm(%90, meta[relay.Constant][106], meta[relay.Constant][107], meta[relay.Constant][108], meta[relay.Constant][109]);
          %92 = %91.0;
          %93 = nn.relu(%92);
          %94 = nn.conv2d(%93, meta[relay.Constant][110], padding=[1, 1, 1, 1], channels=128, kernel_size=[3, 3]);
          %95 = nn.batch_norm(%94, meta[relay.Constant][111], meta[relay.Constant][112], meta[relay.Constant][113], meta[relay.Constant][114]);
          %96 = %95.0;
          %97 = nn.relu(%96);
          %98 = nn.conv2d(%97, meta[relay.Constant][115], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
          %99 = add(%98, %86);
          %100 = nn.batch_norm(%99, meta[relay.Constant][116], meta[relay.Constant][117], meta[relay.Constant][118], meta[relay.Constant][119]);
          %101 = %100.0;
          %102 = nn.relu(%101);
          %103 = nn.conv2d(%102, meta[relay.Constant][120], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
          %104 = nn.batch_norm(%103, meta[relay.Constant][121], meta[relay.Constant][122], meta[relay.Constant][123], meta[relay.Constant][124]);
          %105 = %104.0;
          %106 = nn.relu(%105);
          %107 = nn.conv2d(%106, meta[relay.Constant][125], strides=[2, 2], padding=[1, 1, 1, 1], channels=256, kernel_size=[3, 3]);
          %108 = nn.batch_norm(%107, meta[relay.Constant][126], meta[relay.Constant][127], meta[relay.Constant][128], meta[relay.Constant][129]);
          %109 = %108.0;
          %110 = nn.relu(%109);
          %111 = nn.conv2d(%110, meta[relay.Constant][130], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
          %112 = nn.conv2d(%102, meta[relay.Constant][131], strides=[2, 2], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
          %113 = add(%111, %112);
          %114 = nn.batch_norm(%113, meta[relay.Constant][132], meta[relay.Constant][133], meta[relay.Constant][134], meta[relay.Constant][135]);
          %115 = %114.0;
          %116 = nn.relu(%115);
          %117 = nn.conv2d(%116, meta[relay.Constant][136], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
          %118 = nn.batch_norm(%117, meta[relay.Constant][137], meta[relay.Constant][138], meta[relay.Constant][139], meta[relay.Constant][140]);
          %119 = %118.0;
          %120 = nn.relu(%119);
          %121 = nn.conv2d(%120, meta[relay.Constant][141], padding=[1, 1, 1, 1], channels=256, kernel_size=[3, 3]);
          %122 = nn.batch_norm(%121, meta[relay.Constant][142], meta[relay.Constant][143], meta[relay.Constant][144], meta[relay.Constant][145]);
          %123 = %122.0;
          %124 = nn.relu(%123);
          %125 = nn.conv2d(%124, meta[relay.Constant][146], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
          %126 = add(%125, %113);
          %127 = nn.batch_norm(%126, meta[relay.Constant][147], meta[relay.Constant][148], meta[relay.Constant][149], meta[relay.Constant][150]);
          %128 = %127.0;
          %129 = nn.relu(%128);
          %130 = nn.conv2d(%129, meta[relay.Constant][151], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
          %131 = nn.batch_norm(%130, meta[relay.Constant][152], meta[relay.Constant][153], meta[relay.Constant][154], meta[relay.Constant][155]);
          %132 = %131.0;
          %133 = nn.relu(%132);
          %134 = nn.conv2d(%133, meta[relay.Constant][156], padding=[1, 1, 1, 1], channels=256, kernel_size=[3, 3]);
          %135 = nn.batch_norm(%134, meta[relay.Constant][157], meta[relay.Constant][158], meta[relay.Constant][159], meta[relay.Constant][160]);
          %136 = %135.0;
          %137 = nn.relu(%136);
          %138 = nn.conv2d(%137, meta[relay.Constant][161], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
          %139 = add(%138, %126);
          %140 = nn.batch_norm(%139, meta[relay.Constant][162], meta[relay.Constant][163], meta[relay.Constant][164], meta[relay.Constant][165]);
          %141 = %140.0;
          %142 = nn.relu(%141);
          %143 = nn.conv2d(%142, meta[relay.Constant][166], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
          %144 = nn.batch_norm(%143, meta[relay.Constant][167], meta[relay.Constant][168], meta[relay.Constant][169], meta[relay.Constant][170]);
          %145 = %144.0;
          %146 = nn.relu(%145);
          %147 = nn.conv2d(%146, meta[relay.Constant][171], padding=[1, 1, 1, 1], channels=256, kernel_size=[3, 3]);
          %148 = nn.batch_norm(%147, meta[relay.Constant][172], meta[relay.Constant][173], meta[relay.Constant][174], meta[relay.Constant][175]);
          %149 = %148.0;
          %150 = nn.relu(%149);
          %151 = nn.conv2d(%150, meta[relay.Constant][176], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
          %152 = add(%151, %139);
          %153 = nn.batch_norm(%152, meta[relay.Constant][177], meta[relay.Constant][178], meta[relay.Constant][179], meta[relay.Constant][180]);
          %154 = %153.0;
          %155 = nn.relu(%154);
          %156 = nn.conv2d(%155, meta[relay.Constant][181], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
          %157 = nn.batch_norm(%156, meta[relay.Constant][182], meta[relay.Constant][183], meta[relay.Constant][184], meta[relay.Constant][185]);
          %158 = %157.0;
          %159 = nn.relu(%158);
          %160 = nn.conv2d(%159, meta[relay.Constant][186], padding=[1, 1, 1, 1], channels=256, kernel_size=[3, 3]);
          %161 = nn.batch_norm(%160, meta[relay.Constant][187], meta[relay.Constant][188], meta[relay.Constant][189], meta[relay.Constant][190]);
          %162 = %161.0;
          %163 = nn.relu(%162);
          %164 = nn.conv2d(%163, meta[relay.Constant][191], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
          %165 = add(%164, %152);
          %166 = nn.batch_norm(%165, meta[relay.Constant][192], meta[relay.Constant][193], meta[relay.Constant][194], meta[relay.Constant][195]);
          %167 = %166.0;
          %168 = nn.relu(%167);
          %169 = nn.conv2d(%168, meta[relay.Constant][196], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
          %170 = nn.batch_norm(%169, meta[relay.Constant][197], meta[relay.Constant][198], meta[relay.Constant][199], meta[relay.Constant][200]);
          %171 = %170.0;
          %172 = nn.relu(%171);
          %173 = nn.conv2d(%172, meta[relay.Constant][201], padding=[1, 1, 1, 1], channels=256, kernel_size=[3, 3]);
          %174 = nn.batch_norm(%173, meta[relay.Constant][202], meta[relay.Constant][203], meta[relay.Constant][204], meta[relay.Constant][205]);
          %175 = %174.0;
          %176 = nn.relu(%175);
          %177 = nn.conv2d(%176, meta[relay.Constant][206], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
          %178 = add(%177, %165);
          %179 = nn.batch_norm(%178, meta[relay.Constant][207], meta[relay.Constant][208], meta[relay.Constant][209], meta[relay.Constant][210]);
          %180 = %179.0;
          %181 = nn.relu(%180);
          %182 = nn.conv2d(%181, meta[relay.Constant][211], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
          %183 = nn.batch_norm(%182, meta[relay.Constant][212], meta[relay.Constant][213], meta[relay.Constant][214], meta[relay.Constant][215]);
          %184 = %183.0;
          %185 = nn.relu(%184);
          %186 = nn.conv2d(%185, meta[relay.Constant][216], strides=[2, 2], padding=[1, 1, 1, 1], channels=512, kernel_size=[3, 3]);
          %187 = nn.batch_norm(%186, meta[relay.Constant][217], meta[relay.Constant][218], meta[relay.Constant][219], meta[relay.Constant][220]);
          %188 = %187.0;
          %189 = nn.relu(%188);
          %190 = nn.conv2d(%189, meta[relay.Constant][221], padding=[0, 0, 0, 0], channels=2048, kernel_size=[1, 1]);
          %191 = nn.conv2d(%181, meta[relay.Constant][222], strides=[2, 2], padding=[0, 0, 0, 0], channels=2048, kernel_size=[1, 1]);
          %192 = add(%190, %191);
          %193 = nn.batch_norm(%192, meta[relay.Constant][223], meta[relay.Constant][224], meta[relay.Constant][225], meta[relay.Constant][226]);
          %194 = %193.0;
          %195 = nn.relu(%194);
          %196 = nn.conv2d(%195, meta[relay.Constant][227], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
          %197 = nn.batch_norm(%196, meta[relay.Constant][228], meta[relay.Constant][229], meta[relay.Constant][230], meta[relay.Constant][231]);
          %198 = %197.0;
          %199 = nn.relu(%198);
          %200 = nn.conv2d(%199, meta[relay.Constant][232], padding=[1, 1, 1, 1], channels=512, kernel_size=[3, 3]);
          %201 = nn.batch_norm(%200, meta[relay.Constant][233], meta[relay.Constant][234], meta[relay.Constant][235], meta[relay.Constant][236]);
          %202 = %201.0;
          %203 = nn.relu(%202);
          %204 = nn.conv2d(%203, meta[relay.Constant][237], padding=[0, 0, 0, 0], channels=2048, kernel_size=[1, 1]);
          %205 = add(%204, %192);
          %206 = nn.batch_norm(%205, meta[relay.Constant][238], meta[relay.Constant][239], meta[relay.Constant][240], meta[relay.Constant][241]);
          %207 = %206.0;
          %208 = nn.relu(%207);
          %209 = nn.conv2d(%208, meta[relay.Constant][242], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
          %210 = nn.batch_norm(%209, meta[relay.Constant][243], meta[relay.Constant][244], meta[relay.Constant][245], meta[relay.Constant][246]);
          %211 = %210.0;
          %212 = nn.relu(%211);
          %213 = nn.conv2d(%212, meta[relay.Constant][247], padding=[1, 1, 1, 1], channels=512, kernel_size=[3, 3]);
          %214 = nn.batch_norm(%213, meta[relay.Constant][248], meta[relay.Constant][249], meta[relay.Constant][250], meta[relay.Constant][251]);
          %215 = %214.0;
          %216 = nn.relu(%215);
          %217 = nn.conv2d(%216, meta[relay.Constant][252], padding=[0, 0, 0, 0], channels=2048, kernel_size=[1, 1]);
          %218 = add(%217, %205);
          %219 = nn.batch_norm(%218, meta[relay.Constant][253], meta[relay.Constant][254], meta[relay.Constant][255], meta[relay.Constant][256]);
          %220 = %219.0;
          %221 = nn.relu(%220);
          %222 = nn.global_avg_pool2d(%221);
          %223 = reshape(%222, newshape=[0, -1]);
          %224 = nn.dense(%223, meta[relay.Constant][257], units=1000);
          add(%224, meta[relay.Constant][258])
        }
        """,
        "from_string",
        None,
        metatable,
    )
    return {
        "name": "resnet50_16",
        "input_shapes": {"data": [1, 3, 224, 224]},
        "input_dtypes": {"data": "float16"},
        "mod": mod,
        "params": None,
        "main_dtype": "float16",
    }
def mobilenet_consts(dtype):
    """Return the constants (conv weights and batch-norm parameters) for MobileNet V2.

    The shapes are listed in the exact order the ``meta[relay.Constant][i]``
    references in the ``mobilenet``/``mobilenet_16`` Relay texts expect them:
    each conv weight (OIHW) is followed by four 1-D batch-norm parameter
    vectors (gamma, beta, moving mean, moving variance) of the output-channel
    size. The trailing ``# i`` comments give each entry's constant index.

    Args:
        dtype: element dtype for every constant, e.g. "float32" or "float16".

    Returns:
        Whatever ``make_consts`` produces for the shape list (a list of Relay
        constants with random values — suitable for benchmarking, not accuracy).
    """
    return make_consts(
        dtype,
        [
            (32, 3, 3, 3), # 0
            (32,), # 1
            (32,), # 2
            (32,), # 3
            (32,), # 4
            (32, 32, 1, 1), # 5
            (32,), # 6
            (32,), # 7
            (32,), # 8
            (32,), # 9
            (32, 1, 3, 3), # 10
            (32,), # 11
            (32,), # 12
            (32,), # 13
            (32,), # 14
            (16, 32, 1, 1), # 15
            (16,), # 16
            (16,), # 17
            (16,), # 18
            (16,), # 19
            (96, 16, 1, 1), # 20
            (96,), # 21
            (96,), # 22
            (96,), # 23
            (96,), # 24
            (96, 1, 3, 3), # 25
            (96,), # 26
            (96,), # 27
            (96,), # 28
            (96,), # 29
            (24, 96, 1, 1), # 30
            (24,), # 31
            (24,), # 32
            (24,), # 33
            (24,), # 34
            (144, 24, 1, 1), # 35
            (144,), # 36
            (144,), # 37
            (144,), # 38
            (144,), # 39
            (144, 1, 3, 3), # 40
            (144,), # 41
            (144,), # 42
            (144,), # 43
            (144,), # 44
            (24, 144, 1, 1), # 45
            (24,), # 46
            (24,), # 47
            (24,), # 48
            (24,), # 49
            (144, 24, 1, 1), # 50
            (144,), # 51
            (144,), # 52
            (144,), # 53
            (144,), # 54
            (144, 1, 3, 3), # 55
            (144,), # 56
            (144,), # 57
            (144,), # 58
            (144,), # 59
            (32, 144, 1, 1), # 60
            (32,), # 61
            (32,), # 62
            (32,), # 63
            (32,), # 64
            (192, 32, 1, 1), # 65
            (192,), # 66
            (192,), # 67
            (192,), # 68
            (192,), # 69
            (192, 1, 3, 3), # 70
            (192,), # 71
            (192,), # 72
            (192,), # 73
            (192,), # 74
            (32, 192, 1, 1), # 75
            (32,), # 76
            (32,), # 77
            (32,), # 78
            (32,), # 79
            (192, 32, 1, 1), # 80
            (192,), # 81
            (192,), # 82
            (192,), # 83
            (192,), # 84
            (192, 1, 3, 3), # 85
            (192,), # 86
            (192,), # 87
            (192,), # 88
            (192,), # 89
            (32, 192, 1, 1), # 90
            (32,), # 91
            (32,), # 92
            (32,), # 93
            (32,), # 94
            (192, 32, 1, 1), # 95
            (192,), # 96
            (192,), # 97
            (192,), # 98
            (192,), # 99
            (192, 1, 3, 3), # 100
            (192,), # 101
            (192,), # 102
            (192,), # 103
            (192,), # 104
            (64, 192, 1, 1), # 105
            (64,), # 106
            (64,), # 107
            (64,), # 108
            (64,), # 109
            (384, 64, 1, 1), # 110
            (384,), # 111
            (384,), # 112
            (384,), # 113
            (384,), # 114
            (384, 1, 3, 3), # 115
            (384,), # 116
            (384,), # 117
            (384,), # 118
            (384,), # 119
            (64, 384, 1, 1), # 120
            (64,), # 121
            (64,), # 122
            (64,), # 123
            (64,), # 124
            (384, 64, 1, 1), # 125
            (384,), # 126
            (384,), # 127
            (384,), # 128
            (384,), # 129
            (384, 1, 3, 3), # 130
            (384,), # 131
            (384,), # 132
            (384,), # 133
            (384,), # 134
            (64, 384, 1, 1), # 135
            (64,), # 136
            (64,), # 137
            (64,), # 138
            (64,), # 139
            (384, 64, 1, 1), # 140
            (384,), # 141
            (384,), # 142
            (384,), # 143
            (384,), # 144
            (384, 1, 3, 3), # 145
            (384,), # 146
            (384,), # 147
            (384,), # 148
            (384,), # 149
            (64, 384, 1, 1), # 150
            (64,), # 151
            (64,), # 152
            (64,), # 153
            (64,), # 154
            (384, 64, 1, 1), # 155
            (384,), # 156
            (384,), # 157
            (384,), # 158
            (384,), # 159
            (384, 1, 3, 3), # 160
            (384,), # 161
            (384,), # 162
            (384,), # 163
            (384,), # 164
            (96, 384, 1, 1), # 165
            (96,), # 166
            (96,), # 167
            (96,), # 168
            (96,), # 169
            (576, 96, 1, 1), # 170
            (576,), # 171
            (576,), # 172
            (576,), # 173
            (576,), # 174
            (576, 1, 3, 3), # 175
            (576,), # 176
            (576,), # 177
            (576,), # 178
            (576,), # 179
            (96, 576, 1, 1), # 180
            (96,), # 181
            (96,), # 182
            (96,), # 183
            (96,), # 184
            (576, 96, 1, 1), # 185
            (576,), # 186
            (576,), # 187
            (576,), # 188
            (576,), # 189
            (576, 1, 3, 3), # 190
            (576,), # 191
            (576,), # 192
            (576,), # 193
            (576,), # 194
            (96, 576, 1, 1), # 195
            (96,), # 196
            (96,), # 197
            (96,), # 198
            (96,), # 199
            (576, 96, 1, 1), # 200
            (576,), # 201
            (576,), # 202
            (576,), # 203
            (576,), # 204
            (576, 1, 3, 3), # 205
            (576,), # 206
            (576,), # 207
            (576,), # 208
            (576,), # 209
            (160, 576, 1, 1), # 210
            (160,), # 211
            (160,), # 212
            (160,), # 213
            (160,), # 214
            (960, 160, 1, 1), # 215
            (960,), # 216
            (960,), # 217
            (960,), # 218
            (960,), # 219
            (960, 1, 3, 3), # 220
            (960,), # 221
            (960,), # 222
            (960,), # 223
            (960,), # 224
            (160, 960, 1, 1), # 225
            (160,), # 226
            (160,), # 227
            (160,), # 228
            (160,), # 229
            (960, 160, 1, 1), # 230
            (960,), # 231
            (960,), # 232
            (960,), # 233
            (960,), # 234
            (960, 1, 3, 3), # 235
            (960,), # 236
            (960,), # 237
            (960,), # 238
            (960,), # 239
            (160, 960, 1, 1), # 240
            (160,), # 241
            (160,), # 242
            (160,), # 243
            (160,), # 244
            (960, 160, 1, 1), # 245
            (960,), # 246
            (960,), # 247
            (960,), # 248
            (960,), # 249
            (960, 1, 3, 3), # 250
            (960,), # 251
            (960,), # 252
            (960,), # 253
            (960,), # 254
            (320, 960, 1, 1), # 255
            (320,), # 256
            (320,), # 257
            (320,), # 258
            (320,), # 259
            (1280, 320, 1, 1), # 260
            (1280,), # 261
            (1280,), # 262
            (1280,), # 263
            (1280,), # 264
            (1000, 1280, 1, 1), # 265
        ],
    )
def mobilenet():
    """Return a model descriptor for MobileNet V2 in float32, expressed in Relay text form.

    The graph is parsed from an embedded Relay text-format string. Depthwise
    convolutions appear as grouped convs (``groups == channels``). All weights
    and batch-norm parameters are bound via the ``metatable`` to constants from
    ``mobilenet_consts("float32")`` (random-valued — for compilation and
    benchmarking, not accuracy checks). The classifier is a 1x1 conv over the
    pooled features followed by a reshape to (1, 1000).

    Returns:
        dict: descriptor with keys:
            - "name": model name ("mobilenet"),
            - "input_shapes"/"input_dtypes": single NCHW input "data",
            - "mod": the parsed ``tvm.IRModule``,
            - "params": None (weights are embedded as Relay constants),
            - "main_dtype": "float32".
    """
    metatable = {"relay.Constant": mobilenet_consts("float32")}
    mod = tvm.relay.parse(
        """
        #[version = "0.0.5"]
        def @main(%data: Tensor[(1, 3, 224, 224), float32]) -> Tensor[(1, 1000), float32] {
          %0 = nn.conv2d(%data, meta[relay.Constant][0], strides=[2, 2], padding=[1, 1, 1, 1], channels=32, kernel_size=[3, 3]);
          %1 = nn.batch_norm(%0, meta[relay.Constant][1], meta[relay.Constant][2], meta[relay.Constant][3], meta[relay.Constant][4]);
          %2 = %1.0;
          %3 = nn.relu(%2);
          %4 = nn.conv2d(%3, meta[relay.Constant][5], padding=[0, 0, 0, 0], channels=32, kernel_size=[1, 1]);
          %5 = nn.batch_norm(%4, meta[relay.Constant][6], meta[relay.Constant][7], meta[relay.Constant][8], meta[relay.Constant][9]);
          %6 = %5.0;
          %7 = nn.relu(%6);
          %8 = nn.conv2d(%7, meta[relay.Constant][10], padding=[1, 1, 1, 1], groups=32, channels=32, kernel_size=[3, 3]);
          %9 = nn.batch_norm(%8, meta[relay.Constant][11], meta[relay.Constant][12], meta[relay.Constant][13], meta[relay.Constant][14]);
          %10 = %9.0;
          %11 = nn.relu(%10);
          %12 = nn.conv2d(%11, meta[relay.Constant][15], padding=[0, 0, 0, 0], channels=16, kernel_size=[1, 1]);
          %13 = nn.batch_norm(%12, meta[relay.Constant][16], meta[relay.Constant][17], meta[relay.Constant][18], meta[relay.Constant][19]);
          %14 = %13.0;
          %15 = nn.conv2d(%14, meta[relay.Constant][20], padding=[0, 0, 0, 0], channels=96, kernel_size=[1, 1]);
          %16 = nn.batch_norm(%15, meta[relay.Constant][21], meta[relay.Constant][22], meta[relay.Constant][23], meta[relay.Constant][24]);
          %17 = %16.0;
          %18 = nn.relu(%17);
          %19 = nn.conv2d(%18, meta[relay.Constant][25], strides=[2, 2], padding=[1, 1, 1, 1], groups=96, channels=96, kernel_size=[3, 3]);
          %20 = nn.batch_norm(%19, meta[relay.Constant][26], meta[relay.Constant][27], meta[relay.Constant][28], meta[relay.Constant][29]);
          %21 = %20.0;
          %22 = nn.relu(%21);
          %23 = nn.conv2d(%22, meta[relay.Constant][30], padding=[0, 0, 0, 0], channels=24, kernel_size=[1, 1]);
          %24 = nn.batch_norm(%23, meta[relay.Constant][31], meta[relay.Constant][32], meta[relay.Constant][33], meta[relay.Constant][34]);
          %25 = %24.0;
          %26 = nn.conv2d(%25, meta[relay.Constant][35], padding=[0, 0, 0, 0], channels=144, kernel_size=[1, 1]);
          %27 = nn.batch_norm(%26, meta[relay.Constant][36], meta[relay.Constant][37], meta[relay.Constant][38], meta[relay.Constant][39]);
          %28 = %27.0;
          %29 = nn.relu(%28);
          %30 = nn.conv2d(%29, meta[relay.Constant][40], padding=[1, 1, 1, 1], groups=144, channels=144, kernel_size=[3, 3]);
          %31 = nn.batch_norm(%30, meta[relay.Constant][41], meta[relay.Constant][42], meta[relay.Constant][43], meta[relay.Constant][44]);
          %32 = %31.0;
          %33 = nn.relu(%32);
          %34 = nn.conv2d(%33, meta[relay.Constant][45], padding=[0, 0, 0, 0], channels=24, kernel_size=[1, 1]);
          %35 = nn.batch_norm(%34, meta[relay.Constant][46], meta[relay.Constant][47], meta[relay.Constant][48], meta[relay.Constant][49]);
          %36 = %35.0;
          %37 = add(%36, %25);
          %38 = nn.conv2d(%37, meta[relay.Constant][50], padding=[0, 0, 0, 0], channels=144, kernel_size=[1, 1]);
          %39 = nn.batch_norm(%38, meta[relay.Constant][51], meta[relay.Constant][52], meta[relay.Constant][53], meta[relay.Constant][54]);
          %40 = %39.0;
          %41 = nn.relu(%40);
          %42 = nn.conv2d(%41, meta[relay.Constant][55], strides=[2, 2], padding=[1, 1, 1, 1], groups=144, channels=144, kernel_size=[3, 3]);
          %43 = nn.batch_norm(%42, meta[relay.Constant][56], meta[relay.Constant][57], meta[relay.Constant][58], meta[relay.Constant][59]);
          %44 = %43.0;
          %45 = nn.relu(%44);
          %46 = nn.conv2d(%45, meta[relay.Constant][60], padding=[0, 0, 0, 0], channels=32, kernel_size=[1, 1]);
          %47 = nn.batch_norm(%46, meta[relay.Constant][61], meta[relay.Constant][62], meta[relay.Constant][63], meta[relay.Constant][64]);
          %48 = %47.0;
          %49 = nn.conv2d(%48, meta[relay.Constant][65], padding=[0, 0, 0, 0], channels=192, kernel_size=[1, 1]);
          %50 = nn.batch_norm(%49, meta[relay.Constant][66], meta[relay.Constant][67], meta[relay.Constant][68], meta[relay.Constant][69]);
          %51 = %50.0;
          %52 = nn.relu(%51);
          %53 = nn.conv2d(%52, meta[relay.Constant][70], padding=[1, 1, 1, 1], groups=192, channels=192, kernel_size=[3, 3]);
          %54 = nn.batch_norm(%53, meta[relay.Constant][71], meta[relay.Constant][72], meta[relay.Constant][73], meta[relay.Constant][74]);
          %55 = %54.0;
          %56 = nn.relu(%55);
          %57 = nn.conv2d(%56, meta[relay.Constant][75], padding=[0, 0, 0, 0], channels=32, kernel_size=[1, 1]);
          %58 = nn.batch_norm(%57, meta[relay.Constant][76], meta[relay.Constant][77], meta[relay.Constant][78], meta[relay.Constant][79]);
          %59 = %58.0;
          %60 = add(%59, %48);
          %61 = nn.conv2d(%60, meta[relay.Constant][80], padding=[0, 0, 0, 0], channels=192, kernel_size=[1, 1]);
          %62 = nn.batch_norm(%61, meta[relay.Constant][81], meta[relay.Constant][82], meta[relay.Constant][83], meta[relay.Constant][84]);
          %63 = %62.0;
          %64 = nn.relu(%63);
          %65 = nn.conv2d(%64, meta[relay.Constant][85], padding=[1, 1, 1, 1], groups=192, channels=192, kernel_size=[3, 3]);
          %66 = nn.batch_norm(%65, meta[relay.Constant][86], meta[relay.Constant][87], meta[relay.Constant][88], meta[relay.Constant][89]);
          %67 = %66.0;
          %68 = nn.relu(%67);
          %69 = nn.conv2d(%68, meta[relay.Constant][90], padding=[0, 0, 0, 0], channels=32, kernel_size=[1, 1]);
          %70 = nn.batch_norm(%69, meta[relay.Constant][91], meta[relay.Constant][92], meta[relay.Constant][93], meta[relay.Constant][94]);
          %71 = %70.0;
          %72 = add(%71, %60);
          %73 = nn.conv2d(%72, meta[relay.Constant][95], padding=[0, 0, 0, 0], channels=192, kernel_size=[1, 1]);
          %74 = nn.batch_norm(%73, meta[relay.Constant][96], meta[relay.Constant][97], meta[relay.Constant][98], meta[relay.Constant][99]);
          %75 = %74.0;
          %76 = nn.relu(%75);
          %77 = nn.conv2d(%76, meta[relay.Constant][100], padding=[1, 1, 1, 1], groups=192, channels=192, kernel_size=[3, 3]);
          %78 = nn.batch_norm(%77, meta[relay.Constant][101], meta[relay.Constant][102], meta[relay.Constant][103], meta[relay.Constant][104]);
          %79 = %78.0;
          %80 = nn.relu(%79);
          %81 = nn.conv2d(%80, meta[relay.Constant][105], padding=[0, 0, 0, 0], channels=64, kernel_size=[1, 1]);
          %82 = nn.batch_norm(%81, meta[relay.Constant][106], meta[relay.Constant][107], meta[relay.Constant][108], meta[relay.Constant][109]);
          %83 = %82.0;
          %84 = nn.conv2d(%83, meta[relay.Constant][110], padding=[0, 0, 0, 0], channels=384, kernel_size=[1, 1]);
          %85 = nn.batch_norm(%84, meta[relay.Constant][111], meta[relay.Constant][112], meta[relay.Constant][113], meta[relay.Constant][114]);
          %86 = %85.0;
          %87 = nn.relu(%86);
          %88 = nn.conv2d(%87, meta[relay.Constant][115], padding=[1, 1, 1, 1], groups=384, channels=384, kernel_size=[3, 3]);
          %89 = nn.batch_norm(%88, meta[relay.Constant][116], meta[relay.Constant][117], meta[relay.Constant][118], meta[relay.Constant][119]);
          %90 = %89.0;
          %91 = nn.relu(%90);
          %92 = nn.conv2d(%91, meta[relay.Constant][120], padding=[0, 0, 0, 0], channels=64, kernel_size=[1, 1]);
          %93 = nn.batch_norm(%92, meta[relay.Constant][121], meta[relay.Constant][122], meta[relay.Constant][123], meta[relay.Constant][124]);
          %94 = %93.0;
          %95 = add(%94, %83);
          %96 = nn.conv2d(%95, meta[relay.Constant][125], padding=[0, 0, 0, 0], channels=384, kernel_size=[1, 1]);
          %97 = nn.batch_norm(%96, meta[relay.Constant][126], meta[relay.Constant][127], meta[relay.Constant][128], meta[relay.Constant][129]);
          %98 = %97.0;
          %99 = nn.relu(%98);
          %100 = nn.conv2d(%99, meta[relay.Constant][130], padding=[1, 1, 1, 1], groups=384, channels=384, kernel_size=[3, 3]);
          %101 = nn.batch_norm(%100, meta[relay.Constant][131], meta[relay.Constant][132], meta[relay.Constant][133], meta[relay.Constant][134]);
          %102 = %101.0;
          %103 = nn.relu(%102);
          %104 = nn.conv2d(%103, meta[relay.Constant][135], padding=[0, 0, 0, 0], channels=64, kernel_size=[1, 1]);
          %105 = nn.batch_norm(%104, meta[relay.Constant][136], meta[relay.Constant][137], meta[relay.Constant][138], meta[relay.Constant][139]);
          %106 = %105.0;
          %107 = add(%106, %95);
          %108 = nn.conv2d(%107, meta[relay.Constant][140], padding=[0, 0, 0, 0], channels=384, kernel_size=[1, 1]);
          %109 = nn.batch_norm(%108, meta[relay.Constant][141], meta[relay.Constant][142], meta[relay.Constant][143], meta[relay.Constant][144]);
          %110 = %109.0;
          %111 = nn.relu(%110);
          %112 = nn.conv2d(%111, meta[relay.Constant][145], padding=[1, 1, 1, 1], groups=384, channels=384, kernel_size=[3, 3]);
          %113 = nn.batch_norm(%112, meta[relay.Constant][146], meta[relay.Constant][147], meta[relay.Constant][148], meta[relay.Constant][149]);
          %114 = %113.0;
          %115 = nn.relu(%114);
          %116 = nn.conv2d(%115, meta[relay.Constant][150], padding=[0, 0, 0, 0], channels=64, kernel_size=[1, 1]);
          %117 = nn.batch_norm(%116, meta[relay.Constant][151], meta[relay.Constant][152], meta[relay.Constant][153], meta[relay.Constant][154]);
          %118 = %117.0;
          %119 = add(%118, %107);
          %120 = nn.conv2d(%119, meta[relay.Constant][155], padding=[0, 0, 0, 0], channels=384, kernel_size=[1, 1]);
          %121 = nn.batch_norm(%120, meta[relay.Constant][156], meta[relay.Constant][157], meta[relay.Constant][158], meta[relay.Constant][159]);
          %122 = %121.0;
          %123 = nn.relu(%122);
          %124 = nn.conv2d(%123, meta[relay.Constant][160], strides=[2, 2], padding=[1, 1, 1, 1], groups=384, channels=384, kernel_size=[3, 3]);
          %125 = nn.batch_norm(%124, meta[relay.Constant][161], meta[relay.Constant][162], meta[relay.Constant][163], meta[relay.Constant][164]);
          %126 = %125.0;
          %127 = nn.relu(%126);
          %128 = nn.conv2d(%127, meta[relay.Constant][165], padding=[0, 0, 0, 0], channels=96, kernel_size=[1, 1]);
          %129 = nn.batch_norm(%128, meta[relay.Constant][166], meta[relay.Constant][167], meta[relay.Constant][168], meta[relay.Constant][169]);
          %130 = %129.0;
          %131 = nn.conv2d(%130, meta[relay.Constant][170], padding=[0, 0, 0, 0], channels=576, kernel_size=[1, 1]);
          %132 = nn.batch_norm(%131, meta[relay.Constant][171], meta[relay.Constant][172], meta[relay.Constant][173], meta[relay.Constant][174]);
          %133 = %132.0;
          %134 = nn.relu(%133);
          %135 = nn.conv2d(%134, meta[relay.Constant][175], padding=[1, 1, 1, 1], groups=576, channels=576, kernel_size=[3, 3]);
          %136 = nn.batch_norm(%135, meta[relay.Constant][176], meta[relay.Constant][177], meta[relay.Constant][178], meta[relay.Constant][179]);
          %137 = %136.0;
          %138 = nn.relu(%137);
          %139 = nn.conv2d(%138, meta[relay.Constant][180], padding=[0, 0, 0, 0], channels=96, kernel_size=[1, 1]);
          %140 = nn.batch_norm(%139, meta[relay.Constant][181], meta[relay.Constant][182], meta[relay.Constant][183], meta[relay.Constant][184]);
          %141 = %140.0;
          %142 = add(%141, %130);
          %143 = nn.conv2d(%142, meta[relay.Constant][185], padding=[0, 0, 0, 0], channels=576, kernel_size=[1, 1]);
          %144 = nn.batch_norm(%143, meta[relay.Constant][186], meta[relay.Constant][187], meta[relay.Constant][188], meta[relay.Constant][189]);
          %145 = %144.0;
          %146 = nn.relu(%145);
          %147 = nn.conv2d(%146, meta[relay.Constant][190], padding=[1, 1, 1, 1], groups=576, channels=576, kernel_size=[3, 3]);
          %148 = nn.batch_norm(%147, meta[relay.Constant][191], meta[relay.Constant][192], meta[relay.Constant][193], meta[relay.Constant][194]);
          %149 = %148.0;
          %150 = nn.relu(%149);
          %151 = nn.conv2d(%150, meta[relay.Constant][195], padding=[0, 0, 0, 0], channels=96, kernel_size=[1, 1]);
          %152 = nn.batch_norm(%151, meta[relay.Constant][196], meta[relay.Constant][197], meta[relay.Constant][198], meta[relay.Constant][199]);
          %153 = %152.0;
          %154 = add(%153, %142);
          %155 = nn.conv2d(%154, meta[relay.Constant][200], padding=[0, 0, 0, 0], channels=576, kernel_size=[1, 1]);
          %156 = nn.batch_norm(%155, meta[relay.Constant][201], meta[relay.Constant][202], meta[relay.Constant][203], meta[relay.Constant][204]);
          %157 = %156.0;
          %158 = nn.relu(%157);
          %159 = nn.conv2d(%158, meta[relay.Constant][205], strides=[2, 2], padding=[1, 1, 1, 1], groups=576, channels=576, kernel_size=[3, 3]);
          %160 = nn.batch_norm(%159, meta[relay.Constant][206], meta[relay.Constant][207], meta[relay.Constant][208], meta[relay.Constant][209]);
          %161 = %160.0;
          %162 = nn.relu(%161);
          %163 = nn.conv2d(%162, meta[relay.Constant][210], padding=[0, 0, 0, 0], channels=160, kernel_size=[1, 1]);
          %164 = nn.batch_norm(%163, meta[relay.Constant][211], meta[relay.Constant][212], meta[relay.Constant][213], meta[relay.Constant][214]);
          %165 = %164.0;
          %166 = nn.conv2d(%165, meta[relay.Constant][215], padding=[0, 0, 0, 0], channels=960, kernel_size=[1, 1]);
          %167 = nn.batch_norm(%166, meta[relay.Constant][216], meta[relay.Constant][217], meta[relay.Constant][218], meta[relay.Constant][219]);
          %168 = %167.0;
          %169 = nn.relu(%168);
          %170 = nn.conv2d(%169, meta[relay.Constant][220], padding=[1, 1, 1, 1], groups=960, channels=960, kernel_size=[3, 3]);
          %171 = nn.batch_norm(%170, meta[relay.Constant][221], meta[relay.Constant][222], meta[relay.Constant][223], meta[relay.Constant][224]);
          %172 = %171.0;
          %173 = nn.relu(%172);
          %174 = nn.conv2d(%173, meta[relay.Constant][225], padding=[0, 0, 0, 0], channels=160, kernel_size=[1, 1]);
          %175 = nn.batch_norm(%174, meta[relay.Constant][226], meta[relay.Constant][227], meta[relay.Constant][228], meta[relay.Constant][229]);
          %176 = %175.0;
          %177 = add(%176, %165);
          %178 = nn.conv2d(%177, meta[relay.Constant][230], padding=[0, 0, 0, 0], channels=960, kernel_size=[1, 1]);
          %179 = nn.batch_norm(%178, meta[relay.Constant][231], meta[relay.Constant][232], meta[relay.Constant][233], meta[relay.Constant][234]);
          %180 = %179.0;
          %181 = nn.relu(%180);
          %182 = nn.conv2d(%181, meta[relay.Constant][235], padding=[1, 1, 1, 1], groups=960, channels=960, kernel_size=[3, 3]);
          %183 = nn.batch_norm(%182, meta[relay.Constant][236], meta[relay.Constant][237], meta[relay.Constant][238], meta[relay.Constant][239]);
          %184 = %183.0;
          %185 = nn.relu(%184);
          %186 = nn.conv2d(%185, meta[relay.Constant][240], padding=[0, 0, 0, 0], channels=160, kernel_size=[1, 1]);
          %187 = nn.batch_norm(%186, meta[relay.Constant][241], meta[relay.Constant][242], meta[relay.Constant][243], meta[relay.Constant][244]);
          %188 = %187.0;
          %189 = add(%188, %177);
          %190 = nn.conv2d(%189, meta[relay.Constant][245], padding=[0, 0, 0, 0], channels=960, kernel_size=[1, 1]);
          %191 = nn.batch_norm(%190, meta[relay.Constant][246], meta[relay.Constant][247], meta[relay.Constant][248], meta[relay.Constant][249]);
          %192 = %191.0;
          %193 = nn.relu(%192);
          %194 = nn.conv2d(%193, meta[relay.Constant][250], padding=[1, 1, 1, 1], groups=960, channels=960, kernel_size=[3, 3]);
          %195 = nn.batch_norm(%194, meta[relay.Constant][251], meta[relay.Constant][252], meta[relay.Constant][253], meta[relay.Constant][254]);
          %196 = %195.0;
          %197 = nn.relu(%196);
          %198 = nn.conv2d(%197, meta[relay.Constant][255], padding=[0, 0, 0, 0], channels=320, kernel_size=[1, 1]);
          %199 = nn.batch_norm(%198, meta[relay.Constant][256], meta[relay.Constant][257], meta[relay.Constant][258], meta[relay.Constant][259]);
          %200 = %199.0;
          %201 = nn.conv2d(%200, meta[relay.Constant][260], padding=[0, 0, 0, 0], channels=1280, kernel_size=[1, 1]);
          %202 = nn.batch_norm(%201, meta[relay.Constant][261], meta[relay.Constant][262], meta[relay.Constant][263], meta[relay.Constant][264]);
          %203 = %202.0;
          %204 = nn.relu(%203);
          %205 = nn.global_avg_pool2d(%204);
          %206 = nn.conv2d(%205, meta[relay.Constant][265], padding=[0, 0, 0, 0], channels=1000, kernel_size=[1, 1]);
          reshape(%206, newshape=[0, -1])
        }
        """,
        "from_string",
        None,
        metatable,
    )
    return {
        "name": "mobilenet",
        "input_shapes": {"data": [1, 3, 224, 224]},
        "input_dtypes": {"data": "float32"},
        "mod": mod,
        "params": None,
        "main_dtype": "float32",
    }
def mobilenet_16():
    """Return a MobileNet-style model (inverted-residual conv/bn/relu blocks)
    in Relay text form, with float16 activations and weights.

    The weights come from ``mobilenet_consts("float16")`` and are spliced into
    the parsed module via the ``meta[relay.Constant][N]`` metatable references.
    Returns the standard model-description dict used by the other builders in
    this file: name, input shapes/dtypes, the parsed module, params (None —
    weights are baked in as constants), and the main dtype.
    """
    # Index N in the relay text below refers to metatable["relay.Constant"][N].
    metatable = {"relay.Constant": mobilenet_consts("float16")}
    mod = tvm.relay.parse(
        """
#[version = "0.0.5"]
def @main(%data: Tensor[(1, 3, 224, 224), float16]) -> Tensor[(1, 1000), float16] {
%0 = nn.conv2d(%data, meta[relay.Constant][0], strides=[2, 2], padding=[1, 1, 1, 1], channels=32, kernel_size=[3, 3]);
%1 = nn.batch_norm(%0, meta[relay.Constant][1], meta[relay.Constant][2], meta[relay.Constant][3], meta[relay.Constant][4]);
%2 = %1.0;
%3 = nn.relu(%2);
%4 = nn.conv2d(%3, meta[relay.Constant][5], padding=[0, 0, 0, 0], channels=32, kernel_size=[1, 1]);
%5 = nn.batch_norm(%4, meta[relay.Constant][6], meta[relay.Constant][7], meta[relay.Constant][8], meta[relay.Constant][9]);
%6 = %5.0;
%7 = nn.relu(%6);
%8 = nn.conv2d(%7, meta[relay.Constant][10], padding=[1, 1, 1, 1], groups=32, channels=32, kernel_size=[3, 3]);
%9 = nn.batch_norm(%8, meta[relay.Constant][11], meta[relay.Constant][12], meta[relay.Constant][13], meta[relay.Constant][14]);
%10 = %9.0;
%11 = nn.relu(%10);
%12 = nn.conv2d(%11, meta[relay.Constant][15], padding=[0, 0, 0, 0], channels=16, kernel_size=[1, 1]);
%13 = nn.batch_norm(%12, meta[relay.Constant][16], meta[relay.Constant][17], meta[relay.Constant][18], meta[relay.Constant][19]);
%14 = %13.0;
%15 = nn.conv2d(%14, meta[relay.Constant][20], padding=[0, 0, 0, 0], channels=96, kernel_size=[1, 1]);
%16 = nn.batch_norm(%15, meta[relay.Constant][21], meta[relay.Constant][22], meta[relay.Constant][23], meta[relay.Constant][24]);
%17 = %16.0;
%18 = nn.relu(%17);
%19 = nn.conv2d(%18, meta[relay.Constant][25], strides=[2, 2], padding=[1, 1, 1, 1], groups=96, channels=96, kernel_size=[3, 3]);
%20 = nn.batch_norm(%19, meta[relay.Constant][26], meta[relay.Constant][27], meta[relay.Constant][28], meta[relay.Constant][29]);
%21 = %20.0;
%22 = nn.relu(%21);
%23 = nn.conv2d(%22, meta[relay.Constant][30], padding=[0, 0, 0, 0], channels=24, kernel_size=[1, 1]);
%24 = nn.batch_norm(%23, meta[relay.Constant][31], meta[relay.Constant][32], meta[relay.Constant][33], meta[relay.Constant][34]);
%25 = %24.0;
%26 = nn.conv2d(%25, meta[relay.Constant][35], padding=[0, 0, 0, 0], channels=144, kernel_size=[1, 1]);
%27 = nn.batch_norm(%26, meta[relay.Constant][36], meta[relay.Constant][37], meta[relay.Constant][38], meta[relay.Constant][39]);
%28 = %27.0;
%29 = nn.relu(%28);
%30 = nn.conv2d(%29, meta[relay.Constant][40], padding=[1, 1, 1, 1], groups=144, channels=144, kernel_size=[3, 3]);
%31 = nn.batch_norm(%30, meta[relay.Constant][41], meta[relay.Constant][42], meta[relay.Constant][43], meta[relay.Constant][44]);
%32 = %31.0;
%33 = nn.relu(%32);
%34 = nn.conv2d(%33, meta[relay.Constant][45], padding=[0, 0, 0, 0], channels=24, kernel_size=[1, 1]);
%35 = nn.batch_norm(%34, meta[relay.Constant][46], meta[relay.Constant][47], meta[relay.Constant][48], meta[relay.Constant][49]);
%36 = %35.0;
%37 = add(%36, %25);
%38 = nn.conv2d(%37, meta[relay.Constant][50], padding=[0, 0, 0, 0], channels=144, kernel_size=[1, 1]);
%39 = nn.batch_norm(%38, meta[relay.Constant][51], meta[relay.Constant][52], meta[relay.Constant][53], meta[relay.Constant][54]);
%40 = %39.0;
%41 = nn.relu(%40);
%42 = nn.conv2d(%41, meta[relay.Constant][55], strides=[2, 2], padding=[1, 1, 1, 1], groups=144, channels=144, kernel_size=[3, 3]);
%43 = nn.batch_norm(%42, meta[relay.Constant][56], meta[relay.Constant][57], meta[relay.Constant][58], meta[relay.Constant][59]);
%44 = %43.0;
%45 = nn.relu(%44);
%46 = nn.conv2d(%45, meta[relay.Constant][60], padding=[0, 0, 0, 0], channels=32, kernel_size=[1, 1]);
%47 = nn.batch_norm(%46, meta[relay.Constant][61], meta[relay.Constant][62], meta[relay.Constant][63], meta[relay.Constant][64]);
%48 = %47.0;
%49 = nn.conv2d(%48, meta[relay.Constant][65], padding=[0, 0, 0, 0], channels=192, kernel_size=[1, 1]);
%50 = nn.batch_norm(%49, meta[relay.Constant][66], meta[relay.Constant][67], meta[relay.Constant][68], meta[relay.Constant][69]);
%51 = %50.0;
%52 = nn.relu(%51);
%53 = nn.conv2d(%52, meta[relay.Constant][70], padding=[1, 1, 1, 1], groups=192, channels=192, kernel_size=[3, 3]);
%54 = nn.batch_norm(%53, meta[relay.Constant][71], meta[relay.Constant][72], meta[relay.Constant][73], meta[relay.Constant][74]);
%55 = %54.0;
%56 = nn.relu(%55);
%57 = nn.conv2d(%56, meta[relay.Constant][75], padding=[0, 0, 0, 0], channels=32, kernel_size=[1, 1]);
%58 = nn.batch_norm(%57, meta[relay.Constant][76], meta[relay.Constant][77], meta[relay.Constant][78], meta[relay.Constant][79]);
%59 = %58.0;
%60 = add(%59, %48);
%61 = nn.conv2d(%60, meta[relay.Constant][80], padding=[0, 0, 0, 0], channels=192, kernel_size=[1, 1]);
%62 = nn.batch_norm(%61, meta[relay.Constant][81], meta[relay.Constant][82], meta[relay.Constant][83], meta[relay.Constant][84]);
%63 = %62.0;
%64 = nn.relu(%63);
%65 = nn.conv2d(%64, meta[relay.Constant][85], padding=[1, 1, 1, 1], groups=192, channels=192, kernel_size=[3, 3]);
%66 = nn.batch_norm(%65, meta[relay.Constant][86], meta[relay.Constant][87], meta[relay.Constant][88], meta[relay.Constant][89]);
%67 = %66.0;
%68 = nn.relu(%67);
%69 = nn.conv2d(%68, meta[relay.Constant][90], padding=[0, 0, 0, 0], channels=32, kernel_size=[1, 1]);
%70 = nn.batch_norm(%69, meta[relay.Constant][91], meta[relay.Constant][92], meta[relay.Constant][93], meta[relay.Constant][94]);
%71 = %70.0;
%72 = add(%71, %60);
%73 = nn.conv2d(%72, meta[relay.Constant][95], padding=[0, 0, 0, 0], channels=192, kernel_size=[1, 1]);
%74 = nn.batch_norm(%73, meta[relay.Constant][96], meta[relay.Constant][97], meta[relay.Constant][98], meta[relay.Constant][99]);
%75 = %74.0;
%76 = nn.relu(%75);
%77 = nn.conv2d(%76, meta[relay.Constant][100], padding=[1, 1, 1, 1], groups=192, channels=192, kernel_size=[3, 3]);
%78 = nn.batch_norm(%77, meta[relay.Constant][101], meta[relay.Constant][102], meta[relay.Constant][103], meta[relay.Constant][104]);
%79 = %78.0;
%80 = nn.relu(%79);
%81 = nn.conv2d(%80, meta[relay.Constant][105], padding=[0, 0, 0, 0], channels=64, kernel_size=[1, 1]);
%82 = nn.batch_norm(%81, meta[relay.Constant][106], meta[relay.Constant][107], meta[relay.Constant][108], meta[relay.Constant][109]);
%83 = %82.0;
%84 = nn.conv2d(%83, meta[relay.Constant][110], padding=[0, 0, 0, 0], channels=384, kernel_size=[1, 1]);
%85 = nn.batch_norm(%84, meta[relay.Constant][111], meta[relay.Constant][112], meta[relay.Constant][113], meta[relay.Constant][114]);
%86 = %85.0;
%87 = nn.relu(%86);
%88 = nn.conv2d(%87, meta[relay.Constant][115], padding=[1, 1, 1, 1], groups=384, channels=384, kernel_size=[3, 3]);
%89 = nn.batch_norm(%88, meta[relay.Constant][116], meta[relay.Constant][117], meta[relay.Constant][118], meta[relay.Constant][119]);
%90 = %89.0;
%91 = nn.relu(%90);
%92 = nn.conv2d(%91, meta[relay.Constant][120], padding=[0, 0, 0, 0], channels=64, kernel_size=[1, 1]);
%93 = nn.batch_norm(%92, meta[relay.Constant][121], meta[relay.Constant][122], meta[relay.Constant][123], meta[relay.Constant][124]);
%94 = %93.0;
%95 = add(%94, %83);
%96 = nn.conv2d(%95, meta[relay.Constant][125], padding=[0, 0, 0, 0], channels=384, kernel_size=[1, 1]);
%97 = nn.batch_norm(%96, meta[relay.Constant][126], meta[relay.Constant][127], meta[relay.Constant][128], meta[relay.Constant][129]);
%98 = %97.0;
%99 = nn.relu(%98);
%100 = nn.conv2d(%99, meta[relay.Constant][130], padding=[1, 1, 1, 1], groups=384, channels=384, kernel_size=[3, 3]);
%101 = nn.batch_norm(%100, meta[relay.Constant][131], meta[relay.Constant][132], meta[relay.Constant][133], meta[relay.Constant][134]);
%102 = %101.0;
%103 = nn.relu(%102);
%104 = nn.conv2d(%103, meta[relay.Constant][135], padding=[0, 0, 0, 0], channels=64, kernel_size=[1, 1]);
%105 = nn.batch_norm(%104, meta[relay.Constant][136], meta[relay.Constant][137], meta[relay.Constant][138], meta[relay.Constant][139]);
%106 = %105.0;
%107 = add(%106, %95);
%108 = nn.conv2d(%107, meta[relay.Constant][140], padding=[0, 0, 0, 0], channels=384, kernel_size=[1, 1]);
%109 = nn.batch_norm(%108, meta[relay.Constant][141], meta[relay.Constant][142], meta[relay.Constant][143], meta[relay.Constant][144]);
%110 = %109.0;
%111 = nn.relu(%110);
%112 = nn.conv2d(%111, meta[relay.Constant][145], padding=[1, 1, 1, 1], groups=384, channels=384, kernel_size=[3, 3]);
%113 = nn.batch_norm(%112, meta[relay.Constant][146], meta[relay.Constant][147], meta[relay.Constant][148], meta[relay.Constant][149]);
%114 = %113.0;
%115 = nn.relu(%114);
%116 = nn.conv2d(%115, meta[relay.Constant][150], padding=[0, 0, 0, 0], channels=64, kernel_size=[1, 1]);
%117 = nn.batch_norm(%116, meta[relay.Constant][151], meta[relay.Constant][152], meta[relay.Constant][153], meta[relay.Constant][154]);
%118 = %117.0;
%119 = add(%118, %107);
%120 = nn.conv2d(%119, meta[relay.Constant][155], padding=[0, 0, 0, 0], channels=384, kernel_size=[1, 1]);
%121 = nn.batch_norm(%120, meta[relay.Constant][156], meta[relay.Constant][157], meta[relay.Constant][158], meta[relay.Constant][159]);
%122 = %121.0;
%123 = nn.relu(%122);
%124 = nn.conv2d(%123, meta[relay.Constant][160], strides=[2, 2], padding=[1, 1, 1, 1], groups=384, channels=384, kernel_size=[3, 3]);
%125 = nn.batch_norm(%124, meta[relay.Constant][161], meta[relay.Constant][162], meta[relay.Constant][163], meta[relay.Constant][164]);
%126 = %125.0;
%127 = nn.relu(%126);
%128 = nn.conv2d(%127, meta[relay.Constant][165], padding=[0, 0, 0, 0], channels=96, kernel_size=[1, 1]);
%129 = nn.batch_norm(%128, meta[relay.Constant][166], meta[relay.Constant][167], meta[relay.Constant][168], meta[relay.Constant][169]);
%130 = %129.0;
%131 = nn.conv2d(%130, meta[relay.Constant][170], padding=[0, 0, 0, 0], channels=576, kernel_size=[1, 1]);
%132 = nn.batch_norm(%131, meta[relay.Constant][171], meta[relay.Constant][172], meta[relay.Constant][173], meta[relay.Constant][174]);
%133 = %132.0;
%134 = nn.relu(%133);
%135 = nn.conv2d(%134, meta[relay.Constant][175], padding=[1, 1, 1, 1], groups=576, channels=576, kernel_size=[3, 3]);
%136 = nn.batch_norm(%135, meta[relay.Constant][176], meta[relay.Constant][177], meta[relay.Constant][178], meta[relay.Constant][179]);
%137 = %136.0;
%138 = nn.relu(%137);
%139 = nn.conv2d(%138, meta[relay.Constant][180], padding=[0, 0, 0, 0], channels=96, kernel_size=[1, 1]);
%140 = nn.batch_norm(%139, meta[relay.Constant][181], meta[relay.Constant][182], meta[relay.Constant][183], meta[relay.Constant][184]);
%141 = %140.0;
%142 = add(%141, %130);
%143 = nn.conv2d(%142, meta[relay.Constant][185], padding=[0, 0, 0, 0], channels=576, kernel_size=[1, 1]);
%144 = nn.batch_norm(%143, meta[relay.Constant][186], meta[relay.Constant][187], meta[relay.Constant][188], meta[relay.Constant][189]);
%145 = %144.0;
%146 = nn.relu(%145);
%147 = nn.conv2d(%146, meta[relay.Constant][190], padding=[1, 1, 1, 1], groups=576, channels=576, kernel_size=[3, 3]);
%148 = nn.batch_norm(%147, meta[relay.Constant][191], meta[relay.Constant][192], meta[relay.Constant][193], meta[relay.Constant][194]);
%149 = %148.0;
%150 = nn.relu(%149);
%151 = nn.conv2d(%150, meta[relay.Constant][195], padding=[0, 0, 0, 0], channels=96, kernel_size=[1, 1]);
%152 = nn.batch_norm(%151, meta[relay.Constant][196], meta[relay.Constant][197], meta[relay.Constant][198], meta[relay.Constant][199]);
%153 = %152.0;
%154 = add(%153, %142);
%155 = nn.conv2d(%154, meta[relay.Constant][200], padding=[0, 0, 0, 0], channels=576, kernel_size=[1, 1]);
%156 = nn.batch_norm(%155, meta[relay.Constant][201], meta[relay.Constant][202], meta[relay.Constant][203], meta[relay.Constant][204]);
%157 = %156.0;
%158 = nn.relu(%157);
%159 = nn.conv2d(%158, meta[relay.Constant][205], strides=[2, 2], padding=[1, 1, 1, 1], groups=576, channels=576, kernel_size=[3, 3]);
%160 = nn.batch_norm(%159, meta[relay.Constant][206], meta[relay.Constant][207], meta[relay.Constant][208], meta[relay.Constant][209]);
%161 = %160.0;
%162 = nn.relu(%161);
%163 = nn.conv2d(%162, meta[relay.Constant][210], padding=[0, 0, 0, 0], channels=160, kernel_size=[1, 1]);
%164 = nn.batch_norm(%163, meta[relay.Constant][211], meta[relay.Constant][212], meta[relay.Constant][213], meta[relay.Constant][214]);
%165 = %164.0;
%166 = nn.conv2d(%165, meta[relay.Constant][215], padding=[0, 0, 0, 0], channels=960, kernel_size=[1, 1]);
%167 = nn.batch_norm(%166, meta[relay.Constant][216], meta[relay.Constant][217], meta[relay.Constant][218], meta[relay.Constant][219]);
%168 = %167.0;
%169 = nn.relu(%168);
%170 = nn.conv2d(%169, meta[relay.Constant][220], padding=[1, 1, 1, 1], groups=960, channels=960, kernel_size=[3, 3]);
%171 = nn.batch_norm(%170, meta[relay.Constant][221], meta[relay.Constant][222], meta[relay.Constant][223], meta[relay.Constant][224]);
%172 = %171.0;
%173 = nn.relu(%172);
%174 = nn.conv2d(%173, meta[relay.Constant][225], padding=[0, 0, 0, 0], channels=160, kernel_size=[1, 1]);
%175 = nn.batch_norm(%174, meta[relay.Constant][226], meta[relay.Constant][227], meta[relay.Constant][228], meta[relay.Constant][229]);
%176 = %175.0;
%177 = add(%176, %165);
%178 = nn.conv2d(%177, meta[relay.Constant][230], padding=[0, 0, 0, 0], channels=960, kernel_size=[1, 1]);
%179 = nn.batch_norm(%178, meta[relay.Constant][231], meta[relay.Constant][232], meta[relay.Constant][233], meta[relay.Constant][234]);
%180 = %179.0;
%181 = nn.relu(%180);
%182 = nn.conv2d(%181, meta[relay.Constant][235], padding=[1, 1, 1, 1], groups=960, channels=960, kernel_size=[3, 3]);
%183 = nn.batch_norm(%182, meta[relay.Constant][236], meta[relay.Constant][237], meta[relay.Constant][238], meta[relay.Constant][239]);
%184 = %183.0;
%185 = nn.relu(%184);
%186 = nn.conv2d(%185, meta[relay.Constant][240], padding=[0, 0, 0, 0], channels=160, kernel_size=[1, 1]);
%187 = nn.batch_norm(%186, meta[relay.Constant][241], meta[relay.Constant][242], meta[relay.Constant][243], meta[relay.Constant][244]);
%188 = %187.0;
%189 = add(%188, %177);
%190 = nn.conv2d(%189, meta[relay.Constant][245], padding=[0, 0, 0, 0], channels=960, kernel_size=[1, 1]);
%191 = nn.batch_norm(%190, meta[relay.Constant][246], meta[relay.Constant][247], meta[relay.Constant][248], meta[relay.Constant][249]);
%192 = %191.0;
%193 = nn.relu(%192);
%194 = nn.conv2d(%193, meta[relay.Constant][250], padding=[1, 1, 1, 1], groups=960, channels=960, kernel_size=[3, 3]);
%195 = nn.batch_norm(%194, meta[relay.Constant][251], meta[relay.Constant][252], meta[relay.Constant][253], meta[relay.Constant][254]);
%196 = %195.0;
%197 = nn.relu(%196);
%198 = nn.conv2d(%197, meta[relay.Constant][255], padding=[0, 0, 0, 0], channels=320, kernel_size=[1, 1]);
%199 = nn.batch_norm(%198, meta[relay.Constant][256], meta[relay.Constant][257], meta[relay.Constant][258], meta[relay.Constant][259]);
%200 = %199.0;
%201 = nn.conv2d(%200, meta[relay.Constant][260], padding=[0, 0, 0, 0], channels=1280, kernel_size=[1, 1]);
%202 = nn.batch_norm(%201, meta[relay.Constant][261], meta[relay.Constant][262], meta[relay.Constant][263], meta[relay.Constant][264]);
%203 = %202.0;
%204 = nn.relu(%203);
%205 = nn.global_avg_pool2d(%204);
%206 = nn.conv2d(%205, meta[relay.Constant][265], padding=[0, 0, 0, 0], channels=1000, kernel_size=[1, 1]);
reshape(%206, newshape=[0, -1])
}
""",
        "from_string",
        None,
        metatable,
    )
    return {
        "name": "mobilenet_16",
        "input_shapes": {"data": [1, 3, 224, 224]},
        "input_dtypes": {"data": "float16"},
        "mod": mod,
        "params": None,  # weights are embedded as relay.Constant nodes
        "main_dtype": "float16",
    }
def batch_norm_extract():
    """Return a minimal model containing just one ``nn.batch_norm`` op.

    The op normalizes a (1, 32, 112, 112) float32 tensor over 32 channels;
    only element 0 of the batch_norm tuple (the normalized data) is returned.
    Useful for isolating batch_norm behavior from a full network.
    """
    # gamma, beta, moving_mean and moving_var each have one value per channel.
    per_channel_params = make_consts("float32", [(32,)] * 4)
    mod = tvm.relay.parse(
        """
#[version = "0.0.5"]
def @main(%FunctionVar_0: Tensor[(1, 32, 112, 112), float32]) -> Tensor[(1, 32, 112, 112), float32] {
%3 = nn.batch_norm(%FunctionVar_0, meta[relay.Constant][0], meta[relay.Constant][1], meta[relay.Constant][2], meta[relay.Constant][3]);
%3.0
}
""",
        "from_string",
        None,
        {"relay.Constant": per_channel_params},
    )
    return {
        "name": "batch_norm_extract",
        "input_shapes": {"FunctionVar_0": [1, 32, 112, 112]},
        "input_dtypes": {"FunctionVar_0": "float32"},
        "mod": mod,
        "params": None,
        "main_dtype": "float32",
    }
def resnext50_32x4d_consts(dtype):
    """Return the conv weight constants for the resnext50_32x4d models.

    The shapes are OIHW kernels, listed in the order the relay text of
    :func:`resnext50_32x4d` / :func:`resnext50_32x4d_16` references them via
    ``meta[relay.Constant][N]``; *dtype* selects float32 vs float16 weights.
    """
    kernel_shapes = [
        (128, 64, 1, 1),  # 0
        (128, 4, 3, 3),  # 1
        (256, 128, 1, 1),  # 2
        (256, 64, 1, 1),  # 3
        (128, 256, 1, 1),  # 4
        (128, 4, 3, 3),  # 5
        (256, 128, 1, 1),  # 6
        (128, 256, 1, 1),  # 7
        (128, 4, 3, 3),  # 8
        (256, 128, 1, 1),  # 9
        (256, 256, 1, 1),  # 10
        (256, 8, 3, 3),  # 11
        (512, 256, 1, 1),  # 12
        (512, 256, 1, 1),  # 13
        (256, 512, 1, 1),  # 14
        (256, 8, 3, 3),  # 15
        (512, 256, 1, 1),  # 16
        (256, 512, 1, 1),  # 17
        (256, 8, 3, 3),  # 18
        (512, 256, 1, 1),  # 19
        (256, 512, 1, 1),  # 20
        (256, 8, 3, 3),  # 21
        (512, 256, 1, 1),  # 22
        (512, 512, 1, 1),  # 23
        (512, 16, 3, 3),  # 24
        (1024, 512, 1, 1),  # 25
        (1024, 512, 1, 1),  # 26
        (512, 1024, 1, 1),  # 27
        (512, 16, 3, 3),  # 28
        (1024, 512, 1, 1),  # 29
        (512, 1024, 1, 1),  # 30
        (512, 16, 3, 3),  # 31
        (1024, 512, 1, 1),  # 32
        (512, 1024, 1, 1),  # 33
        (512, 16, 3, 3),  # 34
        (1024, 512, 1, 1),  # 35
        (512, 1024, 1, 1),  # 36
        (512, 16, 3, 3),  # 37
        (1024, 512, 1, 1),  # 38
        (512, 1024, 1, 1),  # 39
        (512, 16, 3, 3),  # 40
        (1024, 512, 1, 1),  # 41
        (1024, 1024, 1, 1),  # 42
        (1024, 32, 3, 3),  # 43
        (2048, 1024, 1, 1),  # 44
        (2048, 1024, 1, 1),  # 45
        (1024, 2048, 1, 1),  # 46
        (1024, 32, 3, 3),  # 47
        (2048, 1024, 1, 1),  # 48
        (1024, 2048, 1, 1),  # 49
        (1024, 32, 3, 3),  # 50
        (2048, 1024, 1, 1),  # 51
    ]
    return make_consts(dtype, kernel_shapes)
def resnext50_32x4d():
    """Return a ResNeXt50 (32x4d) style body in Relay text form, float32.

    Input %x is a (1, 64, 56, 56) feature map — presumably the output of the
    network stem, which is not included here (TODO confirm against the
    exporter that produced this text). Weights come from
    ``resnext50_32x4d_consts("float32")``.
    """
    # Index N in the relay text below refers to metatable["relay.Constant"][N].
    metatable = {"relay.Constant": resnext50_32x4d_consts("float32")}
    mod = tvm.relay.parse(
        """
#[version = "0.0.5"]
def @main(%x: Tensor[(1, 64, 56, 56), float32]) {
%0 = nn.conv2d(%x, meta[relay.Constant][0], padding=[0, 0, 0, 0], channels=128, kernel_size=[1, 1]);
%1 = nn.relu(%0);
%2 = nn.conv2d(%1, meta[relay.Constant][1], padding=[1, 1, 1, 1], groups=32, channels=128, kernel_size=[3, 3]);
%3 = nn.relu(%2);
%4 = nn.conv2d(%3, meta[relay.Constant][2], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
%5 = nn.conv2d(%x, meta[relay.Constant][3], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
%6 = add(%4, %5);
%7 = nn.relu(%6);
%8 = nn.conv2d(%7, meta[relay.Constant][4], padding=[0, 0, 0, 0], channels=128, kernel_size=[1, 1]);
%9 = nn.relu(%8);
%10 = nn.conv2d(%9, meta[relay.Constant][5], padding=[1, 1, 1, 1], groups=32, channels=128, kernel_size=[3, 3]);
%11 = nn.relu(%10);
%12 = nn.conv2d(%11, meta[relay.Constant][6], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
%13 = add(%12, %7);
%14 = nn.relu(%13);
%15 = nn.conv2d(%14, meta[relay.Constant][7], padding=[0, 0, 0, 0], channels=128, kernel_size=[1, 1]);
%16 = nn.relu(%15);
%17 = nn.conv2d(%16, meta[relay.Constant][8], padding=[1, 1, 1, 1], groups=32, channels=128, kernel_size=[3, 3]);
%18 = nn.relu(%17);
%19 = nn.conv2d(%18, meta[relay.Constant][9], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
%20 = add(%19, %14);
%21 = nn.relu(%20);
%22 = nn.conv2d(%21, meta[relay.Constant][10], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
%23 = nn.relu(%22);
%24 = nn.conv2d(%23, meta[relay.Constant][11], strides=[2, 2], padding=[1, 1, 1, 1], groups=32, channels=256, kernel_size=[3, 3]);
%25 = nn.relu(%24);
%26 = nn.conv2d(%25, meta[relay.Constant][12], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
%27 = nn.conv2d(%21, meta[relay.Constant][13], strides=[2, 2], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
%28 = add(%26, %27);
%29 = nn.relu(%28);
%30 = nn.conv2d(%29, meta[relay.Constant][14], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
%31 = nn.relu(%30);
%32 = nn.conv2d(%31, meta[relay.Constant][15], padding=[1, 1, 1, 1], groups=32, channels=256, kernel_size=[3, 3]);
%33 = nn.relu(%32);
%34 = nn.conv2d(%33, meta[relay.Constant][16], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
%35 = add(%34, %29);
%36 = nn.relu(%35);
%37 = nn.conv2d(%36, meta[relay.Constant][17], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
%38 = nn.relu(%37);
%39 = nn.conv2d(%38, meta[relay.Constant][18], padding=[1, 1, 1, 1], groups=32, channels=256, kernel_size=[3, 3]);
%40 = nn.relu(%39);
%41 = nn.conv2d(%40, meta[relay.Constant][19], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
%42 = add(%41, %36);
%43 = nn.relu(%42);
%44 = nn.conv2d(%43, meta[relay.Constant][20], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
%45 = nn.relu(%44);
%46 = nn.conv2d(%45, meta[relay.Constant][21], padding=[1, 1, 1, 1], groups=32, channels=256, kernel_size=[3, 3]);
%47 = nn.relu(%46);
%48 = nn.conv2d(%47, meta[relay.Constant][22], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
%49 = add(%48, %43);
%50 = nn.relu(%49);
%51 = nn.conv2d(%50, meta[relay.Constant][23], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
%52 = nn.relu(%51);
%53 = nn.conv2d(%52, meta[relay.Constant][24], strides=[2, 2], padding=[1, 1, 1, 1], groups=32, channels=512, kernel_size=[3, 3]);
%54 = nn.relu(%53);
%55 = nn.conv2d(%54, meta[relay.Constant][25], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
%56 = nn.conv2d(%50, meta[relay.Constant][26], strides=[2, 2], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
%57 = add(%55, %56);
%58 = nn.relu(%57);
%59 = nn.conv2d(%58, meta[relay.Constant][27], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
%60 = nn.relu(%59);
%61 = nn.conv2d(%60, meta[relay.Constant][28], padding=[1, 1, 1, 1], groups=32, channels=512, kernel_size=[3, 3]);
%62 = nn.relu(%61);
%63 = nn.conv2d(%62, meta[relay.Constant][29], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
%64 = add(%63, %58);
%65 = nn.relu(%64);
%66 = nn.conv2d(%65, meta[relay.Constant][30], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
%67 = nn.relu(%66);
%68 = nn.conv2d(%67, meta[relay.Constant][31], padding=[1, 1, 1, 1], groups=32, channels=512, kernel_size=[3, 3]);
%69 = nn.relu(%68);
%70 = nn.conv2d(%69, meta[relay.Constant][32], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
%71 = add(%70, %65);
%72 = nn.relu(%71);
%73 = nn.conv2d(%72, meta[relay.Constant][33], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
%74 = nn.relu(%73);
%75 = nn.conv2d(%74, meta[relay.Constant][34], padding=[1, 1, 1, 1], groups=32, channels=512, kernel_size=[3, 3]);
%76 = nn.relu(%75);
%77 = nn.conv2d(%76, meta[relay.Constant][35], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
%78 = add(%77, %72);
%79 = nn.relu(%78);
%80 = nn.conv2d(%79, meta[relay.Constant][36], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
%81 = nn.relu(%80);
%82 = nn.conv2d(%81, meta[relay.Constant][37], padding=[1, 1, 1, 1], groups=32, channels=512, kernel_size=[3, 3]);
%83 = nn.relu(%82);
%84 = nn.conv2d(%83, meta[relay.Constant][38], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
%85 = add(%84, %79);
%86 = nn.relu(%85);
%87 = nn.conv2d(%86, meta[relay.Constant][39], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
%88 = nn.relu(%87);
%89 = nn.conv2d(%88, meta[relay.Constant][40], padding=[1, 1, 1, 1], groups=32, channels=512, kernel_size=[3, 3]);
%90 = nn.relu(%89);
%91 = nn.conv2d(%90, meta[relay.Constant][41], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
%92 = add(%91, %86);
%93 = nn.relu(%92);
%94 = nn.conv2d(%93, meta[relay.Constant][42], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
%95 = nn.relu(%94);
%96 = nn.conv2d(%95, meta[relay.Constant][43], strides=[2, 2], padding=[1, 1, 1, 1], groups=32, channels=1024, kernel_size=[3, 3]);
%97 = nn.relu(%96);
%98 = nn.conv2d(%97, meta[relay.Constant][44], padding=[0, 0, 0, 0], channels=2048, kernel_size=[1, 1]);
%99 = nn.conv2d(%93, meta[relay.Constant][45], strides=[2, 2], padding=[0, 0, 0, 0], channels=2048, kernel_size=[1, 1]);
%100 = add(%98, %99);
%101 = nn.relu(%100);
%102 = nn.conv2d(%101, meta[relay.Constant][46], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
%103 = nn.relu(%102);
%104 = nn.conv2d(%103, meta[relay.Constant][47], padding=[1, 1, 1, 1], groups=32, channels=1024, kernel_size=[3, 3]);
%105 = nn.relu(%104);
%106 = nn.conv2d(%105, meta[relay.Constant][48], padding=[0, 0, 0, 0], channels=2048, kernel_size=[1, 1]);
%107 = add(%106, %101);
%108 = nn.relu(%107);
%109 = nn.conv2d(%108, meta[relay.Constant][49], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
%110 = nn.relu(%109);
%111 = nn.conv2d(%110, meta[relay.Constant][50], padding=[1, 1, 1, 1], groups=32, channels=1024, kernel_size=[3, 3]);
%112 = nn.relu(%111);
%113 = nn.conv2d(%112, meta[relay.Constant][51], padding=[0, 0, 0, 0], channels=2048, kernel_size=[1, 1]);
%114 = add(%113, %108);
nn.relu(%114)
}
""",
        "from_string",
        None,
        metatable,
    )
    return {
        "name": "resnext50_32x4d",
        "input_shapes": {"x": [1, 64, 56, 56]},
        "input_dtypes": {"x": "float32"},
        "mod": mod,
        "params": None,  # weights are embedded as relay.Constant nodes
        "main_dtype": "float32",
    }
def resnext50_32x4d_16():
    """float16 variant of :func:`resnext50_32x4d`.

    Identical relay text except the input/weight dtype is float16; weights
    come from ``resnext50_32x4d_consts("float16")``.
    """
    # Index N in the relay text below refers to metatable["relay.Constant"][N].
    metatable = {"relay.Constant": resnext50_32x4d_consts("float16")}
    mod = tvm.relay.parse(
        """
#[version = "0.0.5"]
def @main(%x: Tensor[(1, 64, 56, 56), float16]) {
%0 = nn.conv2d(%x, meta[relay.Constant][0], padding=[0, 0, 0, 0], channels=128, kernel_size=[1, 1]);
%1 = nn.relu(%0);
%2 = nn.conv2d(%1, meta[relay.Constant][1], padding=[1, 1, 1, 1], groups=32, channels=128, kernel_size=[3, 3]);
%3 = nn.relu(%2);
%4 = nn.conv2d(%3, meta[relay.Constant][2], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
%5 = nn.conv2d(%x, meta[relay.Constant][3], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
%6 = add(%4, %5);
%7 = nn.relu(%6);
%8 = nn.conv2d(%7, meta[relay.Constant][4], padding=[0, 0, 0, 0], channels=128, kernel_size=[1, 1]);
%9 = nn.relu(%8);
%10 = nn.conv2d(%9, meta[relay.Constant][5], padding=[1, 1, 1, 1], groups=32, channels=128, kernel_size=[3, 3]);
%11 = nn.relu(%10);
%12 = nn.conv2d(%11, meta[relay.Constant][6], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
%13 = add(%12, %7);
%14 = nn.relu(%13);
%15 = nn.conv2d(%14, meta[relay.Constant][7], padding=[0, 0, 0, 0], channels=128, kernel_size=[1, 1]);
%16 = nn.relu(%15);
%17 = nn.conv2d(%16, meta[relay.Constant][8], padding=[1, 1, 1, 1], groups=32, channels=128, kernel_size=[3, 3]);
%18 = nn.relu(%17);
%19 = nn.conv2d(%18, meta[relay.Constant][9], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
%20 = add(%19, %14);
%21 = nn.relu(%20);
%22 = nn.conv2d(%21, meta[relay.Constant][10], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
%23 = nn.relu(%22);
%24 = nn.conv2d(%23, meta[relay.Constant][11], strides=[2, 2], padding=[1, 1, 1, 1], groups=32, channels=256, kernel_size=[3, 3]);
%25 = nn.relu(%24);
%26 = nn.conv2d(%25, meta[relay.Constant][12], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
%27 = nn.conv2d(%21, meta[relay.Constant][13], strides=[2, 2], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
%28 = add(%26, %27);
%29 = nn.relu(%28);
%30 = nn.conv2d(%29, meta[relay.Constant][14], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
%31 = nn.relu(%30);
%32 = nn.conv2d(%31, meta[relay.Constant][15], padding=[1, 1, 1, 1], groups=32, channels=256, kernel_size=[3, 3]);
%33 = nn.relu(%32);
%34 = nn.conv2d(%33, meta[relay.Constant][16], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
%35 = add(%34, %29);
%36 = nn.relu(%35);
%37 = nn.conv2d(%36, meta[relay.Constant][17], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
%38 = nn.relu(%37);
%39 = nn.conv2d(%38, meta[relay.Constant][18], padding=[1, 1, 1, 1], groups=32, channels=256, kernel_size=[3, 3]);
%40 = nn.relu(%39);
%41 = nn.conv2d(%40, meta[relay.Constant][19], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
%42 = add(%41, %36);
%43 = nn.relu(%42);
%44 = nn.conv2d(%43, meta[relay.Constant][20], padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]);
%45 = nn.relu(%44);
%46 = nn.conv2d(%45, meta[relay.Constant][21], padding=[1, 1, 1, 1], groups=32, channels=256, kernel_size=[3, 3]);
%47 = nn.relu(%46);
%48 = nn.conv2d(%47, meta[relay.Constant][22], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
%49 = add(%48, %43);
%50 = nn.relu(%49);
%51 = nn.conv2d(%50, meta[relay.Constant][23], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
%52 = nn.relu(%51);
%53 = nn.conv2d(%52, meta[relay.Constant][24], strides=[2, 2], padding=[1, 1, 1, 1], groups=32, channels=512, kernel_size=[3, 3]);
%54 = nn.relu(%53);
%55 = nn.conv2d(%54, meta[relay.Constant][25], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
%56 = nn.conv2d(%50, meta[relay.Constant][26], strides=[2, 2], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
%57 = add(%55, %56);
%58 = nn.relu(%57);
%59 = nn.conv2d(%58, meta[relay.Constant][27], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
%60 = nn.relu(%59);
%61 = nn.conv2d(%60, meta[relay.Constant][28], padding=[1, 1, 1, 1], groups=32, channels=512, kernel_size=[3, 3]);
%62 = nn.relu(%61);
%63 = nn.conv2d(%62, meta[relay.Constant][29], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
%64 = add(%63, %58);
%65 = nn.relu(%64);
%66 = nn.conv2d(%65, meta[relay.Constant][30], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
%67 = nn.relu(%66);
%68 = nn.conv2d(%67, meta[relay.Constant][31], padding=[1, 1, 1, 1], groups=32, channels=512, kernel_size=[3, 3]);
%69 = nn.relu(%68);
%70 = nn.conv2d(%69, meta[relay.Constant][32], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
%71 = add(%70, %65);
%72 = nn.relu(%71);
%73 = nn.conv2d(%72, meta[relay.Constant][33], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
%74 = nn.relu(%73);
%75 = nn.conv2d(%74, meta[relay.Constant][34], padding=[1, 1, 1, 1], groups=32, channels=512, kernel_size=[3, 3]);
%76 = nn.relu(%75);
%77 = nn.conv2d(%76, meta[relay.Constant][35], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
%78 = add(%77, %72);
%79 = nn.relu(%78);
%80 = nn.conv2d(%79, meta[relay.Constant][36], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
%81 = nn.relu(%80);
%82 = nn.conv2d(%81, meta[relay.Constant][37], padding=[1, 1, 1, 1], groups=32, channels=512, kernel_size=[3, 3]);
%83 = nn.relu(%82);
%84 = nn.conv2d(%83, meta[relay.Constant][38], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
%85 = add(%84, %79);
%86 = nn.relu(%85);
%87 = nn.conv2d(%86, meta[relay.Constant][39], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]);
%88 = nn.relu(%87);
%89 = nn.conv2d(%88, meta[relay.Constant][40], padding=[1, 1, 1, 1], groups=32, channels=512, kernel_size=[3, 3]);
%90 = nn.relu(%89);
%91 = nn.conv2d(%90, meta[relay.Constant][41], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
%92 = add(%91, %86);
%93 = nn.relu(%92);
%94 = nn.conv2d(%93, meta[relay.Constant][42], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
%95 = nn.relu(%94);
%96 = nn.conv2d(%95, meta[relay.Constant][43], strides=[2, 2], padding=[1, 1, 1, 1], groups=32, channels=1024, kernel_size=[3, 3]);
%97 = nn.relu(%96);
%98 = nn.conv2d(%97, meta[relay.Constant][44], padding=[0, 0, 0, 0], channels=2048, kernel_size=[1, 1]);
%99 = nn.conv2d(%93, meta[relay.Constant][45], strides=[2, 2], padding=[0, 0, 0, 0], channels=2048, kernel_size=[1, 1]);
%100 = add(%98, %99);
%101 = nn.relu(%100);
%102 = nn.conv2d(%101, meta[relay.Constant][46], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
%103 = nn.relu(%102);
%104 = nn.conv2d(%103, meta[relay.Constant][47], padding=[1, 1, 1, 1], groups=32, channels=1024, kernel_size=[3, 3]);
%105 = nn.relu(%104);
%106 = nn.conv2d(%105, meta[relay.Constant][48], padding=[0, 0, 0, 0], channels=2048, kernel_size=[1, 1]);
%107 = add(%106, %101);
%108 = nn.relu(%107);
%109 = nn.conv2d(%108, meta[relay.Constant][49], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]);
%110 = nn.relu(%109);
%111 = nn.conv2d(%110, meta[relay.Constant][50], padding=[1, 1, 1, 1], groups=32, channels=1024, kernel_size=[3, 3]);
%112 = nn.relu(%111);
%113 = nn.conv2d(%112, meta[relay.Constant][51], padding=[0, 0, 0, 0], channels=2048, kernel_size=[1, 1]);
%114 = add(%113, %108);
nn.relu(%114)
}
""",
        "from_string",
        None,
        metatable,
    )
    return {
        "name": "resnext50_32x4d_16",
        "input_shapes": {"x": [1, 64, 56, 56]},
        "input_dtypes": {"x": "float16"},
        "mod": mod,
        "params": None,  # weights are embedded as relay.Constant nodes
        "main_dtype": "float16",
    }
def describe_onnx(name, filename):
    """Print a description dict of the ONNX model at filename, which can be passed to
    from_onnx to actually load the model. Note that ? (ie unknown) shape dimensions must
    be manually changed to concrete dimensions which are consistent with the overall model."""
    onnx_model = onnx.load(MODEL_PREFIX + filename)
    input_shapes = {}
    input_dtypes = {}
    # Initializers (weights) also appear in graph.input; describe only true model inputs.
    initializer_names = [n.name for n in onnx_model.graph.initializer]
    for input_info in onnx_model.graph.input:
        if input_info.name not in initializer_names:
            _, shape, dtype, _ = tvm.relay.frontend.onnx.get_info(input_info)
            if dtype is None:
                raise ValueError(f"Unknown dtype on input '{input_info.name}' is not supported.")
            # Direct assignment instead of dict.update with a throwaway dict.
            input_shapes[input_info.name] = shape
            input_dtypes[input_info.name] = dtype
    # Include the actual filename so the printed description is usable by from_onnx;
    # previously a literal placeholder was printed.
    print(
        f"{{'name': '{name}', 'filename': '{filename}', 'input_shapes': {input_shapes}, "
        f"'input_dtypes': {input_dtypes}, 'main_dtype': 'float32'}}"
    )
def from_onnx(model):
    """Load the ONNX model described by ``model`` (a dict with "filename",
    "input_shapes", etc.) and return the same description with the imported,
    type-inferred IRModule and params filled in."""
    logging.info("-------------------- BEGIN ONNX IMPORT --------------------")
    filename = MODEL_PREFIX + model["filename"]
    # Lazy %-style logging args; the previous f-strings had lost their
    # placeholders and logged no useful information.
    logging.info("Loading ONNX model from %s", filename)
    onnx_model = onnx.load(filename)
    logging.info("Loaded model from %s", filename)
    mod, params = tvm.relay.frontend.from_onnx(
        onnx_model, model["input_shapes"], freeze_params=True
    )
    mod = tvm.relay.transform.InferType()(mod)
    logging.info("-------------------- END ONNX IMPORT --------------------")
    logging.info("Imported model:\n%s", mod)
    logging.info("Params:\n%s", params)
    return {
        "name": model["name"],
        "input_shapes": model["input_shapes"],
        "input_dtypes": model["input_dtypes"],
        "mod": mod,
        "params": params,
        "main_dtype": model["main_dtype"],
    }
def to_onnx(model):
    """Export ``model`` (a description dict carrying a "mod" and "params") to
    MODEL_PREFIX/<name>.onnx and return a description dict referencing the file."""
    logging.info("-------------------- BEGIN ONNX EXPORT --------------------")
    short_filename = model["name"] + ".onnx"
    filename = MODEL_PREFIX + short_filename
    # Lazy %-style logging; the previous f-string had lost its placeholder.
    logging.info("Saving ONNX model to %s", filename)
    params = model["params"]
    if params is None:
        params = {}
    tvm.contrib.target.onnx.to_onnx(model["mod"], params, model["name"], path=filename)
    logging.info("-------------------- END ONNX EXPORT --------------------")
    return {
        "name": model["name"],
        "filename": short_filename,
        "input_shapes": model["input_shapes"],
        "input_dtypes": model["input_dtypes"],
        "main_dtype": model["main_dtype"],
    }
| 215,685 | 49.288179 | 146 | py |
tvm | tvm-main/tests/python/driver/tvmc/conftest.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import pytest
import tarfile
import textwrap
import numpy as np
from PIL import Image
import tvm
from tvm import relay
from tvm.driver import tvmc
from tvm.contrib.download import download_testdata
# Support functions
def download_and_untar(model_url, model_sub_path, temp_dir):
    """Download the archive at model_url, extract it into temp_dir, and return
    the path to model_sub_path inside the extracted tree."""
    model_tar_name = os.path.basename(model_url)
    model_path = download_testdata(model_url, model_tar_name, module=["tvmc"])
    if model_path.endswith(("tgz", "gz", "tar")):
        # Context manager guarantees the archive handle is closed even if
        # extraction raises.
        with tarfile.open(model_path) as tar:
            # NOTE(review): extractall is vulnerable to path traversal for
            # malicious archives (CVE-2007-4559); the URLs used here are
            # trusted test fixtures.
            tar.extractall(path=temp_dir)
    return os.path.join(temp_dir, model_sub_path)
# PyTest fixtures
@pytest.fixture(scope="session")
def tflite_mobilenet_v1_1_quant(tmpdir_factory):
    """Session fixture: path to the quantized MobileNet V1 TFLite model."""
    archive_url = (
        "https://storage.googleapis.com/download.tensorflow.org/models"
        "/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224_quant.tgz"
    )
    return download_and_untar(
        archive_url,
        "mobilenet_v1_1.0_224_quant.tflite",
        temp_dir=tmpdir_factory.mktemp("data"),
    )
@pytest.fixture(scope="session")
def pb_mobilenet_v1_1_quant(tmpdir_factory):
    """Session fixture: path to the frozen MobileNet V1 TensorFlow graph (.pb)."""
    archive_url = (
        "https://storage.googleapis.com/download.tensorflow.org/models"
        "/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224.tgz"
    )
    return download_and_untar(
        archive_url,
        "mobilenet_v1_1.0_224_frozen.pb",
        temp_dir=tmpdir_factory.mktemp("data"),
    )
@pytest.fixture(scope="session")
def keras_resnet50(tmpdir_factory):
    """Session fixture: ResNet50 Keras model saved as HDF5, or "" if TF is absent."""
    try:
        from tensorflow.keras.applications.resnet50 import ResNet50
    except ImportError:
        # not all environments provide TensorFlow, so skip this fixture
        # if that is that case.
        return ""
    out_path = f"{tmpdir_factory.mktemp('data')}/resnet50.h5"
    net = ResNet50(include_top=True, weights="imagenet", input_shape=(224, 224, 3), classes=1000)
    net.save(out_path)
    return out_path
@pytest.fixture(scope="session")
def keras_simple(tmpdir_factory):
    """Session fixture: a tiny conv+dense Keras model saved as HDF5, or "" if TF is absent."""
    try:
        from tensorflow import keras
    except ImportError:
        # not all environments provide TensorFlow, so skip this fixture
        # if that is that case.
        return ""
    out_path = f"{tmpdir_factory.mktemp('data')}/simple_conv.h5"
    layers = [
        keras.layers.InputLayer(input_shape=[32, 32, 3], batch_size=1),
        keras.layers.Conv2D(8, kernel_size=(3, 3)),
        keras.layers.Flatten(),
        keras.layers.Dense(64),
    ]
    keras.Sequential(layers).save(out_path)
    return out_path
@pytest.fixture(scope="session")
def pytorch_resnet18(tmpdir_factory):
    """Session fixture: torchscript-traced ResNet18 on disk, or "" if torch is absent."""
    try:
        import torch
        import torchvision.models as models
    except ImportError:
        # Not all environments provide Pytorch, so skip if that's the case.
        return ""
    out_path = f"{tmpdir_factory.mktemp('data')}/resnet18.pth"
    # Trace model into torchscript.
    traced = torch.jit.trace(models.resnet18(), torch.randn(1, 3, 224, 224))
    torch.jit.save(traced, out_path)
    return out_path
@pytest.fixture(scope="session")
def pytorch_mobilenetv2_quantized(tmpdir_factory):
    """Session fixture: quantized MobileNetV2 torchscript on disk, or "" if torch is absent."""
    try:
        import torch
        import torchvision.models as models
    except ImportError:
        # Not all environments provide Pytorch, so skip if that's the case.
        return ""
    out_path = f"{tmpdir_factory.mktemp('data')}/mobilenet_v2_quantized.pth"
    net = models.quantization.mobilenet_v2(quantize=True)
    # Trace model into torchscript.
    traced = torch.jit.trace(net, torch.randn(1, 3, 224, 224))
    torch.jit.save(traced, out_path)
    return out_path
@pytest.fixture(scope="session")
def onnx_resnet50():
    """Session fixture: path to the pinned ONNX ResNet50 model."""
    url = (
        "https://github.com/onnx/models/raw/bd206494e8b6a27b25e5cf7199dbcdbfe9d05d1c"
        "/vision/classification/resnet/model/resnet50-v2-7.onnx"
    )
    return download_testdata(url, "resnet50-v2-7.onnx", module=["tvmc"])
@pytest.fixture(scope="session")
def paddle_resnet50(tmpdir_factory):
    """Session fixture: path to the Paddle ResNet50 model file."""
    archive_url = "https://bj.bcebos.com/x2paddle/models/paddle_resnet50.tar"
    return download_and_untar(
        archive_url,
        "paddle_resnet50/model.pdmodel",
        temp_dir=tmpdir_factory.mktemp("data"),
    )
@pytest.fixture(scope="session")
def onnx_mnist():
    """Session fixture: path to the pinned ONNX MNIST model."""
    url = (
        "https://github.com/onnx/models/raw/bd206494e8b6a27b25e5cf7199dbcdbfe9d05d1c"
        "/vision/classification/mnist/model/mnist-1.onnx"
    )
    return download_testdata(url, "mnist-1.onnx", module=["tvmc"])
@pytest.fixture
def tflite_compile_model(tmpdir_factory):
    """Fixture returning a helper that compiles a model file into a TVMCPackage."""
    def _compile(model_file, **overrides):
        package_path = tmpdir_factory.mktemp("data").join("mock.tar")
        tvmc_model = tvmc.frontends.load_model(model_file)
        kwargs = dict(target="llvm")
        kwargs.update(overrides)
        return tvmc.compiler.compile_model(tvmc_model, package_path=package_path, **kwargs)
    # The helper returns a TVMCPackage when called.
    return _compile
@pytest.fixture
def relay_compile_model(tmpdir_factory):
    """Fixture returning a helper that compiles a Relay text model into a TVMCPackage."""
    def _compile(model_file, shape_dict, **overrides):
        package_path = tmpdir_factory.mktemp("data").join("mock.tar")
        tvmc_model = tvmc.frontends.load_model(
            model_file, model_format="relay", shape_dict=shape_dict
        )
        kwargs = dict(target="llvm")
        kwargs.update(overrides)
        return tvmc.compiler.compile_model(tvmc_model, package_path=package_path, **kwargs)
    # The helper returns a TVMCPackage when called.
    return _compile
@pytest.fixture(scope="session")
def imagenet_cat(tmpdir_factory):
    """Session fixture: cat image preprocessed to a (1, 224, 224, 3) float32 .npz file."""
    out_dir = tmpdir_factory.mktemp("data")
    cat_url = "https://github.com/dmlc/mxnet.js/blob/main/data/cat.png?raw=true"
    image_path = download_testdata(cat_url, "inputs", module=["tvmc"])
    pixels = np.asarray(Image.open(image_path).resize((224, 224))).astype("float32")
    pixels = np.expand_dims(pixels, axis=0)
    npz_path = os.path.join(out_dir, "imagenet_cat.npz")
    np.savez(npz_path, input=pixels)
    return npz_path
@pytest.fixture(scope="session")
def tflite_mobilenet_v1_0_25_128(tmpdir_factory):
    """Session fixture: path to the small (0.25/128) MobileNet V1 TFLite model."""
    archive_url = (
        "https://storage.googleapis.com/download.tensorflow.org/models"
        "/mobilenet_v1_2018_02_22/mobilenet_v1_0.25_128.tgz"
    )
    return download_and_untar(
        archive_url,
        "mobilenet_v1_0.25_128.tflite",
        temp_dir=tmpdir_factory.mktemp("data"),
    )
@pytest.fixture(scope="session")
def tflite_cnn_s_quantized(tmpdir_factory):
    """Session fixture: int8 keyword-spotting CNN from the ARM ML-zoo."""
    url = (
        "https://github.com/ARM-software/ML-zoo/raw/48a22ee22325d15d2371a6df24eb7d67e21dcc97"
        "/models/keyword_spotting/cnn_small/tflite_int8/cnn_s_quantized.tflite"
    )
    return download_testdata(url, "cnn_s_quantized.tflite", module=["tvmc"])
@pytest.fixture(scope="session")
def relay_text_conv2d(tmpdir_factory):
    """Session fixture: write a small two-conv2d Relay program (text form) to a
    temp file and return its path. Used by tests exercising the "relay" frontend."""
    file_path = os.path.join(tmpdir_factory.mktemp("model"), "relay.txt")
    # dedent strips the common indentation so the file starts at column 0.
    RELAY_MODEL = textwrap.dedent(
        """\
        #[version = "0.0.5"]
        def @main(%data : Tensor[(1, 3, 64, 64), uint8], %weight : Tensor[(3, 3, 5, 5), int8]) {
            %1 = nn.conv2d(
                 %data,
                 %weight,
                 padding=[2, 2],
                 channels=3,
                 kernel_size=[5, 5],
                 data_layout="NCHW",
                 kernel_layout="OIHW",
                 out_dtype="int32");
            %2 = cast(nn.max_pool2d(%1, pool_size=[3, 3]), dtype="int8");
            %3 = nn.conv2d(
                 %2,
                 %weight,
                 padding=[2, 2],
                 channels=3,
                 kernel_size=[5, 5],
                 data_layout="NCHW",
                 kernel_layout="OIHW",
                 out_dtype="int32");
            %4 = nn.max_pool2d(%3, pool_size=[3, 3]);
            %4
        }
        """
    )
    with open(file_path, "w") as relay_text:
        relay_text.write(RELAY_MODEL)
    return file_path
@pytest.fixture(scope="session")
def relay_conv2d():
    """
    Simple conv2d Relay implementation.
    """
    dtype = "float32"
    data = relay.var("x", shape=(1, 4, 2, 2), dtype=dtype)
    kernel = relay.const(np.random.uniform(size=(2, 4, 2, 2)), dtype=dtype)
    conv = relay.nn.conv2d(data, kernel)
    fn = relay.Function(relay.analysis.free_vars(conv), conv)
    return tvm.IRModule.from_expr(fn)
| 9,963 | 31.884488 | 146 | py |
tvm | tvm-main/tests/python/driver/tvmc/test_command_line.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import platform
import pytest
import shutil
import logging
import sys
from pytest_lazyfixture import lazy_fixture
from unittest import mock
import tvm
from tvm.driver.tvmc.main import _main
from tvm.driver.tvmc.model import TVMCException
from tvm.driver.tvmc import compiler
from unittest.mock import MagicMock
@pytest.mark.skipif(
    platform.machine() == "aarch64",
    reason="Currently failing on AArch64 - see https://github.com/apache/tvm/issues/10673",
)
def test_tvmc_cl_workflow(keras_simple, tmpdir_factory):
    """End-to-end tune -> compile -> run through the tvmc command line."""
    pytest.importorskip("tensorflow")
    work_dir = tmpdir_factory.mktemp("data")

    # Tune the model.
    records_path = os.path.join(work_dir, "keras-autotuner_records.json")
    cmd = (
        f"tvmc tune --target llvm --output {records_path} "
        f"--trials 2 --enable-autoscheduler {keras_simple}"
    )
    _main(cmd.split(" ")[1:])
    assert os.path.exists(records_path)

    # Compile the model using the tuning records.
    package_path = os.path.join(work_dir, "keras-tvm.tar")
    cmd = (
        f"tvmc compile --target llvm --tuning-records {records_path} "
        f"--output {package_path} {keras_simple}"
    )
    _main(cmd.split(" ")[1:])
    assert os.path.exists(package_path)

    # Run the compiled model.
    predictions_path = os.path.join(work_dir, "predictions.npz")
    cmd = f"tvmc run --end-to-end --outputs {predictions_path} {package_path}"
    _main(cmd.split(" ")[1:])
    assert os.path.exists(predictions_path)
@pytest.mark.skipif(
    platform.machine() == "aarch64",
    reason="Currently failing on AArch64 - see https://github.com/apache/tvm/issues/10673",
)
def test_tvmc_cl_workflow_json_config(keras_simple, tmpdir_factory):
    """Same tune -> compile -> run flow, driven by a JSON --config file."""
    pytest.importorskip("tensorflow")
    config_name = "tune_config_test"
    work_dir = tmpdir_factory.mktemp("data")

    # Tune the model, with settings coming from the JSON config.
    records_path = os.path.join(work_dir, "keras-autotuner_records.json")
    cmd = (
        f"tvmc tune --config {config_name} --output {records_path} "
        f"--enable-autoscheduler {keras_simple}"
    )
    _main(cmd.split(" ")[1:])
    assert os.path.exists(records_path)

    # Compile the model.
    package_path = os.path.join(work_dir, "keras-tvm.tar")
    cmd = f"tvmc compile --tuning-records {records_path} " f"--output {package_path} {keras_simple}"
    _main(cmd.split(" ")[1:])
    assert os.path.exists(package_path)

    # Run the compiled model.
    predictions_path = os.path.join(work_dir, "predictions.npz")
    cmd = f"tvmc run --outputs {predictions_path} {package_path}"
    _main(cmd.split(" ")[1:])
    assert os.path.exists(predictions_path)
@pytest.fixture
def missing_file():
    """Name of a file that does not exist on disk."""
    return "missing_file_as_invalid_input.tfite"
@pytest.fixture
def broken_symlink(tmp_path):
    """Create (and afterwards remove) a symlink pointing at a missing target."""
    link_name = "broken_symlink_as_invalid_input.tflite"
    os.symlink("non_existing_file", tmp_path / link_name)
    yield link_name
    os.unlink(tmp_path / link_name)
@pytest.fixture
def fake_directory(tmp_path):
    """Create (and afterwards remove) a directory masquerading as a model file."""
    dir_name = "dir_as_invalid_input.tflite"
    os.mkdir(tmp_path / dir_name)
    yield dir_name
    shutil.rmtree(tmp_path / dir_name)
@pytest.mark.parametrize(
    "invalid_input",
    [lazy_fixture("missing_file"), lazy_fixture("broken_symlink"), lazy_fixture("fake_directory")],
)
def test_tvmc_compile_file_check(capsys, invalid_input):
    """`tvmc compile` must report an invalid input FILE argument on stderr."""
    _main(f"tvmc compile --target 'c' {invalid_input}".split(" ")[1:])
    actual_err = capsys.readouterr().err
    expected_err = (
        f"Error: Input file '{invalid_input}' doesn't exist, "
        "is a broken symbolic link, or a directory.\n"
    )
    assert actual_err == expected_err, (
        f"'tvmc compile' failed to check invalid FILE: {invalid_input}"
    )
@pytest.mark.parametrize(
    "invalid_input",
    [lazy_fixture("missing_file"), lazy_fixture("broken_symlink"), lazy_fixture("fake_directory")],
)
def test_tvmc_tune_file_check(capsys, invalid_input):
    """`tvmc tune` must report an invalid input FILE argument on stderr."""
    _main(f"tvmc tune --target 'llvm' --output output.json {invalid_input}".split(" ")[1:])
    actual_err = capsys.readouterr().err
    expected_err = (
        f"Error: Input file '{invalid_input}' doesn't exist, "
        "is a broken symbolic link, or a directory.\n"
    )
    assert actual_err == expected_err, (
        f"'tvmc tune' failed to check invalid FILE: {invalid_input}"
    )
@mock.patch("tvm.relay.build", side_effect=tvm.relay.build)
@mock.patch("tvm.driver.tvmc.model.TVMCPackage.__init__", return_value=None)
def test_tvmc_workspace_pools_check(mock_pkg, mock_relay, keras_simple, tmpdir_factory):
    """Workspace-pool CLI options must be forwarded to tvm.relay.build."""
    pytest.importorskip("tensorflow")
    out_tar = os.path.join(tmpdir_factory.mktemp("data"), "keras-tvm.tar")
    cmd = (
        f"tvmc compile --target=llvm --workspace-pools=sram "
        f"--workspace-pools-targets=sram:llvm "
        f"--output={out_tar} {keras_simple}"
    )
    _main(cmd.split(" ")[1:])
    assert os.path.exists(out_tar)
    assert mock_relay.call_count == 1
    pools = mock_relay.call_args_list[0][1]["workspace_memory_pools"].pools
    assert pools[0].pool_name == "sram"
@pytest.fixture
def paddle_model(paddle_resnet50):
    """Return the Paddle ResNet50 model path, skipping tests when 'paddle' is absent."""
    # importorskip either returns the imported module or skips the test, so
    # wrapping the return in an `if` on its result was dead logic that could
    # only ever silently return None.
    pytest.importorskip("paddle", reason="'paddle' module not installed")
    return paddle_resnet50
@pytest.mark.parametrize(
    "model",
    [
        lazy_fixture("paddle_model"),
    ],
)
# compile_model() can take too long and is tested elsewhere, hence it's mocked below
@mock.patch.object(compiler, "compile_model")
def test_tvmc_compile_input_model(mock_compile_model, tmpdir_factory, model):
    """`tvmc compile` with an explicit --model-format must reach compile_model once."""
    # Removed a duplicated, commented-out copy of the @mock.patch.object
    # decorator that was dead code.
    output_file = tmpdir_factory.mktemp("output") / "model.tar"
    compile_cmd = (
        f"tvmc compile --target 'llvm' {model} --model-format paddle --output {output_file}"
    )
    _main(compile_cmd.split(" ")[1:])
    mock_compile_model.assert_called_once()
def test_tvmc_logger(caplog, tmpdir_factory, keras_simple):
    """Each tvmc subcommand should emit log records at -vvvv verbosity."""
    pytest.importorskip("tensorflow")
    out_dir = tmpdir_factory.mktemp("out")

    # TUNE
    records_path = os.path.join(out_dir, "records.json")
    cmd = f"tvmc tune --target llvm -vvvv --output {records_path} " f"--trials 2 {keras_simple}"
    _main(cmd.split(" ")[1:])
    # Check that we log during tvmc tune
    for token in ("DEBUG", "INFO", "WARNING", "TVMC"):
        assert token in caplog.text
    caplog.clear()

    # COMPILE
    module_file = os.path.join(out_dir, "m.tar")
    cmd = f"tvmc compile --target 'llvm' {keras_simple} -vvvv --output {module_file}"
    _main(cmd.split(" ")[1:])
    # Check that we log during tvmc compile
    for token in ("DEBUG", "WARNING", "TVMC"):
        assert token in caplog.text
    caplog.clear()

    # RUN
    cmd = f"tvmc run -vvvv {module_file}"
    _main(cmd.split(" ")[1:])
    # Check that we log during tvmc run
    for token in ("DEBUG", "TVMC"):
        assert token in caplog.text
def test_tvmc_logger_set_basicConfig(monkeypatch, tmpdir_factory, keras_simple):
    """Verify tvmc calls logging.basicConfig(stream=sys.stdout).

    pytest intercepts logging output, so we cannot observe the stream
    directly; instead we assert on the basicConfig call arguments.
    """
    pytest.importorskip("tensorflow")
    fake_basic_config = MagicMock()
    monkeypatch.setattr(logging, "basicConfig", fake_basic_config)
    # Any tvmc command triggers logger setup; compile is convenient here.
    module_file = os.path.join(tmpdir_factory.mktemp("out"), "m.tar")
    cmd = f"tvmc compile --target 'llvm' {keras_simple} -vvvv --output {module_file}"
    _main(cmd.split(" ")[1:])
    fake_basic_config.assert_called_with(stream=sys.stdout)
| 9,164 | 32.327273 | 99 | py |
tvm | tvm-main/tests/python/driver/tvmc/test_frontends.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import platform
import pytest
import builtins
import importlib
import tvm
from unittest import mock
from tvm.ir.module import IRModule
from tvm.driver import tvmc
from tvm.driver.tvmc import TVMCException, TVMCImportError
from tvm.driver.tvmc.model import TVMCModel
orig_import = importlib.import_module
def mock_error_on_name(name):
    """Return an importlib.import_module replacement that raises ImportError
    only for *name* and delegates every other import to the real machinery."""
    def _selective_import(module_name, package=None):
        if module_name == name:
            raise ImportError()
        return orig_import(module_name, package)
    return _selective_import
def test_get_frontends_contains_only_strings():
    """Every frontend name reported by tvmc must be a plain string."""
    names = tvmc.frontends.get_frontend_names()
    # `all` accepts a generator directly, and comparing its result with
    # `is True` was redundant.
    assert all(type(name) is str for name in names)
def test_get_frontend_by_name_valid():
    """Looking up "keras" by name yields the Keras frontend."""
    # some CI environments wont offer TensorFlow/Keras, so skip in case it is not present
    pytest.importorskip("tensorflow")
    frontend = tvmc.frontends.get_frontend_by_name("keras")
    assert type(frontend) is tvmc.frontends.KerasFrontend
def test_get_frontend_by_name_invalid():
    """An unknown frontend name raises TVMCException."""
    bogus_name = "unsupported_thing"
    with pytest.raises(TVMCException):
        tvmc.frontends.get_frontend_by_name(bogus_name)
def test_guess_frontend_tflite():
    """A .tflite suffix maps to the TFLite frontend."""
    # some CI environments wont offer TFLite, so skip in case it is not present
    pytest.importorskip("tflite")
    frontend = tvmc.frontends.guess_frontend("a_model.tflite")
    assert type(frontend) is tvmc.frontends.TFLiteFrontend
def test_guess_frontend_onnx():
    """A .onnx suffix maps to the ONNX frontend."""
    # some CI environments wont offer onnx, so skip in case it is not present
    pytest.importorskip("onnx")
    frontend = tvmc.frontends.guess_frontend("a_model.onnx")
    assert type(frontend) is tvmc.frontends.OnnxFrontend
@pytest.mark.skipif(
    platform.machine() == "aarch64",
    reason="Currently failing on AArch64 - see https://github.com/apache/tvm/issues/10673",
)
def test_guess_frontend_pytorch():
    """A .pth suffix maps to the PyTorch frontend."""
    # some CI environments wont offer pytorch, so skip in case it is not present
    pytest.importorskip("torch")
    frontend = tvmc.frontends.guess_frontend("a_model.pth")
    assert type(frontend) is tvmc.frontends.PyTorchFrontend
def test_guess_frontend_keras():
    """A .h5 suffix maps to the Keras frontend."""
    # some CI environments wont offer TensorFlow/Keras, so skip in case it is not present
    pytest.importorskip("tensorflow")
    frontend = tvmc.frontends.guess_frontend("a_model.h5")
    assert type(frontend) is tvmc.frontends.KerasFrontend
def test_guess_frontend_tensorflow():
    """A .pb suffix maps to the TensorFlow frontend."""
    # some CI environments wont offer TensorFlow, so skip in case it is not present
    pytest.importorskip("tensorflow")
    frontend = tvmc.frontends.guess_frontend("a_model.pb")
    assert type(frontend) is tvmc.frontends.TensorflowFrontend
def test_guess_frontend_paddle():
    """A .pdmodel suffix maps to the Paddle frontend."""
    # some CI environments wont offer Paddle, so skip in case it is not present
    pytest.importorskip("paddle")
    frontend = tvmc.frontends.guess_frontend("a_model.pdmodel")
    assert type(frontend) is tvmc.frontends.PaddleFrontend
def test_guess_frontend_relay():
    """A .relay suffix maps to the Relay text frontend."""
    frontend = tvmc.frontends.guess_frontend("relay.relay")
    assert type(frontend) is tvmc.frontends.RelayFrontend
def test_guess_frontend_invalid():
    """An unrecognized suffix raises TVMCException."""
    unknown_suffix_path = "not/a/file.txt"
    with pytest.raises(TVMCException):
        tvmc.frontends.guess_frontend(unknown_suffix_path)
def test_load_model__invalid_path__no_language():
    """Loading a nonexistent file without a format hint raises FileNotFoundError."""
    # some CI environments wont offer TFLite, so skip in case it is not present
    pytest.importorskip("tflite")
    bad_path = "not/a/file.tflite"
    with pytest.raises(FileNotFoundError):
        tvmc.load(bad_path)
def test_load_model__invalid_path__with_language():
    """Loading a nonexistent file with an explicit format raises FileNotFoundError."""
    # some CI environments wont offer onnx, so skip in case it is not present
    pytest.importorskip("onnx")
    bad_path = "not/a/file.txt"
    with pytest.raises(FileNotFoundError):
        tvmc.load(bad_path, model_format="onnx")
def test_load_model__tflite(tflite_mobilenet_v1_1_quant):
    """TFLite import produces a TVMCModel wrapping an IRModule and a params dict."""
    # some CI environments wont offer TFLite, so skip in case it is not present
    pytest.importorskip("tflite")
    loaded = tvmc.load(tflite_mobilenet_v1_1_quant)
    assert type(loaded) is TVMCModel
    assert type(loaded.mod) is IRModule
    assert type(loaded.params) is dict
    # check whether one known value is part of the params dict
    assert "_param_1" in loaded.params
@pytest.mark.parametrize("load_model_kwargs", [{}, {"layout": "NCHW"}])
def test_load_model__keras(keras_resnet50, load_model_kwargs):
    """Keras import works with and without an explicit layout kwarg."""
    # some CI environments wont offer TensorFlow/Keras, so skip in case it is not present
    pytest.importorskip("tensorflow")
    loaded = tvmc.frontends.load_model(keras_resnet50, **load_model_kwargs)
    assert type(loaded) is TVMCModel
    assert type(loaded.mod) is IRModule
    assert type(loaded.params) is dict
    ## check whether one known value is part of the params dict
    assert "_param_1" in loaded.params
def verify_load_model__onnx(model, **kwargs):
    """Load ``model`` via the ONNX frontend, check basic invariants, and return it."""
    loaded = tvmc.frontends.load_model(model, **kwargs)
    assert type(loaded) is TVMCModel
    assert type(loaded.mod) is IRModule
    assert type(loaded.params) is dict
    return loaded
def test_load_model__onnx(onnx_resnet50):
    """freeze_params decides whether ONNX weights stay in the params dict."""
    # some CI environments wont offer onnx, so skip in case it is not present
    pytest.importorskip("onnx")
    unfrozen = verify_load_model__onnx(onnx_resnet50, freeze_params=False)
    # check whether one known value is part of the params dict
    assert "resnetv24_batchnorm0_gamma" in unfrozen.params
    frozen = verify_load_model__onnx(onnx_resnet50, freeze_params=True)
    # check that the parameter dict is empty, implying that they have been folded into constants
    assert frozen.params == {}
def test_load_model__pb(pb_mobilenet_v1_1_quant):
    """TensorFlow .pb import produces a TVMCModel with known weight names."""
    # some CI environments wont offer TensorFlow, so skip in case it is not present
    pytest.importorskip("tensorflow")
    loaded = tvmc.load(pb_mobilenet_v1_1_quant)
    assert type(loaded) is TVMCModel
    assert type(loaded.mod) is IRModule
    assert type(loaded.params) is dict
    # check whether one known value is part of the params dict
    assert "MobilenetV1/Conv2d_0/weights" in loaded.params
def test_load_model__paddle(paddle_resnet50):
    """Paddle import produces a TVMCModel."""
    # some CI environments wont offer Paddle, so skip in case it is not present
    pytest.importorskip("paddle")
    loaded = tvmc.load(paddle_resnet50, model_format="paddle")
    assert type(loaded) is TVMCModel
    assert type(loaded.mod) is IRModule
    assert type(loaded.params) is dict
def test_load_model__relay(relay_text_conv2d):
    """Relay text import produces a TVMCModel."""
    loaded = tvmc.load(relay_text_conv2d, model_format="relay")
    assert type(loaded) is TVMCModel
    assert type(loaded.mod) is IRModule
    assert type(loaded.params) is dict
def test_load_model___wrong_language__to_keras(tflite_mobilenet_v1_1_quant):
    """Feeding a TFLite file to the Keras loader fails with OSError."""
    # some CI environments wont offer TensorFlow/Keras, so skip in case it is not present
    pytest.importorskip("tensorflow")
    with pytest.raises(OSError):
        tvmc.load(tflite_mobilenet_v1_1_quant, model_format="keras")
def test_load_model___wrong_language__to_tflite(keras_resnet50):
    """Feeding a Keras file to the TFLite loader fails with TVMCException."""
    # some CI environments wont offer TFLite, so skip in case it is not present
    pytest.importorskip("tflite")
    with pytest.raises(TVMCException):
        tvmc.frontends.load_model(keras_resnet50, model_format="tflite")
def test_load_model___wrong_language__to_onnx(tflite_mobilenet_v1_1_quant):
    """Feeding a TFLite file to the ONNX loader fails with a protobuf DecodeError."""
    # some CI environments wont offer onnx, so skip in case it is not present
    pytest.importorskip("onnx")
    from google.protobuf.message import DecodeError

    with pytest.raises(DecodeError):
        tvmc.load(tflite_mobilenet_v1_1_quant, model_format="onnx")
@pytest.mark.skip(
    reason="free(): invalid pointer error despite using llvm-config --link-static and -DHIDE_PRIVATE_SYMBOLS=ON",
)
def test_load_model__pth(pytorch_resnet18):
    """Torchscript import produces a TVMCModel with known weight names."""
    # some CI environments wont offer torch, so skip in case it is not present
    pytest.importorskip("torch")
    pytest.importorskip("torchvision")
    loaded = tvmc.load(pytorch_resnet18, shape_dict={"input": [1, 3, 224, 224]})
    assert type(loaded) is TVMCModel
    assert type(loaded.mod) is IRModule
    assert type(loaded.params) is dict
    # check whether one known value is part of the params dict
    assert "layer1.0.conv1.weight" in loaded.params
@pytest.mark.skip(
    reason="free(): invalid pointer error despite using llvm-config --link-static and -DHIDE_PRIVATE_SYMBOLS=ON",
)
def test_load_quantized_model__pth(pytorch_mobilenetv2_quantized):
    """Quantized torchscript import keeps integer parameter dtypes."""
    # some CI environments wont offer torch, so skip in case it is not present
    pytest.importorskip("torch")
    pytest.importorskip("torchvision")
    loaded = tvmc.load(pytorch_mobilenetv2_quantized, shape_dict={"input": [1, 3, 224, 224]})
    assert type(loaded) is TVMCModel
    assert type(loaded.mod) is IRModule
    assert type(loaded.params) is dict
    # checking weights remain quantized and are not float32
    for param in loaded.params.values():
        assert param.dtype in ["int8", "uint8", "int32"]  # int32 for bias
@pytest.mark.skipif(
    platform.machine() == "aarch64",
    reason="Currently failing on AArch64 - see https://github.com/apache/tvm/issues/10673",
)
def test_load_model___wrong_language__to_pytorch(tflite_mobilenet_v1_1_quant):
    """Feeding a TFLite file to the PyTorch loader fails with RuntimeError."""
    # some CI environments wont offer pytorch, so skip in case it is not present
    pytest.importorskip("torch")
    with pytest.raises(RuntimeError):
        tvmc.load(
            tflite_mobilenet_v1_1_quant,
            model_format="pytorch",
            shape_dict={"input": [1, 3, 224, 224]},
        )
def test_compile_tflite_module_nhwc_to_nchw(tflite_mobilenet_v1_1_quant):
    """Converting a TFLite (NHWC) model to NCHW must insert layout_transform ops."""
    # some CI environments wont offer TFLite, so skip in case it is not present
    pytest.importorskip("tflite")
    mod = tvmc.frontends.load_model(tflite_mobilenet_v1_1_quant).mod
    with tvm.transform.PassContext(opt_level=3):
        converted = tvmc.transform.convert_graph_layout(mod, "NCHW")
    transform_flags = []

    def _record(node):
        if isinstance(node, tvm.relay.expr.Call):
            transform_flags.append(
                node.op.name == "layout_transform"
                and node.attrs.src_layout == "NHWC"
                and node.attrs.dst_layout == "NCHW"
            )

    tvm.relay.analysis.post_order_visit(converted["main"], _record)
    assert any(transform_flags), "Expected 'layout_transform NHWC->NCHW' not found"
def test_compile_onnx_module_nchw_to_nhwc(onnx_resnet50):
    """Converting an ONNX (NCHW) model to NHWC must insert layout_transform ops."""
    # some CI environments wont offer ONNX, so skip in case it is not present
    pytest.importorskip("onnx")
    tvmc_model = tvmc.frontends.load_model(onnx_resnet50)
    before = tvmc_model.mod
    expected_layout = "NHWC"
    with tvm.transform.PassContext(opt_level=3):
        after = tvmc.transform.convert_graph_layout(before, expected_layout)
    layout_transform_calls = []

    def _is_layout_transform(node):
        if isinstance(node, tvm.relay.expr.Call):
            layout_transform_calls.append(
                node.op.name == "layout_transform"
                and node.attrs.src_layout == "NCHW"
                and node.attrs.dst_layout == "NHWC"
            )

    tvm.relay.analysis.post_order_visit(after["main"], _is_layout_transform)
    # Fixed typo in the failure message: "NCWH" -> "NCHW".
    assert any(layout_transform_calls), "Expected 'layout_transform NCHW->NHWC' not found"
def test_compile_paddle_module_nchw_to_nhwc(paddle_resnet50):
    """Converting a Paddle (NCHW) model to NHWC must insert layout_transform ops."""
    # some CI environments wont offer Paddle, so skip in case it is not present
    pytest.importorskip("paddle")
    tvmc_model = tvmc.frontends.load_model(paddle_resnet50, "paddle")
    before = tvmc_model.mod
    expected_layout = "NHWC"
    with tvm.transform.PassContext(opt_level=3):
        after = tvmc.transform.convert_graph_layout(before, expected_layout)
    layout_transform_calls = []

    def _is_layout_transform(node):
        if isinstance(node, tvm.relay.expr.Call):
            layout_transform_calls.append(
                node.op.name == "layout_transform"
                and node.attrs.src_layout == "NCHW"
                and node.attrs.dst_layout == "NHWC"
            )

    tvm.relay.analysis.post_order_visit(after["main"], _is_layout_transform)
    # Fixed typo in the failure message: "NCWH" -> "NCHW".
    assert any(layout_transform_calls), "Expected 'layout_transform NCHW->NHWC' not found"
def test_compile_tflite_module__same_layout__nhwc_to_nhwc(tflite_mobilenet_v1_1_quant):
    """Requesting the layout a TFLite model already uses must add no layout_transform ops."""
    # some CI environments wont offer TFLite, so skip in case it is not present
    pytest.importorskip("tflite")
    mod = tvmc.frontends.load_model(tflite_mobilenet_v1_1_quant).mod
    with tvm.transform.PassContext(opt_level=3):
        converted = tvmc.transform.convert_graph_layout(mod, "NHWC")
    transform_flags = []

    def _record(node):
        if isinstance(node, tvm.relay.expr.Call):
            transform_flags.append(
                node.op.name == "layout_transform"
                and node.attrs.src_layout == "NHWC"
                and node.attrs.dst_layout == "NHWC"
            )

    tvm.relay.analysis.post_order_visit(converted["main"], _record)
    assert not any(transform_flags), "Unexpected 'layout_transform' call"
def test_compile_onnx_module__same_layout__nchw_to_nchw(onnx_resnet50):
    """Requesting the layout an ONNX model already uses must not add layout_transform ops."""
    # ONNX may be absent in some CI environments; skip if so.
    pytest.importorskip("onnx")
    desired_layout = "NCHW"
    model = tvmc.frontends.load_model(onnx_resnet50)
    with tvm.transform.PassContext(opt_level=3):
        converted = tvmc.transform.convert_graph_layout(model.mod, desired_layout)
    identity_transforms = []
    def _collect(node):
        # Note every Call node that is an NCHW->NCHW layout_transform.
        if not isinstance(node, tvm.relay.expr.Call):
            return
        identity_transforms.append(
            node.op.name == "layout_transform"
            and node.attrs.src_layout == "NCHW"
            and node.attrs.dst_layout == "NCHW"
        )
    tvm.relay.analysis.post_order_visit(converted["main"], _collect)
    assert not any(identity_transforms), "Unexpected 'layout_transform' call"
def test_import_keras_friendly_message(keras_resnet50, monkeypatch):
    """A failing tensorflow import surfaces as a TVMCImportError naming 'tensorflow'."""
    # keras is part of tensorflow
    monkeypatch.setattr("importlib.import_module", mock_error_on_name("tensorflow"))
    with pytest.raises(TVMCImportError, match="tensorflow"):
        tvmc.frontends.load_model(keras_resnet50, model_format="keras")
def test_import_onnx_friendly_message(onnx_resnet50, monkeypatch):
    """A failing onnx import surfaces as a TVMCImportError naming 'onnx'."""
    monkeypatch.setattr("importlib.import_module", mock_error_on_name("onnx"))
    with pytest.raises(TVMCImportError, match="onnx"):
        tvmc.frontends.load_model(onnx_resnet50, model_format="onnx")
def test_import_tensorflow_friendly_message(pb_mobilenet_v1_1_quant, monkeypatch):
    """A failing tensorflow import surfaces as a TVMCImportError naming 'tensorflow'."""
    monkeypatch.setattr("importlib.import_module", mock_error_on_name("tensorflow"))
    with pytest.raises(TVMCImportError, match="tensorflow"):
        tvmc.frontends.load_model(pb_mobilenet_v1_1_quant, model_format="pb")
@pytest.mark.skipif(
    platform.machine() == "aarch64",
    reason="Currently failing on AArch64 - see https://github.com/apache/tvm/issues/10673",
)
def test_import_torch_friendly_message(pytorch_resnet18, monkeypatch):
    """A failing torch import surfaces as a TVMCImportError naming 'torch'."""
    monkeypatch.setattr("importlib.import_module", mock_error_on_name("torch"))
    with pytest.raises(TVMCImportError, match="torch"):
        tvmc.frontends.load_model(pytorch_resnet18, model_format="pytorch")
def test_import_tflite_friendly_message(tflite_mobilenet_v1_1_quant, monkeypatch):
    """A failing tflite import surfaces as a TVMCImportError naming 'tflite.Model'."""
    monkeypatch.setattr("importlib.import_module", mock_error_on_name("tflite.Model"))
    with pytest.raises(TVMCImportError, match="tflite.Model"):
        tvmc.frontends.load_model(tflite_mobilenet_v1_1_quant, model_format="tflite")
| 16,710 | 35.328261 | 113 | py |
tvm | tvm-main/tests/python/driver/tvmc/test_autoscheduler.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import platform
import pytest
import os
from os import path
from tvm import auto_scheduler
from tvm.driver import tvmc
def _get_tasks(model):
    """Load *model* and return its auto-scheduler ``(tasks, weights)`` pair for llvm."""
    loaded = tvmc.frontends.load_model(model)
    tasks, weights = tvmc.autotuner.autoscheduler_get_tuning_tasks(
        loaded.mod, loaded.params, "llvm"
    )
    return tasks, weights
def _autoscheduler_test_helper(model, tmpdir_name, early_stopping=1, prior_records=None):
    """Tune *model* with the auto-scheduler and return the path of the produced log file.

    Asserts that the log file exists and can be loaded back via ApplyHistoryBest.
    """
    loaded = tvmc.frontends.load_model(model)
    log_file = os.path.join(tmpdir_name, "autoscheduler.json")
    tvmc.tune(
        loaded,
        target="llvm",
        tuning_records=log_file,
        prior_records=prior_records,
        early_stopping=early_stopping,
        enable_autoscheduler=True,
        trials=2,
        hardware_params=auto_scheduler.HardwareParams(num_cores=4, target="llvm"),
    )
    # The tuning run must have produced a log file ...
    assert path.exists(log_file), "autoscheduler log file should exist"
    # ... and that log file must be loadable as tuning history.
    with auto_scheduler.ApplyHistoryBest(log_file) as best:
        assert isinstance(
            best, auto_scheduler.dispatcher.ApplyHistoryBest
        ), "unable to load the best results of tuning"
    return log_file
def test_get_tuning_tasks(keras_simple):
    """Extracted tuning tasks form a non-empty list of SearchTask objects."""
    pytest.importorskip("tensorflow")
    tasks, weights = _get_tasks(keras_simple)
    assert type(tasks) is list
    assert len(tasks) > 0
    assert all(type(task) is auto_scheduler.SearchTask for task in tasks)
@pytest.mark.skipif(
    platform.machine() == "aarch64",
    reason="Currently failing on AArch64 - see https://github.com/apache/tvm/issues/10673",
)
def test_tune_tasks(keras_simple, tmpdir_factory):
    """Smoke-test auto-scheduler tuning on a simple Keras model."""
    pytest.importorskip("tensorflow")
    workdir = tmpdir_factory.mktemp("data")
    _autoscheduler_test_helper(keras_simple, workdir)
@pytest.mark.skipif(
    platform.machine() == "aarch64",
    reason="Currently failing on AArch64 - see https://github.com/apache/tvm/issues/10673",
)
def test_tune_tasks__tuning_records(keras_simple, tmpdir_factory):
    """Tuning a second time with the first log as prior records exercises transfer learning."""
    pytest.importorskip("tensorflow")
    workdir = tmpdir_factory.mktemp("data")
    first_log = _autoscheduler_test_helper(keras_simple, workdir)
    # Feed the first run's log back in so a previous-records code path is taken.
    _autoscheduler_test_helper(keras_simple, workdir, prior_records=first_log)
@pytest.mark.skipif(
    platform.machine() == "aarch64",
    reason="Currently failing on AArch64 - see https://github.com/apache/tvm/issues/10673",
)
def test_tune_tasks__no_early_stopping(keras_simple, tmpdir_factory):
    """Tuning must also work when early stopping is disabled entirely."""
    pytest.importorskip("tensorflow")
    workdir = tmpdir_factory.mktemp("data")
    _autoscheduler_test_helper(keras_simple, workdir, early_stopping=None)
| 3,632 | 32.638889 | 91 | py |
tvm | tvm-main/tests/python/driver/tvmc/test_model.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import platform
import pytest
import os
import numpy as np
from os import path
from tvm.driver import tvmc
from tvm.driver.tvmc.model import TVMCModel, TVMCPackage, TVMCResult
from tvm.runtime.module import BenchmarkResult
@pytest.mark.skipif(
    platform.machine() == "aarch64",
    reason="Currently failing on AArch64 - see https://github.com/apache/tvm/issues/10673",
)
@pytest.mark.parametrize("use_vm", [True, False])
def test_tvmc_workflow(use_vm, keras_simple):
    """Run the whole load -> tune -> compile -> run pipeline through the tvmc Python API."""
    pytest.importorskip("tensorflow")
    import tensorflow as tf

    # Reset so the input name remains consistent across unit test runs
    tf.keras.backend.clear_session()
    model = tvmc.load(keras_simple)
    records = tvmc.tune(model, target="llvm", enable_autoscheduler=True, trials=2)
    package = tvmc.compile(model, tuning_records=records, target="llvm", use_vm=use_vm)
    inputs = {"input_1": np.random.uniform(size=(1, 32, 32, 3)).astype("float32")}
    result = tvmc.run(package, device="cpu", end_to_end=True, benchmark=True, inputs=inputs)
    # Every pipeline stage must have produced an object of the expected type.
    assert type(model) is TVMCModel
    assert type(package) is TVMCPackage
    assert type(result) is TVMCResult
    assert path.exists(records)
    assert type(result.outputs) is dict
    assert type(result.times) is BenchmarkResult
    assert "output_0" in result.outputs.keys()
@pytest.mark.skipif(
    platform.machine() == "aarch64",
    reason="Currently failing on AArch64 - see https://github.com/apache/tvm/issues/10673",
)
@pytest.mark.parametrize("use_vm", [True, False])
def test_save_load_model(use_vm, keras_simple, tmpdir_factory):
    """Saving a TVMCModel to disk and reloading it preserves the module and artifacts."""
    pytest.importorskip("onnx")
    model = tvmc.load(keras_simple)
    # Produce tuning artifacts ...
    tvmc.tune(model, target="llvm", trials=2)
    # ... and package artifacts before the round trip.
    tvmc.compile(model, target="llvm", use_vm=use_vm)
    saved_path = os.path.join(tmpdir_factory.mktemp("data"), "saved_model.tar")
    model.save(saved_path)
    reloaded = TVMCModel(model_path=saved_path)
    # The reloaded module must match the original ...
    assert str(reloaded.mod) == str(model.mod)
    # ... and tuning records plus the compiled package must be recoverable.
    assert path.exists(reloaded.default_package_path())
    assert path.exists(reloaded.default_tuning_records_path())
| 3,271 | 35.764045 | 94 | py |
tvm | tvm-main/tests/python/driver/tvmc/test_compiler.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import re
import numpy as np
import shutil
import tarfile
from os import path
from unittest import mock
import pytest
import tvm
from tvm.ir.memory_pools import WorkspacePoolInfo, WorkspaceMemoryPools
from tvm.target import Target
import tvm.testing
from tvm.relay.op.contrib.ethosn import ethosn_available
from tvm.relay.backend import Runtime, Executor
from tvm import relay
from tvm.contrib.target.vitis_ai import vitis_ai_available
from tvm.driver import tvmc
from tvm.driver.tvmc.model import TVMCPackage
from tvm.contrib import utils
def test_save_dumps(tmpdir_factory):
    """save_dumps writes one sidecar file per requested dump format."""
    out_dir = tmpdir_factory.mktemp("data")
    fake_dumps = {"relay": "fake relay", "tir": "fake tir", "ll": "fake llvm", "asm": "fake asm"}
    tvmc.compiler.save_dumps("fake_module", fake_dumps, dump_root=out_dir)
    for extension in ("ll", "asm", "tir", "relay"):
        assert path.exists("{}/{}".format(out_dir, "fake_module.{}".format(extension)))
def test_save_dump_offloads_ethosu(tmp_path_factory):
    """Check that dump_offloads reports the expected ethos-u/generic operator partitioning.

    Builds a small three-conv + concat + pad TF network, compiles it with the
    ethos-u,cmsis-nn,c target stack, and compares the produced offloads report
    line-by-line (operator/composite names only) against a reference dump.
    """
    tflite = pytest.importorskip("tflite")
    tensorflow = pytest.importorskip("tensorflow")
    pytest.importorskip("ethosu.vela")
    import tensorflow as tf
    import tflite.Model
    from tvm.driver.tvmc.model import TVMCModel
    # Shapes for the toy network constructed in simple_net below.
    inp = (224, 224, 9)
    input_shape = (1, *inp)
    kernel_shape = (3, 3)
    padding = (1, 1, 1, 1)
    padding_out = (1, 33, 33, 1)
    @tf.function
    def simple_net(x):
        # conv -> (conv, conv) -> concat -> pad; the pad is expected to stay generic.
        weight_shape = [kernel_shape[0], kernel_shape[1], input_shape[3], 3]
        weights = tf.constant(np.random.uniform(size=weight_shape), dtype=tf.float32)
        weight_shape[2] = 3
        weights1 = tf.constant(np.random.uniform(size=weight_shape), dtype=tf.float32)
        weights2 = tf.constant(np.random.uniform(size=weight_shape), dtype=tf.float32)
        op = tf.nn.conv2d(
            x,
            filters=weights,
            strides=1,
            padding="SAME",
            data_format="NHWC",
            dilations=1,
        )
        op1 = tf.nn.conv2d(
            op,
            filters=weights1,
            strides=1,
            padding="SAME",
            data_format="NHWC",
            dilations=1,
        )
        op2 = tf.nn.conv2d(
            op,
            filters=weights2,
            strides=1,
            padding="SAME",
            data_format="NHWC",
            dilations=1,
        )
        op = tf.concat([op1, op2], 1)
        op = tf.pad(
            op,
            [[0, 0], [padding[0], padding_out[1]], [padding_out[2], padding[3]], [0, 0]],
            "CONSTANT",
        )
        return op
    from tests.python.contrib.test_ethosu.infra import get_tflite_graph
    _, tflite_graph = get_tflite_graph(simple_net, [input_shape])
    tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)
    mod, params = relay.frontend.from_tflite(tflite_model)
    tvmc_model = TVMCModel(mod, params)
    output_dir = tmp_path_factory.mktemp("tmp")
    output_file_name = os.path.join(str(output_dir), "list.txt")
    tvmc.compiler.compile_model(
        tvmc_model,
        target="ethos-u,cmsis-nn,c",
        runtime=Runtime("crt"),
        tuning_records="",
        package_path="module.tar",
        executor=Executor("aot", {"unpacked-api": 1, "interface-api": "c", "link-params": True}),
        cross="",
        cross_options="",
        output_format="mlf",
        dump_offloads=output_file_name,
        disabled_pass=[""],
        pass_context_configs=[
            "tir.disable_vectorize=1",
            "tir.usmp.enable=1",
            "tir.usmp.algorithm=hill_climb",
            "tir.disable_storage_rewrite=1",
            "relay.frontend.fill_span=1",
        ],
        additional_target_options={
            "c": {"mcpu": "cortex-m55"},
            "cmsis-nn": {"mcpu": "cortex-m55"},
            "ethos-u": {
                "accelerator_config": "ethos-u55-256",
            },
        },
    )
    # Reference dump: operator totals, then each op annotated with its assigned target.
    expected = [
        r"Total number of operators and distribution by targets",
        r"Total: 11",
        r"ethos-u: 10",
        r"generic: 1",
        r"",
        r"ethos-u <- ethos-u.qnn_conv2d",
        r'ethos-u <- %0 = qnn.conv2d(%x, %v_param_1, -128, 0, 0.00392157f, meta[relay.Constant][0], padding=[1, 1, 1, 1], channels=3, kernel_size=[3, 3], data_layout="NHWC", kernel_layout="HWIO", out_dtype="int32")',
        r"ethos-u <- %1 = nn.bias_add(%0, %v_param_2, axis=3)",
        r'ethos-u <- %2 = qnn.requantize(%1, meta[relay.Constant][1], 0, 0.11364f, -128, axis=3, out_dtype="int8")',
        r"ethos-u <- ethos-u.qnn_conv2d",
        r'ethos-u <- %3 = qnn.conv2d(%2, %v_param_3, -128, 0, 0.11364f, meta[relay.Constant][2], padding=[1, 1, 1, 1], channels=3, kernel_size=[3, 3], data_layout="NHWC", kernel_layout="HWIO", out_dtype="int32")',
        r"ethos-u <- %4 = nn.bias_add(%3, %v_param_4, axis=3)",
        r'ethos-u <- %7 = qnn.requantize(%4, meta[relay.Constant][3], 0, 1.56803f, -128, axis=3, out_dtype="int8")',
        r"ethos-u <- ethos-u.qnn_conv2d",
        r'ethos-u <- %5 = qnn.conv2d(%2, %v_param_5, -128, 0, 0.11364f, meta[relay.Constant][4], padding=[1, 1, 1, 1], channels=3, kernel_size=[3, 3], data_layout="NHWC", kernel_layout="HWIO", out_dtype="int32")',
        r"ethos-u <- %6 = nn.bias_add(%5, %v_param_6, axis=3)",
        r'ethos-u <- %8 = qnn.requantize(%6, meta[relay.Constant][5], 0, 1.20538f, -128, axis=3, out_dtype="int8")',
        r" %9 = (%7, %8)",
        r" %10 = (1.59778f, 1.59778f)",
        r" %11 = (-128, -128)",
        r"ethos-u <- ethos-u.concat",
        r"ethos-u <- %12 = qnn.concatenate(%9, %10, %11, 1.59778f, -128, axis=1)",
        r"generic <- nn.pad(%12, -128f, pad_width=[[0, 0], [1, 33], [33, 1], [0, 0]])",
    ]
    file_path = os.path.abspath(output_file_name)
    # check that the dump file was created
    assert os.path.exists(file_path)
    with open(file_path, "r") as f:
        for i, file_string in enumerate(f):
            # Compare only up to the first '(' so constant values/spans don't matter.
            r_output = re.search(r"(.*)\(", file_string.strip(), re.DOTALL)
            r_expected = re.search(r"(.*)\(", expected[i].strip(), re.DOTALL)
            # check that there is the same sequence of operations and composites,
            # combined with target names
            if r_output and r_expected:
                assert r_output.group(0) == r_expected.group(0)
            else:
                assert r_output == r_expected
def test_save_dump_offloads_cmsis(tmp_path_factory):
    """Check that dump_offloads reports the expected cmsis-nn/generic operator partitioning.

    Same toy network as the ethos-u variant, compiled with only the cmsis-nn,c
    target stack; concatenate and pad are expected to remain generic.
    """
    tflite = pytest.importorskip("tflite")
    tensorflow = pytest.importorskip("tensorflow")
    pytest.importorskip("ethosu.vela")
    import tensorflow as tf
    from tvm.driver.tvmc.model import TVMCModel
    # Shapes for the toy network constructed in simple_net below.
    inp = (224, 224, 9)
    input_shape = (1, *inp)
    kernel_shape = (3, 3)
    padding = (1, 1, 1, 1)
    padding_out = (1, 33, 33, 1)
    @tf.function
    def simple_net(x):
        # conv -> (conv, conv) -> concat -> pad.
        weight_shape = [kernel_shape[0], kernel_shape[1], input_shape[3], 3]
        weights = tf.constant(np.random.uniform(size=weight_shape), dtype=tf.float32)
        weight_shape[2] = 3
        weights1 = tf.constant(np.random.uniform(size=weight_shape), dtype=tf.float32)
        weights2 = tf.constant(np.random.uniform(size=weight_shape), dtype=tf.float32)
        op = tf.nn.conv2d(
            x,
            filters=weights,
            strides=1,
            padding="SAME",
            data_format="NHWC",
            dilations=1,
        )
        op1 = tf.nn.conv2d(
            op,
            filters=weights1,
            strides=1,
            padding="SAME",
            data_format="NHWC",
            dilations=1,
        )
        op2 = tf.nn.conv2d(
            op,
            filters=weights2,
            strides=1,
            padding="SAME",
            data_format="NHWC",
            dilations=1,
        )
        op = tf.concat([op1, op2], 1)
        op = tf.pad(
            op,
            [[0, 0], [padding[0], padding_out[1]], [padding_out[2], padding[3]], [0, 0]],
            "CONSTANT",
        )
        return op
    from tests.python.contrib.test_ethosu.infra import get_tflite_graph
    _, tflite_graph = get_tflite_graph(simple_net, [input_shape])
    tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)
    mod, params = relay.frontend.from_tflite(tflite_model)
    tvmc_model = TVMCModel(mod, params)
    output_dir = tmp_path_factory.mktemp("tmp")
    output_file_name = os.path.join(str(output_dir), "list.txt")
    tvmc.compiler.compile_model(
        tvmc_model,
        target="cmsis-nn,c",
        runtime=Runtime("crt"),
        tuning_records="",
        package_path="module.tar",
        executor=Executor("aot", {"unpacked-api": 1, "interface-api": "c", "link-params": True}),
        cross="",
        cross_options="",
        output_format="mlf",
        dump_offloads=output_file_name,
        disabled_pass=[""],
        pass_context_configs=[
            "tir.disable_vectorize=1",
            "tir.usmp.enable=1",
            "tir.usmp.algorithm=hill_climb",
            "tir.disable_storage_rewrite=1",
            "relay.frontend.fill_span=1",
        ],
        additional_target_options={
            "c": {"mcpu": "cortex-m55"},
            "cmsis-nn": {"mcpu": "cortex-m55"},
        },
    )
    # Reference dump: operator totals, then each op annotated with its assigned target.
    expected = [
        r"Total number of operators and distribution by targets",
        r"Total: 11",
        r"cmsis-nn: 9",
        r"generic: 2",
        r"",
        r"cmsis-nn <- cmsis-nn.qnn_conv2d",
        r'cmsis-nn <- %0 = qnn.conv2d(%x, %v_param_1, -128, 0, 0.00392157f, meta[relay.Constant][0], padding=[1, 1, 1, 1], channels=3, kernel_size=[3, 3], data_layout="NHWC", kernel_layout="HWIO", out_dtype="int32")',
        r"cmsis-nn <- %1 = nn.bias_add(%0, %v_param_2, axis=3)",
        r'cmsis-nn <- %2 = qnn.requantize(%1, meta[relay.Constant][1], 0, 0.115114f, -128, axis=3, out_dtype="int8")',
        r"cmsis-nn <- cmsis-nn.qnn_conv2d",
        r'cmsis-nn <- %3 = qnn.conv2d(%2, %v_param_3, -128, 0, 0.115114f, meta[relay.Constant][2], padding=[1, 1, 1, 1], channels=3, kernel_size=[3, 3], data_layout="NHWC", kernel_layout="HWIO", out_dtype="int32")',
        r"cmsis-nn <- %4 = nn.bias_add(%3, %v_param_4, axis=3)",
        r'cmsis-nn <- %7 = qnn.requantize(%4, meta[relay.Constant][3], 0, 1.59328f, -128, axis=3, out_dtype="int8")',
        r"cmsis-nn <- cmsis-nn.qnn_conv2d",
        r'cmsis-nn <- %5 = qnn.conv2d(%2, %v_param_5, -128, 0, 0.115114f, meta[relay.Constant][4], padding=[1, 1, 1, 1], channels=3, kernel_size=[3, 3], data_layout="NHWC", kernel_layout="HWIO", out_dtype="int32")',
        r"cmsis-nn <- %6 = nn.bias_add(%5, %v_param_6, axis=3)",
        r'cmsis-nn <- %8 = qnn.requantize(%6, meta[relay.Constant][5], 0, 1.59328f, -128, axis=3, out_dtype="int8")',
        r" %9 = (%7, %8)",
        r" %10 = (1.59328f, 1.59328f)",
        r" %11 = (-128, -128)",
        r"generic <- %12 = qnn.concatenate(%9, %10, %11, 1.59328f, -128, axis=1)",
        r"generic <- nn.pad(%12, -128f, pad_width=[[0, 0], [1, 33], [33, 1], [0, 0]])",
    ]
    file_path = os.path.abspath(output_file_name)
    # check that the dump file was created
    assert os.path.exists(file_path)
    with open(file_path, "r") as f:
        for i, file_string in enumerate(f):
            # Compare only up to the first '(' so constant values/spans don't matter.
            r_output = re.search(r"(.*)\(", file_string.replace("\n", ""), re.DOTALL)
            r_expected = re.search(r"(.*)\(", expected[i], re.DOTALL)
            # check that there is the same sequence of operations and composites,
            # combined with target names
            if r_output and r_expected:
                assert r_output.group(0) == r_expected.group(0)
            else:
                assert file_string.replace("\n", "") == expected[i]
def test_save_dump_offloads_generic(tmp_path_factory):
    """Check that dump_offloads attributes every operator to 'generic' with a plain c target.

    Same toy network as the other offload tests, compiled with only the c
    target, so no operator should be offloaded to an external codegen.
    """
    tflite = pytest.importorskip("tflite")
    tensorflow = pytest.importorskip("tensorflow")
    pytest.importorskip("ethosu.vela")
    import tensorflow as tf
    from tvm.driver.tvmc.model import TVMCModel
    # Shapes for the toy network constructed in simple_net below.
    inp = (224, 224, 9)
    input_shape = (1, *inp)
    kernel_shape = (3, 3)
    padding = (1, 1, 1, 1)
    padding_out = (1, 33, 33, 1)
    @tf.function
    def simple_net(x):
        # conv -> (conv, conv) -> concat -> pad.
        weight_shape = [kernel_shape[0], kernel_shape[1], input_shape[3], 3]
        weights = tf.constant(np.random.uniform(size=weight_shape), dtype=tf.float32)
        weight_shape[2] = 3
        weights1 = tf.constant(np.random.uniform(size=weight_shape), dtype=tf.float32)
        weights2 = tf.constant(np.random.uniform(size=weight_shape), dtype=tf.float32)
        op = tf.nn.conv2d(
            x,
            filters=weights,
            strides=1,
            padding="SAME",
            data_format="NHWC",
            dilations=1,
        )
        op1 = tf.nn.conv2d(
            op,
            filters=weights1,
            strides=1,
            padding="SAME",
            data_format="NHWC",
            dilations=1,
        )
        op2 = tf.nn.conv2d(
            op,
            filters=weights2,
            strides=1,
            padding="SAME",
            data_format="NHWC",
            dilations=1,
        )
        op = tf.concat([op1, op2], 1)
        op = tf.pad(
            op,
            [[0, 0], [padding[0], padding_out[1]], [padding_out[2], padding[3]], [0, 0]],
            "CONSTANT",
        )
        return op
    from tests.python.contrib.test_ethosu.infra import get_tflite_graph
    _, tflite_graph = get_tflite_graph(simple_net, [input_shape])
    tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)
    mod, params = relay.frontend.from_tflite(tflite_model)
    tvmc_model = TVMCModel(mod, params)
    output_dir = tmp_path_factory.mktemp("tmp")
    output_file_name = os.path.join(str(output_dir), "list.txt")
    tvmc.compiler.compile_model(
        tvmc_model,
        target="c",
        runtime=Runtime("crt"),
        tuning_records="",
        package_path="module.tar",
        executor=Executor("aot", {"unpacked-api": 1, "interface-api": "c", "link-params": True}),
        cross="",
        cross_options="",
        output_format="mlf",
        dump_offloads=output_file_name,
        disabled_pass=[""],
        pass_context_configs=[
            "tir.disable_vectorize=1",
            "tir.usmp.enable=1",
            "tir.usmp.algorithm=hill_climb",
            "tir.disable_storage_rewrite=1",
            "relay.frontend.fill_span=1",
        ],
        additional_target_options={
            "c": {"mcpu": "cortex-m55"},
        },
    )
    # Reference dump: operator totals, then each op annotated with its assigned target.
    expected = [
        r"Total number of operators and distribution by targets",
        r"Total: 11",
        r"generic: 11",
        r"",
        r'generic <- %0 = qnn.conv2d(%x, %v_param_1, -128, 0, 0.00392157f, meta[relay.Constant][0], padding=[1, 1, 1, 1], channels=3, kernel_size=[3, 3], data_layout="NHWC", kernel_layout="HWIO", out_dtype="int32")',
        r"generic <- %1 = nn.bias_add(%0, %v_param_2, axis=3)",
        r'generic <- %2 = qnn.requantize(%1, meta[relay.Constant][1], 0, 0.109484f, -128, axis=3, out_dtype="int8")',
        r'generic <- %3 = qnn.conv2d(%2, %v_param_3, -128, 0, 0.109484f, meta[relay.Constant][2], padding=[1, 1, 1, 1], channels=3, kernel_size=[3, 3], data_layout="NHWC", kernel_layout="HWIO", out_dtype="int32")',
        r"generic <- %4 = nn.bias_add(%3, %v_param_4, axis=3)",
        r'generic <- %5 = qnn.conv2d(%2, %v_param_5, -128, 0, 0.109484f, meta[relay.Constant][4], padding=[1, 1, 1, 1], channels=3, kernel_size=[3, 3], data_layout="NHWC", kernel_layout="HWIO", out_dtype="int32")',
        r"generic <- %6 = nn.bias_add(%5, %v_param_6, axis=3)",
        r'generic <- %7 = qnn.requantize(%4, meta[relay.Constant][3], 0, 1.45572f, -128, axis=3, out_dtype="int8")',
        r'generic <- %8 = qnn.requantize(%6, meta[relay.Constant][5], 0, 1.45572f, -128, axis=3, out_dtype="int8")',
        r" %9 = (%7, %8)",
        r" %10 = (1.45572f, 1.45572f)",
        r" %11 = (-128, -128)",
        r"generic <- %12 = qnn.concatenate(%9, %10, %11, 1.45572f, -128, axis=1)",
        r"generic <- nn.pad(%12, -128f, pad_width=[[0, 0], [1, 33], [33, 1], [0, 0]])",
    ]
    file_path = os.path.abspath(output_file_name)
    # check that the dump file was created
    assert os.path.exists(file_path)
    with open(file_path, "r") as f:
        for i, file_string in enumerate(f):
            # Compare only up to the first '(' so constant values/spans don't matter.
            r_output = re.search(r"(.*)\(", file_string.replace("\n", ""), re.DOTALL)
            r_expected = re.search(r"(.*)\(", expected[i], re.DOTALL)
            # check that there is the same sequence of operations and composites,
            # combined with target names
            if r_output and r_expected:
                assert r_output.group(0) == r_expected.group(0)
            else:
                assert file_string.replace("\n", "") == expected[i]
# End to end tests for compilation
def verify_tvmc_package(tvmc_package, dumps_path, use_vm=False):
    """Shared output-type checks for a compiled TVMCPackage and its dump file."""
    assert type(tvmc_package) is TVMCPackage
    assert os.path.exists(dumps_path)
    assert type(tvmc_package.lib_path) is str
    if not use_vm:
        # Graph-executor artifacts: graph JSON plus serialized params.
        assert type(tvmc_package.graph) is str
        assert type(tvmc_package.params) is bytearray
    else:
        # VM artifacts carry no separate graph/params.
        assert tvmc_package.graph is None
        assert tvmc_package.params is None
def verify_compile_tflite_module(model, shape_dict=None, use_vm=False):
    """Compile a TFLite model (forcing NCHW) and validate the resulting package."""
    pytest.importorskip("tflite")
    loaded = tvmc.load(model, shape_dict=shape_dict)
    package = tvmc.compile(
        loaded,
        target="llvm",
        dump_code="ll",
        desired_layout="NCHW",
        use_vm=use_vm,
    )
    verify_tvmc_package(package, package.package_path + ".ll", use_vm=use_vm)
@pytest.mark.parametrize("use_vm", [True, False])
def test_compile_tflite_module(use_vm, tflite_mobilenet_v1_1_quant):
    """Compile quantized MobileNet with the default and an overridden input shape."""
    # TFLite may be absent in some CI environments; skip if so.
    pytest.importorskip("tflite")
    # Default compilation.
    verify_compile_tflite_module(tflite_mobilenet_v1_1_quant)
    # Manual shape override.
    override = tvmc.shape_parser.parse_shape_string("input:[1,224,224,3]")
    verify_compile_tflite_module(tflite_mobilenet_v1_1_quant, override, use_vm=use_vm)
def test_single_tir_dump(tflite_mobilenet_v1_1_quant):
    """Requesting a single 'tir' dump produces a .tir file containing TIR text."""
    pytest.importorskip("tflite")
    package = tvmc.compile(tvmc.load(tflite_mobilenet_v1_1_quant), target="llvm", dump_code="tir")
    tir_file = package.package_path + ".tir"
    assert os.path.exists(tir_file)
    with open(tir_file) as dump:
        assert "tir" in dump.read()
def test_code_dumps(tflite_mobilenet_v1_1_quant):
    """Each requested dump format yields a non-empty sidecar file."""
    pytest.importorskip("tflite")
    model = tvmc.load(tflite_mobilenet_v1_1_quant)
    formats = ["asm", "ll", "tir", "relay"]
    package = tvmc.compile(model, target="llvm", dump_code=formats)
    for fmt in formats:
        dump_file = "{}.{}".format(package.package_path, fmt)
        assert os.path.exists(dump_file)
        with open(dump_file) as dump:
            assert len(dump.read()) > 0
# This test will be skipped if the AArch64 cross-compilation toolchain is not installed.
@pytest.mark.skipif(
    not shutil.which("aarch64-linux-gnu-gcc"), reason="cross-compilation toolchain not installed"
)
def test_cross_compile_aarch64_tflite_module(tflite_mobilenet_v1_1_quant):
    """Cross-compile a TFLite model for AArch64 and check the package artifacts."""
    pytest.importorskip("tflite")
    package = tvmc.compile(
        tvmc.load(tflite_mobilenet_v1_1_quant),
        target="llvm -device=arm_cpu -mtriple=aarch64-linux-gnu -mattr='+neon'",
        dump_code="asm",
        cross="aarch64-linux-gnu-gcc",
    )
    asm_dump = package.package_path + ".asm"
    # Expected graph-executor artifact types plus the asm dump on disk.
    assert type(package) is TVMCPackage
    assert type(package.graph) is str
    assert type(package.lib_path) is str
    assert type(package.params) is bytearray
    assert os.path.exists(asm_dump)
# This test will be skipped if the AArch64 cross-compilation toolchain is not installed.
@pytest.mark.skipif(
    not shutil.which("aarch64-linux-gnu-gcc"), reason="cross-compilation toolchain not installed"
)
def test_cross_compile_options_aarch64_tflite_module(tflite_mobilenet_v1_1_quant):
    """Cross-compile a TFLite model for AArch64 passing extra cross-compiler options."""
    pytest.importorskip("tflite")
    fake_sysroot_dir = utils.tempdir().relpath("")
    package = tvmc.compile(
        tvmc.load(tflite_mobilenet_v1_1_quant),
        target="llvm -device=arm_cpu -mtriple=aarch64-linux-gnu -mattr='+neon'",
        dump_code="asm",
        cross="aarch64-linux-gnu-gcc",
        cross_options="--sysroot=" + fake_sysroot_dir,
    )
    asm_dump = package.package_path + ".asm"
    # Expected graph-executor artifact types plus the asm dump on disk.
    assert type(package) is TVMCPackage
    assert type(package.graph) is str
    assert type(package.lib_path) is str
    assert type(package.params) is bytearray
    assert os.path.exists(asm_dump)
def test_compile_keras__save_module(keras_resnet50, tmpdir_factory):
    """Compile with an explicit package_path, then reload the saved module from disk."""
    # tensorflow/Keras may be absent in some CI environments; skip if so.
    pytest.importorskip("tensorflow")
    out_dir = tmpdir_factory.mktemp("saved_output")
    module_file = os.path.join(out_dir, "saved.tar")
    tvmc.compile(tvmc.load(keras_resnet50), target="llvm", dump_code="ll", package_path=module_file)
    assert os.path.exists(module_file), "output file {0} should exist".format(module_file)
    # The saved artifact must load back into a usable package.
    reloaded = TVMCPackage(package_path=module_file)
    assert type(reloaded.lib_path) is str
    assert type(reloaded.graph) is str
    assert type(reloaded.params) is bytearray
# This test will be skipped if the AArch64 cross-compilation toolchain is not installed.
@pytest.mark.skipif(
    not shutil.which("aarch64-linux-gnu-gcc"), reason="cross-compilation toolchain not installed"
)
def test_cross_compile_aarch64_keras_module(keras_resnet50):
    """Cross-compile a Keras model for AArch64 and check the package artifacts."""
    # tensorflow/Keras may be absent in some CI environments; skip if so.
    pytest.importorskip("tensorflow")
    package = tvmc.compile(
        tvmc.load(keras_resnet50),
        target="llvm -device=arm_cpu -mtriple=aarch64-linux-gnu -mattr='+neon'",
        dump_code="asm",
        cross="aarch64-linux-gnu-gcc",
    )
    asm_dump = package.package_path + ".asm"
    # Expected graph-executor artifact types plus the asm dump on disk.
    assert type(package) is TVMCPackage
    assert type(package.graph) is str
    assert type(package.lib_path) is str
    assert type(package.params) is bytearray
    assert os.path.exists(asm_dump)
# This test will be skipped if the AArch64 cross-compilation toolchain is not installed.
@pytest.mark.skipif(
    not shutil.which("aarch64-linux-gnu-gcc"), reason="cross-compilation toolchain not installed"
)
def test_cross_compile_options_aarch64_keras_module(keras_resnet50):
    """Cross-compile a Keras model for AArch64 passing extra cross-compiler options."""
    # tensorflow/Keras may be absent in some CI environments; skip if so.
    pytest.importorskip("tensorflow")
    fake_sysroot_dir = utils.tempdir().relpath("")
    package = tvmc.compile(
        tvmc.load(keras_resnet50),
        target="llvm -device=arm_cpu -mtriple=aarch64-linux-gnu -mattr='+neon'",
        dump_code="asm",
        cross="aarch64-linux-gnu-gcc",
        cross_options="--sysroot=" + fake_sysroot_dir,
    )
    asm_dump = package.package_path + ".asm"
    # Expected graph-executor artifact types plus the asm dump on disk.
    assert type(package) is TVMCPackage
    assert type(package.graph) is str
    assert type(package.lib_path) is str
    assert type(package.params) is bytearray
    assert os.path.exists(asm_dump)
def verify_compile_onnx_module(model, shape_dict=None, use_vm=False):
    """Compile an ONNX model and validate the resulting package."""
    # ONNX may be absent in some CI environments; skip if so.
    pytest.importorskip("onnx")
    loaded = tvmc.load(model, shape_dict=shape_dict)
    package = tvmc.compile(loaded, target="llvm", dump_code="ll", use_vm=use_vm)
    verify_tvmc_package(package, package.package_path + ".ll", use_vm=use_vm)
@pytest.mark.parametrize("use_vm", [True, False])
def test_compile_onnx_module(use_vm, onnx_resnet50):
    """Compile ONNX ResNet50 with the default and an overridden input shape."""
    # Default compilation.
    verify_compile_onnx_module(onnx_resnet50)
    # Manual shape override.
    override = tvmc.shape_parser.parse_shape_string("data:[1,3,200,200]")
    verify_compile_onnx_module(onnx_resnet50, override, use_vm=use_vm)
# This test will be skipped if the AArch64 cross-compilation toolchain is not installed.
@pytest.mark.skipif(
    not shutil.which("aarch64-linux-gnu-gcc"), reason="cross-compilation toolchain not installed"
)
def test_cross_compile_aarch64_onnx_module(onnx_resnet50):
    """Cross-compile an ONNX model for AArch64 and check the package artifacts."""
    # ONNX may be absent in some CI environments; skip if so.
    pytest.importorskip("onnx")
    package = tvmc.compile(
        tvmc.load(onnx_resnet50),
        target="llvm -device=arm_cpu -mtriple=aarch64-linux-gnu -mattr=+neon",
        dump_code="asm",
        cross="aarch64-linux-gnu-gcc",
    )
    asm_dump = package.package_path + ".asm"
    # Expected graph-executor artifact types plus the asm dump on disk.
    assert type(package) is TVMCPackage
    assert type(package.graph) is str
    assert type(package.lib_path) is str
    assert type(package.params) is bytearray
    assert os.path.exists(asm_dump)
# This test will be skipped if the AArch64 cross-compilation toolchain is not installed.
@pytest.mark.skipif(
    not shutil.which("aarch64-linux-gnu-gcc"), reason="cross-compilation toolchain not installed"
)
def test_cross_compile_options_aarch64_onnx_module(onnx_resnet50):
    """Cross-compile an ONNX model for AArch64 passing extra cross-compiler options."""
    # ONNX may be absent in some CI environments; skip if so.
    pytest.importorskip("onnx")
    fake_sysroot_dir = utils.tempdir().relpath("")
    package = tvmc.compile(
        tvmc.load(onnx_resnet50),
        target="llvm -device=arm_cpu -mtriple=aarch64-linux-gnu -mattr=+neon",
        dump_code="asm",
        cross="aarch64-linux-gnu-gcc",
        cross_options="--sysroot=" + fake_sysroot_dir,
    )
    asm_dump = package.package_path + ".asm"
    # Expected graph-executor artifact types plus the asm dump on disk.
    assert type(package) is TVMCPackage
    assert type(package.graph) is str
    assert type(package.lib_path) is str
    assert type(package.params) is bytearray
    assert os.path.exists(asm_dump)
def verify_compile_paddle_module(model, shape_dict=None):
    """Compile a Paddle model for llvm with NCHW layout and validate the package."""
    pytest.importorskip("paddle")
    loaded = tvmc.load(model, "paddle", shape_dict=shape_dict)
    package = tvmc.compile(loaded, target="llvm", dump_code="ll", desired_layout="NCHW")
    ll_path = package.package_path + ".ll"
    # check for output types
    for value, expected in (
        (package, TVMCPackage),
        (package.graph, str),
        (package.lib_path, str),
        (package.params, bytearray),
    ):
        assert type(value) is expected
    assert os.path.exists(ll_path)
def test_compile_paddle_module(paddle_resnet50):
    """Paddle compilation with inferred shapes, then with a manual shape override."""
    # some CI environments wont offer Paddle, so skip in case it is not present
    pytest.importorskip("paddle")
    # Check default compilation.
    verify_compile_paddle_module(paddle_resnet50)
    # Check with manual shape override
    override = tvmc.shape_parser.parse_shape_string("inputs:[1,3,224,224]")
    verify_compile_paddle_module(paddle_resnet50, override)
# This test will be skipped if the AArch64 cross-compilation toolchain is not installed.
@pytest.mark.skipif(
    not shutil.which("aarch64-linux-gnu-gcc"), reason="cross-compilation toolchain not installed"
)
def test_cross_compile_aarch64_paddle_module(paddle_resnet50):
    """Cross-compile a Paddle ResNet50 for AArch64 and validate the produced artifacts."""
    # some CI environments wont offer paddle, so skip in case it is not present
    pytest.importorskip("paddle")

    package = tvmc.compile(
        tvmc.load(paddle_resnet50, "paddle"),
        target="llvm -device=arm_cpu -mtriple=aarch64-linux-gnu -mattr=+neon",
        dump_code="asm",
        cross="aarch64-linux-gnu-gcc",
    )
    asm_path = package.package_path + ".asm"

    # check for output types
    for value, expected in (
        (package, TVMCPackage),
        (package.graph, str),
        (package.lib_path, str),
        (package.params, bytearray),
    ):
        assert type(value) is expected
    assert os.path.exists(asm_path)
# This test will be skipped if the AArch64 cross-compilation toolchain is not installed.
@pytest.mark.skipif(
    not shutil.which("aarch64-linux-gnu-gcc"), reason="cross-compilation toolchain not installed"
)
def test_cross_compile_options_aarch64_paddle_module(paddle_resnet50):
    """Cross-compile Paddle ResNet50 for AArch64, passing extra cross-compiler options."""
    # some CI environments wont offer paddle, so skip in case it is not present
    pytest.importorskip("paddle")

    sysroot = utils.tempdir().relpath("")
    package = tvmc.compile(
        tvmc.load(paddle_resnet50, "paddle"),
        target="llvm -device=arm_cpu -mtriple=aarch64-linux-gnu -mattr=+neon",
        dump_code="asm",
        cross="aarch64-linux-gnu-gcc",
        cross_options="--sysroot=" + sysroot,
    )
    asm_path = package.package_path + ".asm"

    # check for output types
    for value, expected in (
        (package, TVMCPackage),
        (package.graph, str),
        (package.lib_path, str),
        (package.params, bytearray),
    ):
        assert type(value) is expected
    assert os.path.exists(asm_path)
@tvm.testing.requires_opencl
def test_compile_opencl(tflite_mobilenet_v1_0_25_128):
    """Compile a TFLite model for the OpenCL target with an llvm host."""
    pytest.importorskip("tflite")

    package = tvmc.compile(
        tvmc.load(tflite_mobilenet_v1_0_25_128),
        target="opencl -host=llvm",
        desired_layout="NCHW",
        dump_code="asm",
    )
    asm_path = package.package_path + ".asm"

    # check for output types
    for value, expected in (
        (package, TVMCPackage),
        (package.graph, str),
        (package.lib_path, str),
        (package.params, bytearray),
    ):
        assert type(value) is expected
    assert os.path.exists(asm_path)
@tvm.testing.requires_cmsisnn
def test_compile_tflite_module_with_external_codegen_cmsisnn(
    tmpdir_factory, tflite_cnn_s_quantized
):
    """Compile a quantized TFLite model with the CMSIS-NN external codegen into an MLF package
    and check that the expected number of C sources end up in the archive."""
    pytest.importorskip("tflite")

    output_dir = tmpdir_factory.mktemp("mlf")
    tvmc_model = tvmc.load(tflite_cnn_s_quantized)
    output_file_name = f"{output_dir}/file.tar"

    tvmc.compiler.compile_model(
        tvmc_model,
        # Plain string literal: the previous f-string had no placeholders (W1309).
        target="cmsis-nn, c -mcpu=cortex-m55",
        runtime=Runtime("crt", {"system-lib": True}),
        executor=Executor("aot"),
        output_format="mlf",
        package_path=output_file_name,
        pass_context_configs=["tir.disable_vectorize=true"],
    )

    # check whether an MLF package was created
    assert os.path.exists(output_file_name)

    # check whether the expected number of C sources are in the tarfile
    with tarfile.open(output_file_name) as mlf_package:
        c_source_files = [
            name
            for name in mlf_package.getnames()
            if re.match(r"\./codegen/host/src/\D+\d+\.c", name)
        ]
        assert len(c_source_files) == 4
@tvm.testing.requires_ethosn
def test_compile_tflite_module_with_external_codegen_ethos_n78(tflite_mobilenet_v1_1_quant):
    """Compile a quantized TFLite model offloading to the Ethos-N78 external codegen."""
    pytest.importorskip("tflite")

    package = tvmc.compile(
        tvmc.load(tflite_mobilenet_v1_1_quant),
        target="ethos-n -variant=n78, llvm",
        dump_code="relay",
    )
    relay_path = package.package_path + ".relay"

    # check for output types
    for value, expected in (
        (package, TVMCPackage),
        (package.graph, str),
        (package.lib_path, str),
        (package.params, bytearray),
    ):
        assert type(value) is expected
    assert os.path.exists(relay_path)
@tvm.testing.requires_vitis_ai
def test_compile_tflite_module_with_external_codegen_vitis_ai(tflite_mobilenet_v1_1_quant):
    """Compile a quantized TFLite model offloading to the Vitis-AI external codegen."""
    pytest.importorskip("tflite")

    package = tvmc.compiler.compile_model(
        tvmc.load(tflite_mobilenet_v1_1_quant),
        target="vitis-ai -dpu=DPUCZDX8G-zcu104 -export_runtime_module=vitis_ai.rtmod, llvm",
        dump_code="relay",
    )
    relay_path = package.package_path + ".relay"

    # check for output types
    for value, expected in (
        (package, TVMCPackage),
        (package.graph, str),
        (package.lib_path, str),
        (package.params, bytearray),
    ):
        assert type(value) is expected
    assert os.path.exists(relay_path)
def test_compile_tflite_module_with_external_codegen_ethosu(
    tmpdir_factory, tflite_mobilenet_v1_1_quant
):
    """Compile for every supported Ethos-U55 configuration and check the MLF contents."""
    pytest.importorskip("tflite")
    pytest.importorskip("ethosu.vela")
    ACCEL_TYPES = ["ethos-u55-256", "ethos-u55-128", "ethos-u55-64", "ethos-u55-32"]
    output_dir = tmpdir_factory.mktemp("mlf")
    tvmc_model = tvmc.load(tflite_mobilenet_v1_1_quant)
    # One MLF package per accelerator configuration.
    for accel_type in ACCEL_TYPES:
        output_file_name = f"{output_dir}/file_{accel_type}.tar"
        tvmc.compiler.compile_model(
            tvmc_model,
            target=f"ethos-u -accelerator_config={accel_type}, c -mcpu=cortex-m55",
            runtime=Runtime("crt"),
            executor=Executor("aot", {"unpacked-api": True}),
            output_format="mlf",
            package_path=output_file_name,
            pass_context_configs=["tir.disable_vectorize=true"],
        )
        # check whether an MLF package was created
        assert os.path.exists(output_file_name)
        # check whether the expected number of C sources are in the tarfile
        with tarfile.open(output_file_name) as mlf_package:
            c_source_files = [
                name
                for name in mlf_package.getnames()
                if re.match(r"\./codegen/host/src/\D+\d+\.c", name)
            ]
            # The number of c_source_files depends on the number of fused subgraphs that
            # get offloaded to the NPU, e.g. conv2d->depthwise_conv2d->conv2d gets offloaded
            # as a single subgraph if both of these operators are supported by the NPU.
            # Currently there are three source files for CPU execution and one offload graph
            assert len(c_source_files) == 4
@mock.patch("tvm.relay.build")
@mock.patch("tvm.driver.tvmc.composite_target.get_codegen_by_target")
@mock.patch("tvm.driver.tvmc.load")
@mock.patch("tvm.transform.PassContext")
@mock.patch("tvm.driver.tvmc.model.TVMCPackage.__init__", return_value=None)
def test_compile_check_configs_composite_target(mock_pkg, mock_pc, mock_fe, mock_ct, mock_relay):
mock_codegen = {}
mock_codegen["config_key"] = "relay.ext.mock.options"
mock_codegen["pass_pipeline"] = lambda *args, **kwargs: None
mock_fe.return_value = mock.MagicMock()
mock_ct.return_value = mock_codegen
mock_relay.return_value = mock.MagicMock()
tvmc_model = tvmc.load("no_file_needed")
tvmc.compile(tvmc_model, target="mockcodegen -testopt=value, llvm")
assert mock_pc.call_count == 1
codegen_compile_context = mock.call(
config={"relay.ext.mock.options": {"testopt": "value"}},
opt_level=3,
disabled_pass=None,
instruments=None,
)
mock_pc.assert_has_calls(
[
codegen_compile_context,
codegen_compile_context.__enter__(),
codegen_compile_context.__exit__(None, None, None),
]
)
def test_compile_tflite_module_with_mod_name(tmpdir_factory, tflite_cnn_s_quantized):
    """Check that ``mod_name`` is propagated to generated source names and symbols
    in the MLF output."""
    pytest.importorskip("tflite")

    output_dir = tmpdir_factory.mktemp("mlf")
    tvmc_model = tvmc.load(tflite_cnn_s_quantized)
    output_file_name = f"{output_dir}/file.tar"

    tvmc.compiler.compile_model(
        tvmc_model,
        # Plain string literal: the previous f-string had no placeholders (W1309).
        target="c -mcpu=cortex-m55",
        runtime=Runtime("crt", {"system-lib": True}),
        executor=Executor("aot"),
        output_format="mlf",
        package_path=output_file_name,
        pass_context_configs=["tir.disable_vectorize=true"],
        mod_name="classify",
    )

    # check that an MLF package was created
    assert os.path.exists(output_file_name)

    with tarfile.open(output_file_name) as mlf_package:
        # check that the C source files have been named classify_lib*.c
        c_source_files = [
            name
            for name in mlf_package.getnames()
            if re.match(r"\./codegen/host/src/classify_lib\d+\.c", name)
        ]
        assert len(c_source_files) > 0

        # check that "default" doesn't occur in any of the C source files
        # check that function names are of the form "tvmgen_classify_*"
        for file_name in c_source_files:
            with mlf_package.extractfile(file_name) as f:
                content = f.read()
                assert b"default" not in content
                assert b"tvmgen_classify_" in content

        # check that tvmgen_classify_run() function exists
        with mlf_package.extractfile("./codegen/host/src/classify_lib0.c") as f:
            content = f.read()
            assert b"tvmgen_classify_run(" in content
@tvm.testing.requires_cmsisnn
def test_compile_tflite_module_with_mod_name_and_cmsisnn(tmpdir_factory, tflite_cnn_s_quantized):
    """Check ``mod_name`` propagation when the CMSIS-NN external codegen is enabled,
    including the renamed CMSIS-NN entry symbols."""
    pytest.importorskip("tflite")

    output_dir = tmpdir_factory.mktemp("mlf")
    tvmc_model = tvmc.load(tflite_cnn_s_quantized)
    output_file_name = f"{output_dir}/file.tar"

    tvmc.compiler.compile_model(
        tvmc_model,
        # Plain string literal: the previous f-string had no placeholders (W1309).
        target="cmsis-nn, c -mcpu=cortex-m55",
        runtime=Runtime("crt", {"system-lib": True}),
        executor=Executor("aot"),
        output_format="mlf",
        package_path=output_file_name,
        pass_context_configs=["tir.disable_vectorize=true"],
        mod_name="classify",
    )

    # check that an MLF package was created
    assert os.path.exists(output_file_name)

    with tarfile.open(output_file_name) as mlf_package:
        # check that the C source files have been named classify_lib*.c
        c_source_files = [
            name
            for name in mlf_package.getnames()
            if re.match(r"\./codegen/host/src/classify_lib\d+\.c", name)
        ]
        assert len(c_source_files) > 0

        # check that "default" doesn't occur in any of the C source files
        # check that function names are of the form "tvmgen_classify_*"
        for file_name in c_source_files:
            with mlf_package.extractfile(file_name) as f:
                content = f.read()
                assert b"default" not in content
                assert b"tvmgen_classify_" in content

        # check that tvmgen_classify_run() function exists
        with mlf_package.extractfile("./codegen/host/src/classify_lib0.c") as f:
            content = f.read()
            assert b"tvmgen_classify_run(" in content

        # check that CMSIS-NN function names are of the form "tvmgen_classify_cmsis_nn_main_*"
        with mlf_package.extractfile("./codegen/host/src/classify_lib2.c") as f:
            content = f.read()
            assert b"tvmgen_classify_cmsis_nn_main_" in content
def test_compile_tflite_module_with_mod_name_and_ethosu(
    tmpdir_factory, tflite_mobilenet_v1_1_quant
):
    """Check ``mod_name`` propagation when the Ethos-U external codegen is enabled,
    including the renamed microNPU entry symbols."""
    pytest.importorskip("tflite")
    pytest.importorskip("ethosu.vela")

    output_dir = tmpdir_factory.mktemp("mlf")
    tvmc_model = tvmc.load(tflite_mobilenet_v1_1_quant)
    output_file_name = f"{output_dir}/file.tar"

    tvmc.compiler.compile_model(
        tvmc_model,
        # Plain string literal: the previous f-string had no placeholders (W1309).
        target="ethos-u -accelerator_config=ethos-u55-256, c -mcpu=cortex-m55",
        runtime=Runtime("crt"),
        executor=Executor("aot", {"unpacked-api": True}),
        output_format="mlf",
        package_path=output_file_name,
        pass_context_configs=["tir.disable_vectorize=true"],
        mod_name="classify",
    )

    # check that an MLF package was created
    assert os.path.exists(output_file_name)

    with tarfile.open(output_file_name) as mlf_package:
        # check that the C source files have been named classify_lib*.c
        c_source_files = [
            name
            for name in mlf_package.getnames()
            if re.match(r"\./codegen/host/src/classify_lib\d+\.c", name)
        ]
        assert len(c_source_files) > 0

        # check that "default" doesn't occur in any of the C source files
        # check that function names are of the form "tvmgen_classify_*"
        for file_name in c_source_files:
            with mlf_package.extractfile(file_name) as f:
                content = f.read()
                assert b"default" not in content
                assert b"tvmgen_classify_" in content

        # check that tvmgen_classify_run() function exists
        with mlf_package.extractfile("./codegen/host/src/classify_lib0.c") as f:
            content = f.read()
            assert b"tvmgen_classify_run(" in content

        # check that microNPU function names are of the form "tvmgen_classify_ethos_u_main_*"
        with mlf_package.extractfile("./codegen/host/src/classify_lib2.c") as f:
            content = f.read()
            assert b"tvmgen_classify_ethos_u_main_" in content
@mock.patch("tvm.relay.build")
@mock.patch("tvm.driver.tvmc.load")
@mock.patch("tvm.driver.tvmc.model.TVMCPackage.__init__", return_value=None)
def test_compile_check_workspace_pools(mock_pkg, mock_fe, mock_relay):
mock_fe.return_value = mock.MagicMock()
mock_relay.return_value = mock.MagicMock()
memory_pools = WorkspaceMemoryPools(
[WorkspacePoolInfo(pool_name="sram", targets=[Target("llvm")])]
)
tvmc_model = tvmc.load("no_file_needed")
tvmc.compile(
tvmc_model,
target="llvm,c",
workspace_pools=memory_pools,
)
assert mock_relay.call_count == 1
assert mock_relay.call_args_list[0][1]["workspace_memory_pools"] == memory_pools
def test_compile_check_pass_instrument(keras_resnet50):
    """Pass instruments handed to tvmc.compile must fire for every pass run."""
    pytest.importorskip("tensorflow")

    @tvm.instrument.pass_instrument
    class PassesCounter:
        """Counts how often the before/after pass hooks are invoked."""

        def __init__(self):
            self.run_before_count = 0
            self.run_after_count = 0

        def run_before_pass(self, mod, info):
            self.run_before_count += 1

        def run_after_pass(self, mod, info):
            self.run_after_count += 1

    counter = PassesCounter()
    tvmc.compile(tvmc.load(keras_resnet50), target="llvm", instruments=[counter])
    # Some passes must have run, and each before-hook must be paired with an after-hook.
    assert counter.run_after_count > 0
    assert counter.run_after_count == counter.run_before_count
if __name__ == "__main__":
tvm.testing.main()
| 44,351 | 37.973638 | 232 | py |
tvm | tvm-main/tests/python/frontend/mxnet/test_qnn_ops_utils.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import tvm
from tvm import relay
from tvm.contrib import graph_executor
from tvm.relay.frontend.mxnet_qnn_op_utils import (
dequantize_mxnet_min_max,
quantize_mxnet_min_max,
get_mkldnn_int8_scale,
get_mkldnn_uint8_scale,
quantize_conv_bias_mkldnn_from_var,
)
def test_mkldnn_dequantize():
    """Dequantize uint8/int8 tensors via dequantize_mxnet_min_max and check float32 output."""

    def dequantize_test_driver(in_dtype, quant_args, in_data, verify_output_data):
        # Build a Relay function around the dequantize op, run it on CPU with the
        # graph executor, and compare against the precomputed golden output.
        shape = in_data.shape
        input_data = relay.var("input_data", shape=shape, dtype=in_dtype)
        min_range = quant_args["min_range"]
        max_range = quant_args["max_range"]
        dequantized_output = dequantize_mxnet_min_max(
            input_data, min_range=min_range, max_range=max_range, in_dtype=in_dtype
        )
        mod = relay.Function(relay.analysis.free_vars(dequantized_output), dequantized_output)
        mod = tvm.IRModule.from_expr(mod)
        with tvm.transform.PassContext(opt_level=3):
            graph, lib, params = relay.build(mod, "llvm", params=None)
        rt_mod = graph_executor.create(graph, lib, device=tvm.cpu(0))
        rt_mod.set_input(input_data=in_data)
        rt_mod.set_input(**params)
        rt_mod.run()
        res = rt_mod.get_output(0).numpy()
        # Result must match the golden values and be dequantized to float32.
        assert np.allclose(res, verify_output_data)
        assert res.dtype == np.float32

    def test_uint8_to_float32():
        # Golden data covers both ends of the uint8 range [0, 255].
        data = np.array([0, 1, 2, 3, 4, 251, 252, 253, 254, 255]).astype("uint8").reshape((2, 5))
        output = (
            np.array(
                [
                    0.0,
                    0.25048923,
                    0.50097847,
                    0.7514677,
                    1.0019569,
                    62.8728,
                    63.123287,
                    63.373775,
                    63.624268,
                    63.874756,
                ]
            )
            .astype("float32")
            .reshape((2, 5))
        )
        quant_args = {"min_range": -63.5, "max_range": 64}
        dequantize_test_driver(
            in_dtype="uint8", quant_args=quant_args, in_data=data, verify_output_data=output
        )

    def test_int8_to_float32():
        # Golden data covers both ends of the int8 range [-128, 127].
        data = (
            np.array([-126, -125, -124, -123, -122, 123, 124, 125, 126, 127])
            .astype("int8")
            .reshape((2, 5))
        )
        output = (
            np.array(
                [
                    -63.247063,
                    -62.745102,
                    -62.24314,
                    -61.74118,
                    -61.23922,
                    61.74118,
                    62.24314,
                    62.745102,
                    63.247063,
                    63.749023,
                ]
            )
            .astype("float32")
            .reshape((2, 5))
        )
        dequantize_args = {"min_range": -63.5, "max_range": 64}
        dequantize_test_driver(
            in_dtype="int8", quant_args=dequantize_args, in_data=data, verify_output_data=output
        )

    test_uint8_to_float32()
    test_int8_to_float32()
def test_mkldnn_quantize():
    """Quantize float32 tensors via quantize_mxnet_min_max and check uint8/int8 output."""

    def quantize_test_driver(out_dtype, quant_args, in_data, verify_output_data):
        # Build a Relay function around the quantize op, run it on CPU with the
        # graph executor, and compare against the precomputed golden output.
        shape = in_data.shape
        input_data = relay.var("input_data", shape=shape, dtype="float32")
        min_range = quant_args["min_range"]
        max_range = quant_args["max_range"]
        # quantize_mxnet_min_max also returns the derived scale and zero point,
        # which are not needed here.
        quantized_output, _, _ = quantize_mxnet_min_max(
            input_data, min_range=min_range, max_range=max_range, out_dtype=out_dtype
        )
        mod = relay.Function(relay.analysis.free_vars(quantized_output), quantized_output)
        mod = tvm.IRModule.from_expr(mod)
        with tvm.transform.PassContext(opt_level=3):
            graph, lib, params = relay.build(mod, "llvm", params=None)
        rt_mod = graph_executor.create(graph, lib, device=tvm.cpu(0))
        rt_mod.set_input(input_data=in_data)
        rt_mod.set_input(**params)
        rt_mod.run()
        res = rt_mod.get_output(0).numpy()
        # Result must match the golden values in both content and dtype.
        assert np.allclose(res, verify_output_data)
        assert res.dtype == verify_output_data.dtype

    def test_float32_to_uint8():
        # Inverse of the uint8 dequantize golden data: maps back onto [0, 255].
        data = (
            np.array(
                [
                    0.0,
                    0.25048923,
                    0.50097847,
                    0.7514677,
                    1.0019569,
                    62.8728,
                    63.123287,
                    63.373775,
                    63.624268,
                    63.874756,
                ]
            )
            .astype("float32")
            .reshape((2, 5))
        )
        output = np.array([0, 1, 2, 3, 4, 251, 252, 253, 254, 255]).astype("uint8").reshape((2, 5))
        quant_args = {"min_range": -63.5, "max_range": 64}
        quantize_test_driver(
            out_dtype="uint8", quant_args=quant_args, in_data=data, verify_output_data=output
        )

    def test_float32_to_int8():
        # Inverse of the int8 dequantize golden data: maps back onto [-128, 127].
        data = (
            np.array(
                [
                    -63.247063,
                    -62.745102,
                    -62.24314,
                    -61.74118,
                    -61.23922,
                    61.74118,
                    62.24314,
                    62.745102,
                    63.247063,
                    63.749023,
                ]
            )
            .astype("float32")
            .reshape((2, 5))
        )
        output = (
            np.array([-126, -125, -124, -123, -122, 123, 124, 125, 126, 127])
            .astype("int8")
            .reshape((2, 5))
        )
        quant_args = {"min_range": -63.5, "max_range": 64}
        quantize_test_driver(
            out_dtype="int8", quant_args=quant_args, in_data=data, verify_output_data=output
        )

    test_float32_to_uint8()
    test_float32_to_int8()
def test_get_mkldnn_int8_scale():
    """int8 scale derived from a symmetric (min, max) range."""
    low, high = -3.904039, 3.904039
    scale = get_mkldnn_int8_scale(range_max=high, range_min=low)
    assert np.allclose(scale, 0.03061991354976495)
def test_get_mkldnn_uint8_scale():
    """uint8 scale derived from a non-negative (min, max) range."""
    low, high = 0.0, 55.77269
    scale = get_mkldnn_uint8_scale(range_max=high, range_min=low)
    assert np.allclose(scale, 0.21828841189047482)
def test_quantize_conv_bias_mkldnn_from_var():
    """Bias quantization must produce a per-channel qnn.quantize call with int32 output."""
    bias = relay.var("bias", shape=(3,), dtype="float32")
    scale = tvm.nd.array(np.array([0.5, 0.6, 0.7]))
    quantized = quantize_conv_bias_mkldnn_from_var(bias, scale)
    assert isinstance(quantized, tvm.relay.expr.Call)
    assert quantized.op.name == "qnn.quantize"
    # Per-channel quantization along axis 0, accumulating into int32.
    assert quantized.attrs.axis == 0
    assert quantized.attrs.out_dtype == "int32"
    # The scale tensor is embedded as the second call argument.
    assert quantized.args[1].data == scale
if __name__ == "__main__":
test_mkldnn_dequantize()
test_mkldnn_quantize()
test_get_mkldnn_int8_scale()
test_get_mkldnn_uint8_scale()
test_quantize_conv_bias_mkldnn_from_var()
| 7,760 | 33.493333 | 99 | py |
tvm | tvm-main/tests/python/frontend/mxnet/test_forward.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import operator
import random
import numpy as np
import pytest
import tvm
import tvm.testing
from tvm import relay, te
from tvm.contrib import graph_executor
import model_zoo
import mxnet as mx
from mxnet import gluon
from mxnet.gluon.model_zoo import vision
def verify_mxnet_frontend_impl(
    mx_symbol,
    data_shape=(1, 3, 224, 224),
    out_shape=(1, 1000),
    gluon_impl=False,
    name=None,
    dtype="float32",
):
    """Use name different from test to avoid pytest picking it up.

    Runs ``mx_symbol`` (or, when ``gluon_impl`` is True, the gluon model zoo
    model named ``name``) through MXNet directly and through the TVM Relay
    frontend on a random input of ``data_shape``, asserting both outputs
    match on every enabled target.
    """
    if gluon_impl:

        def get_gluon_output(name, x):
            # Reference path: execute the gluon model directly in MXNet.
            net = vision.get_model(name)
            net.collect_params().initialize(mx.init.Xavier())
            net_sym = gluon.nn.SymbolBlock(
                outputs=net(mx.sym.var("data")),
                inputs=mx.sym.var("data"),
                params=net.collect_params(),
            )
            out = net_sym(mx.nd.array(x.astype(dtype))).asnumpy()
            return out, net_sym

    else:

        def get_mxnet_output(symbol, x, dtype="float32"):
            # Reference path: bind the raw symbol with the MXNet Module API and
            # also capture its initialized parameters for the Relay import.
            from collections import namedtuple

            Batch = namedtuple("Batch", ["data"])
            mod = mx.mod.Module(symbol, label_names=None)
            mod.bind(data_shapes=[("data", x.shape)], for_training=False)
            mod.init_params()
            mod.forward(Batch([mx.nd.array(x.astype(dtype))]))
            out = mod.get_outputs()[0].asnumpy()
            args, auxs = mod.get_params()
            return out, args, auxs

    def get_tvm_output(symbol, x, args, auxs, target, dev, dtype="float32"):
        # TVM path: import through the Relay frontend, build, and run with the
        # graph executor on the requested target/device.
        shape_dict = {"data": x.shape}
        if gluon_impl:
            mod, params = relay.frontend.from_mxnet(symbol, shape_dict)
        else:
            mod, params = relay.frontend.from_mxnet(
                symbol, shape_dict, arg_params=args, aux_params=auxs
            )
        with tvm.transform.PassContext(opt_level=3):
            lib = relay.build(mod, target, params=params)
        m = graph_executor.GraphModule(lib["default"](dev))
        # set inputs
        m.set_input("data", tvm.nd.array(x.astype(dtype)))
        m.run()
        # get outputs
        out = m.get_output(0, tvm.nd.empty(out_shape, dtype))
        return out.numpy()

    # random input
    x = np.random.uniform(size=data_shape)
    if gluon_impl:
        gluon_out, gluon_sym = get_gluon_output(name, x)
        for target, dev in tvm.testing.enabled_targets():
            tvm_out = get_tvm_output(gluon_sym, x, None, None, target, dev, dtype)
            tvm.testing.assert_allclose(gluon_out, tvm_out, rtol=1e-5, atol=1e-5)
    else:
        mx_out, args, auxs = get_mxnet_output(mx_symbol, x, dtype)
        # The input placeholder must not be treated as a bound parameter.
        assert "data" not in args
        for target, dev in tvm.testing.enabled_targets():
            tvm_out = get_tvm_output(mx_symbol, x, args, auxs, target, dev, dtype)
            tvm.testing.assert_allclose(mx_out, tvm_out, rtol=1e-5, atol=1e-5)
@tvm.testing.uses_gpu
def test_forward_mlp():
    """MLP on MNIST-shaped input, compared against MXNet."""
    net = model_zoo.mx_mlp()
    verify_mxnet_frontend_impl(net, data_shape=(1, 1, 28, 28), out_shape=(1, 10))
@tvm.testing.uses_gpu
def test_forward_vgg():
    """VGG (depth 11) end to end, compared against MXNet."""
    for depth in [11]:
        verify_mxnet_frontend_impl(model_zoo.mx_vgg(depth))
@tvm.testing.uses_gpu
def test_forward_resnet():
    """ResNet end to end, compared against MXNet."""
    for n in [18]:
        # Use the loop variable instead of a hard-coded 18, so adding more
        # depths to the list above actually exercises them (latent bug fix).
        mx_sym = model_zoo.mx_resnet(n)
        verify_mxnet_frontend_impl(mx_sym)
@tvm.testing.uses_gpu
def test_forward_leaky_relu():
    """LeakyReLU with the default and with the explicit "leaky" act_type."""
    base = mx.sym.var("data")
    base = mx.sym.concat(base, -base, dim=1)  # negative part explicitly
    for sym in (mx.sym.LeakyReLU(base), mx.sym.LeakyReLU(base, act_type="leaky")):
        verify_mxnet_frontend_impl(sym, (1, 3, 100, 100), (1, 6, 100, 100))
@tvm.testing.uses_gpu
def test_forward_elu():
    """ELU activation via LeakyReLU(act_type="elu")."""
    base = mx.sym.var("data")
    base = mx.sym.concat(base, -base, dim=1)  # negative part explicitly
    verify_mxnet_frontend_impl(
        mx.sym.LeakyReLU(base, act_type="elu"), (1, 3, 100, 100), (1, 6, 100, 100)
    )
@tvm.testing.uses_gpu
def test_forward_rrelu():
    """Randomized leaky ReLU; only the first output (the activation) is compiled."""
    base = mx.sym.var("data")
    base = mx.sym.concat(base, -base, dim=1)  # negative part explicitly
    rrelu = mx.sym.LeakyReLU(base, act_type="rrelu", lower_bound=0.3, upper_bound=0.7)
    verify_mxnet_frontend_impl(rrelu[0], (1, 3, 100, 100), (1, 6, 100, 100))
@tvm.testing.uses_gpu
def test_forward_prelu():
    """PReLU activation via LeakyReLU(act_type="prelu")."""
    base = mx.sym.var("data")
    base = mx.sym.concat(base, -base, dim=1)  # negative part explicitly
    verify_mxnet_frontend_impl(
        mx.sym.LeakyReLU(base, act_type="prelu"), (1, 3, 100, 100), (1, 6, 100, 100)
    )
@tvm.testing.uses_gpu
def test_forward_gelu():
    """GELU activation via LeakyReLU(act_type="gelu")."""
    base = mx.sym.var("data")
    base = mx.sym.concat(base, -base, dim=1)  # negative part explicitly
    verify_mxnet_frontend_impl(
        mx.sym.LeakyReLU(base, act_type="gelu"), (1, 3, 100, 100), (1, 6, 100, 100)
    )
@tvm.testing.uses_gpu
def test_forward_softrelu():
    """Softrelu (softplus) activation via Activation(act_type="softrelu")."""
    base = mx.sym.var("data")
    base = mx.sym.concat(base, -base, dim=1)  # negative part explicitly
    verify_mxnet_frontend_impl(
        mx.sym.Activation(base, act_type="softrelu"), (1, 3, 100, 100), (1, 6, 100, 100)
    )
@tvm.testing.uses_gpu
def test_forward_fc_flatten():
    """FullyConnected with and without the flatten option (mxnet 0.11.1+)."""
    # test flatten=True option in mxnet 0.11.1
    data = mx.sym.var("data")
    try:
        mx_sym = mx.sym.FullyConnected(data, num_hidden=100, flatten=True)
        verify_mxnet_frontend_impl(mx_sym, (1, 3, 100, 100), (1, 100))
        mx_sym = mx.sym.FullyConnected(mx.sym.Flatten(data), num_hidden=100, flatten=False)
        verify_mxnet_frontend_impl(mx_sym, (1, 3, 100, 100), (1, 100))
    except Exception:  # pylint: disable=broad-except
        # Older mxnet versions don't support the `flatten` keyword; skip then.
        # Catch Exception (not bare `except:`) so KeyboardInterrupt/SystemExit
        # are not swallowed.
        pass
@tvm.testing.uses_gpu
def test_forward_clip():
    """clip() on a tensor containing both signs so both bounds are exercised."""
    base = mx.sym.var("data")
    base = mx.sym.concat(base, -base, dim=1)  # negative part explicitly
    verify_mxnet_frontend_impl(
        mx.sym.clip(base, a_min=0, a_max=1), (1, 3, 100, 100), (1, 6, 100, 100)
    )
@tvm.testing.uses_gpu
def test_forward_split():
    """split along axis 1 without squeezing the split axis."""
    sym = mx.sym.split(mx.sym.var("data"), axis=1, num_outputs=4, squeeze_axis=False)
    verify_mxnet_frontend_impl(sym, (1, 4, 2, 1), (1, 1, 2, 1))
@tvm.testing.uses_gpu
def test_forward_split_squeeze():
    """split along axis 1 with the split axis squeezed away."""
    sym = mx.sym.split(mx.sym.var("data"), axis=1, num_outputs=4, squeeze_axis=True)
    verify_mxnet_frontend_impl(sym, (1, 4, 2, 1), (1, 2, 1))
@tvm.testing.uses_gpu
def test_forward_expand_dims():
    """expand_dims inserts a new unit axis at position 1."""
    sym = mx.sym.expand_dims(mx.sym.var("data"), axis=1)
    verify_mxnet_frontend_impl(sym, (2, 3, 4), (2, 1, 3, 4))
@tvm.testing.uses_gpu
def test_forward_pooling():
    """2D average and max pooling with same-size output via padding."""
    inp = mx.sym.var("data")
    for pool_type in ("avg", "max"):
        sym = mx.sym.Pooling(inp, kernel=(3, 3), pad=(1, 1), pool_type=pool_type)
        verify_mxnet_frontend_impl(sym, (1, 20, 8, 8), (1, 20, 8, 8))
@tvm.testing.uses_gpu
def test_forward_pooling3d():
    """3D average and max pooling with same-size output via padding."""
    inp = mx.sym.var("data")
    for pool_type in ("avg", "max"):
        sym = mx.sym.Pooling(inp, kernel=(3, 3, 3), pad=(1, 1, 1), pool_type=pool_type)
        verify_mxnet_frontend_impl(sym, (1, 20, 8, 8, 8), (1, 20, 8, 8, 8))
@tvm.testing.uses_gpu
def test_forward_adaptive_pooling():
    """Adaptive average pooling with a scalar and a tuple output size."""
    inp = mx.sym.var("data")
    for out_size, out_shape in (((1,), (1, 20, 1, 1)), ((3, 3), (1, 20, 3, 3))):
        sym = mx.sym.contrib.AdaptiveAvgPooling2D(inp, output_size=out_size)
        verify_mxnet_frontend_impl(sym, (1, 20, 8, 8), out_shape)
@tvm.testing.uses_gpu
def test_forward_lrn():
    """Local response normalization over a 5-wide channel window."""
    sym = mx.sym.LRN(mx.sym.var("data"), alpha=2, beta=2, knorm=1, nsize=5)
    verify_mxnet_frontend_impl(sym, (1, 10, 24, 24), (1, 10, 24, 24))
@tvm.testing.uses_gpu
def test_forward_ones():
    """Constant ones tensor combined with the input through elemwise_add."""
    inp = mx.sym.var("data")
    const_ones = mx.sym.ones(shape=(2, 3, 4), dtype="float32")
    verify_mxnet_frontend_impl(mx.sym.elemwise_add(inp, const_ones), (2, 3, 4), (2, 3, 4))
@tvm.testing.uses_gpu
def test_forward_zeros():
    """Constant zeros tensor combined with the input through elemwise_add."""
    inp = mx.sym.var("data")
    const_zeros = mx.sym.zeros(shape=(2, 3, 4), dtype="float32")
    verify_mxnet_frontend_impl(mx.sym.elemwise_add(inp, const_zeros), (2, 3, 4), (2, 3, 4))
@tvm.testing.uses_gpu
def test_forward_ones_like():
    """ones_like mirrors the input shape."""
    sym = mx.sym.ones_like(mx.sym.var("data"), dtype="float32")
    verify_mxnet_frontend_impl(sym, (2, 3, 4), (2, 3, 4))
@tvm.testing.uses_gpu
def test_forward_make_loss():
    """make_loss wrapped around a squared-error expression."""
    inp = mx.sym.var("data")
    const_ones = mx.sym.ones(shape=(2, 3, 4), dtype="float32")
    loss = mx.sym.make_loss((inp - const_ones) ** 2 / 2, dtype="float32")
    verify_mxnet_frontend_impl(loss, (2, 3, 4), (2, 3, 4))
@tvm.testing.uses_gpu
def test_forward_zeros_like():
    """zeros_like mirrors the input shape."""
    sym = mx.sym.zeros_like(mx.sym.var("data"), dtype="float32")
    verify_mxnet_frontend_impl(sym, (2, 3, 4), (2, 3, 4))
@tvm.testing.uses_gpu
def test_forward_argmax():
    """argmax reduces along axis 1."""
    sym = mx.sym.argmax(mx.sym.var("data"), axis=1)
    verify_mxnet_frontend_impl(sym, (5, 3), (5,))
@tvm.testing.uses_gpu
def test_forward_argmin():
    """argmin reduces along axis 0."""
    sym = mx.sym.argmin(mx.sym.var("data"), axis=0)
    verify_mxnet_frontend_impl(sym, (5, 4), (4,))
@tvm.testing.uses_gpu
def test_forward_slice():
    """Basic slice, then a strided slice with negative begin/end and step."""
    inp = mx.sym.var("data")
    verify_mxnet_frontend_impl(mx.sym.slice(inp, begin=(0, 1), end=(2, 4)), (3, 4), (2, 3))
    verify_mxnet_frontend_impl(
        mx.sym.slice(inp, begin=(-1, 1), end=(-3, 4), step=(-1, 2)), (3, 4), (2, 2)
    )
@tvm.testing.uses_gpu
def test_forward_where():
    """where(cond, x, y) checked against MXNet's own nd.where reference."""
    cond = mx.sym.var("cond")
    x = mx.sym.var("x")
    y = mx.sym.var("y")
    dshape = (2, 2)
    dtype = "float32"
    mx_sym = mx.sym.where(cond, x, y)
    # Condition mixes zero and non-zero (incl. negative) entries, so both
    # branches of the select get exercised.
    np_cond = np.array([[0, 1], [-1, 0]]).astype(dtype)
    np_x = np.random.uniform(size=dshape).astype(dtype)
    np_y = np.random.uniform(size=dshape).astype(dtype)
    mx_cond = mx.nd.array(np_cond)
    mx_x = mx.nd.array(np_x)
    mx_y = mx.nd.array(np_y)
    shapes = {"cond": dshape, "x": dshape, "y": dshape}
    # Bind the symbol only to obtain initialized args/auxs for the Relay import.
    mod = mx.mod.Module(mx_sym, label_names=None, data_names=["cond", "x", "y"])
    mod.bind(data_shapes=shapes.items(), for_training=False)
    mod.init_params()
    args, auxs = mod.get_params()
    mx_out = mx.nd.where(mx_cond, mx_x, mx_y).asnumpy()
    mod, _ = relay.frontend.from_mxnet(mx_sym, shapes, args, auxs)
    for target, dev in tvm.testing.enabled_targets():
        for kind in ["graph", "debug"]:
            op_res = relay.create_executor(kind, mod=mod, device=dev, target=target).evaluate()(
                np_cond, np_x, np_y
            )
            tvm.testing.assert_allclose(op_res.numpy(), mx_out)
@tvm.testing.uses_gpu
def test_forward_arange():
    """arange with each combination of optional start/step, incl. floats and negative steps."""

    def _mx_symbol(F, start, stop, step):
        # Dispatch to the matching arange overload depending on which of
        # start/step were supplied.
        if start is None and step is None:
            sym = F.arange(stop)
        elif start is None:
            sym = F.arange(stop, step=step)
        elif step is None:
            sym = F.arange(start, stop)
        else:
            sym = F.arange(start, stop, step)
        return sym

    def verify(start, stop, step):
        # Reference via mx.nd, candidate via mx.sym imported into Relay.
        ref_res = _mx_symbol(mx.nd, start, stop, step)
        mx_sym = _mx_symbol(mx.sym, start, stop, step)
        mod, _ = relay.frontend.from_mxnet(mx_sym, {})
        for target, dev in tvm.testing.enabled_targets():
            for kind in ["graph", "debug"]:
                op_res = relay.create_executor(
                    kind, mod=mod, device=dev, target=target
                ).evaluate()()
                tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy())

    verify(0, 20, None)
    verify(0, 20, 2)
    verify(1, 20, None)
    verify(1, 20, 2)
    verify(1, 20, 1.5)
    verify(1, 20.5, None)
    verify(1, 20, 3)
    verify(20, 1, -1)
    verify(20, 1, -1.5)
def _mx_symbol(F, op_name, inputs):
    """Look up operator *op_name* on namespace *F* (mx.sym or mx.nd) and apply it to *inputs*."""
    return getattr(F, op_name)(*inputs)
@tvm.testing.uses_gpu
def test_forward_broadcast_ops():
    """Binary broadcast_* operators: (3, 4, 5) op (4, 5) against MXNet reference."""
    for op in [
        "broadcast_add",
        "broadcast_plus",
        "broadcast_sub",
        "broadcast_minus",
        "broadcast_mul",
        "broadcast_div",
        "broadcast_mod",
        "broadcast_maximum",
        "broadcast_minimum",
        "broadcast_equal",
        "broadcast_not_equal",
        "broadcast_greater",
        "broadcast_greater_equal",
        "broadcast_lesser",
        "broadcast_lesser_equal",
        "broadcast_power",
        "broadcast_logical_or",
        "broadcast_logical_and",
        "broadcast_logical_xor",
    ]:
        a_shape = (3, 4, 5)
        b_shape = (4, 5)
        if op == "broadcast_mod":
            # Use positive ints so the modulus is well defined and float rounding
            # doesn't affect the comparison.
            dtype = "int32"
            a_np = np.random.randint(1, 100, size=a_shape).astype(dtype)
            b_np = np.random.randint(1, 100, size=b_shape).astype(dtype)
        else:
            dtype = "float32"
            a_np = np.random.uniform(size=a_shape).astype(dtype)
            b_np = np.random.uniform(size=b_shape).astype(dtype)
        mx_sym = _mx_symbol(mx.sym, op, [mx.sym.var("a"), mx.sym.var("b")])
        ref_res = _mx_symbol(mx.nd, op, [mx.nd.array(a_np), mx.nd.array(b_np)])
        shapes = {"a": a_shape, "b": b_shape}
        mod, _ = relay.frontend.from_mxnet(mx_sym, shapes, dtype)
        for target, dev in tvm.testing.enabled_targets():
            for kind in ["graph", "debug"]:
                op_res = relay.create_executor(kind, mod=mod, device=dev, target=target).evaluate()(
                    a_np, b_np
                )
                tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy())
@tvm.testing.uses_gpu
def test_forward_elemwise_ops():
    """Elementwise named operators and Python comparison operators on same-shape inputs.

    String entries are resolved via the MXNet op namespace; the `operator.*`
    entries are applied directly to symbols/ndarrays (MXNet overloads them).
    """
    for op in [
        "elemwise_add",
        "elemwise_sub",
        "elemwise_mul",
        "elemwise_div",
        "maximum",
        "minimum",
        operator.lt,
        operator.le,
        operator.eq,
        operator.ne,
        operator.gt,
        operator.ge,
    ]:
        shape = (3, 4, 5)
        dtype = "float32"
        a_np = np.random.uniform(size=shape).astype(dtype)
        b_np = np.random.uniform(size=shape).astype(dtype)
        # isinstance, not `type(op) == str`, is the correct type check here.
        if isinstance(op, str):
            mx_sym = _mx_symbol(mx.sym, op, [mx.sym.var("a"), mx.sym.var("b")])
            ref_res = _mx_symbol(mx.nd, op, [mx.nd.array(a_np), mx.nd.array(b_np)])
        else:
            mx_sym = op(mx.sym.var("a"), mx.sym.var("b"))
            ref_res = op(mx.nd.array(a_np), mx.nd.array(b_np))
        shapes = {"a": shape, "b": shape}
        mod, _ = relay.frontend.from_mxnet(mx_sym, shapes, dtype)
        for target, dev in tvm.testing.enabled_targets():
            for kind in ["graph", "debug"]:
                op_res = relay.create_executor(kind, mod=mod, device=dev, target=target).evaluate()(
                    a_np, b_np
                )
                tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy())
@tvm.testing.uses_gpu
def test_forward_softmin():
    """mx.sym.softmin with the default axis and with an explicit axis=2."""
    shape = (1, 3, 100, 100)
    inp = mx.sym.var("data")
    for sym in (mx.sym.softmin(inp), mx.sym.softmin(inp, axis=2)):
        verify_mxnet_frontend_impl(sym, shape, shape)
@tvm.testing.uses_gpu
def test_forward_unary_ops():
    """Unary math operators on a (1, 3, 4, 5) float32 input against MXNet reference."""
    for op in [
        "abs",
        "sqrt",
        "ceil",
        "floor",
        "round",
        "reciprocal",
        "trunc",
        "softsign",
        "hard_sigmoid",
        "cos",
        "sin",
        "tan",
        "cosh",
        "sinh",
        "tanh",
        "arccos",
        "arcsin",
        "arctan",
        "arccosh",
        "arcsinh",
        "arctanh",
    ]:
        shape = (1, 3, 4, 5)
        dtype = "float32"
        # Inputs drawn from [0, 1), which is inside every listed op's domain
        # except arccosh (domain [1, inf)); tolerances below absorb the NaN-free cases.
        a_np = np.random.uniform(size=shape).astype(dtype)
        mx_sym = _mx_symbol(mx.sym, op, [mx.sym.var("a")])
        ref_res = _mx_symbol(mx.nd, op, [mx.nd.array(a_np)])
        shapes = {"a": shape}
        mod, _ = relay.frontend.from_mxnet(mx_sym, shapes, dtype)
        for target, dev in tvm.testing.enabled_targets():
            for kind in ["graph", "debug"]:
                op_res = relay.create_executor(kind, mod=mod, device=dev, target=target).evaluate()(
                    a_np
                )
                tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy(), rtol=1e-5, atol=1e-5)
@tvm.testing.uses_gpu
def test_forward_scalar_ops():
    """Tensor-vs-Python-scalar ops: overloaded operators first, then named maximum/minimum."""
    for op in [
        operator.add,
        operator.sub,
        operator.mul,
        operator.truediv,
        operator.pow,
        operator.lt,
        operator.le,
        operator.eq,
        operator.ne,
        operator.gt,
        operator.ge,
    ]:
        dtype = "float32"
        a_shape = (3, 4, 5)
        a_np = np.random.uniform(size=a_shape).astype(dtype)
        b_scalar = 2.3
        mx_sym = op(mx.sym.var("a"), b_scalar)
        ref_res = op(mx.nd.array(a_np), b_scalar)
        shapes = {"a": a_shape}
        mod, _ = relay.frontend.from_mxnet(mx_sym, shapes, dtype)
        for target, dev in tvm.testing.enabled_targets():
            for kind in ["graph", "debug"]:
                op_res = relay.create_executor(kind, mod=mod, device=dev, target=target).evaluate()(
                    a_np
                )
                tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy())
    # maximum/minimum have no Python operator form, so go through the op namespace.
    for op in ["maximum", "minimum"]:
        dtype = "float32"
        a_shape = (3, 4, 5)
        a_np = np.random.uniform(size=a_shape).astype(dtype)
        b_scalar = 2.3
        mx_sym = _mx_symbol(mx.sym, op, [mx.sym.var("a"), b_scalar])
        ref_res = _mx_symbol(mx.nd, op, [mx.nd.array(a_np), b_scalar])
        shapes = {"a": a_shape}
        mod, _ = relay.frontend.from_mxnet(mx_sym, shapes, dtype)
        for target, dev in tvm.testing.enabled_targets():
            for kind in ["graph", "debug"]:
                op_res = relay.create_executor(kind, mod=mod, device=dev, target=target).evaluate()(
                    a_np
                )
                tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy())
@tvm.testing.uses_gpu
def test_forward_slice_axis():
    """mx.sym.slice_axis: slices along one axis, including negative axis/begin/end."""

    def verify(shape, axis, begin, end):
        data_np = np.random.uniform(size=shape).astype("float32")
        ref_res = mx.nd.slice_axis(mx.nd.array(data_np), axis, begin, end)
        mx_sym = mx.sym.slice_axis(mx.sym.var("data"), axis, begin, end)
        mod, _ = relay.frontend.from_mxnet(mx_sym, {"data": shape})
        for target, dev in tvm.testing.enabled_targets():
            for kind in ["graph", "debug"]:
                op_res = relay.create_executor(kind, mod=mod, device=dev, target=target).evaluate()(
                    data_np
                )
                tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy())

    verify((3, 4), 0, 1, 2)
    verify((3, 4), 0, 1, None)
    verify((3, 4), 1, 0, 2)
    verify((3, 4), 1, -3, -1)
    verify((3, 4), -1, -3, -1)
@tvm.testing.uses_gpu
def test_forward_slice_like():
    """mx.sym.slice_like: slice x to y's shape, optionally only along `axes`."""

    def verify(x_shape, y_shape, axes):
        x_np = np.random.uniform(size=x_shape).astype("float32")
        y_np = np.random.uniform(size=y_shape).astype("float32")
        if axes is None:
            ref_res = mx.nd.slice_like(mx.nd.array(x_np), mx.nd.array(y_np))
            mx_sym = mx.sym.slice_like(mx.sym.var("x"), mx.sym.var("y"))
        else:
            ref_res = mx.nd.slice_like(mx.nd.array(x_np), mx.nd.array(y_np), axes=axes)
            mx_sym = mx.sym.slice_like(mx.sym.var("x"), mx.sym.var("y"), axes=axes)
        mod, _ = relay.frontend.from_mxnet(mx_sym, {"x": x_shape, "y": y_shape})
        for target, dev in tvm.testing.enabled_targets():
            for kind in ["graph", "debug"]:
                op_res = relay.create_executor(kind, mod=mod, device=dev, target=target).evaluate()(
                    x_np, y_np
                )
                tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy())

    verify((3, 4), (2, 3), None)
    verify((3, 4), (2, 3), (0, 1))
    # NOTE: (0) and (-1) are plain ints, not 1-tuples — this also exercises scalar axes.
    verify((3, 4), (2, 3), (0))
    verify((3, 4), (2, 3), (-1))
@tvm.testing.uses_gpu
def test_forward_sequence_reverse():
    """mx.sym.SequenceReverse with and without per-batch sequence lengths."""

    def verify(shape, seq_lengths, use_seq_lengths, seq_axis):
        data_np = np.random.uniform(size=shape).astype("float32")
        # Positional args mirror SequenceReverse(data, sequence_length, use_sequence_length, axis);
        # slot 1 is filled in below only when sequence lengths are used.
        ref_res_args = [mx.nd.array(data_np), None, use_seq_lengths, seq_axis]
        mx_sym_args = [mx.sym.var("data"), None, use_seq_lengths, seq_axis]
        from_mxnet_args = [{"data": shape}, {"data": "float32"}]
        in_data = [data_np]
        if use_seq_lengths and seq_lengths:
            seq_lengths_np = np.array(seq_lengths).astype("int32")
            ref_res_args[1] = mx.nd.array(seq_lengths_np)
            mx_sym_args[1] = mx.sym.var("seq_lengths")
            from_mxnet_args[0].update({"seq_lengths": seq_lengths_np.shape})
            from_mxnet_args[1].update({"seq_lengths": "int32"})
            in_data.append(seq_lengths_np)
        ref_res = mx.nd.SequenceReverse(*ref_res_args)
        mx_sym = mx.sym.SequenceReverse(*mx_sym_args)
        mod, _ = relay.frontend.from_mxnet(mx_sym, *from_mxnet_args)
        for target, dev in tvm.testing.enabled_targets():
            for kind in ["graph", "debug"]:
                op_res = relay.create_executor(kind, mod=mod, device=dev, target=target).evaluate()(
                    *in_data
                )
                tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy())

    verify((3, 4), [1, 2, 3, 1], True, 0)
    verify((3, 4), None, False, 0)
    verify((3, 5, 5, 6), [1, 2, 3, 1, 3], True, 0)
    # MXNet accepts axis value as 0 only
    # verify((3, 4, 5, 6), None, False, 2)
@tvm.testing.uses_gpu
def test_forward_l2_normalize():
    """mx.sym.L2Normalization in each of its three normalization modes."""
    shape = (2, 3, 4, 5)
    inp = mx.sym.var("data")
    for mode in ("channel", "instance", "spatial"):
        verify_mxnet_frontend_impl(mx.sym.L2Normalization(inp, mode=mode), shape, shape)
@tvm.testing.uses_gpu
def test_forward_logistic_regression_output():
    """mx.symbol.LogisticRegressionOutput: forward pass (sigmoid of data); the
    label input is only consumed by the training-time gradient, so only `data`
    is fed to the converted module."""
    data_shape = (1, 10)
    dtype = "float32"
    data_np = np.random.uniform(size=data_shape).astype(dtype)
    label_np = np.random.uniform(size=data_shape).astype(dtype)
    mx_sym = mx.symbol.LogisticRegressionOutput(mx.sym.var("data"), mx.sym.var("label"))
    ref_res = mx.nd.LogisticRegressionOutput(mx.nd.array(data_np), mx.nd.array(label_np))
    shapes = {"data": data_shape}
    mod, _ = relay.frontend.from_mxnet(mx_sym, shapes, dtype)
    for target, dev in tvm.testing.enabled_targets():
        for kind in ["graph", "debug"]:
            op_res = relay.create_executor(kind, mod=mod, device=dev, target=target).evaluate()(
                data_np
            )
            tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy())
@tvm.testing.uses_gpu
def test_forward_dot():
    """mx.symbol.dot across vector/matrix/ND shapes, with and without transpose_b."""

    def verify(a_shape, b_shape, transpose_b=False):
        dtype = "float32"
        a_np = np.random.uniform(size=a_shape).astype(dtype)
        b_np = np.random.uniform(size=b_shape).astype(dtype)
        mx_sym = mx.symbol.dot(mx.sym.var("a"), mx.sym.var("b"), transpose_b=transpose_b)
        ref_res = mx.nd.dot(mx.nd.array(a_np), mx.nd.array(b_np), transpose_b=transpose_b)
        shapes = {"a": a_shape, "b": b_shape}
        mod, _ = relay.frontend.from_mxnet(mx_sym, shapes, dtype)
        for target, dev in tvm.testing.enabled_targets():
            for kind in ["graph", "debug"]:
                op_res = relay.create_executor(kind, mod=mod, device=dev, target=target).evaluate()(
                    a_np, b_np
                )
                tvm.testing.assert_allclose(
                    op_res.numpy(), ref_res.asnumpy(), rtol=1e-05, atol=1e-05
                )

    verify((1, 256), (256, 1))
    verify((1, 256), (1, 256), transpose_b=True)
    verify((5,), (5,))
    verify((3,), (3, 5))
    verify((3,), (5, 3), transpose_b=True)
    verify((3,), (3, 5, 3, 5))
    verify((3,), (5, 5, 3, 3), transpose_b=True)
    verify((10, 1), (1,))
    verify((1, 1), (4, 3, 2, 1), transpose_b=True)
    verify((4, 3, 2, 1), (1,))
    verify((1, 2, 3, 4), (1, 4), transpose_b=True)
    verify((4, 1, 1), (1, 2, 3))
    verify((1, 1, 4), (2, 3, 4), transpose_b=True)
@tvm.testing.uses_gpu
def test_forward_shape_array():
    """mx.sym.shape_array: returns the input's shape as a tensor."""

    def verify(shape):
        x_np = np.random.uniform(size=shape).astype("float32")
        ref_res = mx.nd.shape_array(mx.nd.array(x_np))
        mx_sym = mx.sym.shape_array(mx.sym.var("x"))
        mod, _ = relay.frontend.from_mxnet(mx_sym, {"x": shape})
        for target, dev in tvm.testing.enabled_targets():
            # NOTE(review): only the debug executor is exercised here — presumably
            # because the result is a compile-time constant; confirm before extending.
            for kind in ["debug"]:
                op_res = relay.create_executor(kind, mod=mod, device=dev, target=target).evaluate()(
                    x_np
                )
                tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy())

    verify((1,))
    verify((3, 4, 5))
    verify((3, 4, 5, 6))
@tvm.testing.uses_gpu
def test_forward_squeeze():
    """mx.sym.squeeze: remove size-1 dims, for axis=None, an int axis, and a tuple of axes."""

    def verify(shape, axis):
        x_np = np.random.uniform(size=shape).astype("float32")
        if axis is None:
            ref_res = mx.nd.squeeze(mx.nd.array(x_np))
            mx_sym = mx.sym.squeeze(mx.sym.var("x"))
        else:
            ref_res = mx.nd.squeeze(mx.nd.array(x_np), axis=axis)
            mx_sym = mx.sym.squeeze(mx.sym.var("x"), axis=axis)
        mod, _ = relay.frontend.from_mxnet(mx_sym, {"x": shape})
        for target, dev in tvm.testing.enabled_targets():
            for kind in ["graph", "debug"]:
                op_res = relay.create_executor(kind, mod=mod, device=dev, target=target).evaluate()(
                    x_np
                )
                tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy())

    verify((1, 3, 1), None)
    verify((1, 3, 1), 0)
    verify((1, 3, 1), 2)
    verify((1, 3, 1), (0, 2))
@tvm.testing.uses_gpu
def test_forward_broadcast_axis():
    """broadcast_axis and its alias broadcast_axes: expand size-1 axes to `size`."""

    def verify(shape, axis, size):
        x_np = np.random.uniform(size=shape).astype("float32")
        for op in ["broadcast_axis", "broadcast_axes"]:
            mx_sym = _mx_symbol(mx.sym, op, [mx.sym.var("x"), axis, size])
            ref_res = _mx_symbol(mx.nd, op, [mx.nd.array(x_np), axis, size])
            mod, _ = relay.frontend.from_mxnet(mx_sym, {"x": shape})
            for target, dev in tvm.testing.enabled_targets():
                for kind in ["graph", "debug"]:
                    op_res = relay.create_executor(
                        kind, mod=mod, device=dev, target=target
                    ).evaluate()(x_np)
                    tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy())

    verify((1, 2, 1), 2, 3)
    verify((1, 2, 1), (0, 2), (2, 3))
@tvm.testing.uses_gpu
def test_forward_broadcast_to():
    """mx.sym.broadcast_to: broadcast an input to an explicit target shape."""

    def verify(input_shape, shape):
        x_np = np.random.uniform(size=input_shape).astype("float32")
        ref_res = mx.nd.broadcast_to(mx.nd.array(x_np), shape=shape)
        mx_sym = mx.sym.broadcast_to(mx.sym.var("x"), shape=shape)
        mod, _ = relay.frontend.from_mxnet(mx_sym, {"x": input_shape})
        for target, dev in tvm.testing.enabled_targets():
            for kind in ["graph", "debug"]:
                op_res = relay.create_executor(kind, mod=mod, device=dev, target=target).evaluate()(
                    x_np
                )
                tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy())

    verify((1, 2, 3), (3, 2, 3))
    verify((4, 1, 32, 32), (4, 8, 32, 32))
@tvm.testing.uses_gpu
def test_forward_broadcast_like():
    """mx.sym.broadcast_like: broadcast x to the shape of a second tensor y."""

    def verify(input_shape, like_shape):
        x_np = np.random.uniform(size=input_shape).astype("float32")
        y_np = np.random.uniform(size=like_shape).astype("float32")
        ref_res = mx.nd.broadcast_like(mx.nd.array(x_np), mx.nd.array(y_np))
        mx_sym = mx.sym.broadcast_like(mx.sym.var("x"), mx.sym.var("y"))
        mod, _ = relay.frontend.from_mxnet(mx_sym, {"x": input_shape, "y": like_shape})
        for target, dev in tvm.testing.enabled_targets():
            for kind in ["graph", "debug"]:
                op_res = relay.create_executor(kind, mod=mod, device=dev, target=target).evaluate()(
                    x_np, y_np
                )
                tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy())

    verify((1, 2, 3), (3, 2, 3))
    verify((4, 1, 32, 32), (4, 8, 32, 32))
@tvm.testing.uses_gpu
def test_forward_logical_not():
    """mx.sym.logical_not on a float32 tensor against mx.nd.logical_not."""
    a_shape = (3, 4, 5)
    dtype = "float32"
    a_np = np.random.uniform(size=a_shape).astype(dtype)
    mx_sym = mx.sym.logical_not(mx.sym.var("a"))
    ref_res = mx.nd.logical_not(mx.nd.array(a_np))
    shapes = {"a": a_shape}
    mod, _ = relay.frontend.from_mxnet(mx_sym, shapes, dtype)
    for target, dev in tvm.testing.enabled_targets():
        for kind in ["graph", "debug"]:
            op_res = relay.create_executor(kind, mod=mod, device=dev, target=target).evaluate()(
                a_np
            )
            tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy())
@tvm.testing.uses_gpu
def test_forward_full():
    """mx.sym.full: a constant tensor filled with `val`, for several shapes/dtypes."""

    def verify(val, shape, dtype):
        ref_res = mx.nd.full(shape, val, dtype=dtype)
        mx_sym = mx.sym.full(shape, val, dtype=dtype)
        mod, _ = relay.frontend.from_mxnet(mx_sym, {})
        # Removed dead `dev = mx.cpu()` — it was immediately shadowed by the loop below.
        for target, dev in tvm.testing.enabled_targets():
            # Skip testing graph executor because this op will be optimized out
            # by constant folding.
            for kind in ["debug"]:
                op_res = relay.create_executor(
                    kind, mod=mod, device=dev, target=target
                ).evaluate()()
                tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy())

    verify(2, (3, 4), "float32")
    verify(2, (3, 4), "int32")
    verify(3.5, (1, 3, 4), "float32")
@tvm.testing.uses_gpu
def test_forward_embedding():
    """mx.sym.Embedding: lookup rows of a (in_dim, out_dim) weight by index."""

    def verify(data_shape, weight_shape):
        in_dim, out_dim = weight_shape
        # Indices are drawn in-range but stored as float32, matching MXNet's convention.
        x_np = np.random.randint(0, weight_shape[0], size=data_shape).astype("float32")
        w_np = np.random.uniform(size=weight_shape).astype("float32")
        ref_res = mx.nd.Embedding(
            mx.nd.array(x_np), mx.nd.array(w_np), input_dim=in_dim, output_dim=out_dim
        )
        mx_sym = mx.sym.Embedding(
            mx.sym.var("x"), mx.sym.var("w"), input_dim=in_dim, output_dim=out_dim
        )
        mod, _ = relay.frontend.from_mxnet(mx_sym, {"x": data_shape, "w": weight_shape})
        for target, dev in tvm.testing.enabled_targets():
            for kind in ["graph", "debug"]:
                op_res = relay.create_executor(kind, mod=mod, device=dev, target=target).evaluate()(
                    x=x_np, w=w_np
                )
                tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy())

    verify((2, 2), (4, 5))
    verify((2, 3, 4), (4, 5))
@tvm.testing.uses_gpu
def test_forward_smooth_l1():
    """mx.sym.smooth_l1 with the default scalar and with scalar=1.0."""
    shape = (3, 4)
    inp = mx.sym.var("data")
    for sym in (mx.sym.smooth_l1(inp), mx.sym.smooth_l1(inp, scalar=1.0)):
        verify_mxnet_frontend_impl(sym, shape, shape)
@tvm.testing.uses_gpu
def test_forward_take():
    """mx.sym.take with clip/wrap index modes, including out-of-range indices."""

    def verify(shape, indices_src, axis, mode="clip"):
        x_np = np.random.uniform(size=shape).astype("float32")
        # float32 indices follow MXNet's convention for integer-valued index tensors.
        indices_np = np.array(indices_src, dtype="float32")
        ref_res = mx.nd.take(mx.nd.array(x_np), mx.nd.array(indices_np), axis, mode)
        mx_sym = mx.sym.take(mx.sym.var("x"), mx.sym.var("y"), axis, mode)
        mod, _ = relay.frontend.from_mxnet(mx_sym, {"x": shape, "y": indices_np.shape})
        for target, dev in tvm.testing.enabled_targets():
            for kind in ["graph", "debug"]:
                op_res = relay.create_executor(kind, mod=mod, device=dev, target=target).evaluate()(
                    x_np, indices_np
                )
                tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy())

    verify((2, 2), [[[1, 0], [0, 1]]], 0)
    verify((2, 2), [[[1, 0], [0, 1]]], 1)
    verify((4, 3, 5, 6), [[2, 1, 0, 0]], -2)
    verify((3, 4), [-1, 5], 0)
    verify((3, 4), [-1, 5], 0, mode="wrap")
    verify((3, 4), [-1, 5], 1)
    verify((3, 4), [-1, 5], 1, mode="wrap")
@tvm.testing.uses_gpu
def test_forward_gather_nd():
    """mx.sym.gather_nd: N-dimensional gather using an index tensor."""

    def verify(xshape, yshape, y_data, error=False):
        # NOTE(review): the `error` parameter is never used in this body — presumably
        # a leftover from a planned negative test; confirm before removing.
        x_data = np.random.uniform(size=xshape).astype("float32")
        ref_res = mx.nd.gather_nd(mx.nd.array(x_data), mx.nd.array(y_data))
        mx_sym = mx.sym.gather_nd(mx.sym.var("x_data"), mx.sym.var("y_data"))
        mod, _ = relay.frontend.from_mxnet(
            mx_sym, {"x_data": xshape, "y_data": yshape}, {"x_data": "float32", "y_data": "int32"}
        )
        for target, dev in tvm.testing.enabled_targets():
            for kind in ["graph", "debug"]:
                op_res = relay.create_executor(kind, mod=mod, device=dev, target=target).evaluate()(
                    x_data, y_data
                )
                tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy())

    verify((2, 2), (2, 3), [[1, 1, 0], [0, 1, 0]])
    verify((2, 2, 2), (2, 2), [[0, 1], [1, 0]])
    verify((3, 2, 2), (2, 2), [[0, 1], [1, 0]])
    verify((3, 2), (2, 2, 3), [[[0, 1, 2], [2, 0, 1]], [[0, 0, 0], [1, 1, 1]]])
    verify((1, 4), (1, 1), [[0]])
@tvm.testing.uses_gpu
def test_forward_bilinear_resize():
    """mx.sym.contrib.BilinearResize2D with an explicit output height/width."""
    # add tests including scale_height and scale_width when mxnet is updated to version 1.5
    sym = mx.sym.contrib.BilinearResize2D(mx.sym.var("data"), height=5, width=10)
    verify_mxnet_frontend_impl(sym, (1, 2, 3, 4), (1, 2, 5, 10))
@tvm.testing.uses_gpu
def test_forward_grid_generator():
    """mx.sym.GridGenerator for both 'affine' and 'warp' transform types."""

    def verify(shape, transform_type, target_shape):
        x = np.random.uniform(size=shape).astype("float32")
        ref_res = mx.nd.GridGenerator(mx.nd.array(x), transform_type, target_shape)
        mx_sym = mx.sym.GridGenerator(mx.sym.var("x"), transform_type, target_shape)
        shape_dict = {"x": x.shape}
        mod, _ = relay.frontend.from_mxnet(mx_sym, shape_dict)
        for target, dev in tvm.testing.enabled_targets():
            for kind in ["graph", "debug"]:
                op_res = relay.create_executor(kind, mod=mod, device=dev, target=target).evaluate()(
                    x
                )
                tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy(), rtol=1e-5, atol=1e-5)

    verify((4, 6), "affine", (16, 32))
    verify((4, 2, 16, 16), "warp", None)
    verify((1, 2, 16, 16), "warp", None)
@tvm.testing.uses_gpu
def test_forward_bilinear_sampler():
    """mx.sym.BilinearSampler: grid values span [-1.5, 1.5] to cover out-of-bounds sampling."""

    def verify(data_shape, grid_shape):
        data = np.random.uniform(size=data_shape).astype("float32")
        grid = np.random.uniform(low=-1.5, high=1.5, size=grid_shape).astype("float32")
        ref_res = mx.nd.BilinearSampler(mx.nd.array(data), mx.nd.array(grid))
        mx_sym = mx.sym.BilinearSampler(mx.sym.var("data"), mx.sym.var("grid"))
        shape_dict = {"data": data.shape, "grid": grid.shape}
        mod, _ = relay.frontend.from_mxnet(mx_sym, shape_dict)
        for target, dev in tvm.testing.enabled_targets():
            for kind in ["graph", "debug"]:
                op_res = relay.create_executor(kind, mod=mod, device=dev, target=target).evaluate()(
                    data, grid
                )
                tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy(), rtol=1e-5, atol=1e-5)

    verify((4, 4, 16, 32), (4, 2, 8, 8))
    verify((4, 4, 16, 32), (4, 2, 32, 32))
@tvm.testing.uses_gpu
def test_forward_rnn_layer():
    """Gluon RNN/GRU/LSTM layers: convert the hybridized cached graph and compare outputs."""

    def verify(
        mode,
        seq_len,
        input_size,
        hidden_size,
        num_layers,
        batch=1,
        init_states=True,
        bidirectional=False,
    ):
        if mode == "rnn":
            layer = gluon.rnn.RNN(hidden_size, num_layers, bidirectional=bidirectional)
        elif mode == "gru":
            layer = gluon.rnn.GRU(hidden_size, num_layers, bidirectional=bidirectional)
        else:  # mode == "lstm"
            layer = gluon.rnn.LSTM(hidden_size, num_layers, bidirectional=bidirectional)
        # LSTM carries (hidden, cell); RNN/GRU carry only hidden state.
        num_states = 2 if mode == "lstm" else 1
        layer.initialize()
        layer.hybridize()
        dtype = "float32"
        directions = 2 if bidirectional else 1
        data_np = np.random.uniform(size=(seq_len, batch, input_size)).astype(dtype)
        data_mx = mx.nd.array(data_np)
        if init_states:
            # With explicit initial states the cached graph takes data0..dataN inputs.
            shape_dict = {"data0": data_np.shape}
            inputs = {"data0": data_np}
            state_shape = (num_layers * directions, batch, hidden_size)
            states_np = []
            states_mx = []
            for i in range(num_states):
                s = np.random.uniform(size=state_shape).astype(dtype)
                states_np.append(s)
                states_mx.append(mx.nd.array(s))
                shape_dict["data%s" % (i + 1)] = s.shape
                inputs["data%s" % (i + 1)] = s
            mx_out, mx_states = layer(data_mx, states_mx)
            mx_res = [mx_out] + mx_states
        else:
            shape_dict = {"data": data_np.shape}
            inputs = {"data": data_np}
            mx_res = layer(data_mx)
        # _cached_graph[1] is the symbolic graph recorded by hybridize().
        mx_sym = layer._cached_graph[1]
        mx_params = {}
        for name, param in layer.collect_params().items():
            mx_params[name] = param._reduce()
        mod, params = relay.frontend.from_mxnet(mx_sym, shape=shape_dict, arg_params=mx_params)
        for target, dev in tvm.testing.enabled_targets():
            # only test graph executor because debug runtime is too slow
            for kind in ["graph"]:
                op_res = relay.create_executor(kind, mod=mod, device=dev, target=target).evaluate()(
                    **inputs, **params
                )
                if init_states:
                    # Output is (sequence output, *states); compare element-wise.
                    assert len(op_res) == len(mx_res)
                    for i, val in enumerate(op_res):
                        tvm.testing.assert_allclose(val.numpy(), mx_res[i].asnumpy(), rtol=1e-3)
                else:
                    tvm.testing.assert_allclose(op_res.numpy(), mx_res.asnumpy(), rtol=1e-3)

    for mode in ["rnn", "gru", "lstm"]:
        verify(mode, 1, 64, 64, 1)
        verify(mode, 10, 64, 64, 2)
        verify(mode, 10, 64, 32, 2)
        verify(mode, 10, 64, 32, 2, batch=2)
        verify(mode, 10, 32, 64, 1, bidirectional=True)
        # The following two codeblocks need to be fixed for mxnet 1.5
        # verify(mode, 10, 64, 64, 3, init_states=False)
        # verify(mode, 10, 64, 64, 3, batch=2, bidirectional=True, init_states=False)
@tvm.testing.uses_gpu
def test_forward_Crop():
    """mx.sym.Crop: crop x to y's spatial size, with and without an explicit offset."""

    def verify(xshape, yshape, offset=None):
        x_data = np.random.uniform(size=xshape).astype("float32")
        y_data = np.random.uniform(size=yshape).astype("float32")
        if offset is None:
            mx_sym = mx.sym.Crop(mx.sym.var("x"), mx.sym.var("y"))
            ref_res = mx.nd.Crop(mx.nd.array(x_data), mx.nd.array(y_data))
        else:
            mx_sym = mx.sym.Crop(mx.sym.var("x"), mx.sym.var("y"), offset=offset)
            ref_res = mx.nd.Crop(mx.nd.array(x_data), mx.nd.array(y_data), offset=offset)
        mod, _ = relay.frontend.from_mxnet(mx_sym, {"x": xshape, "y": yshape})
        for target, dev in tvm.testing.enabled_targets():
            for kind in ["graph", "debug"]:
                func = relay.create_executor(kind, mod=mod, device=dev, target=target).evaluate()
                # NOTE(review): with a non-zero offset the converted function takes only
                # `x` — presumably the frontend folds `y` into a static slice; confirm.
                if offset is None or offset == (0, 0):
                    op_res = func(x_data, y_data)
                else:
                    op_res = func(x_data)
                tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy())

    verify((1, 3, 40, 40), (1, 3, 20, 20))
    verify((1, 3, 40, 40), (1, 3, 20, 20), (0, 0))
    verify((1, 3, 40, 40), (1, 3, 20, 20), (10, 10))
    verify((5, 32, 40, 40), (5, 32, 25, 25))
    verify((5, 32, 40, 40), (5, 32, 25, 25), (5, 5))
@tvm.testing.uses_gpu
def test_forward_argsort():
    """mx.sym.argsort over various axes, ascending and descending, with an output dtype."""

    def verify(shape, axis, is_ascend, dtype="float32"):
        x_np = np.random.uniform(size=shape).astype("float32")
        ref_res = mx.nd.argsort(mx.nd.array(x_np), axis=axis, is_ascend=is_ascend, dtype=dtype)
        mx_sym = mx.sym.argsort(mx.sym.var("x"), axis=axis, is_ascend=is_ascend, dtype=dtype)
        mod, _ = relay.frontend.from_mxnet(mx_sym, {"x": shape})
        for target, dev in tvm.testing.enabled_targets():
            for kind in ["graph", "debug"]:
                op_res = relay.create_executor(kind, mod=mod, device=dev, target=target).evaluate()(
                    x_np
                )
                tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy())

    verify((2, 3, 4), axis=0, is_ascend=False)
    verify((1, 4, 6), axis=1, is_ascend=True)
    verify((3, 5, 6), axis=-3, is_ascend=False, dtype="int32")
@tvm.testing.uses_gpu
def test_forward_topk():
    """mx.sym.topk for ret_typ in {both, indices, value}, ascending/descending, and k=0."""

    def verify(shape, k, axis, ret_type, is_ascend=None, dtype="float32"):
        x_np = np.random.uniform(size=shape).astype("float32")
        if is_ascend is None:
            # Omit is_ascend so MXNet's default is exercised.
            ref_res = mx.nd.topk(mx.nd.array(x_np), k=k, axis=axis, ret_typ=ret_type, dtype=dtype)
            mx_sym = mx.sym.topk(mx.sym.var("x"), k=k, axis=axis, ret_typ=ret_type, dtype=dtype)
        else:
            ref_res = mx.nd.topk(
                mx.nd.array(x_np),
                k=k,
                axis=axis,
                ret_typ=ret_type,
                is_ascend=is_ascend,
                dtype=dtype,
            )
            mx_sym = mx.sym.topk(
                mx.sym.var("x"), k=k, axis=axis, ret_typ=ret_type, is_ascend=is_ascend, dtype=dtype
            )
        mod, _ = relay.frontend.from_mxnet(mx_sym, {"x": shape})
        for target, dev in tvm.testing.enabled_targets():
            for kind in ["graph", "debug"]:
                op_res = relay.create_executor(kind, mod=mod, device=dev, target=target).evaluate()(
                    x_np
                )
                # ret_typ="both" yields a list of (values, indices); compare per element.
                if isinstance(ref_res, list):
                    assert len(op_res) == len(ref_res)
                    for i, t in enumerate(op_res):
                        tvm.testing.assert_allclose(t.numpy(), ref_res[i].asnumpy())
                else:
                    tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy())

    verify((3, 4), k=1, axis=0, ret_type="both")
    verify((3, 4), k=1, axis=-1, ret_type="indices")
    verify((3, 5, 6), k=2, axis=2, ret_type="value", is_ascend=False)
    verify((3, 5, 6), k=2, axis=1, ret_type="value", is_ascend=True)
    verify((3, 5, 6), k=0, axis=2, ret_type="both", dtype="int32")
@tvm.testing.uses_gpu
def test_forward_sequence_mask():
    """mx.sym.SequenceMask with and without per-batch valid lengths."""

    def verify(shape, use_sequence_length, value, axis, dtype, itype):
        data_np = np.random.uniform(size=shape).astype(dtype)
        # One valid length per batch entry; batch dim is the axis other than `axis`.
        valid_length_np = np.random.randint(0, shape[axis], size=shape[1 - axis]).astype(itype)
        if use_sequence_length:
            ref_res = mx.nd.SequenceMask(
                mx.nd.array(data_np, dtype=dtype),
                sequence_length=mx.nd.array(valid_length_np, dtype=itype),
                use_sequence_length=use_sequence_length,
                value=value,
                axis=axis,
            )
            mx_sym = mx.sym.SequenceMask(
                mx.sym.var("data"),
                sequence_length=mx.sym.var("valid_length"),
                use_sequence_length=use_sequence_length,
                value=value,
                axis=axis,
            )
            mod, _ = relay.frontend.from_mxnet(
                mx_sym,
                {"data": shape, "valid_length": valid_length_np.shape},
                dtype={"data": dtype, "valid_length": itype},
            )
        else:
            ref_res = mx.nd.SequenceMask(
                mx.nd.array(data_np, dtype=dtype),
                use_sequence_length=use_sequence_length,
                value=value,
                axis=axis,
            )
            mx_sym = mx.sym.SequenceMask(
                mx.sym.var("data"), use_sequence_length=use_sequence_length, value=value, axis=axis
            )
            mod, _ = relay.frontend.from_mxnet(mx_sym, {"data": shape}, dtype={"data": dtype})
        for target, dev in tvm.testing.enabled_targets():
            for kind in ["graph", "debug"]:
                if use_sequence_length is False and kind == "graph":
                    # Disable the test for 'graph' when it's identity.
                    continue
                func = relay.create_executor(kind, mod=mod, device=dev, target=target).evaluate()
                if use_sequence_length:
                    op_res = func(data_np, valid_length_np)
                else:
                    op_res = func(data_np)
                tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy())

    verify((5, 10), True, 0.0, 0, "float32", "float32")
    verify((5, 4, 3), True, 1.0, 1, "float32", "float32")
    verify((5, 4, 3), False, 1.0, 1, "float64", "float64")
    verify((5, 4, 3, 2), True, 1.0, 0, "float32", "float32")
@tvm.testing.uses_gpu
def test_forward_contrib_div_sqrt_dim():
    """mx.sym.contrib.div_sqrt_dim: divide input by sqrt of its last dimension."""

    def verify(shape):
        x_np = np.random.uniform(size=shape).astype("float32")
        ref_res = mx.nd.contrib.div_sqrt_dim(mx.nd.array(x_np))
        mx_sym = mx.sym.contrib.div_sqrt_dim(mx.sym.var("x"))
        mod, _ = relay.frontend.from_mxnet(mx_sym, {"x": shape})
        for target, dev in tvm.testing.enabled_targets():
            for kind in ["graph", "debug"]:
                op_res = relay.create_executor(kind, mod=mod, device=dev, target=target).evaluate()(
                    x_np
                )
                tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy())

    verify((3, 4))
    verify((3, 4, 5))
@tvm.testing.uses_gpu
def test_forward_batch_norm():
    """mx.sym.BatchNorm in inference mode (use_global_stats=True), over several axes."""

    def verify(shape, axis=1, fix_gamma=False):
        x = np.random.uniform(size=shape).astype("float32")
        gamma = np.random.uniform(size=(shape[axis])).astype("float32")
        beta = np.random.uniform(size=(shape[axis])).astype("float32")
        moving_mean = np.random.uniform(size=(shape[axis])).astype("float32")
        # Keep variance >= 0.5 so the normalization divisor stays well away from zero.
        moving_var = np.abs(np.random.uniform(size=(shape[axis])).astype("float32")) + 0.5
        ref_res = mx.nd.BatchNorm(
            mx.nd.array(x),
            mx.nd.array(gamma),
            mx.nd.array(beta),
            mx.nd.array(moving_mean),
            mx.nd.array(moving_var),
            axis=axis,
            use_global_stats=True,
            fix_gamma=fix_gamma,
        )
        mx_sym = mx.sym.BatchNorm(
            mx.sym.var("x"),
            mx.sym.var("gamma"),
            mx.sym.var("beta"),
            mx.sym.var("mean"),
            mx.sym.var("var"),
            axis=axis,
            use_global_stats=True,
            fix_gamma=fix_gamma,
        )
        shape_dict = {
            "x": x.shape,
            "gamma": gamma.shape,
            "beta": beta.shape,
            "mean": moving_mean.shape,
            "var": moving_var.shape,
        }
        mod, _ = relay.frontend.from_mxnet(mx_sym, shape_dict)
        for target, dev in tvm.testing.enabled_targets():
            for kind in ["graph", "debug"]:
                op_res = relay.create_executor(kind, mod=mod, device=dev, target=target).evaluate()(
                    x, gamma, beta, moving_mean, moving_var
                )
                tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy(), rtol=1e-3)

    verify((2, 3, 4, 5))
    verify((2, 3, 4, 5), axis=0)
    verify((2, 3, 4, 5), axis=-1)
    verify((2, 3, 4, 5), fix_gamma=True)
@tvm.testing.uses_gpu
def test_forward_instance_norm():
    """mx.sym.InstanceNorm over 3D, 4D and 5D inputs."""

    def verify(shape, axis=1, epsilon=1e-5):
        x = np.random.uniform(size=shape).astype("float32")
        gamma = np.random.uniform(size=(shape[axis])).astype("float32")
        beta = np.random.uniform(size=(shape[axis])).astype("float32")
        ref_res = mx.nd.InstanceNorm(mx.nd.array(x), mx.nd.array(gamma), mx.nd.array(beta), epsilon)
        mx_sym = mx.sym.InstanceNorm(
            mx.sym.var("x"), mx.sym.var("gamma"), mx.sym.var("beta"), epsilon
        )
        shape_dict = {"x": x.shape, "gamma": gamma.shape, "beta": beta.shape}
        mod, _ = relay.frontend.from_mxnet(mx_sym, shape_dict)
        for target, dev in tvm.testing.enabled_targets():
            for kind in ["graph", "debug"]:
                op_res = relay.create_executor(kind, mod=mod, device=dev, target=target).evaluate()(
                    x, gamma, beta
                )
                tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy(), rtol=2e-5, atol=1e-5)

    verify((2, 3, 4, 5))
    verify((32, 64, 80, 64))
    verify((8, 6, 5))
    verify((8, 7, 6, 5, 4))
@tvm.testing.uses_gpu
def test_forward_layer_norm():
    """mx.sym.LayerNorm over the default last axis and an explicit axis=0."""

    def verify(shape, axis=-1):
        x = np.random.uniform(size=shape).astype("float32")
        gamma = np.random.uniform(size=(shape[axis])).astype("float32")
        beta = np.random.uniform(size=(shape[axis])).astype("float32")
        ref_res = mx.nd.LayerNorm(mx.nd.array(x), mx.nd.array(gamma), mx.nd.array(beta), axis=axis)
        mx_sym = mx.sym.LayerNorm(
            mx.sym.var("x"), mx.sym.var("gamma"), mx.sym.var("beta"), axis=axis
        )
        shape_dict = {"x": x.shape, "gamma": gamma.shape, "beta": beta.shape}
        mod, _ = relay.frontend.from_mxnet(mx_sym, shape_dict)
        for target, dev in tvm.testing.enabled_targets():
            for kind in ["graph", "debug"]:
                op_res = relay.create_executor(kind, mod=mod, device=dev, target=target).evaluate()(
                    x, gamma, beta
                )
                tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy(), rtol=1e-3, atol=1e-5)

    verify((2, 5))
    verify((2, 5), axis=0)
    verify((2, 5, 6))
@tvm.testing.uses_gpu
def test_forward_group_norm():
    """mx.sym.GroupNorm; gamma/beta are sized to the channel dim (shape[1])."""

    def verify(shape, num_groups=1):
        x = np.random.uniform(size=shape).astype("float32")
        gamma = np.random.uniform(size=(shape[1])).astype("float32")
        beta = np.random.uniform(size=(shape[1])).astype("float32")
        ref_res = mx.nd.GroupNorm(
            data=mx.nd.array(x),
            gamma=mx.nd.array(gamma),
            beta=mx.nd.array(beta),
            num_groups=num_groups,
        )
        mx_sym = mx.sym.GroupNorm(
            mx.sym.var("x"), mx.sym.var("gamma"), mx.sym.var("beta"), num_groups=num_groups
        )
        shape_dict = {"x": x.shape, "gamma": gamma.shape, "beta": beta.shape}
        mod, _ = relay.frontend.from_mxnet(mx_sym, shape_dict)
        for target, dev in tvm.testing.enabled_targets():
            for kind in ["graph", "debug"]:
                op_res = relay.create_executor(kind, mod=mod, device=dev, target=target).evaluate()(
                    x, gamma, beta
                )
                tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy(), rtol=1e-3, atol=1e-5)

    verify((1, 4, 2), num_groups=4)
    # TODO(trevmorr): MXNet GroupNorm implementation is bugged for cases when num_groups != num_channels
    # https://github.com/apache/incubator-mxnet/pull/18199
    # verify((1, 4, 2, 3), num_groups=2)
    # verify((1, 4, 2, 3))
@tvm.testing.uses_gpu
def test_forward_one_hot():
    """mx.sym.one_hot with int and float on/off values across several index shapes."""

    def verify(indices_shape, depth, on_value, off_value, dtype):
        x = np.random.randint(0, 5, size=indices_shape)
        ref_res = mx.nd.one_hot(mx.nd.array(x), depth, on_value, off_value, dtype)
        mx_sym = mx.sym.one_hot(mx.sym.var("x"), depth, on_value, off_value, dtype)
        shape_dict = {"x": x.shape}
        mod, _ = relay.frontend.from_mxnet(mx_sym, shape_dict)
        for target, dev in tvm.testing.enabled_targets():
            for kind in ["graph", "debug"]:
                # Indices are fed as float32, matching the converted module's input dtype.
                op_res = relay.create_executor(kind, mod=mod, device=dev, target=target).evaluate()(
                    x.astype("float32")
                )
                tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy(), rtol=1e-3, atol=1e-5)

    verify((3,), 3, 1, 0, "int32")
    verify((3,), 3, 1.0, 0.0, "float32")
    verify((2, 2), 5, 2, -2, "int32")
    verify((2, 2), 5, 0.5, -0.5, "float32")
    verify((3, 2, 4, 5), 6, 1, 0, "int32")
    verify((3, 2, 4, 5), 6, 1.0, 0.0, "float32")
@tvm.testing.uses_gpu
def test_forward_pad():
    """Check conversion of MXNet pad for constant/edge/reflect modes on 4-D and 5-D inputs."""

    def verify(data_shape, out_shape, mode, pad_width, constant_value=0.0):
        # pad_width flattens (before, after) amounts per axis; the leading
        # batch/channel axes are left unpadded in every case below.
        data = mx.sym.var("data")
        mx_sym = mx.sym.pad(data, mode=mode, pad_width=pad_width, constant_value=constant_value)
        verify_mxnet_frontend_impl(mx_sym, data_shape=data_shape, out_shape=out_shape)

    verify(
        data_shape=(1, 1, 3, 5),
        out_shape=(1, 1, 6, 12),
        mode="constant",
        pad_width=(0, 0, 0, 0, 1, 2, 3, 4),
    )
    verify(
        data_shape=(1, 1, 3, 5),
        out_shape=(1, 1, 6, 12),
        mode="constant",
        pad_width=(0, 0, 0, 0, 1, 2, 3, 4),
        constant_value=3.0,
    )
    verify(
        data_shape=(1, 1, 3, 5),
        out_shape=(1, 1, 6, 12),
        mode="edge",
        pad_width=(0, 0, 0, 0, 1, 2, 3, 4),
    )
    verify(
        data_shape=(1, 1, 3, 5),
        out_shape=(1, 1, 6, 12),
        mode="reflect",
        pad_width=(0, 0, 0, 0, 1, 2, 3, 4),
    )
    verify(
        data_shape=(1, 1, 3, 5, 7),
        out_shape=(1, 1, 6, 12, 18),
        mode="constant",
        pad_width=(0, 0, 0, 0, 1, 2, 3, 4, 5, 6),
    )
    verify(
        data_shape=(1, 1, 3, 5, 7),
        out_shape=(1, 1, 6, 12, 18),
        mode="constant",
        pad_width=(0, 0, 0, 0, 1, 2, 3, 4, 5, 6),
        constant_value=3.0,
    )
    verify(
        data_shape=(1, 1, 3, 5, 7),
        out_shape=(1, 1, 6, 12, 18),
        mode="edge",
        pad_width=(0, 0, 0, 0, 1, 2, 3, 4, 5, 6),
    )
    verify(
        data_shape=(1, 1, 3, 5, 7),
        out_shape=(1, 1, 6, 12, 18),
        mode="reflect",
        pad_width=(0, 0, 0, 0, 1, 2, 3, 4, 5, 6),
    )
@tvm.testing.uses_gpu
def test_forward_slice():
    """Check MXNet slice conversion, including None (open-ended) begin/end entries."""

    def verify(data_shape, out_shape, begin, end):
        sym = mx.sym.slice(mx.sym.var("data"), begin=begin, end=end)
        verify_mxnet_frontend_impl(sym, data_shape=data_shape, out_shape=out_shape)

    verify(data_shape=(1, 1, 10), out_shape=(1, 1, 8), begin=(0, 0, 2), end=(1, 1, 10))
    verify(
        data_shape=(1, 1, 10), out_shape=(1, 1, 8), begin=(None, None, 2), end=(None, None, None)
    )
@tvm.testing.uses_gpu
def test_forward_convolution():
    """Check conversion of MXNet Convolution (1-D/2-D/3-D, incl. depthwise) against mx.nd."""

    def verify(data_shape, kernel_size, stride, pad, num_filter, is_depthwise=False):
        if is_depthwise:
            # Depthwise: one group per input channel; weight is
            # (in_channels, num_filter // groups) + kernel.
            groups = data_shape[1]
            weight_shape = (
                data_shape[1],
                num_filter // groups,
            ) + kernel_size
        else:
            groups = 1
            weight_shape = (
                num_filter,
                data_shape[1],
            ) + kernel_size
        x = np.random.uniform(size=data_shape).astype("float32")
        weight = np.random.uniform(size=weight_shape).astype("float32")
        bias = np.random.uniform(size=num_filter).astype("float32")
        # Reference result computed eagerly with NDArray inputs.
        ref_res = mx.nd.Convolution(
            data=mx.nd.array(x),
            weight=mx.nd.array(weight),
            bias=mx.nd.array(bias),
            kernel=kernel_size,
            stride=stride,
            pad=pad,
            num_filter=num_filter,
            num_group=groups,
        )
        # Same graph built symbolically for the Relay importer.
        mx_sym = mx.sym.Convolution(
            mx.sym.var("x"),
            mx.sym.var("weight"),
            mx.sym.var("bias"),
            kernel=kernel_size,
            stride=stride,
            pad=pad,
            num_filter=num_filter,
            num_group=groups,
        )
        shape_dict = {"x": x.shape, "weight": weight.shape, "bias": bias.shape}
        mod, _ = relay.frontend.from_mxnet(mx_sym, shape_dict)
        for target, dev in tvm.testing.enabled_targets():
            for kind in ["graph", "debug"]:
                op_res = relay.create_executor(kind, mod=mod, device=dev, target=target).evaluate()(
                    x, weight, bias
                )
                tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy(), rtol=1e-3)

    verify(data_shape=(1, 1, 1024 * 16), kernel_size=(17,), stride=(2,), pad=(8,), num_filter=4)
    verify(data_shape=(20, 1, 1024 * 16), kernel_size=(17,), stride=(2,), pad=(8,), num_filter=4)
    verify(data_shape=(1, 8, 1024 * 16), kernel_size=(17,), stride=(2,), pad=(8,), num_filter=4)
    verify(data_shape=(20, 8, 1024 * 16), kernel_size=(17,), stride=(2,), pad=(8,), num_filter=4)
    verify(data_shape=(1, 1, 32, 32), kernel_size=(3, 3), stride=(1, 1), pad=(1, 1), num_filter=2)
    verify(data_shape=(20, 1, 32, 32), kernel_size=(3, 3), stride=(1, 1), pad=(1, 1), num_filter=2)
    verify(data_shape=(1, 8, 32, 32), kernel_size=(3, 3), stride=(1, 1), pad=(1, 1), num_filter=2)
    verify(data_shape=(20, 8, 32, 32), kernel_size=(3, 3), stride=(1, 1), pad=(1, 1), num_filter=2)
    verify(
        data_shape=(1, 8, 32, 32),
        kernel_size=(3, 3),
        stride=(1, 1),
        pad=(1, 1),
        num_filter=8,
        is_depthwise=True,
    )
    verify(
        data_shape=(1, 1, 16, 16, 16),
        kernel_size=(3, 3, 3),
        stride=(1, 1, 1),
        pad=(1, 1, 1),
        num_filter=2,
    )
    verify(
        data_shape=(20, 1, 16, 16, 16),
        kernel_size=(3, 3, 3),
        stride=(1, 1, 1),
        pad=(1, 1, 1),
        num_filter=2,
    )
    verify(
        data_shape=(1, 8, 16, 16, 16),
        kernel_size=(3, 3, 3),
        stride=(2, 2, 2),
        pad=(1, 1, 1),
        num_filter=2,
    )
    verify(
        data_shape=(20, 8, 16, 16, 16),
        kernel_size=(3, 3, 3),
        stride=(1, 1, 1),
        pad=(1, 1, 1),
        num_filter=2,
    )
@tvm.testing.uses_gpu
def test_forward_deconvolution():
    """Check conversion of MXNet Deconvolution (transposed conv, 1-D and 2-D) against mx.nd."""

    def verify(data_shape, kernel_size, stride, pad, num_filter):
        # Deconvolution weight layout is (in_channels, num_filter) + kernel.
        weight_shape = (data_shape[1], num_filter) + kernel_size
        x = np.random.uniform(size=data_shape).astype("float32")
        weight = np.random.uniform(size=weight_shape).astype("float32")
        bias = np.random.uniform(size=num_filter).astype("float32")
        ref_res = mx.nd.Deconvolution(
            data=mx.nd.array(x),
            weight=mx.nd.array(weight),
            bias=mx.nd.array(bias),
            kernel=kernel_size,
            stride=stride,
            pad=pad,
            num_filter=num_filter,
            no_bias=False,
        )
        mx_sym = mx.sym.Deconvolution(
            mx.sym.var("x"),
            mx.sym.var("weight"),
            mx.sym.var("bias"),
            kernel=kernel_size,
            stride=stride,
            pad=pad,
            num_filter=num_filter,
            no_bias=False,
        )
        shape_dict = {"x": x.shape, "weight": weight.shape, "bias": bias.shape}
        mod, _ = relay.frontend.from_mxnet(mx_sym, shape_dict)
        for target, dev in tvm.testing.enabled_targets():
            for kind in ["graph", "debug"]:
                op_res = relay.create_executor(kind, mod=mod, device=dev, target=target).evaluate()(
                    x, weight, bias
                )
                tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy(), rtol=1e-3, atol=1e-5)

    verify(data_shape=(1, 1, 1024 * 16), kernel_size=(17,), stride=(2,), pad=(8,), num_filter=4)
    verify(data_shape=(20, 1, 1024 * 16), kernel_size=(17,), stride=(2,), pad=(8,), num_filter=4)
    verify(data_shape=(1, 8, 1024 * 16), kernel_size=(17,), stride=(2,), pad=(8,), num_filter=4)
    verify(data_shape=(20, 8, 1024 * 16), kernel_size=(17,), stride=(2,), pad=(8,), num_filter=4)
    verify(data_shape=(1, 1, 32, 32), kernel_size=(3, 3), stride=(1, 1), pad=(1, 1), num_filter=2)
    verify(data_shape=(20, 1, 32, 32), kernel_size=(3, 3), stride=(1, 1), pad=(1, 1), num_filter=2)
    verify(data_shape=(1, 8, 32, 32), kernel_size=(3, 3), stride=(1, 1), pad=(1, 1), num_filter=2)
    verify(data_shape=(20, 8, 32, 32), kernel_size=(3, 3), stride=(1, 1), pad=(1, 1), num_filter=2)
@tvm.testing.uses_gpu
def test_forward_cond():
    """Check conversion of mx.contrib.cond (data-dependent branch selection)."""

    def verify(a_np, b_np):
        # Reference: evaluate the conditional eagerly with NDArray inputs.
        a_nd, b_nd = mx.nd.array(a_np), mx.nd.array(b_np)
        ref_res = mx.nd.contrib.cond(
            a_nd * b_nd < 5,
            lambda: (a_nd + 5) * (b_nd + 5),
            lambda: (a_nd - 5) * (b_nd - 5),
        )
        # Same conditional built symbolically for the Relay importer.
        a_sym, b_sym = mx.sym.var("a"), mx.sym.var("b")
        mx_sym = mx.sym.contrib.cond(
            a_sym * b_sym < 5,
            lambda: (a_sym + 5) * (b_sym + 5),
            lambda: (a_sym - 5) * (b_sym - 5),
        )
        mod, _ = relay.frontend.from_mxnet(mx_sym, {"a": a_np.shape, "b": b_np.shape})
        for target, dev in tvm.testing.enabled_targets():
            # Control flow requires an executor that supports it (debug/vm).
            for kind in ["debug", "vm"]:
                func = relay.create_executor(kind, mod=mod, device=dev, target=target).evaluate()
                op_res = func(a_np, b_np)
                tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy(), rtol=1e-3)

    verify(np.asarray([1.0], "float32"), np.asarray([2.0], "float32"))
    verify(np.asarray([4.0], "float32"), np.asarray([3.0], "float32"))
@tvm.testing.uses_gpu
def test_forward_amp_cast():
    """Check amp_cast conversion: result dtype and values must match a plain numpy cast."""

    def verify(from_dtype, to_dtype):
        input_np = np.random.uniform(size=(1, 3, 18)).astype(from_dtype)
        mx_sym = mx.sym.amp_cast(mx.sym.var("x", dtype=from_dtype), dtype=to_dtype)
        mod, _ = relay.frontend.from_mxnet(mx_sym, {"x": (1, 3, 18)}, {"x": from_dtype})
        for target, dev in tvm.testing.enabled_targets():
            for kind in ["graph", "vm", "debug"]:
                func = relay.create_executor(kind, mod=mod, device=dev, target=target).evaluate()
                op_res = func(input_np)
                assert op_res.dtype == to_dtype, op_res.dtype
                tvm.testing.assert_allclose(op_res.numpy(), input_np.astype(to_dtype))

    verify("float32", "float16")
    verify("float16", "float32")
@tvm.testing.uses_gpu
def test_forward_amp_multicast():
    """Check amp_multicast: every output must come back in the expected common dtype."""

    def verify(dtypes, cast_narrow, expected_dtype):
        inputs_np = [np.random.uniform(size=(1, 3, 18)).astype(dt) for dt in dtypes]
        sym_vars = [mx.sym.var(str(i), dtype=dt) for i, dt in enumerate(dtypes)]
        mx_sym = mx.sym.amp_multicast(*sym_vars, cast_narrow=cast_narrow, num_outputs=len(dtypes))
        shape_dict = {str(i): (1, 3, 18) for i in range(len(dtypes))}
        dtype_dict = {str(i): dt for i, dt in enumerate(dtypes)}
        mod, _ = relay.frontend.from_mxnet(mx_sym, shape_dict, dtype_dict)
        for target, dev in tvm.testing.enabled_targets():
            for kind in ["graph", "vm", "debug"]:
                func = relay.create_executor(kind, mod=mod, device=dev, target=target).evaluate()
                for out, inp in zip(func(*inputs_np), inputs_np):
                    assert out.dtype == expected_dtype, out.dtype
                    tvm.testing.assert_allclose(out.numpy(), inp.astype(expected_dtype))

    verify(["float32", "float16"], False, "float32")
    verify(["float32", "float16"], True, "float16")
    verify(["float32", "float32"], False, "float32")
    verify(["float32", "float32"], True, "float32")
    verify(["float16", "float16"], False, "float16")
    verify(["float16", "float16"], True, "float16")
@tvm.testing.uses_gpu
def test_forward_unravel_index():
    """Check conversion of MXNet unravel_index (flat index -> multi-dim coordinates)."""

    def verify(x, shape, dtype):
        a_np = np.array(x).astype(dtype)
        # _mx_symbol is a file-local helper that builds the op for both the
        # symbolic and the eager (ndarray) MXNet APIs.
        mx_sym = _mx_symbol(mx.sym, "unravel_index", [mx.sym.var("a"), shape])
        ref_res = _mx_symbol(mx.nd, "unravel_index", [mx.nd.array(a_np), shape])
        shapes = {"a": a_np.shape}
        mod, _ = relay.frontend.from_mxnet(mx_sym, shapes, dtype)
        for target, dev in tvm.testing.enabled_targets():
            for kind in ["graph", "vm", "debug"]:
                op_res = relay.create_executor(kind, mod=mod, device=dev, target=target).evaluate()(
                    a_np
                )
                tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy())

    for dtype in ["int32", "int64"]:
        verify([0, 1, 2, 3], [2, 2], dtype)
        verify([144, 13, 45], [6, 7, 10, 2], dtype)
        verify([456], [6, 7, 10, 2], dtype)
    # In below example, 5 is out of bound for array of size 4.
    # MXNet implementation provides different result than TVM
    # TVM implementation is inline with Tensorflow
    # Ideally error should be thrown just like Numpy
    # verify([0, 1, 2, 5], [2, 2], dtype)
@tvm.testing.uses_gpu
def test_forward_swap_axis():
    """Check conversion of MXNet swapaxes."""

    def _verify_swap_axis(in_shape, out_shape, dim1, dim2):
        sym = mx.sym.swapaxes(mx.sym.var("data"), dim1, dim2)
        verify_mxnet_frontend_impl(sym, in_shape, out_shape)

    _verify_swap_axis((4, 5), (5, 4), 0, 1)
    _verify_swap_axis((2, 4, 4, 5), (2, 5, 4, 4), 1, 3)
    # MXNet errors out when dim1 == dim2
    # _verify_swap_axis((4, 5), (5, 4), 0, 0)
@tvm.testing.uses_gpu
def test_forward_depth_to_space():
    """Check conversion of MXNet depth_to_space."""

    def verify(shape, blocksize=2):
        data_np = np.random.uniform(size=shape).astype("float32")
        ref_res = mx.nd.depth_to_space(mx.nd.array(data_np), blocksize)
        mx_sym = mx.sym.depth_to_space(mx.sym.var("x"), blocksize)
        mod, _ = relay.frontend.from_mxnet(mx_sym, {"x": data_np.shape})
        for target, dev in tvm.testing.enabled_targets():
            for kind in ["graph", "debug"]:
                func = relay.create_executor(kind, mod=mod, device=dev, target=target).evaluate()
                tvm.testing.assert_allclose(
                    func(data_np).numpy(), ref_res.asnumpy(), rtol=1e-3, atol=1e-5
                )

    verify((1, 18, 3, 3), 3)
@tvm.testing.uses_gpu
def test_forward_space_to_depth():
    """Check conversion of MXNet space_to_depth."""

    def verify(shape, blocksize=2):
        data_np = np.random.uniform(size=shape).astype("float32")
        ref_res = mx.nd.space_to_depth(mx.nd.array(data_np), blocksize)
        mx_sym = mx.sym.space_to_depth(mx.sym.var("x"), blocksize)
        mod, _ = relay.frontend.from_mxnet(mx_sym, {"x": data_np.shape})
        for target, dev in tvm.testing.enabled_targets():
            for kind in ["graph", "debug"]:
                func = relay.create_executor(kind, mod=mod, device=dev, target=target).evaluate()
                tvm.testing.assert_allclose(
                    func(data_np).numpy(), ref_res.asnumpy(), rtol=1e-3, atol=1e-5
                )

    verify((1, 1, 9, 9), 3)
@tvm.testing.uses_gpu
def test_forward_correlation():
    """Check conversion of MXNet Correlation over kernel/displacement/stride/pad variants."""

    def verify(data_shape, kernel_size, max_displacement, stride1, stride2, pad_size, is_multiply):
        data1 = np.random.uniform(size=data_shape).astype("float32")
        data2 = np.random.uniform(size=data_shape).astype("float32")
        # Eager reference result.
        ref_res = mx.nd.Correlation(
            data1=mx.nd.array(data1),
            data2=mx.nd.array(data2),
            kernel_size=kernel_size,
            max_displacement=max_displacement,
            stride1=stride1,
            stride2=stride2,
            pad_size=pad_size,
            is_multiply=is_multiply,
        )
        # Symbolic graph with identical attributes for the importer.
        mx_sym = mx.sym.Correlation(
            data1=mx.sym.var("data1"),
            data2=mx.sym.var("data2"),
            kernel_size=kernel_size,
            max_displacement=max_displacement,
            stride1=stride1,
            stride2=stride2,
            pad_size=pad_size,
            is_multiply=is_multiply,
        )
        shape_dict = {"data1": data1.shape, "data2": data2.shape}
        mod, _ = relay.frontend.from_mxnet(mx_sym, shape_dict)
        for target, dev in tvm.testing.enabled_targets():
            for kind in ["graph", "debug"]:
                op_res = relay.create_executor(kind, mod=mod, device=dev, target=target).evaluate()(
                    data1, data2
                )
                tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy(), rtol=1e-3, atol=1e-5)

    verify(
        (1, 3, 10, 10),
        kernel_size=1,
        max_displacement=4,
        stride1=1,
        stride2=1,
        pad_size=4,
        is_multiply=False,
    )
    verify(
        (5, 1, 15, 15),
        kernel_size=1,
        max_displacement=5,
        stride1=1,
        stride2=1,
        pad_size=5,
        is_multiply=False,
    )
    verify(
        (5, 1, 15, 15),
        kernel_size=1,
        max_displacement=5,
        stride1=1,
        stride2=1,
        pad_size=5,
        is_multiply=True,
    )
    verify(
        (5, 1, 15, 15),
        kernel_size=1,
        max_displacement=10,
        stride1=1,
        stride2=2,
        pad_size=10,
        is_multiply=True,
    )
    verify(
        (5, 1, 4, 4),
        kernel_size=3,
        max_displacement=1,
        stride1=1,
        stride2=1,
        pad_size=2,
        is_multiply=True,
    )
    verify(
        (5, 1, 4, 4),
        kernel_size=3,
        max_displacement=1,
        stride1=2,
        stride2=1,
        pad_size=2,
        is_multiply=True,
    )
    verify(
        (5, 1, 4, 4),
        kernel_size=3,
        max_displacement=1,
        stride1=2,
        stride2=1,
        pad_size=2,
        is_multiply=False,
    )
    verify(
        (5, 1, 6, 4),
        kernel_size=3,
        max_displacement=1,
        stride1=2,
        stride2=1,
        pad_size=2,
        is_multiply=False,
    )
    verify(
        (5, 1, 11, 11),
        kernel_size=5,
        max_displacement=1,
        stride1=1,
        stride2=1,
        pad_size=2,
        is_multiply=False,
    )
@tvm.testing.uses_gpu
def test_forward_arange_like():
    """Check conversion of contrib.arange_like (shape-driven arange; takes no runtime input)."""

    def verify(data_shape, start=None, step=None, axis=None):
        # Only forward the attributes that were explicitly requested.
        attrs = {
            key: val
            for key, val in (("start", start), ("step", step), ("axis", axis))
            if val is not None
        }
        data_np = np.random.uniform(size=data_shape).astype("float32")
        ref_res = mx.nd.contrib.arange_like(mx.nd.array(data_np), **attrs)
        mx_sym = mx.sym.contrib.arange_like(mx.sym.var("data"), **attrs)
        mod, _ = relay.frontend.from_mxnet(mx_sym, {"data": data_shape})
        for target, dev in tvm.testing.enabled_targets():
            for kind in ["graph"]:
                # Output depends only on the input's shape, so no argument is passed.
                func = relay.create_executor(kind, mod=mod, device=dev, target=target).evaluate()
                tvm.testing.assert_allclose(func().numpy(), ref_res.asnumpy())

    verify(data_shape=(3,), start=0.0, step=1.0)
    verify(data_shape=(3, 4, 5), start=0.0, step=1.0)
    verify(data_shape=(3, 4, 5), start=0.0, step=1.0, axis=-1)
    verify(data_shape=(3, 4, 5), start=2.0, step=3.0, axis=1)
@tvm.testing.uses_gpu
def test_forward_interleaved_matmul_selfatt_qk():
    """Check conversion of contrib.interleaved_matmul_selfatt_qk (fused Q*K^T scores)."""

    def verify(batch, seq_length, num_heads, head_dim):
        # The input packs Q, K and V along the last axis, hence the factor of 3.
        data_shape = (seq_length, batch, num_heads * head_dim * 3)
        data_np = np.random.uniform(size=data_shape).astype("float32")
        ref_res = mx.nd.contrib.interleaved_matmul_selfatt_qk(mx.nd.array(data_np), heads=num_heads)
        mx_sym = mx.sym.contrib.interleaved_matmul_selfatt_qk(mx.sym.var("data"), heads=num_heads)
        mod, _ = relay.frontend.from_mxnet(mx_sym, {"data": data_shape})
        for target, dev in tvm.testing.enabled_targets():
            for kind in ["graph"]:
                func = relay.create_executor(kind, mod=mod, device=dev, target=target).evaluate()
                tvm.testing.assert_allclose(func(data_np).numpy(), ref_res.asnumpy(), rtol=1e-5)

    verify(1, 10, 3, 16)
    verify(3, 10, 6, 8)
@tvm.testing.uses_gpu
def test_forward_interleaved_matmul_selfatt_valatt():
    """Check conversion of contrib.interleaved_matmul_selfatt_valatt (attention x values)."""

    def verify(batch, seq_length, num_heads, head_dim):
        # Packed QKV input and a (batch*heads, seq, seq) attention matrix.
        data_shape = (seq_length, batch, num_heads * head_dim * 3)
        att_shape = (batch * num_heads, seq_length, seq_length)
        data_np = np.random.uniform(size=data_shape).astype("float32")
        att_np = np.random.uniform(size=att_shape).astype("float32")
        ref_res = mx.nd.contrib.interleaved_matmul_selfatt_valatt(
            mx.nd.array(data_np), mx.nd.array(att_np), heads=num_heads
        )
        mx_sym = mx.sym.contrib.interleaved_matmul_selfatt_valatt(
            mx.sym.var("data"), mx.sym.var("weight"), heads=num_heads
        )
        mod, _ = relay.frontend.from_mxnet(mx_sym, {"data": data_shape, "weight": att_shape})
        for target, dev in tvm.testing.enabled_targets():
            for kind in ["graph"]:
                func = relay.create_executor(kind, mod=mod, device=dev, target=target).evaluate()
                op_res = func(data=data_np, weight=att_np)
                tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy(), rtol=1e-5)

    verify(1, 10, 4, 16)
    verify(3, 10, 6, 8)
@tvm.testing.uses_gpu
def test_forward_box_nms():
    """Check conversion of contrib.box_nms (non-maximum suppression) against mx.nd."""

    def verify(
        data_shape,
        overlap_thresh=0.5,
        valid_thresh=0,
        topk=1,
        coord_start=2,
        score_index=1,
        id_index=0,
        force_suppress=False,
        in_format="corner",
    ):
        dtype = "float32"
        data = np.random.uniform(low=0, high=1, size=data_shape).astype(dtype)
        ref_res = mx.nd.contrib.box_nms(
            mx.nd.array(data),
            overlap_thresh=overlap_thresh,
            valid_thresh=valid_thresh,
            topk=topk,
            coord_start=coord_start,
            score_index=score_index,
            id_index=id_index,
            force_suppress=force_suppress,
            background_id=-1,
            in_format=in_format,
            out_format=in_format,
        )
        mx_sym = mx.sym.contrib.box_nms(
            mx.sym.var("data"),
            overlap_thresh=overlap_thresh,
            valid_thresh=valid_thresh,
            topk=topk,
            coord_start=coord_start,
            score_index=score_index,
            id_index=id_index,
            force_suppress=force_suppress,
            background_id=-1,
            in_format=in_format,
            out_format=in_format,
        )
        shape_dict = {"data": data_shape}
        mod, _ = relay.frontend.from_mxnet(mx_sym, shape_dict)
        for target, dev in tvm.testing.enabled_targets():
            # Prefer the thrust sort implementation when the target supports it.
            if tvm.contrib.thrust.can_use_thrust(
                tvm.target.Target(target + " -libs=thrust"), "tvm.contrib.thrust.sort"
            ):
                target += " -libs=thrust"
            for kind in ["graph", "debug"]:
                op_res = relay.create_executor(kind, mod=mod, device=dev, target=target).evaluate()(
                    data
                )
                tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy(), rtol=1e-3, atol=1e-5)

    verify((1, 10, 6))
    # No valid boxes
    verify((1, 10, 6), valid_thresh=1)
@tvm.testing.uses_gpu
def test_forward_box_decode():
    """Check conversion of contrib.box_decode against the native MXNet result.

    Covers anchor broadcasting over batch, custom stds, clipping, and the
    "center" input format.
    """

    # FIX: the default for `stds` is a tuple, not a list, to avoid the shared
    # mutable-default-argument pitfall; it is only ever indexed, so behavior
    # is unchanged and list arguments still work.
    def verify(data_shape, anchor_shape, stds=(1, 1, 1, 1), clip=-1, in_format="corner"):
        dtype = "float32"
        data = np.random.uniform(low=-2, high=2, size=data_shape).astype(dtype)
        anchors = np.random.uniform(low=-2, high=2, size=anchor_shape).astype(dtype)
        ref_res = mx.nd.contrib.box_decode(
            mx.nd.array(data),
            mx.nd.array(anchors),
            stds[0],
            stds[1],
            stds[2],
            stds[3],
            clip,
            in_format,
        )
        mx_sym = mx.sym.contrib.box_decode(
            mx.sym.var("data"),
            mx.sym.var("anchors"),
            stds[0],
            stds[1],
            stds[2],
            stds[3],
            clip,
            in_format,
        )
        shape_dict = {"data": data_shape, "anchors": anchor_shape}
        mod, _ = relay.frontend.from_mxnet(mx_sym, shape_dict)
        for target, dev in tvm.testing.enabled_targets():
            for kind in ["graph", "debug"]:
                op_res = relay.create_executor(kind, mod=mod, device=dev, target=target).evaluate()(
                    data, anchors
                )
                tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy(), rtol=1e-3, atol=1e-5)

    verify((1, 10, 4), (1, 10, 4))
    verify((4, 10, 4), (1, 10, 4))
    verify((1, 10, 4), (1, 10, 4), stds=[2, 3, 0.5, 1.5])
    verify((1, 10, 4), (1, 10, 4), clip=1)
    verify((1, 10, 4), (1, 10, 4), in_format="center")
@tvm.testing.uses_gpu
def test_forward_softmax():
    """Check conversion of MXNet softmax, with and without the `length` masking input."""

    def verify(data_shape, axis, use_length, length):
        dtype = "float32"
        x = np.random.uniform(low=-100, high=100, size=data_shape).astype(dtype)
        if use_length:
            # Length-masked softmax: entries beyond `length` along `axis` are masked.
            ref_res = mx.nd.softmax(
                data=mx.nd.array(x),
                length=mx.nd.array(length, dtype="int32"),
                axis=axis,
                use_length=use_length,
            )
            mx_sym = mx.symbol.softmax(
                data=mx.sym.var("data"),
                length=mx.sym.var("length"),
                axis=axis,
                use_length=use_length,
            )
            shape_dict = {"data": data_shape, "length": (length.shape)}
            dtype_dict = {"data": dtype, "length": "int32"}
            mod, _ = relay.frontend.from_mxnet(mx_sym, shape_dict, dtype_dict)
        else:
            ref_res = mx.nd.softmax(data=mx.nd.array(x), axis=axis)
            mx_sym = mx.symbol.softmax(data=mx.sym.var("data"), axis=axis)
            shape_dict = {"data": data_shape}
            mod, _ = relay.frontend.from_mxnet(mx_sym, shape_dict)

        for target, dev in tvm.testing.enabled_targets():
            for kind in ["graph", "debug"]:
                func = relay.create_executor(kind, mod=mod, device=dev, target=target).evaluate()
                if use_length:
                    op_res = func(x, length)
                else:
                    op_res = func(x)

                tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy(), rtol=1e-3, atol=1e-5)

    verify((2, 3, 5), -1, False, None)
    verify((2, 3, 5), 2, False, None)
    verify((2, 3), -1, True, np.array([2, 1]).astype("int32"))
    verify((2, 3, 4), -1, True, np.array([[3, 4, 2], [2, 1, 1]]).astype("int32"))
    verify((2, 3, 4), 2, True, np.array([[3, 4, 2], [1, 2, 1]]).astype("int32"))
@pytest.mark.skipif(not hasattr(mx.sym.np, "pad"), reason="mx.sym.np.pad hasn't been publish yet")
@pytest.mark.parametrize(
    "data_shape, pad_width",
    [
        ((1, 1, 3, 5), ((0, 0), (0, 0), (1, 2), (3, 4))),
        ((1, 1, 3, 5, 7), ((0, 0), (0, 0), (1, 2), (3, 4), (5, 6))),
    ],
)
@pytest.mark.parametrize("mode", ["constant", "edge", "reflect"])
@pytest.mark.parametrize("dtype", ["float64", "float32", "int64", "int32"])
@pytest.mark.parametrize("constant_value", [0.0, 3.0])
@tvm.testing.parametrize_targets
@pytest.mark.parametrize("kind", ["graph", "vm", "debug"])
def test_forward_npi_pad(data_shape, pad_width, mode, dtype, constant_value, target, dev, kind):
    """Check conversion of mx.np.pad against numpy.pad for all supported modes."""
    data_np = np.random.uniform(size=data_shape).astype(dtype)
    data = mx.sym.var("data")
    # constant_values is only accepted in "constant" mode by both np.pad and mx.
    extra_kwargs = {"constant_values": constant_value} if mode == "constant" else {}
    ref_res = np.pad(data_np, mode=mode, pad_width=pad_width, **extra_kwargs)
    mx_sym = mx.sym.np.pad(data.as_np_ndarray(), mode=mode, pad_width=pad_width, **extra_kwargs)
    mod, _ = relay.frontend.from_mxnet(mx_sym, {"data": data_shape}, dtype=dtype)
    func = relay.create_executor(kind, mod=mod, device=dev, target=target).evaluate()
    tvm.testing.assert_allclose(func(data_np).numpy(), ref_res, rtol=1e-5)
@pytest.mark.skipif(
    not hasattr(mx.sym.np, "pad"), reason="test'll abort with Mxnet 1.x, skip for now"
)
@pytest.mark.parametrize("data_shape", [(2, 2, 2), (2, 7, 2)])
@pytest.mark.parametrize("dtype", ["float64", "float32", "int64", "int32", "bool"])
@pytest.mark.parametrize("axes", [(1, 0, 2), None])
@tvm.testing.parametrize_targets
@pytest.mark.parametrize("kind", ["graph", "vm", "debug"])
def test_forward_npi_transpose(data_shape, axes, dtype, target, dev, kind):
    """Check conversion of mx.np.transpose for explicit and default (reversed) axes."""
    data_np = np.random.uniform(size=data_shape).astype(dtype)
    ref_res = mx.np.transpose(mx.np.array(data_np), axes=axes)
    mx_sym = mx.sym.np.transpose(mx.sym.var("data").as_np_ndarray(), axes=axes)
    mod, _ = relay.frontend.from_mxnet(mx_sym, {"data": data_shape}, dtype=dtype)
    func = relay.create_executor(kind, mod=mod, device=dev, target=target).evaluate()
    tvm.testing.assert_allclose(func(data_np).numpy(), ref_res.asnumpy(), rtol=1e-5)
@pytest.mark.parametrize(
    "data_shape1, data_shape2, axis",
    [
        ((2, 2), (2, 2), 1),
        ((2, 4), (2, 3), 1),
        ((1, 3, 2), (1, 3, 5), 2),
        ((1, 3, 3), (1, 3, 3), 1),
        ((1, 3), (1, 3), 0),
    ],
)
@pytest.mark.parametrize("dtype", ["float64", "float32", "int64", "int32"])
@tvm.testing.parametrize_targets
@pytest.mark.parametrize("kind", ["graph", "vm", "debug"])
def test_forward_npi_concatenate(data_shape1, data_shape2, axis, dtype, target, dev, kind):
    """Check conversion of mx.np.concatenate along different axes and shapes."""
    lhs_np = np.random.uniform(size=data_shape1).astype(dtype)
    rhs_np = np.random.uniform(size=data_shape2).astype(dtype)
    ref_res = mx.np.concatenate([mx.np.array(lhs_np), mx.np.array(rhs_np)], axis=axis)
    mx_sym = mx.sym.np.concatenate(
        [mx.sym.var("data1").as_np_ndarray(), mx.sym.var("data2").as_np_ndarray()], axis=axis
    )
    mod, _ = relay.frontend.from_mxnet(
        mx_sym, shape={"data1": data_shape1, "data2": data_shape2}, dtype=dtype
    )
    func = relay.create_executor(kind, mod=mod, device=dev, target=target).evaluate()
    tvm.testing.assert_allclose(func(lhs_np, rhs_np).numpy(), ref_res.asnumpy(), rtol=1e-5)
@pytest.mark.parametrize(
    "data_shape1, data_shape2, axis",
    [
        ((3,), (3,), 0),
        ((3,), (3,), -1),
        ((1, 3, 2), (1, 3, 2), 2),
        ((1, 3, 3), (1, 3, 3), 1),
        ((1, 3), (1, 3), 0),
    ],
)
@pytest.mark.parametrize("dtype", ["float64", "float32", "int64", "int32"])
@tvm.testing.parametrize_targets
@pytest.mark.parametrize("kind", ["graph", "vm", "debug"])
def test_forward_npi_stack(data_shape1, data_shape2, axis, dtype, target, dev, kind):
    """Check conversion of mx.np.stack along different (including negative) axes."""
    lhs_np = np.random.uniform(size=data_shape1).astype(dtype)
    rhs_np = np.random.uniform(size=data_shape2).astype(dtype)
    ref_res = mx.np.stack([mx.np.array(lhs_np), mx.np.array(rhs_np)], axis=axis)
    mx_sym = mx.sym.np.stack(
        [mx.sym.var("data1").as_np_ndarray(), mx.sym.var("data2").as_np_ndarray()], axis=axis
    )
    mod, _ = relay.frontend.from_mxnet(
        mx_sym, shape={"data1": data_shape1, "data2": data_shape2}, dtype=dtype
    )
    func = relay.create_executor(kind, mod=mod, device=dev, target=target).evaluate()
    tvm.testing.assert_allclose(func(lhs_np, rhs_np).numpy(), ref_res.asnumpy(), rtol=1e-5)
@pytest.mark.parametrize("data_shape", [(2, 2, 2), (2, 7, 2), (2, 2, 2, 1, 2, 3, 1), (1, 8)])
@pytest.mark.parametrize("dtype", ["float64", "float32", "int64", "int32", "bool"])
@tvm.testing.parametrize_targets
@pytest.mark.parametrize("kind", ["graph", "vm", "debug"])
def test_forward_np_copy(data_shape, dtype, target, dev, kind):
    """Check conversion of mx.np.copy (identity with a fresh buffer)."""
    data_np = np.random.uniform(size=data_shape).astype(dtype)
    ref_res = mx.np.copy(mx.np.array(data_np))
    mx_sym = mx.sym.np.copy(mx.sym.var("data").as_np_ndarray())
    mod, _ = relay.frontend.from_mxnet(mx_sym, {"data": data_shape}, dtype=dtype)
    func = relay.create_executor(kind, mod=mod, device=dev, target=target).evaluate()
    tvm.testing.assert_allclose(func(data_np).numpy(), ref_res.asnumpy(), rtol=1e-5)
@pytest.mark.parametrize("dtype", ["float64", "float32", "int64", "int32", "bool"])
@tvm.testing.parametrize_targets
@pytest.mark.parametrize("kind", ["graph", "vm", "debug"])
@pytest.mark.parametrize(
    "data_shape,out_shape,reverse",
    [
        ((2, 3, 8), (-2, -2, 2, -1), False),
        ((8, 3, 3, 3, 4, 4), (-6, 2, -1, -4), False),
        ((8, 3, 3, 3, 4, 4), (-5, -4), False),
        ((1, 8, 3, 3, 3, 4, 4), (-3, -5, -4), False),
        ((8, 1, 3, 4), (-2, -3, -1), False),
        ((8, 3, 3, 3, 3, 8), (-4, -5), True),
        ((8, 3, 2, 4, 8), (-4, -1, 2, -6), True),
        ((3, 2, 4, 8, 1, 1), (-4, -1, 2, -6, -5, -3), True),
        ((2, 4, 1, 8), (-4, -3, -1, 2, -6), True),
    ],
)
def test_forward_npx_reshape(data_shape, out_shape, dtype, target, reverse, dev, kind):
    """Check conversion of mx.npx.reshape, incl. special negative codes and reverse mode."""
    data_np = np.random.uniform(size=data_shape).astype(dtype)
    ref_res = mx.npx.reshape(mx.np.array(data_np), newshape=out_shape, reverse=reverse)
    mx_sym = mx.sym.npx.reshape(
        mx.sym.var("data").as_np_ndarray(), newshape=out_shape, reverse=reverse
    )
    mod, _ = relay.frontend.from_mxnet(mx_sym, {"data": data_shape}, dtype=dtype)
    func = relay.create_executor(kind, mod=mod, device=dev, target=target).evaluate()
    tvm.testing.assert_allclose(func(data_np).numpy(), ref_res.asnumpy(), rtol=1e-5)
@pytest.mark.parametrize(
    "data_shape", [(2, 2, 2), (2, 7, 2), (2, 2, 2, 1, 2, 3, 1), (1, 8), (2, 2), (1, 3)]
)
@pytest.mark.parametrize("dtype", ["float64", "float32", "int64", "int32"])
@tvm.testing.parametrize_targets
@pytest.mark.parametrize("kind", ["graph", "vm", "debug"])
def test_forward_npi_binary(data_shape, dtype, target, dev, kind):
    """Check conversion of elementwise binary mx.np ops (power/multiply/add/subtract/less).

    Improvement: the eager and symbolic ops are kept as explicit pairs and
    iterated directly instead of indexing two parallel lists with
    ``range(len(...))``.
    """
    op_pairs = [
        (mx.np.power, mx.sym.np.power),
        (mx.np.multiply, mx.sym.np.multiply),
        (mx.np.add, mx.sym.np.add),
        (mx.np.subtract, mx.sym.np.subtract),
        (mx.np.less, mx.sym.np.less),
    ]
    for ref_op, mx_op in op_pairs:
        # mx.np.power only support float type
        if ref_op == mx.np.power and dtype not in ["float64", "float32"]:
            continue
        data_np1 = np.random.uniform(size=data_shape).astype(dtype)
        data_np2 = np.random.uniform(size=data_shape).astype(dtype)
        data1 = mx.sym.var("lhs")
        data2 = mx.sym.var("rhs")
        ref_res = ref_op(mx.np.array(data_np1), mx.np.array(data_np2))
        mx_sym = mx_op(data1.as_np_ndarray(), data2.as_np_ndarray())
        mod, _ = relay.frontend.from_mxnet(
            mx_sym, shape={"lhs": data_shape, "rhs": data_shape}, dtype=dtype
        )
        op_res = relay.create_executor(kind, mod=mod, device=dev, target=target).evaluate()(
            data_np1, data_np2
        )
        tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy(), rtol=1e-5)
@pytest.mark.parametrize(
    "data_shape", [(2, 2, 2), (2, 7, 2), (2, 2, 2, 1, 2, 3, 1), (1, 8), (2, 2), (1, 3)]
)
@pytest.mark.parametrize("dtype", ["float64", "float32", "int64", "int32"])
@tvm.testing.parametrize_targets
@pytest.mark.parametrize("scalar", [1.0, 2.0, 3.0, 4.0])
@pytest.mark.parametrize("kind", ["graph", "vm", "debug"])
def test_forward_npi_binary_scalar(data_shape, dtype, scalar, target, dev, kind):
    """Check conversion of mx.np binary ops with a scalar right-hand side.

    Improvement: the eager and symbolic ops are kept as explicit pairs and
    iterated directly instead of indexing two parallel lists with
    ``range(len(...))``.
    """
    op_pairs = [
        (mx.np.power, mx.sym.np.power),
        (mx.np.multiply, mx.sym.np.multiply),
        (mx.np.add, mx.sym.np.add),
        (mx.np.subtract, mx.sym.np.subtract),
        (mx.np.true_divide, mx.sym.np.true_divide),
    ]
    for ref_op, mx_op in op_pairs:
        # mx.np.power only support float type
        if ref_op == mx.np.power and dtype not in ["float64", "float32"]:
            continue
        data_np1 = np.random.uniform(size=data_shape).astype(dtype)
        data1 = mx.sym.var("lhs")
        ref_res = ref_op(mx.np.array(data_np1), scalar)
        mx_sym = mx_op(data1.as_np_ndarray(), scalar)
        mod, _ = relay.frontend.from_mxnet(mx_sym, shape={"lhs": data_shape}, dtype=dtype)
        op_res = relay.create_executor(kind, mod=mod, device=dev, target=target).evaluate()(
            data_np1
        )
        tvm.testing.assert_allclose(op_res.numpy(), ref_res.asnumpy(), rtol=1e-5)
@pytest.mark.parametrize(
    "data_shape", [(2, 2, 2), (2, 7, 2), (2, 2, 2, 1, 2, 3, 1), (1, 8), (2, 2), (1, 3)]
)
@pytest.mark.parametrize("dtype", ["float64", "float32"])
@tvm.testing.parametrize_targets
@pytest.mark.parametrize("kind", ["graph", "vm", "debug"])
def test_forward_npi_tanh(data_shape, dtype, target, dev, kind):
    """Check conversion of mx.np.tanh."""
    data_np = np.random.uniform(size=data_shape).astype(dtype)
    ref_res = mx.np.tanh(mx.np.array(data_np))
    mx_sym = mx.sym.np.tanh(mx.sym.var("data").as_np_ndarray())
    mod, _ = relay.frontend.from_mxnet(mx_sym, shape={"data": data_shape}, dtype=dtype)
    func = relay.create_executor(kind, mod=mod, device=dev, target=target).evaluate()
    tvm.testing.assert_allclose(func(data_np).numpy(), ref_res.asnumpy(), rtol=1e-5)
@pytest.mark.skipif(not hasattr(mx.np, "where"), reason="mx.np.where hasn't been publish yet")
@pytest.mark.parametrize(
    "data_shape,cond_shape",
    [[(2, 2, 2), (2, 2, 2)], [(2, 7, 2), (7, 2)], [(2, 2), (1, 2)], [(1, 3), (3, 3)]],
)
@pytest.mark.parametrize("data_dtype", ["float64", "float32", "int64", "int32", "bool"])
@pytest.mark.parametrize("cond_dtype", ["float64", "float32", "int64", "int32", "bool"])
@pytest.mark.parametrize("scalar", [1.0, 2.0])
@tvm.testing.parametrize_targets
@pytest.mark.parametrize("kind", ["graph", "vm", "debug"])
def test_forward_npi_where_rscalar(
    data_shape, cond_shape, data_dtype, cond_dtype, scalar, target, dev, kind
):
    """Check conversion of mx.np.where with a scalar "else" value and broadcasting."""
    if data_dtype == "bool":
        # Keep the scalar representable in the boolean output dtype.
        scalar = scalar == 0.0
    cond_np = np.random.uniform(size=cond_shape).astype(cond_dtype)
    data_np = np.random.uniform(size=data_shape).astype(data_dtype)
    ref_res = mx.np.where(mx.np.array(cond_np), mx.np.array(data_np), scalar)
    mx_sym = mx.sym.np.where(
        mx.sym.var("condition").as_np_ndarray(), mx.sym.var("x").as_np_ndarray(), scalar
    )
    dtype_dict = {"condition": cond_dtype, "x": data_dtype}
    mod, _ = relay.frontend.from_mxnet(
        mx_sym, shape={"condition": cond_shape, "x": data_shape}, dtype=dtype_dict
    )
    func = relay.create_executor(kind, mod=mod, device=dev, target=target).evaluate()
    tvm.testing.assert_allclose(func(cond_np, data_np).numpy(), ref_res.asnumpy(), rtol=1e-5)
@pytest.mark.parametrize("dtype", ["float64", "float32", "int64", "int32", "bool"])
@tvm.testing.parametrize_targets
@pytest.mark.parametrize("kind", ["graph", "vm", "debug"])
@pytest.mark.parametrize(
"data_shape, axis, indices_or_sections, squeeze_axis",
[
((3, 2, 1), 1, 2, False),
((3, 2, 1), 0, 3, False),
((3, 2, 1), 0, 3, True),
((3, 2, 1), 0, (1, 2), False),
],
)
def test_forward_split_v2(
data_shape, axis, dtype, indices_or_sections, squeeze_axis, target, dev, kind
):
data_np = np.random.uniform(size=data_shape).astype(dtype)
data = mx.sym.var("data")
ref_res = mx.ndarray.split_v2(
mx.nd.array(data_np), indices_or_sections, axis=axis, squeeze_axis=squeeze_axis
)
mx_sym = mx.sym.split_v2(
data.as_nd_ndarray(), indices_or_sections, axis=axis, squeeze_axis=squeeze_axis
)
mod, _ = relay.frontend.from_mxnet(mx_sym, {"data": data_shape}, dtype=dtype)
op_res = relay.create_executor(kind, mod=mod, device=dev, target=target).evaluate()(data_np)
op_res_ = []
for arr in op_res:
op_res_.append(arr.numpy().tolist())
ref_res_ = []
for arr in ref_res:
ref_res_.append(arr.asnumpy().tolist())
tvm.testing.assert_allclose(op_res_, ref_res_, rtol=1e-5)
if __name__ == "__main__":
tvm.testing.main()
| 93,053 | 38.313054 | 104 | py |
tvm | tvm-main/tests/python/frontend/mxnet/test_graph.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import mxnet as mx
import tvm
from tvm import te
from tvm import relay
from tvm.relay import transform
import model_zoo
def compare_graph(lhs_mod, rhs_mod):
    """Assert two relay IRModules have structurally equal ``main`` after type inference."""
    infer = transform.InferType()
    typed_lhs, typed_rhs = infer(lhs_mod), infer(rhs_mod)
    assert tvm.ir.structural_equal(typed_lhs["main"], typed_rhs["main"])
def test_mlp():
    """MLP: the MXNet import must match the hand-written relay version."""
    input_shapes = {"data": (1, 1, 28, 28)}
    imported, _ = relay.frontend.from_mxnet(model_zoo.mx_mlp(), shape=input_shapes)
    compare_graph(imported, model_zoo.relay_mlp())
def test_vgg():
    """VGG-{11,13,16,19}: imported graphs must match the relay reference."""
    input_shapes = {"data": (1, 3, 224, 224)}
    for depth in (11, 13, 16, 19):
        imported, _ = relay.frontend.from_mxnet(model_zoo.mx_vgg(depth), shape=input_shapes)
        compare_graph(imported, model_zoo.relay_vgg(depth))
def test_resnet():
    """ResNet-{18,34,50,101}: imported graphs must match the relay reference."""
    input_shapes = {"data": (1, 3, 224, 224)}
    for depth in (18, 34, 50, 101):
        imported, _ = relay.frontend.from_mxnet(model_zoo.mx_resnet(depth), shape=input_shapes)
        compare_graph(imported, model_zoo.relay_resnet(depth))
def test_squeezenet():
    """SqueezeNet 1.0/1.1: imported graphs must match the relay reference."""
    input_shapes = {"data": (1, 3, 224, 224)}
    for version in ("1.0", "1.1"):
        imported, _ = relay.frontend.from_mxnet(model_zoo.mx_squeezenet(version), input_shapes)
        compare_graph(imported, model_zoo.relay_squeezenet(version))
def test_inception_v3():
    """Inception-v3: the MXNet import must match the relay reference."""
    input_shapes = {"data": (1, 3, 299, 299)}
    imported, _ = relay.frontend.from_mxnet(model_zoo.mx_inception_v3(), input_shapes)
    compare_graph(imported, model_zoo.relay_inception_v3())
def test_dqn():
    """Nature-DQN: the MXNet import must match the relay reference."""
    input_shapes = {"data": (1, 4, 84, 84)}
    imported, _ = relay.frontend.from_mxnet(model_zoo.mx_dqn(), input_shapes)
    compare_graph(imported, model_zoo.relay_dqn())
def test_dcgan():
    """DCGAN generator: the MXNet import must match the relay reference (batch 2)."""
    input_shapes = {"data": (2, 100)}
    imported, _ = relay.frontend.from_mxnet(model_zoo.mx_dcgan(), input_shapes)
    compare_graph(imported, model_zoo.relay_dcgan(batch_size=2))
def test_multi_outputs():
    """Graphs that consume several results of a split import correctly."""
    xshape = (10, 27)
    yshape = (10, 9)

    def build_mx(F, **kwargs):
        # Combine the first and third split pieces, then subtract y.
        x = F.sym.Variable("x")
        y = F.sym.Variable("y")
        parts = F.sym.split(x, **kwargs)
        return F.sym.broadcast_sub(F.sym.broadcast_add(parts[0], parts[2]), y)

    def build_relay(F, **kwargs):
        # Same computation expressed directly in relay.
        x = F.var("x", shape=xshape)
        y = F.var("y", shape=yshape)
        parts = F.split(x, **kwargs)
        out = F.subtract(F.add(parts[0], parts[2]), y)
        func = relay.Function(relay.analysis.free_vars(out), out)
        return tvm.IRModule.from_expr(func)

    mx_sym = build_mx(mx, num_outputs=3, axis=1)
    mod, _ = relay.frontend.from_mxnet(mx_sym, shape={"x": xshape, "y": yshape})
    compare_graph(mod, build_relay(relay, indices_or_sections=3, axis=1))
if __name__ == "__main__":
test_mlp()
test_resnet()
test_vgg()
test_multi_outputs()
test_dqn()
test_dcgan()
test_squeezenet()
test_inception_v3()
| 3,841 | 29.983871 | 80 | py |
tvm | tvm-main/tests/python/frontend/mxnet/model_zoo/resnet.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Adapted from https://github.com/tornadomeet/ResNet/blob/master/symbol_resnet.py
Original author Wei Wu
Implemented the following paper:
Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun. "Identity Mappings in Deep Residual Networks"
"""
import mxnet as mx
import numpy as np
def residual_unit(
    data,
    num_filter,
    stride,
    dim_match,
    name,
    bottle_neck=True,
    bn_mom=0.9,
    workspace=256,
    memonger=False,
):
    """Return a pre-activation (BN -> ReLU -> Conv) ResNet unit symbol.

    Parameters
    ----------
    data : Symbol
        Input symbol.
    num_filter : int
        Number of output channels.
    stride : tuple
        Stride used in the first convolution (and the shortcut projection).
    dim_match : Boolean
        True means channel number between input and output is the same, otherwise means differ
    name : str
        Base name of the operators.
    bottle_neck : bool
        If True, use the three-conv (1x1 -> 3x3 -> 1x1) bottleneck form;
        otherwise the plain two-conv (3x3 -> 3x3) form.
    bn_mom : float
        BatchNorm momentum.
    workspace : int
        Workspace used in convolution operator.
    memonger : bool
        Mark the shortcut tensor for memory mirroring.
    """
    if bottle_neck:
        bn1 = mx.sym.BatchNorm(
            data=data, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + "_bn1"
        )
        act1 = mx.sym.Activation(data=bn1, act_type="relu", name=name + "_relu1")
        # 1x1 reduction conv: bottleneck uses a quarter of the output channels.
        conv1 = mx.sym.Convolution(
            data=act1,
            num_filter=int(num_filter * 0.25),
            kernel=(1, 1),
            stride=stride,
            pad=(0, 0),
            no_bias=True,
            workspace=workspace,
            name=name + "_conv1",
        )
        bn2 = mx.sym.BatchNorm(
            data=conv1, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + "_bn2"
        )
        act2 = mx.sym.Activation(data=bn2, act_type="relu", name=name + "_relu2")
        conv2 = mx.sym.Convolution(
            data=act2,
            num_filter=int(num_filter * 0.25),
            kernel=(3, 3),
            stride=(1, 1),
            pad=(1, 1),
            no_bias=True,
            workspace=workspace,
            name=name + "_conv2",
        )
        bn3 = mx.sym.BatchNorm(
            data=conv2, fix_gamma=False, eps=2e-5, momentum=bn_mom, name=name + "_bn3"
        )
        act3 = mx.sym.Activation(data=bn3, act_type="relu", name=name + "_relu3")
        # 1x1 expansion conv back to the full channel count.
        conv3 = mx.sym.Convolution(
            data=act3,
            num_filter=num_filter,
            kernel=(1, 1),
            stride=(1, 1),
            pad=(0, 0),
            no_bias=True,
            workspace=workspace,
            name=name + "_conv3",
        )
        if dim_match:
            shortcut = data
        else:
            # Project the shortcut with a strided 1x1 conv when shapes differ.
            shortcut = mx.sym.Convolution(
                data=act1,
                num_filter=num_filter,
                kernel=(1, 1),
                stride=stride,
                no_bias=True,
                workspace=workspace,
                name=name + "_sc",
            )
        if memonger:
            shortcut._set_attr(mirror_stage="True")
        return conv3 + shortcut
    else:
        bn1 = mx.sym.BatchNorm(
            data=data, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + "_bn1"
        )
        act1 = mx.sym.Activation(data=bn1, act_type="relu", name=name + "_relu1")
        conv1 = mx.sym.Convolution(
            data=act1,
            num_filter=num_filter,
            kernel=(3, 3),
            stride=stride,
            pad=(1, 1),
            no_bias=True,
            workspace=workspace,
            name=name + "_conv1",
        )
        bn2 = mx.sym.BatchNorm(
            data=conv1, fix_gamma=False, momentum=bn_mom, eps=2e-5, name=name + "_bn2"
        )
        act2 = mx.sym.Activation(data=bn2, act_type="relu", name=name + "_relu2")
        conv2 = mx.sym.Convolution(
            data=act2,
            num_filter=num_filter,
            kernel=(3, 3),
            stride=(1, 1),
            pad=(1, 1),
            no_bias=True,
            workspace=workspace,
            name=name + "_conv2",
        )
        if dim_match:
            shortcut = data
        else:
            # Project the shortcut with a strided 1x1 conv when shapes differ.
            shortcut = mx.sym.Convolution(
                data=act1,
                num_filter=num_filter,
                kernel=(1, 1),
                stride=stride,
                no_bias=True,
                workspace=workspace,
                name=name + "_sc",
            )
        if memonger:
            shortcut._set_attr(mirror_stage="True")
        return conv2 + shortcut
def resnet(
    units,
    num_stages,
    filter_list,
    num_classes,
    image_shape,
    bottle_neck=True,
    bn_mom=0.9,
    workspace=256,
    dtype="float32",
    memonger=False,
):
    """Return a full ResNet symbol (stem + residual stages + classifier head).

    Parameters
    ----------
    units : list
        Number of residual units in each stage; must have ``num_stages`` entries.
    num_stages : int
        Number of stages.
    filter_list : list
        Channel size of the stem plus each stage (``num_stages + 1`` entries).
    num_classes : int
        Output size of the final fully-connected layer.
    image_shape : tuple
        ``(channels, height, width)`` of the input image.
    bottle_neck : bool
        Use bottleneck residual units.
    bn_mom : float
        BatchNorm momentum.
    workspace : int
        Workspace used in convolution operator.
    dtype : str
        Precision (float32 or float16).
    memonger : bool
        Mark shortcut tensors for memory mirroring.
    """
    num_unit = len(units)
    assert num_unit == num_stages
    data = mx.sym.Variable(name="data")
    if dtype == "float32":
        # data = mx.sym.identity(data=data, name='id')
        data = data
    else:
        if dtype == "float16":
            data = mx.sym.Cast(data=data, dtype=np.float16)
    data = mx.sym.BatchNorm(data=data, fix_gamma=True, eps=2e-5, momentum=bn_mom, name="bn_data")
    (nchannel, height, width) = image_shape
    if height <= 32:  # such as cifar10: small 3x3 stem, no pooling
        body = mx.sym.Convolution(
            data=data,
            num_filter=filter_list[0],
            kernel=(3, 3),
            stride=(1, 1),
            pad=(1, 1),
            no_bias=True,
            name="conv0",
            workspace=workspace,
        )
    else:  # often expected to be 224 such as imagenet: 7x7 stem + max-pool
        body = mx.sym.Convolution(
            data=data,
            num_filter=filter_list[0],
            kernel=(7, 7),
            stride=(2, 2),
            pad=(3, 3),
            no_bias=True,
            name="conv0",
            workspace=workspace,
        )
        body = mx.sym.BatchNorm(data=body, fix_gamma=False, eps=2e-5, momentum=bn_mom, name="bn0")
        body = mx.sym.Activation(data=body, act_type="relu", name="relu0")
        body = mx.sym.Pooling(data=body, kernel=(3, 3), stride=(2, 2), pad=(1, 1), pool_type="max")
    for i in range(num_stages):
        # First unit of each stage may downsample (stride 2 except stage 0) and
        # changes the channel count, so dim_match=False.
        body = residual_unit(
            body,
            filter_list[i + 1],
            (1 if i == 0 else 2, 1 if i == 0 else 2),
            False,
            name="stage%d_unit%d" % (i + 1, 1),
            bottle_neck=bottle_neck,
            workspace=workspace,
            memonger=memonger,
        )
        for j in range(units[i] - 1):
            # Remaining units keep the shape, so the identity shortcut applies.
            body = residual_unit(
                body,
                filter_list[i + 1],
                (1, 1),
                True,
                name="stage%d_unit%d" % (i + 1, j + 2),
                bottle_neck=bottle_neck,
                workspace=workspace,
                memonger=memonger,
            )
    bn1 = mx.sym.BatchNorm(data=body, fix_gamma=False, eps=2e-5, momentum=bn_mom, name="bn1")
    relu1 = mx.sym.Activation(data=bn1, act_type="relu", name="relu1")
    # Although kernel is not used here when global_pool=True, we should put one
    pool1 = mx.sym.Pooling(
        data=relu1, global_pool=True, kernel=(7, 7), pool_type="avg", name="pool1"
    )
    flat = mx.sym.Flatten(data=pool1)
    try:
        fc1 = mx.sym.FullyConnected(data=flat, num_hidden=num_classes, name="fc1", flatten=False)
    except Exception:  # narrowed from bare `except:`; older MXNet lacks the `flatten` kwarg
        fc1 = mx.sym.FullyConnected(data=flat, num_hidden=num_classes, name="fc1")
    if dtype == "float16":
        fc1 = mx.sym.Cast(data=fc1, dtype=np.float32)
    return mx.sym.softmax(data=fc1, name="softmax")
def get_symbol(num_classes, num_layers, image_shape, conv_workspace=256, dtype="float32", **kwargs):
    """
    Adapted from https://github.com/tornadomeet/ResNet/blob/master/train_resnet.py
    Original author Wei Wu
    """
    image_shape = [int(dim) for dim in image_shape.split(",")]
    (nchannel, height, width) = image_shape
    if height <= 28:
        # CIFAR-style small inputs: unit count is derived from the total depth.
        num_stages = 3
        if (num_layers - 2) % 9 == 0 and num_layers >= 164:
            per_unit = [(num_layers - 2) // 9]
            filter_list = [16, 64, 128, 256]
            bottle_neck = True
        elif (num_layers - 2) % 6 == 0 and num_layers < 164:
            per_unit = [(num_layers - 2) // 6]
            filter_list = [16, 16, 32, 64]
            bottle_neck = False
        else:
            raise ValueError(
                "no experiments done on num_layers {}, you can do it yourself".format(num_layers)
            )
        units = per_unit * num_stages
    else:
        # ImageNet-style inputs: look up the published unit configuration.
        num_stages = 4
        if num_layers >= 50:
            filter_list = [64, 256, 512, 1024, 2048]
            bottle_neck = True
        else:
            filter_list = [64, 64, 128, 256, 512]
            bottle_neck = False
        depth_to_units = {
            18: [2, 2, 2, 2],
            34: [3, 4, 6, 3],
            50: [3, 4, 6, 3],
            101: [3, 4, 23, 3],
            152: [3, 8, 36, 3],
            200: [3, 24, 36, 3],
            269: [3, 30, 48, 8],
        }
        if num_layers not in depth_to_units:
            raise ValueError(
                "no experiments done on num_layers {}, you can do it yourself".format(num_layers)
            )
        units = depth_to_units[num_layers]
    return resnet(
        units=units,
        num_stages=num_stages,
        filter_list=filter_list,
        num_classes=num_classes,
        image_shape=image_shape,
        bottle_neck=bottle_neck,
        workspace=conv_workspace,
        dtype=dtype,
    )
| 10,688 | 31.688073 | 100 | py |
tvm | tvm-main/tests/python/frontend/mxnet/model_zoo/squeezenet.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Symbol of SqueezeNet
Reference:
Iandola, Forrest N., et al.
"Squeezenet: Alexnet-level accuracy with 50x fewer parameters and< 0.5 mb model size." (2016).
"""
import mxnet as mx
# Helpers
def _make_fire(net, squeeze_channels, expand1x1_channels, expand3x3_channels):
    """One SqueezeNet fire module: a squeeze conv feeding parallel 1x1/3x3 expands."""
    squeezed = _make_fire_conv(net, squeeze_channels, 1, 0)
    expand_left = _make_fire_conv(squeezed, expand1x1_channels, 1, 0)
    expand_right = _make_fire_conv(squeezed, expand3x3_channels, 3, 1)
    # NOTE : Assume NCHW layout here
    return mx.sym.concat(expand_left, expand_right, dim=1)
def _make_fire_conv(net, channels, kernel_size, padding=0):
    """Square convolution followed by ReLU, as used inside a fire module."""
    conv = mx.sym.Convolution(
        net, num_filter=channels, kernel=(kernel_size, kernel_size), pad=(padding, padding)
    )
    return mx.sym.Activation(conv, act_type="relu")
# Net
def get_symbol(num_classes=1000, version="1.0", **kwargs):
    """Build the SqueezeNet classification symbol.

    Parameters
    ----------
    num_classes: int
        The number of classification results
    version : str, optional
        "1.0" or "1.1" of SqueezeNet
    """
    assert version in [
        "1.0",
        "1.1",
    ], "Unsupported SqueezeNet version {version}:" "1.0 or 1.1 expected".format(version=version)
    sym = mx.sym.Variable("data")
    if version == "1.0":
        # v1.0: 7x7 stem, pooling after the 3rd and 8th fire modules.
        sym = mx.sym.Convolution(sym, num_filter=96, kernel=(7, 7), stride=(2, 2), pad=(3, 3))
        sym = mx.sym.Activation(sym, act_type="relu")
        sym = mx.sym.Pooling(data=sym, kernel=(3, 3), pool_type="max", stride=(2, 2))
        for squeeze, expand in ((16, 64), (16, 64), (32, 128)):
            sym = _make_fire(sym, squeeze, expand, expand)
        sym = mx.sym.Pooling(data=sym, kernel=(3, 3), pool_type="max", stride=(2, 2))
        for squeeze, expand in ((32, 128), (48, 192), (48, 192), (64, 256)):
            sym = _make_fire(sym, squeeze, expand, expand)
        sym = mx.sym.Pooling(data=sym, kernel=(3, 3), pool_type="max", stride=(2, 2))
        sym = _make_fire(sym, 64, 256, 256)
    else:
        # v1.1: 3x3 stem, pooling moved earlier between fire-module pairs.
        sym = mx.sym.Convolution(sym, num_filter=64, kernel=(3, 3), stride=(2, 2), pad=(1, 1))
        sym = mx.sym.Activation(sym, act_type="relu")
        sym = mx.sym.Pooling(data=sym, kernel=(3, 3), pool_type="max", stride=(2, 2))
        for squeeze, expand in ((16, 64), (16, 64)):
            sym = _make_fire(sym, squeeze, expand, expand)
        sym = mx.sym.Pooling(data=sym, kernel=(3, 3), pool_type="max", stride=(2, 2))
        for squeeze, expand in ((32, 128), (32, 128)):
            sym = _make_fire(sym, squeeze, expand, expand)
        sym = mx.sym.Pooling(data=sym, kernel=(3, 3), pool_type="max", stride=(2, 2))
        for squeeze, expand in ((48, 192), (48, 192), (64, 256), (64, 256)):
            sym = _make_fire(sym, squeeze, expand, expand)
    # Classifier head: dropout, 1x1 conv to num_classes, global average pool, softmax.
    sym = mx.sym.Dropout(sym, p=0.5)
    sym = mx.sym.Convolution(sym, num_filter=num_classes, kernel=(1, 1))
    sym = mx.sym.Activation(sym, act_type="relu")
    sym = mx.sym.Pooling(data=sym, global_pool=True, kernel=(13, 13), pool_type="avg")
    sym = mx.sym.flatten(sym)
    return mx.sym.softmax(sym)
| 3,892 | 38.72449 | 96 | py |
tvm | tvm-main/tests/python/frontend/mxnet/model_zoo/vgg.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""References:
Simonyan, Karen, and Andrew Zisserman. "Very deep convolutional networks for
large-scale image recognition." arXiv preprint arXiv:1409.1556 (2014).
"""
import mxnet as mx
import numpy as np
def get_feature(internel_layer, layers, filters, batch_norm=False, **kwargs):
    """Stack the VGG convolutional stages: (conv[+bn]+relu) x n, then max-pool per stage."""
    body = internel_layer
    for stage, repeats in enumerate(layers):
        for block in range(repeats):
            body = mx.sym.Convolution(
                data=body,
                kernel=(3, 3),
                pad=(1, 1),
                num_filter=filters[stage],
                name="conv%s_%s" % (stage + 1, block + 1),
            )
            if batch_norm:
                body = mx.symbol.BatchNorm(data=body, name="bn%s_%s" % (stage + 1, block + 1))
            body = mx.sym.Activation(
                data=body, act_type="relu", name="relu%s_%s" % (stage + 1, block + 1)
            )
        # Halve the spatial resolution after each stage.
        body = mx.sym.Pooling(
            data=body,
            pool_type="max",
            kernel=(2, 2),
            stride=(2, 2),
            name="pool%s" % (stage + 1),
        )
    return body
def get_classifier(input_data, num_classes, **kwargs):
    """Build the VGG classifier head: flatten -> fc4096 -> fc4096 -> fc(num_classes).

    Tries FullyConnected with ``flatten=False`` first (newer MXNet); on failure it
    rebuilds the head with the legacy signature without the kwarg.
    """
    flatten = mx.sym.Flatten(data=input_data, name="flatten")
    try:
        fc6 = mx.sym.FullyConnected(data=flatten, num_hidden=4096, name="fc6", flatten=False)
        relu6 = mx.sym.Activation(data=fc6, act_type="relu", name="relu6")
        drop6 = mx.sym.Dropout(data=relu6, p=0.5, name="drop6")
        fc7 = mx.sym.FullyConnected(data=drop6, num_hidden=4096, name="fc7", flatten=False)
        relu7 = mx.sym.Activation(data=fc7, act_type="relu", name="relu7")
        drop7 = mx.sym.Dropout(data=relu7, p=0.5, name="drop7")
        fc8 = mx.sym.FullyConnected(data=drop7, num_hidden=num_classes, name="fc8", flatten=False)
    except Exception:  # narrowed from bare `except:`; older MXNet lacks the `flatten` kwarg
        fc6 = mx.sym.FullyConnected(data=flatten, num_hidden=4096, name="fc6")
        relu6 = mx.sym.Activation(data=fc6, act_type="relu", name="relu6")
        drop6 = mx.sym.Dropout(data=relu6, p=0.5, name="drop6")
        fc7 = mx.sym.FullyConnected(data=drop6, num_hidden=4096, name="fc7")
        relu7 = mx.sym.Activation(data=fc7, act_type="relu", name="relu7")
        drop7 = mx.sym.Dropout(data=relu7, p=0.5, name="drop7")
        fc8 = mx.sym.FullyConnected(data=drop7, num_hidden=num_classes, name="fc8")
    return fc8
def get_symbol(num_classes, num_layers=11, batch_norm=False, dtype="float32", **kwargs):
    """Build a VGG classification symbol.

    Parameters
    ----------
    num_classes : int, default 1000
        Number of classification classes.
    num_layers : int
        Number of layers for the variant of densenet. Options are 11, 13, 16, 19.
    batch_norm : bool, default False
        Use batch normalization.
    dtype: str, float32 or float16
        Data precision.
    """
    spec = {
        11: ([1, 1, 2, 2, 2], [64, 128, 256, 512, 512]),
        13: ([2, 2, 2, 2, 2], [64, 128, 256, 512, 512]),
        16: ([2, 2, 3, 3, 3], [64, 128, 256, 512, 512]),
        19: ([2, 2, 4, 4, 4], [64, 128, 256, 512, 512]),
    }
    if num_layers not in spec:
        raise ValueError(
            "Invalide num_layers {}. Possible choices are 11,13,16,19.".format(num_layers)
        )
    layers, filters = spec[num_layers]
    data = mx.sym.Variable(name="data")
    if dtype == "float16":
        data = mx.sym.Cast(data=data, dtype=np.float16)
    feature = get_feature(data, layers, filters, batch_norm)
    classifier = get_classifier(feature, num_classes)
    if dtype == "float16":
        # Cast back so the softmax output is float32 regardless of compute precision.
        classifier = mx.sym.Cast(data=classifier, dtype=np.float32)
    return mx.sym.softmax(data=classifier, name="softmax")
| 4,491 | 40.211009 | 98 | py |
tvm | tvm-main/tests/python/frontend/mxnet/model_zoo/mlp.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
a simple multilayer perceptron
"""
import mxnet as mx
def get_symbol(num_classes=10, **kwargs):
    """Build a simple 3-layer MLP symbol (128 -> 64 -> num_classes) with softmax output.

    Tries FullyConnected with ``flatten=False`` first (newer MXNet) and falls back
    to the legacy signature when the kwarg is unsupported.
    """
    data = mx.symbol.Variable("data")
    data = mx.sym.Flatten(data=data)
    try:
        fc1 = mx.symbol.FullyConnected(data=data, name="fc1", num_hidden=128, flatten=False)
        act1 = mx.symbol.Activation(data=fc1, name="relu1", act_type="relu")
        fc2 = mx.symbol.FullyConnected(data=act1, name="fc2", num_hidden=64, flatten=False)
        act2 = mx.symbol.Activation(data=fc2, name="relu2", act_type="relu")
        fc3 = mx.symbol.FullyConnected(data=act2, name="fc3", num_hidden=num_classes, flatten=False)
        mlp = mx.symbol.softmax(data=fc3, name="softmax")
    except Exception:  # narrowed from bare `except:`; older MXNet lacks the `flatten` kwarg
        fc1 = mx.symbol.FullyConnected(data=data, name="fc1", num_hidden=128)
        act1 = mx.symbol.Activation(data=fc1, name="relu1", act_type="relu")
        fc2 = mx.symbol.FullyConnected(data=act1, name="fc2", num_hidden=64)
        act2 = mx.symbol.Activation(data=fc2, name="relu2", act_type="relu")
        fc3 = mx.symbol.FullyConnected(data=act2, name="fc3", num_hidden=num_classes)
        mlp = mx.symbol.softmax(data=fc3, name="softmax")
    return mlp
| 1,950 | 45.452381 | 100 | py |
tvm | tvm-main/tests/python/frontend/mxnet/model_zoo/dqn.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
The mxnet symbol of Nature DQN
Reference:
Mnih, Volodymyr, et al.
"Human-level control through deep reinforcement learning."
Nature 518.7540 (2015): 529.
"""
import mxnet as mx
def get_symbol(num_action=18):
    """Nature-DQN Q-network: three conv+relu stages, fc-512+relu, linear output head."""
    net = mx.sym.Variable(name="data")
    conv_cfg = (((8, 8), (4, 4), 32), ((4, 4), (2, 2), 64), ((3, 3), (1, 1), 64))
    for idx, (kernel, stride, filters) in enumerate(conv_cfg, start=1):
        net = mx.sym.Convolution(
            net, kernel=kernel, stride=stride, num_filter=filters, name="conv%d" % idx
        )
        net = mx.sym.Activation(net, act_type="relu", name="relu%d" % idx)
    net = mx.sym.FullyConnected(net, num_hidden=512, name="fc4")
    net = mx.sym.Activation(net, act_type="relu", name="relu4")
    return mx.sym.FullyConnected(net, num_hidden=num_action, name="fc5", flatten=False)
| 1,745 | 40.571429 | 93 | py |
tvm | tvm-main/tests/python/frontend/mxnet/model_zoo/dcgan.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-argument
"""
The MXNet symbol of DCGAN generator
Adopted from:
https://github.com/tqchen/mxnet-gan/blob/main/mxgan/generator.py
Reference:
Radford, Alec, Luke Metz, and Soumith Chintala.
"Unsupervised representation learning with deep convolutional generative adversarial networks."
arXiv preprint arXiv:1511.06434 (2015).
"""
import mxnet as mx
def deconv2d(data, ishape, oshape, kshape, name, stride=(2, 2)):
    """A deconv layer that enlarges the feature map to ``oshape``'s spatial size.

    ``ishape`` is kept for interface compatibility; only ``oshape`` drives the
    padding/adjustment computation.
    """
    out_h, out_w = oshape[-2], oshape[-1]
    pad_y = (kshape[0] - 1) // 2
    pad_x = (kshape[1] - 1) // 2
    # Output adjustment so the deconv result lands exactly on the target size.
    adj_y = (out_h + 2 * pad_y - kshape[0]) % stride[0]
    adj_x = (out_w + 2 * pad_x - kshape[1]) % stride[1]
    return mx.sym.Deconvolution(
        data,
        kernel=kshape,
        stride=stride,
        pad=(pad_y, pad_x),
        adj=(adj_y, adj_x),
        num_filter=oshape[0],
        no_bias=True,
        name=name,
    )
def deconv2d_bn_relu(data, prefix, **kwargs):
    """Deconvolution followed by batch norm and ReLU activation."""
    bn_eps = 1e-5 + 1e-12
    out = deconv2d(data, name="%s_deconv" % prefix, **kwargs)
    out = mx.sym.BatchNorm(out, eps=bn_eps, name="%s_bn" % prefix)
    return mx.sym.Activation(out, name="%s_act" % prefix, act_type="relu")
def get_symbol(oshape=(3, 64, 64), ngf=128, code=None):
    """Get symbol of the DCGAN generator (64x64 output only)."""
    assert oshape[-1] == 64, "Only support 64x64 image"
    assert oshape[-2] == 64, "Only support 64x64 image"
    code = mx.sym.Variable("data") if code is None else code
    net = mx.sym.FullyConnected(
        code, name="g1", num_hidden=ngf * 8 * 4 * 4, no_bias=True, flatten=False
    )
    net = mx.sym.Activation(net, act_type="relu")
    # Project the latent code to a 4x4 spatial map with ngf*8 channels.
    net = mx.sym.reshape(net, shape=(-1, ngf * 8, 4, 4))
    # Three upsampling stages: 4x4 -> 8x8 -> 16x16 -> 32x32.
    stages = (
        (ngf * 8, ngf * 4, 8, "g2"),
        (ngf * 4, ngf * 2, 16, "g3"),
        (ngf * 2, ngf, 32, "g4"),
    )
    for in_ch, out_ch, size, prefix in stages:
        net = deconv2d_bn_relu(
            net,
            ishape=(in_ch, size // 2, size // 2),
            oshape=(out_ch, size, size),
            kshape=(4, 4),
            prefix=prefix,
        )
    # Final 32x32 -> 64x64 deconv to the requested channel count, then tanh.
    net = deconv2d(net, ishape=(ngf, 32, 32), oshape=oshape[-3:], kshape=(4, 4), name="g5_deconv")
    return mx.sym.Activation(net, act_type="tanh")
| 3,190 | 33.684783 | 98 | py |
tvm | tvm-main/tests/python/frontend/mxnet/model_zoo/inception_v3.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Inception V3, suitable for images with around 299 x 299
Reference:
Szegedy, Christian, et al. "Rethinking the Inception Architecture for Computer Vision." arXiv preprint arXiv:1512.00567 (2015).
Adopted from https://github.com/apache/incubator-mxnet/blob/master/
example/image-classification/symbols/inception-v3.py
"""
import mxnet as mx
import numpy as np
def Conv(data, num_filter, kernel=(1, 1), stride=(1, 1), pad=(0, 0), name=None, suffix=""):
    """Conv -> BatchNorm -> ReLU building block with inception-style operator names."""
    conv = mx.sym.Convolution(
        data=data,
        num_filter=num_filter,
        kernel=kernel,
        stride=stride,
        pad=pad,
        no_bias=True,
        name="%s%s_conv2d" % (name, suffix),
    )
    normed = mx.sym.BatchNorm(data=conv, eps=2e-5, name="%s%s_batchnorm" % (name, suffix))
    return mx.sym.Activation(data=normed, act_type="relu", name="%s%s_relu" % (name, suffix))
def Inception7A(
    data, num_1x1, num_3x3_red, num_3x3_1, num_3x3_2, num_5x5_red, num_5x5, pool, proj, name
):
    """Inception-v3 "A" block: parallel 1x1, 5x5, double-3x3 and pooled-projection
    towers, concatenated along the channel axis. ``*_red`` parameters are the
    1x1 reduction channel counts feeding the larger convolutions.
    """
    # Tower 1: plain 1x1 conv.
    tower_1x1 = Conv(data, num_1x1, name=("%s_conv" % name))
    # Tower 2: 1x1 reduction followed by a 5x5 conv.
    tower_5x5 = Conv(data, num_5x5_red, name=("%s_tower" % name), suffix="_conv")
    tower_5x5 = Conv(
        tower_5x5, num_5x5, kernel=(5, 5), pad=(2, 2), name=("%s_tower" % name), suffix="_conv_1"
    )
    # Tower 3: 1x1 reduction followed by two stacked 3x3 convs.
    tower_3x3 = Conv(data, num_3x3_red, name=("%s_tower_1" % name), suffix="_conv")
    tower_3x3 = Conv(
        tower_3x3,
        num_3x3_1,
        kernel=(3, 3),
        pad=(1, 1),
        name=("%s_tower_1" % name),
        suffix="_conv_1",
    )
    tower_3x3 = Conv(
        tower_3x3,
        num_3x3_2,
        kernel=(3, 3),
        pad=(1, 1),
        name=("%s_tower_1" % name),
        suffix="_conv_2",
    )
    # Tower 4: 3x3 pooling (type chosen by `pool`) followed by a 1x1 projection.
    pooling = mx.sym.Pooling(
        data=data,
        kernel=(3, 3),
        stride=(1, 1),
        pad=(1, 1),
        pool_type=pool,
        name=("%s_pool_%s_pool" % (pool, name)),
    )
    cproj = Conv(pooling, proj, name=("%s_tower_2" % name), suffix="_conv")
    concat = mx.sym.Concat(
        *[tower_1x1, tower_5x5, tower_3x3, cproj], name="ch_concat_%s_chconcat" % name
    )
    return concat
# First Downsample
def Inception7B(data, num_3x3, num_d3x3_red, num_d3x3_1, num_d3x3_2, pool, name):
    """Inception-v3 "B" (grid-reduction) block: strided 3x3 conv, strided double-3x3
    tower, and strided max-pool, concatenated along the channel axis. Note the
    ``pool`` argument is accepted but the pooling branch is hard-coded to "max".
    """
    # Tower 1: single strided 3x3 conv halves the spatial resolution.
    tower_3x3 = Conv(
        data, num_3x3, kernel=(3, 3), pad=(0, 0), stride=(2, 2), name=("%s_conv" % name)
    )
    # Tower 2: 1x1 reduction, a same-size 3x3 conv, then a strided 3x3 conv.
    tower_d3x3 = Conv(data, num_d3x3_red, name=("%s_tower" % name), suffix="_conv")
    tower_d3x3 = Conv(
        tower_d3x3,
        num_d3x3_1,
        kernel=(3, 3),
        pad=(1, 1),
        stride=(1, 1),
        name=("%s_tower" % name),
        suffix="_conv_1",
    )
    tower_d3x3 = Conv(
        tower_d3x3,
        num_d3x3_2,
        kernel=(3, 3),
        pad=(0, 0),
        stride=(2, 2),
        name=("%s_tower" % name),
        suffix="_conv_2",
    )
    # Tower 3: strided max-pool keeps the original channels.
    pooling = mx.sym.Pooling(
        data=data,
        kernel=(3, 3),
        stride=(2, 2),
        pad=(0, 0),
        pool_type="max",
        name=("max_pool_%s_pool" % name),
    )
    concat = mx.sym.Concat(*[tower_3x3, tower_d3x3, pooling], name="ch_concat_%s_chconcat" % name)
    return concat
def Inception7C(
    data,
    num_1x1,
    num_d7_red,
    num_d7_1,
    num_d7_2,
    num_q7_red,
    num_q7_1,
    num_q7_2,
    num_q7_3,
    num_q7_4,
    pool,
    proj,
    name,
):
    """Inception-v3 'C' block: 7x7 convolutions factorized into 1x7 / 7x1 pairs."""
    branch_1x1 = Conv(data=data, num_filter=num_1x1, kernel=(1, 1), name=f"{name}_conv")
    # Double-7 tower: 1x1 reduction, then a 1x7 followed by a 7x1.
    branch_d7 = Conv(data=data, num_filter=num_d7_red, name=f"{name}_tower", suffix="_conv")
    d7_plan = [(num_d7_1, (1, 7), (0, 3)), (num_d7_2, (7, 1), (3, 0))]
    for idx, (filters, kern, padding) in enumerate(d7_plan, start=1):
        branch_d7 = Conv(
            data=branch_d7,
            num_filter=filters,
            kernel=kern,
            pad=padding,
            name=f"{name}_tower",
            suffix=f"_conv_{idx}",
        )
    # Quadruple-7 tower: 1x1 reduction, then alternating 7x1 / 1x7 convolutions.
    branch_q7 = Conv(data=data, num_filter=num_q7_red, name=f"{name}_tower_1", suffix="_conv")
    q7_plan = [
        (num_q7_1, (7, 1), (3, 0)),
        (num_q7_2, (1, 7), (0, 3)),
        (num_q7_3, (7, 1), (3, 0)),
        (num_q7_4, (1, 7), (0, 3)),
    ]
    for idx, (filters, kern, padding) in enumerate(q7_plan, start=1):
        branch_q7 = Conv(
            data=branch_q7,
            num_filter=filters,
            kernel=kern,
            pad=padding,
            name=f"{name}_tower_1",
            suffix=f"_conv_{idx}",
        )
    # Pooling tower with 1x1 channel projection.
    pooled = mx.sym.Pooling(
        data=data,
        kernel=(3, 3),
        stride=(1, 1),
        pad=(1, 1),
        pool_type=pool,
        name=f"{pool}_pool_{name}_pool",
    )
    branch_proj = Conv(
        data=pooled, num_filter=proj, kernel=(1, 1), name=f"{name}_tower_2", suffix="_conv"
    )
    # concat
    return mx.sym.Concat(
        branch_1x1, branch_d7, branch_q7, branch_proj, name=f"ch_concat_{name}_chconcat"
    )
def Inception7D(
    data, num_3x3_red, num_3x3, num_d7_3x3_red, num_d7_1, num_d7_2, num_d7_3x3, pool, name
):
    """Inception-v3 'D' (grid-reduction) block: stride-2 3x3 towers plus pooling."""
    # 3x3 tower: 1x1 reduction followed by a stride-2 3x3.
    branch_3x3 = Conv(data=data, num_filter=num_3x3_red, name=f"{name}_tower", suffix="_conv")
    branch_3x3 = Conv(
        data=branch_3x3,
        num_filter=num_3x3,
        kernel=(3, 3),
        pad=(0, 0),
        stride=(2, 2),
        name=f"{name}_tower",
        suffix="_conv_1",
    )
    # Factorized-7 tower ending in a stride-2 3x3.
    branch_d7 = Conv(
        data=data, num_filter=num_d7_3x3_red, name=f"{name}_tower_1", suffix="_conv"
    )
    branch_d7 = Conv(
        data=branch_d7,
        num_filter=num_d7_1,
        kernel=(1, 7),
        pad=(0, 3),
        name=f"{name}_tower_1",
        suffix="_conv_1",
    )
    branch_d7 = Conv(
        data=branch_d7,
        num_filter=num_d7_2,
        kernel=(7, 1),
        pad=(3, 0),
        name=f"{name}_tower_1",
        suffix="_conv_2",
    )
    branch_d7 = Conv(
        data=branch_d7,
        num_filter=num_d7_3x3,
        kernel=(3, 3),
        stride=(2, 2),
        name=f"{name}_tower_1",
        suffix="_conv_3",
    )
    pooled = mx.sym.Pooling(
        data=data,
        kernel=(3, 3),
        stride=(2, 2),
        pool_type=pool,
        name=f"{pool}_pool_{name}_pool",
    )
    # concat
    return mx.sym.Concat(branch_3x3, branch_d7, pooled, name=f"ch_concat_{name}_chconcat")
def Inception7E(
    data,
    num_1x1,
    num_d3_red,
    num_d3_1,
    num_d3_2,
    num_3x3_d3_red,
    num_3x3,
    num_3x3_d3_1,
    num_3x3_d3_2,
    pool,
    proj,
    name,
):
    """Inception-v3 'E' block: expanded filter banks with split 1x3 / 3x1 outputs."""
    branch_1x1 = Conv(data=data, num_filter=num_1x1, kernel=(1, 1), name=f"{name}_conv")
    # First expanded tower: 1x1 reduction split into a 1x3 and a 3x1 branch.
    stem_d3 = Conv(data=data, num_filter=num_d3_red, name=f"{name}_tower", suffix="_conv")
    split_d3_a = Conv(
        data=stem_d3,
        num_filter=num_d3_1,
        kernel=(1, 3),
        pad=(0, 1),
        name=f"{name}_tower",
        suffix="_mixed_conv",
    )
    split_d3_b = Conv(
        data=stem_d3,
        num_filter=num_d3_2,
        kernel=(3, 1),
        pad=(1, 0),
        name=f"{name}_tower",
        suffix="_mixed_conv_1",
    )
    # Second expanded tower: 1x1 reduction, a 3x3, then the same 1x3 / 3x1 split.
    stem_3x3 = Conv(
        data=data, num_filter=num_3x3_d3_red, name=f"{name}_tower_1", suffix="_conv"
    )
    stem_3x3 = Conv(
        data=stem_3x3,
        num_filter=num_3x3,
        kernel=(3, 3),
        pad=(1, 1),
        name=f"{name}_tower_1",
        suffix="_conv_1",
    )
    split_3x3_a = Conv(
        data=stem_3x3,
        num_filter=num_3x3_d3_1,
        kernel=(1, 3),
        pad=(0, 1),
        name=f"{name}_tower_1",
        suffix="_mixed_conv",
    )
    split_3x3_b = Conv(
        data=stem_3x3,
        num_filter=num_3x3_d3_2,
        kernel=(3, 1),
        pad=(1, 0),
        name=f"{name}_tower_1",
        suffix="_mixed_conv_1",
    )
    # Pooling tower with 1x1 channel projection.
    pooled = mx.sym.Pooling(
        data=data,
        kernel=(3, 3),
        stride=(1, 1),
        pad=(1, 1),
        pool_type=pool,
        name=f"{pool}_pool_{name}_pool",
    )
    branch_proj = Conv(
        data=pooled, num_filter=proj, kernel=(1, 1), name=f"{name}_tower_2", suffix="_conv"
    )
    # concat
    return mx.sym.Concat(
        branch_1x1,
        split_d3_a,
        split_d3_b,
        split_3x3_a,
        split_3x3_b,
        branch_proj,
        name=f"ch_concat_{name}_chconcat",
    )
def get_symbol(num_classes=1000, **kwargs):
    """Build the full Inception-v3 symbol producing a softmax over *num_classes*."""
    data = mx.sym.Variable(name="data")
    # Stage 1: stem convolutions followed by max pooling.
    conv = Conv(data, 32, kernel=(3, 3), stride=(2, 2), name="conv")
    conv_1 = Conv(conv, 32, kernel=(3, 3), name="conv_1")
    conv_2 = Conv(conv_1, 64, kernel=(3, 3), pad=(1, 1), name="conv_2")
    stem_pool = mx.sym.Pooling(
        data=conv_2, kernel=(3, 3), stride=(2, 2), pool_type="max", name="pool"
    )
    # Stage 2.
    conv_3 = Conv(stem_pool, 80, kernel=(1, 1), name="conv_3")
    conv_4 = Conv(conv_3, 192, kernel=(3, 3), name="conv_4")
    pool1 = mx.sym.Pooling(
        data=conv_4, kernel=(3, 3), stride=(2, 2), pool_type="max", name="pool1"
    )
    # Stage 3: three 'A' blocks and one reduction 'B' block.
    in3a = Inception7A(pool1, 64, 64, 96, 96, 48, 64, "avg", 32, "mixed")
    in3b = Inception7A(in3a, 64, 64, 96, 96, 48, 64, "avg", 64, "mixed_1")
    in3c = Inception7A(in3b, 64, 64, 96, 96, 48, 64, "avg", 64, "mixed_2")
    in3d = Inception7B(in3c, 384, 64, 96, 96, "max", "mixed_3")
    # Stage 4: four 'C' blocks and one reduction 'D' block.
    in4a = Inception7C(in3d, 192, 128, 128, 192, 128, 128, 128, 128, 192, "avg", 192, "mixed_4")
    in4b = Inception7C(in4a, 192, 160, 160, 192, 160, 160, 160, 160, 192, "avg", 192, "mixed_5")
    in4c = Inception7C(in4b, 192, 160, 160, 192, 160, 160, 160, 160, 192, "avg", 192, "mixed_6")
    in4d = Inception7C(in4c, 192, 192, 192, 192, 192, 192, 192, 192, 192, "avg", 192, "mixed_7")
    in4e = Inception7D(in4d, 192, 320, 192, 192, 192, 192, "max", "mixed_8")
    # Stage 5: two 'E' blocks.
    in5a = Inception7E(in4e, 320, 384, 384, 384, 448, 384, 384, 384, "avg", 192, "mixed_9")
    in5b = Inception7E(in5a, 320, 384, 384, 384, 448, 384, 384, 384, "max", 192, "mixed_10")
    # Global average pooling, flatten, and the classifier head.
    global_pool = mx.sym.Pooling(
        data=in5b, kernel=(8, 8), stride=(1, 1), pool_type="avg", name="global_pool"
    )
    flatten = mx.sym.Flatten(data=global_pool, name="flatten")
    fc1 = mx.sym.FullyConnected(data=flatten, num_hidden=num_classes, name="fc1", flatten=False)
    return mx.sym.SoftmaxOutput(data=fc1, name="softmax")
| 11,306 | 29.642276 | 127 | py |
tvm | tvm-main/tests/python/frontend/tensorflow2/test_sequential_models.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=import-self, invalid-name, unused-argument, too-many-lines, len-as-condition, broad-except
# pylint: disable=import-outside-toplevel, redefined-builtin
"""TF2 to relay converter test: testing models built with tf.keras.Sequential()"""
import tempfile
import numpy as np
import pytest
import tensorflow as tf
from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2
from common import compare_tf_tvm
from common import run_tf_code
def run_sequential_model(model_fn, input_shape):
    """Build *model_fn*, freeze it via a SavedModel round trip, then compare TF vs TVM."""

    def random_input(shape):
        # Random float32 input in [0, 1).
        return np.random.uniform(0, 1, shape).astype(dtype="float32")

    def freeze(keras_model):
        # Serialize and reload so we obtain a frozen ConcreteFunction with
        # all variables converted to constants.
        with tempfile.TemporaryDirectory() as export_dir:
            tf.saved_model.save(keras_model, export_dir)
            reloaded = tf.saved_model.load(export_dir)
            serving_fn = reloaded.signatures["serving_default"]
            return convert_variables_to_constants_v2(serving_fn)

    sample = random_input(input_shape)
    frozen = freeze(model_fn(input_shape))
    reference = run_tf_code(frozen, sample)
    graph_def = frozen.graph.as_graph_def(add_shapes=True)
    compare_tf_tvm(graph_def, sample, reference, runtime="vm")
def test_dense_model():
    """Flatten followed by a single Dense layer."""

    def dense_model(input_shape, num_units=128):
        layers = [
            tf.keras.layers.Flatten(input_shape=input_shape[1:]),
            tf.keras.layers.Dense(num_units),
        ]
        return tf.keras.Sequential(layers)

    run_sequential_model(dense_model, input_shape=(1, 28, 28))
def test_mnist_model():
    """Small MNIST-style MLP: Flatten -> Dense(relu) -> Dense."""

    def mnist_model(input_shape):
        layers = [
            tf.keras.layers.Flatten(input_shape=input_shape[1:]),
            tf.keras.layers.Dense(128, activation="relu"),
            tf.keras.layers.Dense(10),
        ]
        return tf.keras.Sequential(layers)

    run_sequential_model(mnist_model, input_shape=(1, 28, 28))
def test_conv2d_model():
    """Single Conv2D layer model."""

    def conv2d_model(input_shape, kernel=(3, 3), filters=16):
        layers = [
            tf.keras.layers.Input(shape=input_shape[1:], batch_size=1),
            tf.keras.layers.Conv2D(filters, kernel),
        ]
        return tf.keras.Sequential(layers)

    run_sequential_model(conv2d_model, input_shape=(1, 32, 32, 3))
def test_maxpool_model():
    """Single MaxPool2D layer model."""

    def maxpool_model(input_shape, pool_size=(2, 2)):
        pool_layer = tf.keras.layers.MaxPool2D(pool_size=pool_size, input_shape=input_shape[1:])
        return tf.keras.Sequential([pool_layer])

    run_sequential_model(maxpool_model, input_shape=(1, 32, 32, 3))
def test_maxpool_batchnorm_model():
    """MaxPool2D followed by BatchNormalization."""

    def maxpool_batchnorm_model(input_shape, pool_size=(2, 2)):
        layers = [
            tf.keras.layers.MaxPool2D(pool_size=pool_size, input_shape=input_shape[1:]),
            tf.keras.layers.BatchNormalization(),
        ]
        return tf.keras.Sequential(layers)

    run_sequential_model(maxpool_batchnorm_model, input_shape=(1, 32, 32, 3))
def test_tensorlist_stack_model():
    """TensorArray unstack/stack round trip inside a custom Keras layer."""

    def tensorlist_stack_model(input_shape):
        class TensorArrayStackLayer(tf.keras.layers.Layer):
            def call(self, inputs):
                inputs = tf.squeeze(inputs)
                arr = tf.TensorArray(
                    tf.float32,
                    size=inputs.shape[0],
                    infer_shape=False,
                    element_shape=inputs.shape[1:],
                )
                # Unstack the rows into the array, then stack them back.
                return arr.unstack(inputs).stack()

        # NOTE: deliberately shadows the argument; the model always uses (3, 32).
        input_shape = (3, 32)
        return tf.keras.Sequential(
            [tf.keras.layers.Input(shape=input_shape, batch_size=1), TensorArrayStackLayer()]
        )

    run_sequential_model(tensorlist_stack_model, input_shape=(3, 32))
def test_tensorlist_read_model():
    """TensorArray per-row write then read(0) inside a custom Keras layer."""

    def tensorlist_read_model(input_shape):
        class TensorArrayReadLayer(tf.keras.layers.Layer):
            def call(self, inputs):
                inputs = tf.squeeze(inputs)
                arr = tf.TensorArray(
                    tf.float32,
                    size=inputs.shape[0],
                    infer_shape=False,
                    element_shape=inputs.shape[1:],
                )
                # Write every row individually, then read back the first one.
                for row in range(inputs.shape[0]):
                    arr = arr.write(row, inputs[row, :])
                return arr.read(0)

        # NOTE: deliberately shadows the argument; the model always uses (3, 32).
        input_shape = (3, 32)
        return tf.keras.Sequential(
            [tf.keras.layers.Input(shape=input_shape, batch_size=1), TensorArrayReadLayer()]
        )

    run_sequential_model(tensorlist_read_model, input_shape=(3, 32))
if __name__ == "__main__":
    # FIX: this module never imports `tvm` (see the import block above), so the
    # previous `tvm.testing.main()` raised a NameError when the file was executed
    # directly. Run the tests through pytest, which this file already imports.
    pytest.main([__file__])
| 5,746 | 33.005917 | 108 | py |
tvm | tvm-main/tests/python/frontend/caffe2/test_forward.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Caffe2 testcases
====================
This article is a test script to test Caffe2 operator with Relay.
"""
from collections import namedtuple
import numpy as np
from caffe2.python import workspace, core
from caffe2.proto import caffe2_pb2
from model_zoo import c2_squeezenet, c2_resnet50, c2_vgg19
import tvm
from tvm.contrib import graph_executor
from tvm import relay
import tvm.testing
def get_tvm_output(model, input_data, target, device, output_shape, output_dtype="float32"):
    """Compile a caffe2 model with Relay, run it on *device*, and return its output(s)."""
    # Supporting multiple inputs in caffe2 is a bit tricky, because the input
    # names can appear at the beginning or end of model.predict_net.external_input
    assert isinstance(input_data, np.ndarray)
    # Use the first input blob of the first op as the single input name.
    input_name = model.predict_net.op[0].input[0]
    mod, params = relay.frontend.from_caffe2(
        model.init_net,
        model.predict_net,
        {input_name: input_data.shape},
        {input_name: input_data.dtype},
    )
    with tvm.transform.PassContext(opt_level=3):
        lib = relay.build(mod, target, params=params)
    runtime = graph_executor.GraphModule(lib["default"](device))
    # Bind the input, then execute.
    runtime.set_input(input_name, tvm.nd.array(input_data.astype(input_data.dtype)))
    runtime.run()
    if isinstance(output_shape, list) and isinstance(output_dtype, list):
        # Multiple outputs: fetch each into a pre-allocated buffer.
        return [
            runtime.get_output(i, tvm.nd.empty((shape), output_dtype[i])).numpy()
            for i, shape in enumerate(output_shape)
        ]
    # Single output.
    return runtime.get_output(0, tvm.nd.empty((output_shape), output_dtype)).numpy()
def get_caffe2_output(model, x, dtype="float32"):
    """Run the reference caffe2 model on *x* and return its first external output."""
    workspace.RunNetOnce(model.init_net)
    # Feed the first input blob of the first predict-net op.
    workspace.FeedBlob(model.predict_net.op[0].input[0], x.astype(dtype))
    workspace.RunNetOnce(model.predict_net)
    return workspace.FetchBlob(model.predict_net.external_output[0])
def verify_caffe2_forward_impl(model, data_shape, out_shape):
    """Check TVM output against the caffe2 reference on random data for all targets."""
    dtype = "float32"
    data = np.random.uniform(size=data_shape).astype(dtype)
    expected = get_caffe2_output(model, data, dtype)
    for target, dev in tvm.testing.enabled_targets():
        actual = get_tvm_output(model, data, target, dev, out_shape, dtype)
        tvm.testing.assert_allclose(expected, actual, rtol=1e-5, atol=1e-5)
@tvm.testing.uses_gpu
def test_forward_squeezenet1_1():
    # End-to-end check of SqueezeNet v1.1: NCHW 224x224 input, (1, 1000, 1, 1) output.
    verify_caffe2_forward_impl(c2_squeezenet, (1, 3, 224, 224), (1, 1000, 1, 1))
@tvm.testing.uses_gpu
def test_forward_resnet50():
    # End-to-end check of ResNet-50: NCHW 224x224 input, 1000-class output.
    verify_caffe2_forward_impl(c2_resnet50, (1, 3, 224, 224), (1, 1000))
@tvm.testing.uses_gpu
def test_forward_vgg19():
    # End-to-end check of VGG-19: NCHW 224x224 input, 1000-class output.
    verify_caffe2_forward_impl(c2_vgg19, (1, 3, 224, 224), (1, 1000))
Model = namedtuple("Model", ["init_net", "predict_net"])
@tvm.testing.uses_gpu
def test_elementwise_add():
    """Element-wise Add of two equally shaped tensors."""
    data_shape = (1, 16, 9, 9)

    def random_fill(blob_name):
        # GivenTensorFill op initializing *blob_name* with random values.
        return core.CreateOperator(
            "GivenTensorFill",
            [],
            [blob_name],
            shape=data_shape,
            values=np.random.uniform(size=data_shape).flatten().tolist(),
        )

    init_net = caffe2_pb2.NetDef()
    init_net.name = "test_init_net"
    init_net.external_output[:] = ["A", "B"]
    init_net.op.extend([random_fill("A"), random_fill("B")])

    predict_net = caffe2_pb2.NetDef()
    predict_net.name = "test_predict_net"
    predict_net.external_input[:] = ["A", "B"]
    predict_net.external_output[:] = ["C"]
    predict_net.op.extend([core.CreateOperator("Add", ["A", "B"], ["C"])])

    verify_caffe2_forward_impl(Model(init_net, predict_net), data_shape, data_shape)
@tvm.testing.uses_gpu
def test_elementwise_add_with_broadcast():
    """Element-wise Add where the second operand is broadcast from a scalar."""
    data_shape = (1, 16, 9, 9)

    init_net = caffe2_pb2.NetDef()
    init_net.name = "test_init_net"
    init_net.external_output[:] = ["A", "B"]
    # "A" is a full tensor, "B" a single value to be broadcast over it.
    fill_a = core.CreateOperator(
        "GivenTensorFill",
        [],
        ["A"],
        shape=data_shape,
        values=np.random.uniform(size=data_shape).flatten().tolist(),
    )
    fill_b = core.CreateOperator(
        "GivenTensorFill",
        [],
        ["B"],
        shape=(1,),
        values=np.random.uniform(size=1).flatten().tolist(),
    )
    init_net.op.extend([fill_a, fill_b])

    predict_net = caffe2_pb2.NetDef()
    predict_net.name = "test_predict_net"
    predict_net.external_input[:] = ["A", "B"]
    predict_net.external_output[:] = ["C"]
    predict_net.op.extend([core.CreateOperator("Add", ["A", "B"], ["C"], broadcast=1)])

    verify_caffe2_forward_impl(Model(init_net, predict_net), data_shape, data_shape)
@tvm.testing.uses_gpu
def test_normalize_yuv():
    """NormalizePlanarYUV with per-channel mean and std tensors."""
    data_shape = (1, 3, 96, 96)
    stat_shape = (1, 3)

    init_net = caffe2_pb2.NetDef()
    init_net.name = "test_init_net"
    init_net.external_output[:] = ["A", "mean", "std"]
    fill_data = core.CreateOperator(
        "GivenTensorFill",
        [],
        ["A"],
        shape=data_shape,
        values=np.random.uniform(size=data_shape).flatten().tolist(),
    )
    fill_mean = core.CreateOperator(
        "GivenTensorFill",
        [],
        ["mean"],
        shape=stat_shape,
        values=np.random.uniform(size=3).flatten().tolist(),
    )
    fill_std = core.CreateOperator(
        "GivenTensorFill",
        [],
        ["std"],
        shape=stat_shape,
        values=np.random.uniform(size=3).flatten().tolist(),
    )
    init_net.op.extend([fill_data, fill_mean, fill_std])

    predict_net = caffe2_pb2.NetDef()
    predict_net.name = "test_predict_net"
    predict_net.external_input[:] = ["A", "mean", "std"]
    predict_net.external_output[:] = ["C"]
    predict_net.op.extend(
        [core.CreateOperator("NormalizePlanarYUV", ["A", "mean", "std"], ["C"])]
    )

    verify_caffe2_forward_impl(Model(init_net, predict_net), data_shape, data_shape)
if __name__ == "__main__":
    # Run all tests in this file through TVM's pytest wrapper.
    tvm.testing.main()
| 7,906 | 29.647287 | 100 | py |
tvm | tvm-main/tests/python/frontend/caffe2/test_graph.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test graph equality of caffe2 models."""
from model_zoo import c2_squeezenet, relay_squeezenet
import tvm
from tvm import relay
from tvm.relay import transform
def compare_graph(lhs_mod, rhs_mod):
    """Assert that two Relay modules have structurally equal 'main' functions."""
    # Run type inference on both sides so the comparison sees fully typed IR.
    lhs_typed = transform.InferType()(lhs_mod)
    rhs_typed = transform.InferType()(rhs_mod)
    assert tvm.ir.structural_equal(lhs_typed["main"], rhs_typed["main"])
def test_squeeze_net():
    """The converted caffe2 SqueezeNet must match the hand-written Relay definition."""
    shape_dict = {"data": (1, 3, 224, 224)}
    dtype_dict = {"data": "float32"}
    mod, _ = relay.frontend.from_caffe2(
        c2_squeezenet.init_net, c2_squeezenet.predict_net, shape_dict, dtype_dict
    )
    relay_mod, _ = relay_squeezenet()
    compare_graph(mod, relay_mod)
if __name__ == "__main__":
    # Allow running this test directly as a script.
    test_squeeze_net()
| 1,507 | 34.904762 | 81 | py |
tvm | tvm-main/tests/python/frontend/caffe2/model_zoo/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Store for caffe2 examples and common models."""
from __future__ import absolute_import as _abs
import os
import sys
import importlib
from caffe2.python.models.download import ModelDownloader
from . import squeezenet
models = [
"squeezenet",
"resnet50",
"vgg19",
]
mf = ModelDownloader()
class Model:
    """Downloaded caffe2 model: holds its init net, predict net and value info."""

    def __init__(self, model_name):
        # Fetch the named model (or read it from cache) via ModelDownloader.
        self.init_net, self.predict_net, self.value_info = mf.get_c2_model(model_name)
# For every model name, bind a module-level attribute "c2_<model>": prefer the
# module bundled with caffe2.python.models; fall back to downloading the model.
# NOTE: locals() at module scope is the module's globals, so assignment works here.
for model in models:
    try:
        locals()["c2_" + model] = importlib.import_module("caffe2.python.models." + model)
    except ImportError:
        locals()["c2_" + model] = Model(model)
# squeezenet
def relay_squeezenet():
    """Return the hand-written Relay SqueezeNet workload (module and parameters)."""
    return squeezenet.get_workload()
| 1,497 | 29.571429 | 90 | py |
tvm | tvm-main/tests/python/frontend/tflite/test_forward.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-argument, import-outside-toplevel, inconsistent-return-statements
"""
TFLite testcases
================
This article is a test script to test TFLite operator with Relay.
"""
from __future__ import print_function
from functools import partial
from distutils.version import LooseVersion
import os
import tempfile
import typing
from packaging import version as package_version
import pytest
import numpy as np
from PIL import Image
from tflite.BuiltinOperator import BuiltinOperator
try:
import tensorflow.compat.v1 as tf
# tensorflow.python.framework.ops module itself is not part of
# TensorFlow's public API: the precise contents of that module
# may vary from one version to the next
import tensorflow.compat.v1 as ops
except ImportError:
import tensorflow as tf
import tensorflow as ops
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import nn_impl
from tensorflow.python.ops import variables
from tensorflow import raw_ops
try:
from tensorflow import lite as interpreter_wrapper
except ImportError:
from tensorflow.contrib import lite as interpreter_wrapper
import tvm
import tvm.relay.testing.tf as tf_testing
from tvm.contrib.download import download_testdata
from tvm import relay, ir
from tvm.contrib import graph_executor
from relay.utils.tag_span import _set_span, _create_span, _verify_structural_equal_with_span
#######################################################################
# Generic run functions for TVM & TFLite
# --------------------------------------
def convert_to_list(x):
    """Wrap *x* in a single-element list unless it already is a list."""
    return x if isinstance(x, list) else [x]
#######################################################################
# Get a real image for e2e testing
# --------------------------------
def get_real_image(im_height, im_width, quantized=True):
    """Download a test image and return it as a (1, H, W, 3) uint8 or float32 array."""
    repo_base = "https://github.com/dmlc/web-data/raw/main/tensorflow/models/InceptionV1/"
    img_name = "elephant-299.jpg"
    img_path = download_testdata(os.path.join(repo_base, img_name), img_name, module="data")
    image = Image.open(img_path).resize((im_height, im_width))
    pixel_dtype = "uint8" if quantized else "float32"
    pixels = np.array(image).astype(pixel_dtype)
    return np.reshape(pixels, (1, im_height, im_width, 3))
def pre_processed_image(height, width):
    """Download a test image and apply Inception-style eval preprocessing."""
    repo_base = "https://github.com/dmlc/web-data/raw/main/tensorflow/models/InceptionV1/"
    img_name = "elephant-299.jpg"
    img_path = download_testdata(os.path.join(repo_base, img_name), img_name, module="data")
    image = tf.image.decode_jpeg(tf.io.read_file(img_path), channels=3)
    with tf.name_scope("eval_image"):
        if image.dtype != tf.float32:
            image = tf.image.convert_image_dtype(image, dtype=tf.float32)
        image = tf.image.central_crop(image, central_fraction=0.875)
        # Resize the image to the specified height and width.
        image = tf.image.resize(image, [height, width], align_corners=False)
        image = tf.expand_dims(image, axis=0)
    return image
def get_real_image_object_detection(im_height, im_width):
    """Download a street-scene test image as a (1, H, W, 3) uint8 array."""
    repo_base = "https://github.com/dmlc/web-data/raw/main/gluoncv/detection/"
    img_name = "street_small.jpg"
    img_path = download_testdata(os.path.join(repo_base, img_name), img_name, module="data")
    image = Image.open(img_path).resize((im_height, im_width))
    pixels = np.array(image).astype("uint8")
    return np.reshape(pixels, (1, im_height, im_width, 3))
def vmobj_to_list(obj):
    """Recursively flatten TVM VM runtime objects into a plain Python list."""
    if isinstance(obj, tvm.nd.NDArray):
        return [obj.numpy().tolist()]
    if isinstance(obj, tvm.runtime.container.ADT):
        flattened = []
        for field in obj:
            flattened.extend(vmobj_to_list(field))
        return flattened
    if isinstance(obj, tvm.relay.backend.interpreter.ConstructorValue):
        hint = obj.constructor.name_hint
        if hint == "Cons":
            # Flatten the head and append the flattened tail behind it.
            items = vmobj_to_list(obj.fields[0])
            items.extend(vmobj_to_list(obj.fields[1]))
            return items
        if hint == "Nil":
            return []
        if "tensor_nil" in hint:
            return [0]
        if "tensor" in hint:
            return [obj.fields[0].numpy()]
        raise RuntimeError(f"Unknown object type: {hint}")
    raise RuntimeError(f"Unknown object type: {type(obj)}")
def _quantize_keras_model(
    keras_model,
    representative_data_gen,
    is_float_input=False,
    is_float_output=False,
    int_quant_dtype=tf.int8,
):
    """Quantize a Keras model through the TFLite converter (int8 or int16 scheme)."""
    converter = interpreter_wrapper.TFLiteConverter.from_keras_model(keras_model)
    # Per-dtype converter configuration: (optimizations, supported ops, io dtype).
    quant_config = {
        tf.int8: (
            [tf.lite.Optimize.OPTIMIZE_FOR_SIZE],
            [tf.lite.OpsSet.TFLITE_BUILTINS_INT8],
            tf.uint8,
        ),
        tf.int16: (
            [tf.lite.Optimize.DEFAULT],
            [tf.lite.OpsSet.EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8],
            tf.uint16,
        ),
    }
    if int_quant_dtype not in quant_config:
        raise RuntimeError(
            f"Invalid quantized dtype {int_quant_dtype}. Supported types: int8, int16."
        )
    optimizations, supported_ops, inference_dtype = quant_config[int_quant_dtype]
    converter.optimizations = optimizations
    converter.representative_dataset = representative_data_gen
    converter.target_spec.supported_ops = supported_ops
    # NOTE: If representative dataset is provided, and inference input type is not set,
    # then converter will self add quant & dequant Op accordingly.
    if not is_float_input:
        converter.inference_input_type = inference_dtype
    if not is_float_output:
        converter.inference_output_type = inference_dtype
    return converter.convert()
def run_tvm_graph(
    tflite_model_buf,
    input_data,
    input_node,
    num_output=1,
    target="llvm",
    out_names=None,
    mode="graph_executor",
    op_converter=relay.frontend.tflite.OperatorConverter,
):
    """Compile a TFLite flatbuffer with Relay and execute it on TVM.

    ``input_data`` and ``input_node`` are matched positionally (scalars are
    promoted to single-element lists).  ``mode`` selects the runtime:
    "graph_executor" (default) builds and runs the graph executor, while
    "debug"/"vm" go through ``relay.create_executor``.  Returns the outputs
    as a list of numpy arrays (VM results are flattened via vmobj_to_list).
    """
    # TFLite.Model.Model has changed to TFLite.Model from 1.14 to 2.1
    try:
        import tflite.Model

        tflite_model = tflite.Model.Model.GetRootAsModel(tflite_model_buf, 0)
    except AttributeError:
        import tflite

        tflite_model = tflite.Model.GetRootAsModel(tflite_model_buf, 0)
    except ImportError as exc:
        raise ImportError("The tflite package must be installed") from exc
    input_data = convert_to_list(input_data)
    input_node = convert_to_list(input_node)
    # Map every input name to its shape and dtype for the frontend converter.
    shape_dict = {}
    dtype_dict = {}
    for i, node in enumerate(input_node):
        shape_dict[node] = input_data[i].shape
        dtype_dict[node] = input_data[i].dtype.name
    # Convert twice — with span filling disabled and enabled — and verify both
    # modules are structurally equal, i.e. span annotation does not alter IR.
    with tvm.testing.disable_span_filling():
        mod, params = relay.frontend.from_tflite(
            tflite_model, shape_dict=shape_dict, dtype_dict=dtype_dict, op_converter=op_converter
        )
    with tvm.testing.enable_span_filling():
        mod_with_span, _ = relay.frontend.from_tflite(
            tflite_model, shape_dict=shape_dict, dtype_dict=dtype_dict, op_converter=op_converter
        )
    assert tvm.ir.structural_equal(mod["main"], mod_with_span["main"])
    if mode in ["debug", "vm"]:
        # Interpreter/VM path: bind each function parameter either to the
        # matching user input or to a bound constant from params.
        inputs = []
        for param in mod["main"].params:
            found = False
            for i, n in enumerate(input_node):
                if n == param.name_hint:
                    found = True
                    inputs.append(tvm.nd.array(input_data[i]))
                    break
            # Interpreter doesn't bind constants, so still need to find in params
            if not found:
                inputs.append(tvm.nd.array(params[param.name_hint]))
        result = relay.create_executor(mode, mod=mod, device=tvm.cpu(), target="llvm").evaluate()(
            *inputs
        )
        return vmobj_to_list(result)
    else:
        # Graph-executor path: build the module, feed inputs, run, fetch outputs.
        with tvm.transform.PassContext(opt_level=3):
            lib = relay.build(mod, target, params=params)
        dev = tvm.device(target, 0)
        m = graph_executor.GraphModule(lib["default"](dev))
        # set inputs
        for i, node in enumerate(input_node):
            m.set_input(node, tvm.nd.array(input_data[i].astype(input_data[i].dtype)))
        # execute
        m.run()
        # get outputs
        assert out_names is None or num_output == len(
            out_names
        ), f"out_names: {out_names} num_output: {num_output}"
        tvm_output_list = []
        for i in range(0, num_output):
            tvm_output = m.get_output(i)
            tvm_output_list.append(tvm_output.numpy())
        return tvm_output_list
def run_tflite_graph(tflite_model_buf, input_data):
    """Execute a TFLite flatbuffer with the reference interpreter and return all outputs."""
    input_data = convert_to_list(input_data)
    interpreter = interpreter_wrapper.Interpreter(model_content=tflite_model_buf)
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()
    # Resize every input tensor to the provided shape before allocation.
    for i, detail in enumerate(input_details):
        interpreter.resize_tensor_input(detail["index"], input_data[i].shape)
    interpreter.allocate_tensors()
    # Bind the input arrays.
    assert len(input_data) == len(input_details)
    for i, detail in enumerate(input_details):
        interpreter.set_tensor(detail["index"], input_data[i])
    # Run
    interpreter.invoke()
    # Collect every output tensor.
    return [interpreter.get_tensor(detail["index"]) for detail in output_details]
def compare_tflite_with_tvm(
    in_data: typing.List[np.ndarray],
    in_name: typing.List[str],
    input_tensors: typing.List,
    output_tensors: typing.List,
    init_global_variables: bool = False,
    out_names=None,
    quantized=False,
    input_range=None,
    mode="graph_executor",
    experimental_new_converter=False,
    fp16_quantized=False,
    int_quant_dtype=tf.uint8,
):
    """Convert the active TF session to TFLite, run TFLite and TVM, and compare.

    ``in_data``/``in_name`` describe the feed tensors positionally.  For
    quantized runs ``input_range`` maps each input name to its fp32
    (min, max) range.  Quantized comparisons allow an absolute tolerance
    of 1; everything else uses 1e-5.
    """
    in_data = convert_to_list(in_data)
    in_name = convert_to_list(in_name)
    out_names = convert_to_list(out_names)
    # Strip any ":0" tensor suffix to obtain the plain node names.
    in_node = [0] * len(in_name)
    for i, _ in enumerate(in_name):
        in_node[i] = in_name[i].split(":")[0] if ":" in in_name[i] else in_name[i]
    with tf.Session() as sess:
        if init_global_variables:
            sess.run(variables.global_variables_initializer())
        # convert to tflite model
        converter = tf.lite.TFLiteConverter.from_session(sess, input_tensors, output_tensors)
        # NOTE(review): batchmatmul unfolding is toggled based on the ranks of the
        # first two inputs only — presumably to keep a single BatchMatMul op in
        # the flatbuffer for rank<=4 operands; confirm against converter behavior.
        if len(input_tensors) > 1:
            if len(input_tensors[0].shape) <= 4 and len(input_tensors[1].shape) <= 4:
                converter._experimental_disable_batchmatmul_unfold = True
            else:
                converter._experimental_disable_batchmatmul_unfold = False
        converter.experimental_new_converter = experimental_new_converter
        if quantized:
            if int_quant_dtype == tf.int16:
                converter.optimizations = [tf.lite.Optimize.DEFAULT]
                converter.target_spec.supported_ops = [
                    tf.lite.OpsSet.EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8
                ]
            elif int_quant_dtype == tf.int8:
                converter.inference_type = tf.lite.constants.INT8
            else:
                # default to int8 quantization
                converter.inference_type = tf.lite.constants.QUANTIZED_UINT8
            input_arrays = converter.get_input_arrays()
            input_stats = {}
            # calculate the mean and quantization scale for every input tensor,
            # with respect to its fp32 input range, defined in fake_quant.
            # s = 255/(fmax-fmin); m = -fmin*s (the zero point)
            for i in input_arrays:
                try:
                    quant_scale = 255 / (input_range[i][1] - input_range[i][0])
                except ZeroDivisionError:
                    print("Min and max of the input range for tensor " + i + " can't be equal")
                mean = -input_range[i][0] * quant_scale
                input_stats[i] = (mean, quant_scale)
            converter.quantized_input_stats = input_stats
        elif fp16_quantized:
            converter.optimizations = [tf.lite.Optimize.DEFAULT]
            converter.target_spec.supported_types = [tf.float16]
        tflite_model_buffer = converter.convert()
        tflite_output = run_tflite_graph(tflite_model_buffer, in_data)
        for device in ["llvm"]:
            _ = tvm.device(device, 0)
            if not tvm.testing.device_enabled(device):
                print(f"Skip because {device} is not enabled")
                continue
            tvm_output = run_tvm_graph(
                tflite_model_buffer,
                in_data,
                in_node,
                target=device,
                num_output=len(out_names),
                out_names=out_names,
                mode=mode,
            )
            # WARNING: the results could well be random values clipped to 0 or 255 because of badly
            # tuned output range for the specific operator. While adding test ensure that we aren't
            # getting only clipped values in output tensors that still pass the assertion.
            # For reference see _test_elemwise_qnn_out_range()
            if quantized and not fp16_quantized:
                for i, _ in enumerate(tflite_output):
                    # allow absolute tolerance of 1 in the quantized results
                    tvm.testing.assert_allclose(
                        tflite_output[i],  # pylint: disable=unnecessary-list-index-lookup
                        tvm_output[i],
                        atol=1,
                        rtol=1e-5,
                    )
            else:
                for i, _ in enumerate(tflite_output):
                    tvm.testing.assert_allclose(
                        tflite_output[i],  # pylint: disable=unnecessary-list-index-lookup
                        tvm_output[i],
                        atol=1e-5,
                        rtol=1e-5,
                    )
def with_fused_activation_function(input_tensor, fn_name):
    """Apply the TFLite fused activation named `fn_name` to `input_tensor`.

    `None` or "NONE" is a pass-through; any unrecognized name raises
    AssertionError.
    """
    if fn_name in (None, "NONE"):
        return input_tensor
    # Dispatch table mapping TFLite activation names to the TF op that
    # implements them.
    activations = {
        "RELU": nn_ops.relu,
        "RELU6": nn_ops.relu6,
        "RELU_N1_TO_1": lambda t: math_ops.maximum(-1, math_ops.minimum(t, 1)),
        "TANH": math_ops.tanh,
    }
    if fn_name not in activations:
        raise AssertionError(f"Unknown fused_activation_function {fn_name}")
    return activations[fn_name](input_tensor)
def _test_split(in_shape, axis, num_splits, dtype):
    """internal split tester taking as parameters in_shape, number of tensors to split into
    and dtype (data type)"""
    np_data = np.random.uniform(-5, 5, size=in_shape).astype(dtype)
    with tf.Graph().as_default():
        in_data = array_ops.placeholder(shape=in_shape, dtype=dtype, name="in_data")
        out = array_ops.split(in_data, num_splits, axis=axis)
        # size_splits mode passes a list; the number of outputs equals its length,
        # otherwise num_splits itself is the output count.
        if isinstance(num_splits, list):
            n_outputs = len(num_splits)
        else:
            n_outputs = num_splits
        out_names = [f"out_{n}:0" for n in range(n_outputs)]
        compare_tflite_with_tvm([np_data], ["in_data"], [in_data], out, out_names=out_names)
def test_forward_split():
    """test split layer

    Exercises SPLIT/SPLIT_V over ranks 1-4, negative axes and explicit
    size_splits lists.
    """
    # rank 1
    _test_split((3,), 0, 1, "float32")
    _test_split((3,), 0, 3, "float32")
    _test_split((6,), 0, 3, "float32")
    # rank 2
    _test_split((6, 2), 0, 3, "float32")
    _test_split((2, 6), 1, 6, "float32")
    # rank 3
    # int32 split requires TF >= 1.14 (older converters reject it)
    if package_version.parse(tf.VERSION) >= package_version.parse("1.14.0"):
        _test_split((6, 2, 4), 0, 2, "int32")
    _test_split((2, 6, 4), 1, 3, "float32")
    _test_split((2, 4, 6), 2, 1, "float32")
    # rank 4
    _test_split((6, 1, 3, 5), 0, 3, "float32")
    _test_split((1, 6, 3, 5), 1, 3, "float32")
    _test_split((1, 3, 6, 5), 2, 3, "float32")
    _test_split((1, 3, 5, 6), 3, 3, "float32")
    # split along negative axis
    _test_split((6, 1, 3, 5), -4, 3, "float32")
    _test_split((1, 6, 3, 5), -3, 3, "float32")
    _test_split((1, 3, 6, 5), -2, 3, "float32")
    _test_split((1, 3, 5, 6), -1, 3, "float32")
    # size_splits split
    _test_split((6,), 0, [1, 2, 3], "float32")
    _test_split((3, 6, 4), -2, [1, 4, 1], "float32")
#######################################################################
# slice
# -----
def _test_slice(data, begin, size):
    """One iteration of SLICE: slice `data` at `begin` with extent `size`."""
    with tf.Graph().as_default():
        placeholder = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
        sliced = array_ops.slice(placeholder, begin, size)
        compare_tflite_with_tvm(data, "Placeholder:0", [placeholder], [sliced])
def test_forward_slice():
    """SLICE

    Covers rank-1/rank-3 slices and (on TF >= 1.14) size entries of -1,
    meaning "to the end of the dimension".
    """
    _test_slice(np.arange(4, dtype=np.float32).reshape((4,)), begin=[0], size=[2])
    _test_slice(np.arange(18, dtype=np.int32).reshape((3, 2, 3)), begin=[1, 0, 0], size=[1, 1, 3])
    # tflite 1.13 outputs nonsense values if size[i] == -1
    if package_version.parse(tf.VERSION) >= package_version.parse("1.14.0"):
        _test_slice(np.arange(8, dtype=np.int32).reshape((2, 4)), begin=[0, 1], size=[-1, -1])
        _test_slice(np.arange(5, dtype=np.int32).reshape((5,)), begin=[4], size=[-1])
#######################################################################
# Topk
# ----
def _test_topk(in_shape, k=1):
    """One iteration of TOPK over random float32 data of shape `in_shape`."""
    values = np.random.uniform(size=in_shape).astype("float32")
    with tf.Graph().as_default():
        placeholder = array_ops.placeholder(shape=values.shape, dtype=values.dtype)
        topk = nn_ops.top_k(placeholder, k, name="TopK")
        # Only the values output (index 0) is compared here.
        compare_tflite_with_tvm(values, "Placeholder:0", [placeholder], [topk[0]])
def test_forward_topk():
    """TOPK

    Exercises k < length, k == length on rank-1 input, and a rank-3 input.
    (The original repeated the rank-3 case twice; the duplicate added no
    coverage and is removed.)
    """
    _test_topk((3,), 1)
    _test_topk((3,), 3)
    _test_topk((3, 5, 7), 3)
#######################################################################
# Gather
# ------
def _test_gather(dshape, indices, axis, dtype, quantized=False, oob=False, wrap_idx=False):
    """One iteration of Gather.

    Parameters
    ----------
    dshape : tuple
        Shape of the data tensor gathered from (filled with random values).
    indices : list
        Gather indices; converted to int32.
    axis : int or None
        Axis to gather along. None (or 0, which is falsy) uses the op's
        default axis; tflite conversion fails for an explicit None axis.
    dtype : str
        Data dtype for the non-quantized path (quantized uses uint8 data).
    quantized : bool
        Run the quantized (uint8) variant with a fixed input range.
    oob : bool
        Indices are intentionally out of bounds; a ValueError from the
        compare step is then tolerated.
    wrap_idx : bool
        Feed the indices through a placeholder instead of embedding them
        as a graph constant.
    """
    indices = np.asarray(indices).astype("int32")
    data = np.random.uniform(1, 10, size=dshape)
    data = data.astype(np.uint8) if quantized else data.astype(dtype)
    with tf.Graph().as_default():
        if wrap_idx:
            in_name = "in_indices"
            indices_expr = array_ops.placeholder(
                shape=indices.shape, dtype=indices.dtype, name=in_name
            )
            in_tensor_name = [in_name + ":0"]
            in_indices = [indices_expr]
        else:
            # Indices are baked into the graph as a constant, so nothing
            # extra is fed at run time.
            indices_expr = indices
            indices = []
            in_tensor_name = []
            in_indices = []
        in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype, name="in_data")
        if axis:
            out = array_ops.gather(in_data, indices_expr, axis=axis)
        else:
            out = array_ops.gather(in_data, indices_expr)  # tflite conversion fails for None axis
        input_range = {"in_data": (-100, 100)} if quantized else None
        try:
            compare_tflite_with_tvm(
                [data] + indices,
                ["in_data:0"] + in_tensor_name,
                [in_data] + in_indices,
                [out],
                quantized=quantized,
                input_range=input_range,
            )
        except ValueError as exc:
            # Out-of-bounds indices are expected to fail; anything else is
            # a genuine error. (The original also had a no-op
            # `except Exception: raise` clause, removed here.)
            if not oob:
                raise exc
def test_forward_gather():
    """GATHER

    Runs every case in all four combinations of quantized x wrap_idx.

    NOTE: `wrap_idx` must be passed by keyword — `_test_gather`'s signature
    is (dshape, indices, axis, dtype, quantized, oob, wrap_idx), so the
    original positional call bound the wrap_idx flag to the `oob` parameter
    and the placeholder-indices path was never exercised.
    """
    for quantized in [False, True]:
        for wrap_idx in [False, True]:
            _test_gather((4,), [1], 0, "float32", quantized, wrap_idx=wrap_idx)
            _test_gather((4,), [1], None, "int32", quantized, wrap_idx=wrap_idx)
            _test_gather((1, 4), [0], 0, "int32", quantized, wrap_idx=wrap_idx)
            _test_gather((4,), [[[1, 0], [0, 1]]], 0, "float32", quantized, wrap_idx=wrap_idx)
            _test_gather((2, 2), [[[1, 0], [0, 1]]], 1, "int32", quantized, wrap_idx=wrap_idx)
            _test_gather((2, 2), [[[1, 0], [0, 1]]], None, "float32", quantized, wrap_idx=wrap_idx)
            _test_gather((3, 3, 3), [[[1, 0]]], 0, "int32", quantized, wrap_idx=wrap_idx)
            _test_gather((3, 3, 3), [[[1, 0]]], 2, "int32", quantized, wrap_idx=wrap_idx)
            _test_gather((4, 3, 5, 6), [[2, 1, 0, 0]], 0, "float32", quantized, wrap_idx=wrap_idx)
            _test_gather((3, 3, 3), [[[2, 1]]], -1, "int32", quantized, wrap_idx=wrap_idx)
        # Out of boundary error cannot be tested with wrapped index
        _test_gather((4,), [16], 0, "float32", quantized, oob=True)
        _test_gather((1, 3, 3), [12], 0, "int32", quantized, oob=True)
        _test_gather((1, 3, 3), [20], 1, "float32", quantized, oob=True)
        _test_gather((1, 3, 3), [20, 20], 2, "float32", quantized, oob=True)
#######################################################################
# Gather_ND
# ---------
def _test_gather_nd(data, indices):
    """One iteration of GATHER_ND with both data and indices fed as inputs."""
    with tf.Graph().as_default():
        data_ph = tf.placeholder(shape=data.shape, dtype=data.dtype, name="data")
        idx_ph = tf.placeholder(shape=indices.shape, dtype=indices.dtype, name="indices")
        gathered = tf.gather_nd(data_ph, idx_ph)
        compare_tflite_with_tvm(
            [data, indices], ["data:0", "indices:0"], [data_ph, idx_ph], [gathered]
        )
def test_forward_gather_nd():
    """GATHER_ND

    Covers rank-3 float data with full index tuples, partial indices that
    select slices, and rank-1/rank-2 edge shapes.
    """
    _test_gather_nd(
        np.array([[[1.2, 2.0], [3.1, 4.1]], [[5.1, 6.1], [7.1, 8.1]]]).astype("float32"),
        np.asarray([[0, 1], [1, 0]]).astype("int32"),
    )
    _test_gather_nd(
        np.reshape(np.arange(30), [5, 6]).astype("int32"), np.asarray([[1, 2]]).astype("int32")
    )
    _test_gather_nd(
        np.reshape(np.arange(12), [2, 3, 2]).astype("int32"),
        np.asarray([[[0, 0], [0, 1]], [[1, 0], [1, 1]]]).astype("int32"),
    )
    _test_gather_nd(
        np.reshape(np.arange(4), [4]).astype("float32"), np.asarray([1]).astype("int32")
    )
    _test_gather_nd(
        np.reshape(np.arange(4), [1, 4]).astype("float32"), np.asarray([0]).astype("int32")
    )
    _test_gather_nd(
        np.reshape(np.arange(4), [1, 4]).astype("float32"), np.asarray([0, 3]).astype("int32")
    )
#######################################################################
# StridedSlice
# ------------
def _test_stridedslice(
    ip_shape,
    begin,
    end,
    stride,
    dtype,
    begin_mask=0,
    end_mask=0,
    new_axis_mask=0,
    shrink_axis_mask=0,
    ellipsis_mask=0,
    quantized=False,
):
    """One iteration of a Stridedslice.

    Builds a strided_slice graph over random data of shape `ip_shape` and
    compares TFLite with TVM. When `quantized`, uint8 input data and a
    fixed input range are used.
    """
    # Single cast: uint8 for the quantized variant, otherwise the requested
    # dtype. (The original cast to `dtype` first and then re-cast, which
    # was redundant.)
    data = np.random.uniform(size=ip_shape).astype(np.uint8 if quantized else dtype)
    with tf.Graph().as_default():
        in_data = tf.placeholder(dtype, ip_shape, name="in_data")
        out = array_ops.strided_slice(
            in_data,
            begin,
            end,
            stride,
            begin_mask=begin_mask,
            end_mask=end_mask,
            new_axis_mask=new_axis_mask,
            shrink_axis_mask=shrink_axis_mask,
            ellipsis_mask=ellipsis_mask,
        )
        input_range = {"in_data": (-100, 100)} if quantized else None
        compare_tflite_with_tvm(
            [data], ["in_data:0"], [in_data], [out], quantized=quantized, input_range=input_range
        )
def test_forward_stridedslice():
    """test StridedSlice

    Exercises shrink_axis_mask combinations, negative begin/end/stride,
    and both quantized and float paths.
    """
    for quantized in [False, True]:
        _test_stridedslice(
            (1, 3, 3),
            [0, 0, 0],
            [3, 3, 3],
            [1, 1, 1],
            "float32",
            shrink_axis_mask=7,
            quantized=quantized,
        )
        _test_stridedslice(
            (1, 3, 3),
            [0, 0, 0],
            [3, 3, 3],
            [1, 1, 1],
            "float32",
            shrink_axis_mask=5,
            quantized=quantized,
        )
        # NOTE(review): `(2)` is the int 2, not a 1-tuple; np.random.uniform
        # accepts either, so behavior is the same — confirm intent.
        _test_stridedslice((2), [1], [1], [1], "float32", shrink_axis_mask=1, quantized=quantized)
        _test_stridedslice(
            (3, 4, 3), [1, -1, 0], [4, -5, 3], [2, -1, 1], "float32", quantized=quantized
        )
        _test_stridedslice(
            (3, 4), [1, 0], [4, 4], [1, 1], "float32", shrink_axis_mask=0, quantized=quantized
        )
        _test_stridedslice(
            (4, 4), [1, 0], [4, 4], [1, 1], "float32", shrink_axis_mask=2, quantized=quantized
        )
        _test_stridedslice(
            (3, 4), [-1, 0], [0, 3], [1, 1], "float32", shrink_axis_mask=1, quantized=quantized
        )
#######################################################################
# transpose
# ---------
def _test_forward_transpose(ishape, axes=()):
    """Compare TFLite and TVM for one TRANSPOSE; empty `axes` means the
    default (reverse-all-dims) permutation."""
    values = np.random.uniform(size=ishape).astype(np.float32)
    with tf.Graph().as_default():
        placeholder = array_ops.placeholder(shape=values.shape, dtype=values.dtype)
        if axes:
            transposed = array_ops.transpose(placeholder, axes)
        else:
            transposed = array_ops.transpose(placeholder)
        compare_tflite_with_tvm(values, "Placeholder:0", [placeholder], [transposed])
def test_forward_transpose():
    """TRANSPOSE: default and explicit permutations over ranks 2-4."""
    _test_forward_transpose((2, 2))
    _test_forward_transpose((2, 3, 4))
    _test_forward_transpose((7, 8, 8, 10))
    _test_forward_transpose((2, 3, 4), (1, 2, 0))
    _test_forward_transpose((2, 3, 4), (0, 1, 2))
    _test_forward_transpose((2, 3, 4, 5), (3, 0, 1, 2))
    _test_forward_transpose((2, 3, 4, 5), ())
#######################################################################
# Cast
# ----
def _test_cast(data, cast_dtype, use_mlir=False):
    """One iteration of CAST; `use_mlir` toggles the new (MLIR) converter."""
    with tf.Graph().as_default():
        placeholder = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
        casted = math_ops.cast(placeholder, cast_dtype)
        compare_tflite_with_tvm(
            data, "Placeholder:0", [placeholder], [casted], experimental_new_converter=use_mlir
        )
def test_forward_cast():
    """CAST: float32->int32, float32->uint8, int32->int64, with both
    converter back ends."""
    for use_mlir in [False, True]:
        _test_cast(
            np.arange(6.0, dtype=np.float32).reshape((1, 6)), cast_dtype=tf.int32, use_mlir=use_mlir
        )
        _test_cast(
            np.arange(6.0, dtype=np.float32).reshape((1, 6)), cast_dtype=tf.uint8, use_mlir=use_mlir
        )
        _test_cast(
            np.arange(6.0, dtype=np.int32).reshape((1, 6)), cast_dtype=tf.int64, use_mlir=use_mlir
        )
#######################################################################
# Batch Mat Mul
# ----
def _test_batch_matmul(
    a_shape, b_shape, dtype, out_dtype, adjoint_a=False, adjoint_b=False, quantized=False
):
    """One iteration of BATCH_MAT_MUL (BatchMatMulV3) with given shapes,
    dtypes and adjoint flags.

    Removed a stray debug `print(tf.__version__)` left in the original.
    """
    with tf.Graph().as_default():
        a = array_ops.placeholder(shape=a_shape, dtype=dtype, name="A")
        b = array_ops.placeholder(shape=b_shape, dtype=dtype, name="B")
        result = raw_ops.BatchMatMulV3(
            x=a, y=b, Tout=out_dtype, adj_x=adjoint_a, adj_y=adjoint_b, name="batchmatmul"
        )
        input_range = {"A": (-100, 100), "B": (-100, 100)} if quantized else None
        a_np = np.random.uniform(high=5.0, size=a_shape).astype(dtype)
        b_np = np.random.uniform(high=5.0, size=b_shape).astype(dtype)
        compare_tflite_with_tvm(
            [a_np, b_np],
            [a.name, b.name],
            [a, b],
            [result],
            experimental_new_converter=True,
            quantized=quantized,
            input_range=input_range,
        )
@pytest.mark.parametrize("config", [("int8", "int32", True), ("float32", "float32", False)])
def test_forward_batch_matmul(config):
    """BATCH_MAT_MUL

    `config` is (input dtype, output dtype, quantized). Covers every
    adjoint combination, broadcast batch dims and a 4-D case.
    """
    _test_batch_matmul(
        (3, 5, 4), (3, 4, 5), dtype=config[0], out_dtype=config[1], quantized=config[2]
    )
    _test_batch_matmul(
        (3, 5, 4),
        (3, 4, 5),
        dtype=config[0],
        out_dtype=config[1],
        adjoint_a=True,
        adjoint_b=True,
        quantized=config[2],
    )
    _test_batch_matmul(
        (3, 5, 4),
        (3, 5, 4),
        dtype=config[0],
        out_dtype=config[1],
        adjoint_a=True,
        adjoint_b=False,
        quantized=config[2],
    )
    _test_batch_matmul(
        (2, 3, 5, 4),
        (1, 3, 5, 4),
        dtype=config[0],
        out_dtype=config[1],
        adjoint_a=True,
        adjoint_b=False,
        quantized=config[2],
    )
    _test_batch_matmul(
        (3, 5, 4),
        (3, 5, 4),
        dtype=config[0],
        out_dtype=config[1],
        adjoint_a=False,
        adjoint_b=True,
        quantized=config[2],
    )
    _test_batch_matmul(
        (2, 3, 5, 4),
        (1, 3, 5, 4),
        dtype=config[0],
        out_dtype=config[1],
        adjoint_a=False,
        adjoint_b=True,
        quantized=config[2],
    )
    _test_batch_matmul(
        (3, 4, 5, 6), (3, 4, 6, 5), dtype=config[0], out_dtype=config[1], quantized=config[2]
    )
    # BatchMatMul doesn't support larger than 4D tensors
    # _test_batch_matmul(
    # (2, 3, 4, 5, 6), (2, 3, 4, 6, 5), dtype=config[0], out_dtype=config[1], quantized=config[2]
    # )
#######################################################################
# Tile
# ----
def _test_forward_tile(in_shape, reps, dtype):
    """Compare TFLite and TVM for one TILE with repetition vector `reps`."""
    values = np.random.uniform(-5, 5, size=in_shape).astype(dtype)
    with tf.Graph().as_default():
        placeholder = array_ops.placeholder(shape=values.shape, dtype=values.dtype)
        tiled = array_ops.tile(placeholder, reps)
        compare_tflite_with_tvm(values, "Placeholder:0", [placeholder], [tiled])
def test_forward_tile():
    """TILE: rank-1 int32 and rank-2 float32 cases."""
    _test_forward_tile((2,), (3,), "int32")
    _test_forward_tile((2, 2), (2, 3), "float32")
######################################################################
# BatchToSpaceND
# --------------
def _test_batch_to_space_nd(input_shape, block_shape, crops, dtype="int32"):
    """Compare TFLite and TVM for one BATCH_TO_SPACE_ND."""
    values = np.random.uniform(0, 5, size=input_shape).astype(dtype)
    with tf.Graph().as_default():
        placeholder = array_ops.placeholder(shape=input_shape, dtype=dtype)
        result = array_ops.batch_to_space_nd(placeholder, block_shape, crops)
        compare_tflite_with_tvm(values, "Placeholder:0", [placeholder], [result])
def test_forward_batch_to_space_nd():
    """BATCH_TO_SPACE_ND, including a case with non-zero crops."""
    # test cases: https://www.tensorflow.org/api_docs/cc/class/tensorflow/ops/batch-to-space-n-d
    _test_batch_to_space_nd(input_shape=[4, 1, 1, 1], block_shape=[2, 2], crops=[[0, 0], [0, 0]])
    _test_batch_to_space_nd(input_shape=[4, 1, 1, 3], block_shape=[2, 2], crops=[[0, 0], [0, 0]])
    _test_batch_to_space_nd(input_shape=[4, 2, 2, 1], block_shape=[2, 2], crops=[[0, 0], [0, 0]])
    _test_batch_to_space_nd(input_shape=[4, 3, 3, 1], block_shape=[2, 2], crops=[[0, 1], [0, 1]])
######################################################################
# SpaceToBatchND
# --------------
def _test_space_to_batch_nd(input_shape, block_shape, paddings, dtype="int32"):
    """Compare TFLite and TVM for one SPACE_TO_BATCH_ND."""
    values = np.random.uniform(0, 5, size=input_shape).astype(dtype)
    with tf.Graph().as_default():
        placeholder = array_ops.placeholder(shape=input_shape, dtype=dtype)
        result = array_ops.space_to_batch_nd(placeholder, block_shape, paddings)
        compare_tflite_with_tvm(values, "Placeholder:0", [placeholder], [result])
def test_forward_space_to_batch_nd():
    """SPACE_TO_BATCH_ND, including a case with non-zero paddings."""
    # test cases: https://www.tensorflow.org/api_docs/python/tf/space_to_batch_nd
    _test_space_to_batch_nd(input_shape=[1, 2, 2, 1], block_shape=[2, 2], paddings=[[0, 0], [0, 0]])
    _test_space_to_batch_nd(input_shape=[1, 2, 2, 3], block_shape=[2, 2], paddings=[[0, 0], [0, 0]])
    _test_space_to_batch_nd(input_shape=[1, 4, 4, 1], block_shape=[2, 2], paddings=[[0, 0], [0, 0]])
    _test_space_to_batch_nd(input_shape=[2, 2, 4, 1], block_shape=[2, 2], paddings=[[0, 0], [2, 0]])
#######################################################################
# Pooling
# -------
def _test_pooling_iteration(input_shape, **kwargs):
    """One iteration of pool operation with given shapes and attributes"""
    # Strictly negative, strictly decreasing input values.
    elements = np.arange(np.prod(input_shape), dtype=np.float32)
    x = -elements.reshape(input_shape) - 1
    with tf.Graph().as_default():
        placeholder = array_ops.placeholder(shape=input_shape, dtype="float32")
        pooled = nn_ops.pool(placeholder, **kwargs)
        compare_tflite_with_tvm(x, "Placeholder:0", [placeholder], [pooled])
def _test_pooling(input_shape, **kwargs):
    # Thin wrapper kept for symmetry with other testers; runs one iteration.
    _test_pooling_iteration(input_shape, **kwargs)
def test_forward_pooling():
    """Pooling

    AVG and MAX pooling over several window shapes and strides, SAME
    padding throughout.
    """
    for pool_type in ["AVG", "MAX"]:
        _test_pooling(
            input_shape=[2, 9, 10, 2],
            window_shape=[1, 1],
            padding="SAME",
            pooling_type=pool_type,
            dilation_rate=[1, 1],
            strides=[1, 1],
        )
        _test_pooling(
            input_shape=[2, 10, 9, 2],
            window_shape=[1, 1],
            padding="SAME",
            pooling_type=pool_type,
            dilation_rate=[1, 1],
            strides=[1, 1],
        )
        _test_pooling(
            input_shape=[2, 9, 10, 2],
            window_shape=[2, 1],
            padding="SAME",
            pooling_type=pool_type,
            dilation_rate=[1, 1],
            strides=[1, 1],
        )
        _test_pooling(
            input_shape=[2, 10, 9, 2],
            window_shape=[2, 3],
            padding="SAME",
            pooling_type=pool_type,
            dilation_rate=[1, 1],
            strides=[2, 1],
        )
def _test_l2_pool2d(input_shape, ksize, strides, padding, data_format, fused_func_name=None):
    """One iteration of L2_POOL_2D, expressed as sqrt(avg_pool(square(x)))
    with an optional fused activation applied on top."""
    x = np.arange(np.prod(input_shape), dtype=np.float32).reshape(input_shape) - 1
    with tf.Graph().as_default():
        in_data = tf.placeholder(dtype=tf.float32, name="input", shape=input_shape)
        # L2 pooling has no direct TF op; it is built from square/avg_pool/sqrt,
        # which TFLite fuses back into a single L2_POOL_2D.
        out = tf.sqrt(
            tf.nn.avg_pool(
                tf.square(in_data),
                ksize=ksize,
                strides=strides,
                padding=padding,
                data_format=data_format,
            )
        )
        out = with_fused_activation_function(out, fused_func_name)
        compare_tflite_with_tvm(x, "input", [in_data], [out])
def test_forward_l2_pool2d():
    """L2_POOL_2D with SAME/VALID padding and optional fused RELU/RELU6."""
    _test_l2_pool2d([1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], "SAME", "NHWC", "RELU6")
    _test_l2_pool2d([2, 9, 10, 2], [1, 1, 1, 1], [1, 1, 1, 1], "SAME", "NHWC", "RELU6")
    _test_l2_pool2d([2, 9, 10, 2], [1, 2, 1, 1], [1, 1, 1, 1], "SAME", "NHWC")
    _test_l2_pool2d([2, 9, 10, 2], [1, 2, 1, 1], [1, 1, 2, 1], "SAME", "NHWC")
    _test_l2_pool2d([1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], "VALID", "NHWC", "RELU")
    _test_l2_pool2d([2, 9, 10, 2], [1, 1, 1, 1], [1, 1, 1, 1], "VALID", "NHWC")
    _test_l2_pool2d([2, 9, 10, 2], [1, 2, 1, 1], [1, 1, 1, 1], "VALID", "NHWC")
    _test_l2_pool2d([2, 9, 10, 2], [1, 2, 1, 1], [1, 1, 2, 1], "VALID", "NHWC", "RELU6")
#######################################################################
# Convolution
# -----------
def _test_tflite2_quantized_convolution(
    input_shape, kernel_shape, filters, padding="valid", data_format=None, int_quant_dtype=tf.int8
):
    """One iteration of TFLite2 quantized convolution with given shapes and attributes"""
    data_format = "channels_last" if data_format == "NHWC" else "channels_first"
    data = np.random.uniform(0, 1, input_shape).astype("float32")
    _ = np.random.uniform(0, 1, kernel_shape).astype("float32")
    data_in = tf.keras.layers.Input(shape=data.shape[1:])
    conv = tf.keras.layers.Conv2D(
        filters=filters,
        kernel_size=(kernel_shape[0], kernel_shape[1]),
        activation=tf.nn.relu,
        padding=padding,
        data_format=data_format,
    )(data_in)
    keras_model = tf.keras.models.Model(data_in, conv)
    # To create quantized values with dynamic range of activations, needs representative dataset
    def representative_data_gen():
        for _ in range(1):
            yield [data]
    tflite_model_quant = _quantize_keras_model(
        keras_model,
        representative_data_gen,
        is_float_input=True,
        is_float_output=True,
        int_quant_dtype=int_quant_dtype,
    )
    # TFLite.Model.Model has changed to TFLite.Model from 1.14 to 2.1
    try:
        import tflite.Model
        tflite_model = tflite.Model.Model.GetRootAsModel(tflite_model_quant, 0)
    except AttributeError:
        import tflite
        tflite_model = tflite.Model.GetRootAsModel(tflite_model_quant, 0)
    except ImportError as exc:
        raise ImportError("The tflite package must be installed") from exc
    subgraph = tflite_model.Subgraphs(0)
    model_input = subgraph.InputsAsNumpy()
    # NOTE(review): this input_node value is unconditionally overwritten
    # below from the keras input name; the flatbuffer lookup appears to be
    # a dead store — confirm before removing.
    input_node = subgraph.Tensors(model_input).Name().decode("utf-8")
    tflite_output = run_tflite_graph(tflite_model_quant, data)
    # TF 2.9 changed the exported input tensor naming scheme.
    if tf.__version__ < LooseVersion("2.9"):
        input_node = data_in.name.replace(":0", "")
    else:
        input_node = "serving_default_" + data_in.name + ":0"
    tvm_output = run_tvm_graph(tflite_model_quant, data, input_node)
    tvm.testing.assert_allclose(
        np.squeeze(tvm_output[0]), np.squeeze(tflite_output[0]), rtol=1e-2, atol=1e-2
    )
def test_forward_quantized_convolution():
    """Quantized convolution

    Runs the int8 and int16 quantized paths for both NHWC and NCHW inputs.
    The original passed the typo "NCWH"; the helper only tests for
    equality with "NHWC", so behavior is unchanged by the fix.
    """
    for int_quant_dtype in [tf.int8, tf.int16]:
        _test_tflite2_quantized_convolution(
            (1, 28, 28, 1),
            (1, 1),
            12,
            data_format="NHWC",
            int_quant_dtype=int_quant_dtype,
        )
        _test_tflite2_quantized_convolution(
            (1, 1, 28, 28),
            (1, 1),
            12,
            data_format="NCHW",
            int_quant_dtype=int_quant_dtype,
        )
def test_forward_quantized_depthwise_convolution():
    """Quantized depthwise convolution for int8/int16, including a
    depth_multiplier > 1 case."""
    for int_quant_dtype in [tf.int8, tf.int16]:
        _test_tflite2_quantized_depthwise_convolution(
            [1, 8, 8, 128], [1, 1, 128, 1], [1, 1], [1, 1], "SAME", "NHWC", 1, int_quant_dtype
        )
        _test_tflite2_quantized_depthwise_convolution(
            [1, 17, 17, 12], [3, 3, 12, 1], [1, 1], [2, 2], "VALID", "NHWC", 1, int_quant_dtype
        )
        _test_tflite2_quantized_depthwise_convolution(
            [1, 24, 24, 3], [7, 7, 3, 8], [1, 1], [2, 2], "SAME", "NHWC", 8, int_quant_dtype
        )
def _test_tflite2_quantized_depthwise_convolution(
    input_shape,
    kernel_shape,
    dilations,
    strides,
    padding,
    data_format,
    depth_multiplier,
    int_quant_dtype=tf.int8,
):
    """One iteration of TFLite2 quantized depthwise convolution with given shapes and attributes"""
    data_format = "channels_last" if data_format == "NHWC" else "channels_first"
    data = np.random.uniform(0, 1, input_shape).astype("float32")
    kernel = np.random.uniform(0, 1, kernel_shape).astype("float32")
    data_in = tf.keras.layers.Input(shape=data.shape[1:])
    conv = tf.keras.layers.DepthwiseConv2D(
        kernel_size=(kernel_shape[0], kernel_shape[1]),
        strides=strides,
        padding=padding,
        data_format=data_format,
        activation="relu",
        use_bias=False,
        depth_multiplier=depth_multiplier,
    )(data_in)
    keras_model = tf.keras.models.Model(data_in, conv)
    # Install the fixed random kernel so TFLite and TVM see identical weights.
    keras_model.layers[1].set_weights([kernel])
    # To create quantized values with dynamic range of activations, needs representative dataset
    def representative_data_gen():
        for _ in range(1):
            yield [data]
    tflite_model_quant = _quantize_keras_model(
        keras_model,
        representative_data_gen,
        is_float_input=True,
        is_float_output=True,
        int_quant_dtype=int_quant_dtype,
    )
    # TFLite.Model.Model has changed to TFLite.Model from 1.14 to 2.1
    try:
        import tflite.Model
        tflite_model = tflite.Model.Model.GetRootAsModel(tflite_model_quant, 0)
    except AttributeError:
        import tflite
        tflite_model = tflite.Model.GetRootAsModel(tflite_model_quant, 0)
    except ImportError as exc:
        raise ImportError("The tflite package must be installed") from exc
    # Resolve the model's input tensor name from the flatbuffer itself.
    subgraph = tflite_model.Subgraphs(0)
    model_input = subgraph.InputsAsNumpy()
    input_node = subgraph.Tensors(model_input).Name().decode("utf-8")
    tflite_output = run_tflite_graph(tflite_model_quant, data)
    tvm_output = run_tvm_graph(tflite_model_quant, data, input_node)
    # Loose tolerance: int8/int16 quantization error dominates.
    tvm.testing.assert_allclose(
        np.squeeze(tvm_output[0]), np.squeeze(tflite_output[0]), rtol=1e-2, atol=1e-2
    )
def _test_convolution(
    tensor_in_sizes,
    filter_in_sizes,
    dilations,
    strides,
    padding,
    data_format,
    is_depthwise=False,
    quantized=False,
    fp16_quantized=False,
):
    """One iteration of convolution with given shapes and attributes.

    Builds either a regular or a depthwise conv2d. For the uint8-quantized
    path the graph is rebuilt around fake_quant nodes; the depthwise and
    regular quantized branches below are near-duplicates that differ only
    in the conv op used. Statement order (rebuilding `out` after the first
    conv) is load-bearing, so it is documented rather than refactored.
    """
    total_size_1 = 1
    total_size_2 = 1
    for s in tensor_in_sizes:
        total_size_1 *= s
    for s in filter_in_sizes:
        total_size_2 *= s
    # Initializes the input tensor with array containing incrementing
    # numbers from 1.
    if quantized:
        data_array = np.random.uniform(0, 255, tensor_in_sizes).astype("uint8")
        filter_array = np.random.uniform(0, 255, filter_in_sizes).astype("uint8")
    else:
        data_array = [f * 1.0 for f in range(1, total_size_1 + 1)]
        filter_array = [f * 1.0 for f in range(1, total_size_2 + 1)]
    with tf.Graph().as_default():
        in_data = array_ops.placeholder(shape=tensor_in_sizes, dtype="float32", name="in_data")
        in_filter = constant_op.constant(
            filter_array, shape=filter_in_sizes, dtype="float32", name="in_filter"
        )
        # Expand the 2-D strides/dilations to the NHWC 4-tuple form.
        strides = [1] + strides + [1]
        dilations = [1] + dilations + [1]
        if is_depthwise:
            out = nn_ops.depthwise_conv2d_native(
                in_data, in_filter, strides=strides, padding=padding, data_format=data_format
            )
        else:
            out = nn_ops.conv2d(
                in_data, in_filter, strides=strides, padding=padding, data_format=data_format
            )
        if quantized and not fp16_quantized:
            if is_depthwise:
                # Quantized the inputs and feed them to the convolution
                inq_data = tf.quantization.fake_quant_with_min_max_args(
                    in_data, min=-100, max=100, name="inq_data"
                )
                inq_filter = tf.quantization.fake_quant_with_min_max_args(
                    in_filter, min=-100, max=100, name="inq_filter"
                )
                # Rebuild the conv over the fake-quantized tensors,
                # replacing the float `out` created above.
                out = nn_ops.depthwise_conv2d_native(
                    inq_data, inq_filter, strides=strides, padding=padding, data_format=data_format
                )
                out = tf.quantization.fake_quant_with_min_max_args(
                    out, min=-200, max=200, name="out"
                )
                # Set the input quantization range
                input_range = {"in_data": (-100, 100)} if quantized else None
                # Compare
                compare_tflite_with_tvm(
                    data_array,
                    "in_data",
                    [in_data],
                    [out],
                    quantized=quantized,
                    input_range=input_range,
                    experimental_new_converter=True,
                )
            else:
                # Quantized the inputs and feed them to the convolution
                inq_data = tf.quantization.fake_quant_with_min_max_args(
                    in_data, min=-100, max=100, name="inq_data"
                )
                inq_filter = tf.quantization.fake_quant_with_min_max_args(
                    in_filter, min=-100, max=100, name="inq_filter"
                )
                # Rebuild the conv over the fake-quantized tensors,
                # replacing the float `out` created above.
                out = nn_ops.conv2d(
                    inq_data, inq_filter, strides=strides, padding=padding, data_format=data_format
                )
                out = tf.quantization.fake_quant_with_min_max_args(
                    out, min=-200, max=200, name="out"
                )
                # Set the input quantization range
                input_range = {"in_data": (-100, 100)} if quantized else None
                # Compare
                compare_tflite_with_tvm(
                    data_array,
                    "in_data",
                    [in_data],
                    [out],
                    quantized=quantized,
                    input_range=input_range,
                    experimental_new_converter=True,
                )
        else:
            data_array = np.reshape(data_array, tensor_in_sizes).astype("float32")
            compare_tflite_with_tvm(data_array, "in_data", [in_data], [out])
def test_forward_convolution():
    """Convolution

    Regular and depthwise conv2d over all four combinations of
    quantized x fp16_quantized, plus TFLite2-only quantized cases.
    """
    for quantized in [False, True]:
        for fp16_quantized in [False, True]:
            _test_convolution(
                [4, 8, 8, 176],
                [1, 1, 176, 32],
                [1, 1],
                [1, 1],
                "SAME",
                "NHWC",
                quantized=quantized,
                fp16_quantized=fp16_quantized,
            )
            _test_convolution(
                [4, 17, 17, 19],
                [3, 3, 19, 19],
                [1, 1],
                [2, 2],
                "VALID",
                "NHWC",
                quantized=quantized,
                fp16_quantized=fp16_quantized,
            )
            _test_convolution(
                [4, 17, 17, 124],
                [1, 1, 124, 19],
                [1, 1],
                [1, 1],
                "SAME",
                "NHWC",
                quantized=quantized,
                fp16_quantized=fp16_quantized,
            )
            _test_convolution(
                [4, 17, 17, 12],
                [3, 3, 12, 32],
                [1, 1],
                [2, 2],
                "VALID",
                "NHWC",
                quantized=quantized,
                fp16_quantized=fp16_quantized,
            )
            # depthwise convolution
            _test_convolution(
                [4, 8, 8, 176],
                [1, 1, 176, 1],
                [1, 1],
                [1, 1],
                "SAME",
                "NHWC",
                True,
                quantized=quantized,
                fp16_quantized=fp16_quantized,
            )
            _test_convolution(
                [4, 17, 17, 19],
                [3, 3, 19, 1],
                [1, 1],
                [2, 2],
                "VALID",
                "NHWC",
                True,
                quantized=quantized,
                fp16_quantized=fp16_quantized,
            )
            _test_convolution(
                [4, 17, 17, 124],
                [1, 1, 124, 1],
                [1, 1],
                [1, 1],
                "SAME",
                "NHWC",
                True,
                quantized=quantized,
                fp16_quantized=fp16_quantized,
            )
            _test_convolution(
                [4, 17, 17, 12],
                [3, 3, 12, 1],
                [1, 1],
                [2, 2],
                "VALID",
                "NHWC",
                True,
                quantized=quantized,
                fp16_quantized=fp16_quantized,
            )
            # depth_multiplier = 2
            _test_convolution(
                [4, 17, 17, 12],
                [3, 3, 12, 2],
                [1, 1],
                [2, 2],
                "VALID",
                "NHWC",
                True,
                quantized=quantized,
                fp16_quantized=fp16_quantized,
            )
            # depthwise convolution with single input channel
            _test_convolution(
                [1, 76, 64, 1],
                [9, 5, 1, 96],
                [1, 1],
                [1, 1],
                "SAME",
                "NHWC",
                True,
                quantized=quantized,
                fp16_quantized=fp16_quantized,
            )
    # TFLite2 quantized convolution testing
    if package_version.parse(tf.VERSION) >= package_version.parse("2.3.0"):
        _test_convolution(
            [1, 8, 8, 176], [1, 1, 176, 32], [1, 1], [1, 1], "SAME", "NHWC", quantized=True
        )
        _test_convolution(
            [1, 17, 17, 12], [3, 3, 12, 32], [1, 1], [2, 2], "VALID", "NHWC", quantized=True
        )
        _test_convolution(
            [1, 17, 17, 19], [3, 3, 19, 19], [1, 1], [2, 2], "VALID", "NHWC", quantized=True
        )
        _test_convolution(
            [1, 17, 17, 124], [1, 1, 124, 19], [1, 1], [1, 1], "SAME", "NHWC", quantized=True
        )
#######################################################################
# Transpose Convolution
# ---------------------
def _test_transpose_conv(
    tensor_in_sizes,
    filter_in_sizes,
    output_shape,
    strides,
    padding,
    quantized=False,
    fp16_quantized=False,
):
    """One iteration of transpose convolution with given shapes and attributes.

    The quantized (uint8) path wraps data and filter in fake_quant nodes;
    the float path optionally applies fp16 post-training quantization via
    compare_tflite_with_tvm.
    """
    total_size_1 = 1
    total_size_2 = 1
    for s in tensor_in_sizes:
        total_size_1 *= s
    for s in filter_in_sizes:
        total_size_2 *= s
    with tf.Graph().as_default():
        if quantized and not fp16_quantized:
            # Initializes the input tensor with array containing incrementing
            # numbers from 1.
            # NOTE(review): max(f, 255) yields 255 for every f <= 255, so the
            # values are NOT incrementing as the comment says — this looks
            # like it was meant to be min(f, 255); confirm before changing,
            # as it alters the test data.
            data_array = [max(f, 255) for f in range(1, total_size_1 + 1)]
            filter_array = [max(f, 255) for f in range(1, total_size_2 + 1)]
            data_array = np.reshape(data_array, tensor_in_sizes).astype("uint8")
            filter_array = np.reshape(filter_array, filter_in_sizes).astype("uint8")
            in_data = array_ops.placeholder(shape=tensor_in_sizes, dtype="float32", name="in_data")
            inq_data = tf.quantization.fake_quant_with_min_max_args(
                in_data, min=-100, max=100, name="q_data"
            )
            input_range = {"q_data": (-100, 100)}
            in_filter = constant_op.constant(
                filter_array, shape=filter_in_sizes, dtype="float32", name="in_filter"
            )
            inq_filter = tf.quantization.fake_quant_with_min_max_args(
                in_filter, min=-100, max=100, name="q_filter"
            )
            strides = [1] + strides + [1]
            out = nn_ops.conv2d_transpose(
                inq_data, inq_filter, output_shape=output_shape, strides=strides, padding=padding
            )
            out = tf.quantization.fake_quant_with_min_max_args(out, min=-100, max=100, name="out")
            compare_tflite_with_tvm(
                [data_array], ["q_data"], [inq_data], [out], quantized=True, input_range=input_range
            )
        else:
            # Initializes the input tensor with array containing incrementing
            # numbers from 1.
            data_array = [f * 1.0 for f in range(1, total_size_1 + 1)]
            filter_array = [f * 1.0 for f in range(1, total_size_2 + 1)]
            in_data = array_ops.placeholder(shape=tensor_in_sizes, dtype="float32", name="in_data")
            in_filter = constant_op.constant(
                filter_array, shape=filter_in_sizes, dtype="float32", name="in_filter"
            )
            strides = [1] + strides + [1]
            # in_filter layout is HWOI
            out = nn_ops.conv2d_transpose(
                in_data, in_filter, output_shape=output_shape, strides=strides, padding=padding
            )
            data_array = np.reshape(data_array, tensor_in_sizes).astype("float32")
            compare_tflite_with_tvm(
                [data_array], ["in_data"], [in_data], [out], fp16_quantized=fp16_quantized
            )
def test_forward_transpose_conv():
    """Transpose convolution

    Sweeps kernel sizes 1x1/2x2/3x3, VALID and SAME padding, and strides
    (1,1)/(2,1)/(2,2), each under all four quantized x fp16_quantized
    combinations.
    """
    for quantized in [True, False]:
        for fp16_quantized in [True, False]:
            # odd size input, padding VALID
            _test_transpose_conv(
                [1, 5, 6, 16],
                [2, 2, 16, 16],
                [1, 10, 12, 16],
                [2, 2],
                "VALID",
                quantized,
                fp16_quantized,
            )
            # odd size input, padding SAME
            _test_transpose_conv(
                [1, 5, 6, 16],
                [2, 2, 16, 16],
                [1, 10, 12, 16],
                [2, 2],
                "SAME",
                quantized,
                fp16_quantized,
            )
            # kernel 3x3, padding VALID
            _test_transpose_conv(
                [4, 32, 32, 16],
                [3, 3, 5, 16],
                [4, 34, 34, 5],
                [1, 1],
                "VALID",
                quantized,
                fp16_quantized,
            )
            _test_transpose_conv(
                [1, 32, 32, 16],
                [3, 3, 5, 16],
                [1, 65, 65, 5],
                [2, 2],
                "VALID",
                quantized,
                fp16_quantized,
            )
            _test_transpose_conv(
                [1, 32, 32, 16],
                [3, 3, 5, 16],
                [1, 65, 34, 5],
                [2, 1],
                "VALID",
                quantized,
                fp16_quantized,
            )
            # kernel 3x3, padding SAME
            _test_transpose_conv(
                [4, 32, 32, 16],
                [3, 3, 5, 16],
                [4, 32, 32, 5],
                [1, 1],
                "SAME",
                quantized,
                fp16_quantized,
            )
            _test_transpose_conv(
                [1, 32, 32, 16],
                [3, 3, 5, 16],
                [1, 64, 64, 5],
                [2, 2],
                "SAME",
                quantized,
                fp16_quantized,
            )
            _test_transpose_conv(
                [1, 32, 32, 16],
                [3, 3, 5, 16],
                [1, 64, 32, 5],
                [2, 1],
                "SAME",
                quantized,
                fp16_quantized,
            )
            # kernel 2x2, padding VALID
            _test_transpose_conv(
                [4, 32, 32, 16],
                [2, 2, 5, 16],
                [4, 33, 33, 5],
                [1, 1],
                "VALID",
                quantized,
                fp16_quantized,
            )
            _test_transpose_conv(
                [1, 32, 32, 16],
                [2, 2, 5, 16],
                [1, 64, 64, 5],
                [2, 2],
                "VALID",
                quantized,
                fp16_quantized,
            )
            _test_transpose_conv(
                [1, 32, 32, 16],
                [2, 2, 5, 16],
                [1, 64, 33, 5],
                [2, 1],
                "VALID",
                quantized,
                fp16_quantized,
            )
            # kernel 2x2, padding SAME
            _test_transpose_conv(
                [4, 32, 32, 16],
                [2, 2, 5, 16],
                [4, 32, 32, 5],
                [1, 1],
                "SAME",
                quantized,
                fp16_quantized,
            )
            _test_transpose_conv(
                [1, 32, 32, 16],
                [2, 2, 5, 16],
                [1, 64, 64, 5],
                [2, 2],
                "SAME",
                quantized,
                fp16_quantized,
            )
            _test_transpose_conv(
                [1, 32, 32, 16],
                [2, 2, 5, 16],
                [1, 64, 32, 5],
                [2, 1],
                "SAME",
                quantized,
                fp16_quantized,
            )
            # kernel 1x1, padding VALID
            _test_transpose_conv(
                [4, 32, 32, 16],
                [1, 1, 5, 16],
                [4, 32, 32, 5],
                [1, 1],
                "VALID",
                quantized,
                fp16_quantized,
            )
            _test_transpose_conv(
                [1, 32, 32, 16],
                [1, 1, 5, 16],
                [1, 63, 63, 5],
                [2, 2],
                "VALID",
                quantized,
                fp16_quantized,
            )
            _test_transpose_conv(
                [1, 32, 32, 16],
                [1, 1, 5, 16],
                [1, 63, 32, 5],
                [2, 1],
                "VALID",
                quantized,
                fp16_quantized,
            )
            # kernel 1x1, padding SAME
            _test_transpose_conv(
                [4, 32, 32, 16],
                [1, 1, 5, 16],
                [4, 32, 32, 5],
                [1, 1],
                "SAME",
                quantized,
                fp16_quantized,
            )
            _test_transpose_conv(
                [1, 32, 32, 16],
                [1, 1, 5, 16],
                [1, 63, 63, 5],
                [2, 2],
                "SAME",
                quantized,
                fp16_quantized,
            )
            _test_transpose_conv(
                [1, 32, 32, 16],
                [1, 1, 5, 16],
                [1, 63, 32, 5],
                [2, 1],
                "SAME",
                quantized,
                fp16_quantized,
            )
def _test_tflite2_quantized_transpose_conv(
    input_shape,
    kernel_shape,
    filters,
    padding="valid",
    strides=(1, 1),
    data_format=None,
    int_quant_dtype=tf.int8,
):
    """One iteration of TFLite2 quantized transpose conv with given shapes and attributes.

    Builds a single-layer Keras ``Conv2DTranspose`` model, quantizes it with a
    one-sample representative dataset, and compares TFLite's output against
    TVM's with a loose (1e-2) tolerance appropriate for quantized math.

    Parameters
    ----------
    input_shape : sequence of int
        NHWC shape of the float input tensor.
    kernel_shape : sequence of int
        Only the first two entries (kernel_h, kernel_w) are used.
    filters : int
        Number of output channels of the transpose convolution.
    padding : str
        Keras padding mode, "valid" or "same".
    strides : tuple of int
        Transpose-convolution strides.
    data_format : str or None
        "NHWC" maps to Keras "channels_last", anything else to "channels_first".
    int_quant_dtype : tf.DType
        Integer type used for quantization (tf.int8 or tf.int16).
    """
    data_format = "channels_last" if data_format == "NHWC" else "channels_first"
    data = np.random.uniform(0, 1, input_shape).astype("float32")
    data_in = tf.keras.layers.Input(shape=data.shape[1:], batch_size=1)
    transpose_conv = tf.keras.layers.Conv2DTranspose(
        filters=filters,
        kernel_size=(kernel_shape[0], kernel_shape[1]),
        padding=padding,
        strides=strides,
        use_bias=True,
    )(data_in)
    keras_model = tf.keras.models.Model(data_in, transpose_conv)

    # To create quantized values with dynamic range of activations, needs representative dataset
    def representative_data_gen():
        for _ in range(1):
            yield [data]

    tflite_model_quant = _quantize_keras_model(
        keras_model,
        representative_data_gen,
        is_float_input=True,
        is_float_output=True,
        int_quant_dtype=int_quant_dtype,
    )
    # Parse the flatbuffer only to validate it and to preserve the historical
    # ImportError when the tflite package is missing; the parsed model itself
    # was never used (the input name below is derived from the Keras layer).
    # TFLite.Model.Model has changed to TFLite.Model from 1.14 to 2.1
    try:
        import tflite.Model

        tflite.Model.Model.GetRootAsModel(tflite_model_quant, 0)
    except AttributeError:
        import tflite

        tflite.Model.GetRootAsModel(tflite_model_quant, 0)
    except ImportError as exc:
        raise ImportError("The tflite package must be installed") from exc

    tflite_output = run_tflite_graph(tflite_model_quant, data)
    # Input-tensor naming changed with the TF 2.9 converter.
    if tf.__version__ < LooseVersion("2.9"):
        input_node = data_in.name.replace(":0", "")
    else:
        input_node = "serving_default_" + data_in.name + ":0"
    tvm_output = run_tvm_graph(tflite_model_quant, data, input_node)
    tvm.testing.assert_allclose(
        np.squeeze(tvm_output[0]), np.squeeze(tflite_output[0]), rtol=1e-2, atol=1e-2
    )
def test_forward_quantized_transpose_conv():
    """Quantized transpose convolution for both int8 and int16 quantization."""
    for dtype in (tf.int8, tf.int16):
        _test_tflite2_quantized_transpose_conv(
            (1, 1, 5, 64),
            (3, 3),
            64,
            padding="same",
            strides=(1, 2),
            data_format="NHWC",
            int_quant_dtype=dtype,
        )
#######################################################################
# Reshape
# -------
def _test_reshape(data, out_shape, wrap_shape, quantized=False):
    """One iteration of reshape operation with given data and out shape.

    Parameters
    ----------
    data : np.ndarray
        Input tensor to reshape.
    out_shape : list of int
        Target shape; may contain a -1 wildcard dimension.
    wrap_shape : bool
        When True the target shape is fed through a second graph input
        ("Newshape" placeholder) instead of being baked in as a constant.
    quantized : bool
        When True the input is fake-quantized and compared on the quantized
        path.
    """
    if quantized:
        with tf.Graph().as_default():
            in_data = array_ops.placeholder(shape=data.shape, dtype="float32", name="in")
            inq_data = tf.quantization.fake_quant_with_min_max_args(
                in_data, min=-100, max=100, name="inq_0"
            )
            input_range = {"inq_0": (-100, 100)}
            # For the dynamic case the shape becomes an int32 array fed via a
            # placeholder; otherwise the list is used directly as a constant.
            out_shape = out_shape if not wrap_shape else np.array(out_shape, dtype=np.int32)
            in_shape = (
                out_shape
                if not wrap_shape
                else array_ops.placeholder(
                    shape=out_shape.shape, dtype=out_shape.dtype, name="Newshape"
                )
            )
            out = array_ops.reshape(inq_data, in_shape)
            out = tf.quantization.fake_quant_with_min_max_args(out, min=-200, max=200, name="out")
            compare_tflite_with_tvm(
                [data, out_shape] if wrap_shape else [data],
                ["inq_0:0", "Newshape:0"] if wrap_shape else ["inq_0:0"],
                [inq_data, in_shape] if wrap_shape else [inq_data],
                [out],
                quantized=True,
                input_range=input_range,
                mode="vm",
            )
    else:
        # Test with tensor and constant
        with tf.Graph().as_default():
            in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
            out_shape = out_shape if not wrap_shape else np.array(out_shape, dtype=np.int32)
            in_shape = (
                out_shape
                if not wrap_shape
                else array_ops.placeholder(
                    shape=out_shape.shape, dtype=out_shape.dtype, name="Newshape"
                )
            )
            out = array_ops.reshape(in_data, in_shape)
            compare_tflite_with_tvm(
                [data, out_shape] if wrap_shape else [data],
                ["Placeholder:0", "Newshape:0"] if wrap_shape else ["Placeholder:0"],
                [in_data, in_shape] if wrap_shape else [in_data],
                [out],
                mode="vm",
            )
def test_forward_reshape():
    """Reshape with both constant and tensor-wrapped target shapes."""
    for wrapped in (True, False):
        _test_reshape(np.arange(6.0, dtype=np.float32), [2, 3], wrapped)
        _test_reshape(np.arange(6), [-1, 2], wrapped)
        _test_reshape(np.arange(6), [3, -1], wrapped)
        _test_reshape(np.arange(6), [-1], wrapped)
    # Quantized path only supports the constant-shape form.
    _test_reshape(np.arange(6, dtype=np.uint8), [2, 3], False, True)
    _test_reshape(np.arange(6, dtype=np.uint8), [-1, 2], False, True)
#######################################################################
# Resize
# ------
def _test_resize(
    tf_resize_op, images_data, size_data, align_corners, half_pixel_centers, quantized=False
):
    """One iteration of Resize.

    Parameters
    ----------
    tf_resize_op : callable
        tf.image.resize_bilinear or tf.image.resize_nearest_neighbor.
    images_data : np.ndarray
        NHWC batch of images to resize.
    size_data : np.ndarray
        Target (height, width), fed as a graph constant.
    align_corners, half_pixel_centers : bool
        Coordinate-transformation flags forwarded to the TF op.
    quantized : bool
        When True input and output are fake-quantized to the range (-3, 2).
    """
    # Test with tensor and constant
    with tf.Graph().as_default():
        images_tensor = array_ops.placeholder(shape=images_data.shape, dtype="float32", name="in")
        size = ops.convert_to_tensor(size_data, dtype=size_data.dtype)
        if quantized:
            images_tensor_q = tf.quantization.fake_quant_with_min_max_args(
                images_tensor, min=-3, max=2, name="in"
            )
            input_range = {"in": (-3, 2)}
            out_tensor = tf_resize_op(
                images=images_tensor_q,
                size=size,
                align_corners=align_corners,
                half_pixel_centers=half_pixel_centers,
            )
            out_tensor = tf.quantization.fake_quant_with_min_max_args(
                out_tensor, min=-3, max=2, name="out_tensor"
            )
            compare_tflite_with_tvm(
                [images_data],
                ["in:0"],
                [images_tensor],
                [out_tensor],
                quantized=True,
                input_range=input_range,
            )
        else:
            out_tensor = tf_resize_op(
                images=images_tensor,
                size=size,
                align_corners=align_corners,
                half_pixel_centers=half_pixel_centers,
            )
            compare_tflite_with_tvm([images_data], ["in:0"], [images_tensor], [out_tensor])
def test_all_resize():
    """Resize"""
    images_data = np.random.uniform(0, 255, (1, 16, 16, 3))
    images_float = images_data.astype(np.float32)
    images_uint8 = images_data.astype(np.uint8)
    size_data = np.array([8, 8]).astype("int32")
    ### RESIZE_BILINEAR
    # (images, align_corners, half_pixel_centers, quantized)
    bilinear_cases = (
        (images_float, False, False, False),
        (images_float, True, False, False),
        (images_uint8, False, False, True),
        (images_uint8, True, False, True),
        (images_uint8, False, True, True),
    )
    for images, align, half_pixel, quant in bilinear_cases:
        _test_resize(
            tf.image.resize_bilinear,
            images,
            size_data,
            align_corners=align,
            half_pixel_centers=half_pixel,
            quantized=quant,
        )
    ### RESIZE_NEAREST_NEIGHBOR (was added in v1.13)
    # According to topi resize.h
    # Align corners not supported for nearest neighbour
    if "RESIZE_NEAREST_NEIGHBOR" in dir(BuiltinOperator()):
        for align, half_pixel in ((False, False), (True, False), (False, True)):
            _test_resize(
                tf.image.resize_nearest_neighbor,
                images_data_float32 if False else images_float,
                size_data,
                align_corners=align,
                half_pixel_centers=half_pixel,
            )
#######################################################################
# Range
# -----
def _test_range(start, limit, delta):
    """One iteration of tf.range with scalar placeholder inputs."""
    # tflite 1.13 convert method does not accept empty shapes
    if package_version.parse(tf.VERSION) < package_version.parse("1.14.0"):
        return
    tf.reset_default_graph()
    with tf.Graph().as_default():
        start_scalar = tf.placeholder(dtype=start.dtype, shape=(), name="start")
        limit_scalar = tf.placeholder(dtype=limit.dtype, shape=(), name="limit")
        delta_scalar = tf.placeholder(dtype=delta.dtype, shape=(), name="delta")
        out = tf.range(start_scalar, limit_scalar, delta_scalar, name="range")
        compare_tflite_with_tvm(
            [start, limit, delta],
            ["start", "limit", "delta"],
            [start_scalar, limit_scalar, delta_scalar],
            [out],
            mode="vm",
            quantized=False,
        )
def _test_range_default():
    """tf.range with a default delta and with start acting as the limit."""
    # tflite 1.13 convert method does not accept empty shapes
    if package_version.parse(tf.VERSION) < package_version.parse("1.14.0"):
        return
    tf.reset_default_graph()
    with tf.Graph().as_default():
        scalar_inputs = [
            tf.placeholder(dtype=tf.int32, shape=(), name="p1"),
            tf.placeholder(dtype=tf.int32, shape=(), name="p2"),
        ]
        outputs = [
            # delta defaults to 1
            tf.range(start=scalar_inputs[0], limit=scalar_inputs[1]),
            # single argument: start is treated as limit, range begins at 0
            tf.range(start=scalar_inputs[1]),
        ]
        compare_tflite_with_tvm(
            [np.int32(1), np.int32(18)], ["p1", "p2"], scalar_inputs, outputs, mode="vm"
        )
def test_forward_range():
    """Range with int and float start/limit/delta combinations."""
    cases = (
        (np.int32(1), np.int32(18), np.int32(3)),
        (np.int32(1), np.int32(18), np.float32(3.1)),  # increment is of type float
        (np.float32(1.0), np.int32(18), np.int32(3.1)),  # start is of type float
    )
    for start, limit, delta in cases:
        _test_range(start, limit, delta)
    _test_range_default()
#######################################################################
# Shape
# -----
def _test_shape(dtype):
    """Shape (with the requested out_type) of a dynamically produced tensor."""
    # tflite 1.13 convert method does not accept empty shapes
    if package_version.parse(tf.VERSION) < package_version.parse("1.14.0"):
        return
    tf.reset_default_graph()
    with tf.Graph().as_default():
        range_args = np.array([1, 18, 3], dtype=np.int32)
        start = tf.placeholder(dtype=tf.int32, shape=[], name="start")
        limit = tf.placeholder(dtype=tf.int32, shape=[], name="limit")
        delta = tf.placeholder(dtype=tf.int32, shape=[], name="delta")
        tf_range = tf.range(start, limit, delta, tf.int32, name="range")
        # Add a constant so the shape result feeds a downstream op.
        out = tf.add(tf.shape(tf_range, out_type=dtype), tf.constant([1], dtype=dtype))
        compare_tflite_with_tvm(
            list(np.nditer(range_args)),
            ["start", "limit", "delta"],
            [start, limit, delta],
            [out],
            mode="vm",
        )
def test_forward_shape():
    """Shape with both supported output dtypes."""
    for dtype in (tf.int32, tf.int64):
        _test_shape(dtype)
#######################################################################
# Concatenation
# -------------
def _test_concatenation(data, axis):
    """One iteration of concatenation of every tensor in *data* over *axis*."""
    assert len(data) >= 1
    with tf.Graph().as_default():
        placeholders = []
        for idx, tensor in enumerate(data):
            placeholders.append(
                array_ops.placeholder(shape=tensor.shape, dtype=tensor.dtype, name=f"in_{idx}")
            )
        out = array_ops.concat(placeholders, axis)
        input_names = [f"in_{idx}:0" for idx in range(len(data))]
        compare_tflite_with_tvm(data, input_names, placeholders, [out])
def test_forward_concatenation():
    """Concatenation of 2, 2 and 3 tensors of various ranks."""
    _test_concatenation([np.arange(6).reshape((1, 2, 1, 3)) for _ in range(2)], 1)
    _test_concatenation([np.arange(6).reshape((3, 2)) for _ in range(2)], 1)
    _test_concatenation([np.arange(6).reshape((2, 1, 1, 3)) for _ in range(3)], 1)
#######################################################################
# Unary elemwise
# --------------
def _test_unary_elemwise(math_op, data, quantized, quant_range=(-6, 6), int_quant_dtype=tf.int8):
    """One iteration of unary elemwise.

    Parameters
    ----------
    math_op : callable
        Unary TF math op applied to the input tensor.
    data : np.ndarray
        Input values.
    quantized : bool
        When True the op is wrapped in fake-quant nodes on both sides.
    quant_range : (min, max)
        Float range used for both the input and output fake-quant nodes.
    int_quant_dtype : tf.DType
        Integer dtype forwarded to the converter on the quantized path only.
    """
    if quantized:
        with tf.Graph().as_default():
            quant_min, quant_max = quant_range
            in_data = array_ops.placeholder(shape=data.shape, dtype="float32", name="in_0")
            inq_data = tf.quantization.fake_quant_with_min_max_args(
                in_data, min=quant_min, max=quant_max, name="inq_0"
            )
            input_range = {"inq_0": (quant_min, quant_max)}
            out = math_op(inq_data)
            out = tf.quantization.fake_quant_with_min_max_args(
                out, min=quant_min, max=quant_max, name="out"
            )
            compare_tflite_with_tvm(
                data,
                "inq_0:0",
                [inq_data],
                [out],
                quantized=True,
                input_range=input_range,
                experimental_new_converter=True,
                int_quant_dtype=int_quant_dtype,
            )
    else:
        with tf.Graph().as_default():
            in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype, name="in")
            out = math_op(in_data)
            compare_tflite_with_tvm(data, ["in:0"], [in_data], [out])
def _unary_elewise_create_model(math_op, data, offset=0, int_quant_dtype=tf.int8):
    """Build and fully integer-quantize a one-op TFLite model around *math_op*.

    Parameters
    ----------
    math_op : callable
        TF op applied to the single float input inside a tf.function.
    data : np.ndarray
        Sample input; its shape sizes both the model input and the random
        representative dataset.
    offset : float
        Shifts the representative data range to [-offset, 2 - offset).
    int_quant_dtype : tf.DType
        tf.int8/tf.uint8 for 8-bit quantization, tf.int16/tf.uint16 for the
        experimental 16-bit-activations path.

    Returns
    -------
    bytes
        The converted, quantized TFLite flatbuffer.
    """

    class Model(tf.Module):
        @tf.function
        def tf_function(self, x):
            op = math_op(x)
            return op

    # Validate the quantization dtype up front. (The original per-branch
    # string assignments were dead code; only the validation effect is kept.)
    if int_quant_dtype not in (tf.int8, tf.uint8, tf.int16, tf.uint16):
        raise Exception(f"Unsupported dtype '{int_quant_dtype}' for unary elementwise test.")

    model = Model()

    # Save the model
    export_dir = tempfile.gettempdir() + "/tf_model"
    tf.saved_model.save(
        model,
        export_dir,
        signatures=model.tf_function.get_concrete_function(
            tf.TensorSpec(data.shape, tf.float32, name="input")
        ),
    )

    # Convert the model
    def representative_dataset():
        for _ in range(100):
            tmp_data = np.random.rand(*tuple(data.shape))
            yield [tmp_data.astype(np.float32) * 2 - offset]

    converter = tf.lite.TFLiteConverter.from_saved_model(export_dir)
    converter.optimizations = [tf.lite.Optimize.DEFAULT]
    converter.representative_dataset = representative_dataset
    if int_quant_dtype in (tf.int16, tf.uint16):
        converter.target_spec.supported_ops = [
            tf.lite.OpsSet.EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8
        ]
    else:
        converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
    converter.inference_input_type = int_quant_dtype
    converter.inference_output_type = int_quant_dtype
    return converter.convert()
#######################################################################
# Abs
# ----
def _test_abs(data, quantized, int_quant_dtype=tf.int8):
    """One iteration of abs.

    The quantized path builds a dedicated integer-quantized model and compares
    TFLite vs TVM directly; the float path goes through the generic
    unary-elemwise harness.
    """
    if quantized:
        tflite_model_quant = _unary_elewise_create_model(
            tf.math.abs, data, offset=1, int_quant_dtype=int_quant_dtype
        )
        tflite_output = run_tflite_graph(tflite_model_quant, data)
        # TFLite 2.6.x upgrade support
        # The converter's input-tensor naming changed across TF releases.
        if tf.__version__ < LooseVersion("2.6.1"):
            in_node = ["serving_default_input_int8"]
        elif tf.__version__ < LooseVersion("2.9"):
            in_node = (
                ["serving_default_input_int16"] if int_quant_dtype == tf.int16 else ["tfl.quantize"]
            )
        else:
            # NOTE(review): bare string here vs. lists above — run_tvm_graph
            # presumably accepts both forms; confirm before unifying.
            in_node = "serving_default_input"
        tvm_output = run_tvm_graph(tflite_model_quant, data, in_node)
        tvm.testing.assert_allclose(
            np.squeeze(tvm_output[0]), np.squeeze(tflite_output[0]), rtol=1e-5, atol=1e-2
        )
    else:
        return _test_unary_elemwise(math_ops.abs, data, quantized, int_quant_dtype=int_quant_dtype)
#######################################################################
# Rsqrt
# ----
def _test_rsqrt(data, quantized, int_quant_dtype=tf.int8):
    """One iteration of rsqrt.

    Float runs, and quantized runs on TF < 2.6.1, use the fake-quant harness
    with a positive quant range (rsqrt's domain); newer TF builds a dedicated
    integer-quantized model instead — presumably because the newer converter
    gained native quantized rsqrt support (TODO confirm).
    """
    # tensorflow version upgrade support
    if tf.__version__ < LooseVersion("2.6.1") or not quantized:
        return _test_unary_elemwise(
            math_ops.rsqrt, data, quantized, quant_range=[1, 6], int_quant_dtype=int_quant_dtype
        )
    else:
        tflite_model_quant = _unary_elewise_create_model(
            tf.math.rsqrt, data, int_quant_dtype=int_quant_dtype
        )
        tflite_output = run_tflite_graph(tflite_model_quant, data)
        # Input-tensor naming changed with the TF 2.9 converter.
        if tf.__version__ < LooseVersion("2.9"):
            in_node = ["tfl.quantize"]
        else:
            in_node = "serving_default_input"
        tvm_output = run_tvm_graph(tflite_model_quant, data, in_node)
        tvm.testing.assert_allclose(
            np.squeeze(tvm_output[0]), np.squeeze(tflite_output[0]), rtol=1e-5, atol=1e-2
        )
#######################################################################
# Ceil
# ----
def _test_ceil(data, quantized, int_quant_dtype=tf.int8):
    """Ceil through the generic unary-elemwise harness."""
    return _test_unary_elemwise(
        math_ops.ceil, data, quantized, int_quant_dtype=int_quant_dtype
    )
#######################################################################
# Floor
# -----
def _test_floor(data, quantized, int_quant_dtype=tf.int8):
    """Floor through the generic unary-elemwise harness."""
    return _test_unary_elemwise(
        math_ops.floor, data, quantized, int_quant_dtype=int_quant_dtype
    )
#######################################################################
# Round
# -----
def _test_round(data, quantized, int_quant_dtype=tf.int8):
    """Round through the generic unary-elemwise harness."""
    return _test_unary_elemwise(
        math_ops.round, data, quantized, int_quant_dtype=int_quant_dtype
    )
#######################################################################
# Exp
# ---
def _test_exp(data, quantized, int_quant_dtype=tf.int8):
    """Exp through the generic unary-elemwise harness."""
    return _test_unary_elemwise(
        math_ops.exp, data, quantized, int_quant_dtype=int_quant_dtype
    )
#######################################################################
# Log
# ---
def _test_log(data, quantized, int_quant_dtype=tf.int8):
    """Log through the generic unary-elemwise harness (positive quant range)."""
    return _test_unary_elemwise(
        math_ops.log, data, quantized, quant_range=(1, 6), int_quant_dtype=int_quant_dtype
    )
#######################################################################
# Sin
# ---
def _test_sin(data, quantized, int_quant_dtype=tf.int8):
    """Sin through the generic unary-elemwise harness."""
    return _test_unary_elemwise(
        math_ops.sin, data, quantized, int_quant_dtype=int_quant_dtype
    )
#######################################################################
# Cos
# ---
def _test_cos(data, quantized, int_quant_dtype=tf.int8):
    """One iteration of cos.

    The quantized path builds a dedicated integer-quantized model; the float
    path goes through the generic unary-elemwise harness.
    """
    if quantized:
        tflite_model_quant = _unary_elewise_create_model(
            tf.math.cos, data, int_quant_dtype=int_quant_dtype
        )
        tflite_output = run_tflite_graph(tflite_model_quant, data)
        # Input-tensor naming changed with the TF 2.9 converter.
        if tf.__version__ < LooseVersion("2.9"):
            in_node = ["tfl.quantize"]
        else:
            in_node = "serving_default_input"
        tvm_output = run_tvm_graph(tflite_model_quant, data, in_node)
        tvm.testing.assert_allclose(
            np.squeeze(tvm_output[0]), np.squeeze(tflite_output[0]), rtol=1e-5, atol=1e-2
        )
    else:
        # Forward int_quant_dtype for consistency with the sibling unary
        # wrappers (it is unused by the harness when quantized is False).
        return _test_unary_elemwise(
            math_ops.cos, data, quantized, int_quant_dtype=int_quant_dtype
        )
#######################################################################
# Tan
# ---
def _test_tan(data, quantized, int_quant_dtype=tf.int8):
    """Tan through the generic unary-elemwise harness."""
    return _test_unary_elemwise(
        math_ops.tan, data, quantized, int_quant_dtype=int_quant_dtype
    )
#######################################################################
# Square
# ------
def _test_square(data, quantized, int_quant_dtype=tf.int8):
    """Square through the generic unary-elemwise harness."""
    return _test_unary_elemwise(
        math_ops.square, data, quantized, int_quant_dtype=int_quant_dtype
    )
#######################################################################
# Neg
# ------
def _test_neg(data, quantized, int_quant_dtype=tf.int8):
    """Negate through the generic unary-elemwise harness."""
    return _test_unary_elemwise(
        math_ops.neg, data, quantized, int_quant_dtype=int_quant_dtype
    )
#######################################################################
# Sqrt
# ------
def _test_sqrt(data, quantized, int_quant_dtype=tf.int8):
    """Sqrt through the generic unary-elemwise harness (positive quant range)."""
    return _test_unary_elemwise(
        math_ops.sqrt, data, quantized, quant_range=(1, 6), int_quant_dtype=int_quant_dtype
    )
#######################################################################
# Elu
# ---
def _test_elu(data, quantized, int_quant_dtype=tf.int8):
    """ELU through the generic unary-elemwise harness."""
    return _test_unary_elemwise(
        nn_ops.elu, data, quantized, int_quant_dtype=int_quant_dtype
    )
def _test_forward_unary_elemwise(test_op, int_quant_dtype=None, quantized=True, negative=True):
    """Drive *test_op* over a standard set of quantized and float inputs.

    Parameters
    ----------
    test_op : callable
        One of the _test_* unary wrappers; called as
        ``test_op(data, quantized=..., int_quant_dtype=...)``.
    int_quant_dtype : tf.DType or None
        Integer dtype for the quantized runs; None falls back to uint8 data.
    quantized : bool
        Whether to run the quantized cases at all.
    negative : bool
        Whether the float inputs include negative values (disable for ops
        with a non-negative domain, e.g. log/sqrt/rsqrt).
    """
    # input data
    in_data, inq_data = [], []
    np_dtype = int_quant_dtype.as_numpy_dtype if int_quant_dtype else np.uint8
    # quantized input data
    if quantized:
        inq_data.append(np.arange(1, 240, 40, dtype=np_dtype))
        inq_data.append(np.arange(1, 240, 40, dtype=np_dtype).reshape((2, 1, 3)))
        # NOTE(review): int_quant_dtype is a tf.DType while np.int8 is a NumPy
        # type — this equality presumably relies on tf.DType's coercing
        # __eq__; confirm it matches as intended.
        if int_quant_dtype == np.int8:
            inq_data.append(np.arange(-128, 127, 45, dtype=np.int8))
        for data in inq_data:
            test_op(data, quantized=True, int_quant_dtype=int_quant_dtype)
    # normal input data
    if negative:
        in_data.append(np.arange(-2.0, 4.0, dtype=np.float32))
        in_data.append(np.arange(-2.0, 4.0, dtype=np.float32).reshape((2, 1, 3)))
    else:
        in_data.append(np.arange(1.0, 7.0, dtype=np.float32))
        in_data.append(np.arange(1.0, 7.0, dtype=np.float32).reshape((2, 1, 3)))
    for data in in_data:
        test_op(data, quantized=False, int_quant_dtype=int_quant_dtype)
def test_all_unary_elemwise():
    """All unary elemwise"""
    _test_forward_unary_elemwise(_test_abs, int_quant_dtype=tf.int8)
    _test_forward_unary_elemwise(_test_abs, int_quant_dtype=tf.int16)
    _test_forward_unary_elemwise(_test_floor)
    _test_forward_unary_elemwise(_test_exp)
    # log/sqrt/rsqrt are run without negative inputs (non-negative domain)
    _test_forward_unary_elemwise(_test_log, negative=False)
    _test_forward_unary_elemwise(_test_square)
    _test_forward_unary_elemwise(_test_sin)
    _test_forward_unary_elemwise(_test_neg)
    _test_forward_unary_elemwise(_test_sqrt, negative=False)
    # tensorflow version upgrade support
    if tf.__version__ < LooseVersion("2.6.1"):
        _test_forward_unary_elemwise(_test_rsqrt, negative=False, int_quant_dtype=tf.uint8)
    else:
        _test_forward_unary_elemwise(_test_rsqrt, negative=False, int_quant_dtype=tf.int8)
    # ceil and cos come with TFLite 1.14.0.post1 fbs schema
    if package_version.parse(tf.VERSION) >= package_version.parse("1.14.0"):
        _test_forward_unary_elemwise(_test_ceil)
        if tf.__version__ < LooseVersion("2.6.1"):
            _test_forward_unary_elemwise(_test_cos, quantized=False)
        else:
            _test_forward_unary_elemwise(_test_cos, int_quant_dtype=tf.int8)
        _test_forward_unary_elemwise(_test_round)
        # This fails with TF and Tflite 1.15.2, this could not have been tested
        # in CI or anywhere else. The failure mode is that we see a backtrace
        # from the converter that we need to provide a custom Tan operator
        # implementation.
        # _test_forward_unary_elemwise(_test_tan)
    _test_forward_unary_elemwise(_test_elu, quantized=False)
#######################################################################
# Element-wise
# ------------
def _test_elemwise(
    math_op,
    data,
    fused_activation_function=None,
    quantized=False,
    qnn_op=None,
    same_qnn_params=False,
    comparison_op=False,
):
    """One iteration of elemwise.

    Runs *math_op* three times: with two placeholder operands, with
    (placeholder, constant) and with (constant, placeholder).

    Parameters
    ----------
    math_op : callable
        Binary TF math op under test.
    data : list of two np.ndarray
        Left/right operands.
    fused_activation_function : str or None
        Optional activation fused after the op (e.g. "RELU").
    quantized : bool
        Run through fake-quant nodes when True.
    qnn_op : callable or None
        Key into _test_elemwise_qnn_out_range for the float output range.
    same_qnn_params : bool
        Reuse the output range for both inputs as well.
    comparison_op : bool
        The op produces booleans, so no output fake-quant node is added.
    """
    assert len(data) == 2

    def __test_elemwise(in_data):
        # in_data holds one placeholder per operand, or None where that
        # operand should be baked into the graph as a constant instead.
        assert len(in_data) == 2
        if quantized:
            int_quant_dtype = None
            if data[0].dtype == "int8":
                int_quant_dtype = tf.int8
            elif data[0].dtype == "uint8":
                int_quant_dtype = tf.uint8
            elif data[0].dtype == "int16":
                int_quant_dtype = tf.int16
            else:
                assert False, "Unsupported conversion from numpy to tflite dtype!"
            # set the fp32 output range with respect to the operation
            out_min, out_max = _test_elemwise_qnn_out_range(qnn_op)
            inq0_min, inq0_max = (-100, 100)
            inq1_min, inq1_max = (-50, 50)
            # if requested use same quantization parameters provided by _test_elemwise_qnn_out_range
            if same_qnn_params:
                inq0_min, inq0_max = (out_min, out_max)
                inq1_min, inq1_max = (out_min, out_max)
            # fake_quant will keep the tensors in float32 until the conversion in the session
            inq_data = [
                tf.quantization.fake_quant_with_min_max_args(
                    in_data[0], min=out_min, max=out_max, name="inq_0"
                )
                if in_data[0] is not None
                else tf.quantization.fake_quant_with_min_max_args(
                    data[0], min=out_min, max=out_max, name="const_tensor0"
                ),
                tf.quantization.fake_quant_with_min_max_args(
                    in_data[1], min=out_min, max=out_max, name="inq_1"
                )
                if in_data[1] is not None
                else tf.quantization.fake_quant_with_min_max_args(
                    data[1], min=out_min, max=out_max, name="const_tensor1"
                ),
            ]
            # input_range keeps entries only for operands fed as placeholders;
            # constants do not need a runtime range.
            input_range = {
                x[1][0]: x[1][1]
                for x in zip(
                    in_data, (("inq_0", (inq0_min, inq0_max)), ("inq_1", (inq1_min, inq1_max)))
                )
                if x[0] is not None
            }
            if comparison_op:
                out = math_op(inq_data[0], inq_data[1])
                out = with_fused_activation_function(out, fused_activation_function)
                compare_tflite_with_tvm(
                    [x[1] for x in zip(in_data, data) if x[0] is not None],
                    [x + ":0" for x in input_range.keys()],
                    [x[1] for x in zip(in_data, inq_data) if x[0] is not None],
                    [out],
                    quantized=True,
                    input_range=input_range,
                    experimental_new_converter=same_qnn_params,
                    int_quant_dtype=int_quant_dtype,
                )
            else:
                out = math_op(inq_data[0], inq_data[1])
                out = with_fused_activation_function(out, fused_activation_function)
                out = tf.quantization.fake_quant_with_min_max_args(
                    out, min=out_min, max=out_max, name="out"
                )
                # Note same_qnn_params uses experimental_new_converter as toco failed
                compare_tflite_with_tvm(
                    [x[1] for x in zip(in_data, data) if x[0] is not None],
                    [x + ":0" for x in input_range.keys()],
                    [x[1] for x in zip(in_data, inq_data) if x[0] is not None],
                    [out],
                    quantized=True,
                    input_range=input_range,
                    experimental_new_converter=same_qnn_params,
                    int_quant_dtype=int_quant_dtype,
                )
        else:
            out = math_op(
                in_data[0]
                if in_data[0] is not None
                else ops.convert_to_tensor(data[0], dtype=data[0].dtype),
                in_data[1]
                if in_data[1] is not None
                else ops.convert_to_tensor(data[1], dtype=data[1].dtype),
            )
            out = with_fused_activation_function(out, fused_activation_function)
            compare_tflite_with_tvm(
                [x[1] for x in zip(in_data, data) if x[0] is not None],
                [x[1] for x in zip(in_data, ("in_0:0", "in_1:0")) if x[0] is not None],
                [x for x in in_data if x is not None],
                [out],
            )

    # Test with two tensors
    with tf.Graph().as_default():
        __test_elemwise(
            in_data=[
                array_ops.placeholder(shape=data[0].shape, dtype="float32", name="in_0"),
                array_ops.placeholder(shape=data[1].shape, dtype="float32", name="in_1"),
            ]
        )
    # Test with tensor and constant
    with tf.Graph().as_default():
        __test_elemwise(
            in_data=[array_ops.placeholder(shape=data[0].shape, dtype="float32", name="in_0"), None]
        )
    # Test with constant and tensor
    with tf.Graph().as_default():
        __test_elemwise(
            in_data=[None, array_ops.placeholder(shape=data[1].shape, dtype="float32", name="in_1")]
        )
#######################################################################
# Add
# ---
def _test_add(data, fused_activation_function=None, quantized=False, qnn_op=None):
    """Elementwise add via the generic elemwise harness."""
    return _test_elemwise(
        math_ops.add, data, fused_activation_function, quantized, qnn_op
    )
#######################################################################
# Subtract
# --------
def _test_sub(data, fused_activation_function=None, quantized=False, qnn_op=None):
    """Elementwise subtract via the generic elemwise harness."""
    return _test_elemwise(
        math_ops.subtract, data, fused_activation_function, quantized, qnn_op
    )
#######################################################################
# Mul
# ---
def _test_mul(data, fused_activation_function=None, quantized=False, qnn_op=None):
    """Elementwise multiply via the generic elemwise harness."""
    return _test_elemwise(
        math_ops.multiply, data, fused_activation_function, quantized, qnn_op
    )
#######################################################################
# Divide
# ------
def _test_div(data, fused_activation_function=None):
    """Elementwise divide (float path only)."""
    return _test_elemwise(
        math_ops.divide, data, fused_activation_function
    )
#######################################################################
# Power
# -----
def _test_pow(data):
    """Elementwise power (float path only)."""
    return _test_elemwise(
        math_ops.pow, data
    )
#######################################################################
# Maximum
# -------
def _test_maximum(data, fused_activation_function=None, quantized=False, qnn_op=None):
    """Elementwise maximum; quantized runs share one set of qnn params."""
    return _test_elemwise(
        math_ops.maximum,
        data,
        fused_activation_function,
        quantized,
        qnn_op,
        same_qnn_params=True,
    )
#######################################################################
# Minimum
# -------
def _test_minimum(data, fused_activation_function=None, quantized=False, qnn_op=None):
    """Elementwise minimum; quantized runs share one set of qnn params."""
    return _test_elemwise(
        math_ops.minimum,
        data,
        fused_activation_function,
        quantized,
        qnn_op,
        same_qnn_params=True,
    )
#######################################################################
# Greater
# -------
def _test_greater(data, fused_activation_function=None, quantized=False, qnn_op=None):
    """Elementwise greater comparison (boolean output, shared qnn params)."""
    return _test_elemwise(
        math_ops.greater, data, fused_activation_function, quantized, qnn_op,
        same_qnn_params=True, comparison_op=True,
    )
#######################################################################
# Greater_equal
# -------------
def _test_greater_equal(data):
    """Elementwise greater_equal comparison (float path only)."""
    return _test_elemwise(
        math_ops.greater_equal, data
    )
#######################################################################
# Less
# ----
def _test_less(data):
    """Elementwise less comparison (float path only)."""
    return _test_elemwise(
        math_ops.less, data
    )
#######################################################################
# Less_equal
# ----------
def _test_less_equal(data):
    """Elementwise less_equal comparison (float path only)."""
    return _test_elemwise(
        math_ops.less_equal, data
    )
#######################################################################
# Equal
# -----
def _test_equal(data, fused_activation_function=None, quantized=False, qnn_op=None):
    """Elementwise equal comparison (boolean output, shared qnn params)."""
    return _test_elemwise(
        math_ops.equal, data, fused_activation_function, quantized, qnn_op,
        same_qnn_params=True, comparison_op=True,
    )
#######################################################################
# Not_equal
# ---------
def _test_not_equal(data):
    """Elementwise not_equal comparison (float path only)."""
    return _test_elemwise(
        math_ops.not_equal, data
    )
#######################################################################
# Squared_difference
# ------------------
def _test_squared_difference(data, fused_activation_function=None, quantized=False, qnn_op=None):
    """Elementwise squared difference (shared qnn params on the quantized path)."""
    return _test_elemwise(
        math_ops.squared_difference, data, fused_activation_function, quantized, qnn_op,
        same_qnn_params=True,
    )
#######################################################################
# Floor_divide
# ------------
def _test_floor_divide(data):
    """Elementwise floor division (float path only)."""
    return _test_elemwise(
        math_ops.floordiv, data
    )
#######################################################################
# Floor_mod
# ---------
def _test_floor_mod(data):
    """Elementwise floor modulo (float path only)."""
    return _test_elemwise(
        math_ops.floormod, data
    )
def _test_forward_elemwise(testop):
"""Elewise"""
testop(
[
np.arange(6.0, dtype=np.float32).reshape((2, 1, 1, 3)),
np.arange(1.0, 7.0, dtype=np.float32).reshape((2, 1, 1, 3)),
]
)
testop(
[
np.arange(6.0, dtype=np.float32).reshape((2, 1, 3)),
np.arange(1.0, 7.0, dtype=np.float32).reshape((2, 1, 3)),
]
)
testop(
[
np.arange(3.0, dtype=np.float32).reshape((1, 3)),
np.arange(1.0, 4.0, dtype=np.float32).reshape((1, 3)),
]
)
def _test_forward_elemwise_quantized(testop, dtype=np.uint8):
type_info = np.iinfo(dtype)
_min, _max = type_info.min, type_info.max
testop(
[
np.array(np.random.uniform(_min, _max, (3, 6)), dtype=dtype),
np.array(np.random.uniform(_min, _max, (3, 6)), dtype=dtype),
],
quantized=True,
qnn_op=testop,
)
def _test_elemwise_qnn_out_range(qnn_op):
    """Return the fake-quant float32 output range matching *qnn_op*."""
    # Ranges are sized for the harness's (-100, 100) / (-50, 50) input ranges.
    op_out_range = {
        _test_add: (-150, 150),
        _test_sub: (-150, 150),
        _test_mul: (-5e3, 5e3),
        _test_maximum: (-112, 111),
        _test_minimum: (-128, 127),
        _test_equal: (-150, 150),
        _test_greater: (-150, 150),
        _test_squared_difference: (0, 65025),
    }
    return op_out_range[qnn_op]
def test_all_elemwise():
    """All_elewise"""
    _test_forward_elemwise(_test_add)
    _test_forward_elemwise_quantized(_test_add)
    _test_forward_elemwise(partial(_test_add, fused_activation_function="RELU"))
    # this is broken with tf upgrade 1.15.2 and hits a segfault that needs
    # further investigation.
    # _test_forward_elemwise(partial(_test_add, fused_activation_function="RELU6"))
    _test_forward_elemwise(_test_sub)
    _test_forward_elemwise_quantized(_test_sub)
    _test_forward_elemwise(partial(_test_sub, fused_activation_function="RELU"))
    _test_forward_elemwise(partial(_test_sub, fused_activation_function="RELU6"))
    _test_forward_elemwise(_test_mul)
    _test_forward_elemwise_quantized(_test_mul)
    _test_forward_elemwise(partial(_test_mul, fused_activation_function="RELU"))
    _test_forward_elemwise(partial(_test_mul, fused_activation_function="RELU6"))
    # div/pow and the ordering comparisons below only have float coverage
    _test_forward_elemwise(_test_div)
    _test_forward_elemwise(partial(_test_div, fused_activation_function="RELU"))
    _test_forward_elemwise(partial(_test_div, fused_activation_function="RELU6"))
    _test_forward_elemwise(_test_pow)
    _test_forward_elemwise(_test_maximum)
    _test_forward_elemwise_quantized(_test_maximum)
    _test_forward_elemwise(_test_minimum)
    _test_forward_elemwise_quantized(_test_minimum)
    _test_forward_elemwise(_test_greater)
    _test_forward_elemwise_quantized(_test_greater)
    _test_forward_elemwise(_test_squared_difference)
    # squared_difference is the one op exercised with int8 quantized inputs
    _test_forward_elemwise_quantized(_test_squared_difference, np.int8)
    _test_forward_elemwise(_test_greater_equal)
    _test_forward_elemwise(_test_less)
    _test_forward_elemwise(_test_less_equal)
    _test_forward_elemwise(_test_equal)
    _test_forward_elemwise_quantized(_test_equal)
    _test_forward_elemwise(_test_not_equal)
    if package_version.parse(tf.VERSION) >= package_version.parse("1.14.0"):
        _test_forward_elemwise(_test_floor_divide)
        _test_forward_elemwise(_test_floor_mod)
#######################################################################
# AddN
# ----
def _test_forward_add_n(inputs):
    """One iteration of add_n over *inputs* (an iterable of same-shape arrays).

    Each element of *inputs* becomes its own graph input; the single output is
    their elementwise sum via tf.add_n.
    """
    tf.reset_default_graph()
    with tf.Graph().as_default():
        # Comprehension replaces the original append loop (same placeholders,
        # same order).
        placeholders = [
            tf.placeholder(shape=each.shape, dtype=each.dtype) for each in inputs
        ]
        output = tf.add_n(placeholders)
        compare_tflite_with_tvm(
            list(inputs),
            [each.name for each in placeholders],
            list(placeholders),
            [output],
        )
def test_forward_add_n():
    """AddN over int32 and float32 operands of varying arity (TF >= 1.14)."""
    if package_version.parse(tf.VERSION) >= package_version.parse("1.14.0"):
        int_a = np.random.randint(1, 100, size=(3, 3, 3), dtype=np.int32)
        int_b = np.random.randint(1, 100, size=(3, 3, 3), dtype=np.int32)
        int_c = np.random.randint(1, 100, size=(3, 3, 3), dtype=np.int32)
        flt_a, flt_b, flt_c = (t.astype(np.float32) for t in (int_a, int_b, int_c))
        # A bare ndarray is iterated over its first axis by the helper.
        for operands in (
            int_a,
            [int_a, int_b],
            (int_a, int_b, int_c),
            flt_a,
            [flt_a, flt_b],
            (flt_a, flt_b, flt_c),
        ):
            _test_forward_add_n(operands)
#######################################################################
# Logical operators
# -----------------
def _test_logical_binary(logical_bin_op, data):
    """One iteration of a logical op on two boolean inputs.

    ``logical_not`` is unary, so for it the two inputs are first combined
    with ``logical_or`` and the NOT is applied to that intermediate result;
    every other op is applied directly to the two placeholders.
    """
    with tf.Graph().as_default():
        in_data = [
            array_ops.placeholder(shape=data[0].shape, dtype="bool", name="in_0"),
            array_ops.placeholder(shape=data[1].shape, dtype="bool", name="in_1"),
        ]
        if logical_bin_op is math_ops.logical_not:
            out = math_ops.logical_or(in_data[0], in_data[1], name="out1")
            out = logical_bin_op(out, name="out")
        else:
            out = logical_bin_op(in_data[0], in_data[1], name="out")
        compare_tflite_with_tvm(data, ["in_0:0", "in_1:0"], in_data, [out])
def _test_forward_logical_and(data):
    """Run a single LOGICAL_AND comparison between TFLite and TVM."""
    bin_op = math_ops.logical_and
    return _test_logical_binary(bin_op, data)
def _test_forward_logical_or(data):
    """Run a single LOGICAL_OR comparison between TFLite and TVM."""
    bin_op = math_ops.logical_or
    return _test_logical_binary(bin_op, data)
def _test_forward_logical_not(data):
    """Run a single LOGICAL_NOT comparison between TFLite and TVM."""
    unary_op = math_ops.logical_not
    return _test_logical_binary(unary_op, data)
def test_all_logical():
    """Logical AND/OR/NOT over random boolean tensors."""
    shape = (2, 3, 4)
    data = [
        np.random.choice(a=[False, True], size=shape).astype("bool"),
        np.random.choice(a=[False, True], size=shape).astype("bool"),
    ]
    # boolean dtype is not supported by older versions than TFLite 1.15.0
    if package_version.parse(tf.VERSION) >= package_version.parse("1.15.0"):
        runners = (
            _test_forward_logical_and,
            _test_forward_logical_or,
            _test_forward_logical_not,
        )
        for runner in runners:
            runner(data)
#######################################################################
# Zeros like
# ----------
def _test_zeros_like(data):
    """One iteration of ZEROS LIKE"""
    with tf.Graph().as_default():
        placeholder = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
        zeros = gen_array_ops.zeros_like(placeholder)
        compare_tflite_with_tvm(data, "Placeholder:0", [placeholder], [zeros])
def test_forward_zeros_like():
    """ZEROS_LIKE on a float32 (1, 6) tensor."""
    data = np.arange(6.0, dtype=np.float32).reshape((1, 6))
    _test_zeros_like(data)
#######################################################################
# Fill
# ----
def _test_fill(dims, value_data, value_dtype):
    """Use the fill op to create a tensor of value_data with constant dims.

    Two graphs are exercised: one feeding the fill value through a scalar
    placeholder (TF >= 1.14 only), and one where the fill becomes a static
    tensor added to a placeholder so the converter constant-folds it.
    """
    value_data = np.array(value_data, dtype=value_dtype)
    # TF 1.13 TFLite convert method does not accept empty shapes
    if package_version.parse(tf.VERSION) >= package_version.parse("1.14.0"):
        with tf.Graph().as_default():
            value = array_ops.placeholder(dtype=value_dtype, name="value", shape=[])
            out = tf.fill(dims, value)
            compare_tflite_with_tvm([value_data], ["value"], [value], [out])
    with tf.Graph().as_default():
        input1 = array_ops.placeholder(dtype=value_dtype, name="input1", shape=dims)
        # Fill op gets converted to static tensor during conversion
        out = tf.fill(dims, value_data)
        out1 = tf.add(out, input1)
        input1_data = np.random.uniform(0, 5, size=dims).astype(value_dtype)
        compare_tflite_with_tvm([input1_data], ["input1"], [input1], [out1])
def test_forward_fill():
    """FILL with int32/float32 values over 4-D and 1-D dims."""
    for dims, fill_value, dtype in (
        ((1, 2, 2, 4), 5, "int32"),
        ((1, 2, 2, 4), 5, "float32"),
        ((5,), 5, "int32"),
    ):
        _test_fill(dims, fill_value, dtype)
#######################################################################
# Reduce
# ------
def _test_reduce(math_op, data, keep_dims=None):
    """One iteration of reduce: data[0] is the tensor, data[1] the axes."""
    assert len(data) == 2
    # The axes (data[1]) are baked into the graph as a constant argument.
    with tf.Graph().as_default():
        tensor_in = array_ops.placeholder(shape=data[0].shape, dtype=data[0].dtype, name="in")
        reduced = math_op(tensor_in, data[1], keep_dims)
        compare_tflite_with_tvm([data[0]], ["in:0"], [tensor_in], [reduced])
def _test_reduce_quantize(math_op, data, keep_dims=None):
    """One iteration of a quantized reduce.

    The float placeholder is fake-quantized to (-100, 100) before the
    reduction and the output is fake-quantized to (-200, 200).
    """
    assert len(data) == 2
    # Test with tensor and constant
    with tf.Graph().as_default():
        in_data = [array_ops.placeholder(shape=data[0].shape, dtype="float32", name="in")]
        inq_data = [
            tf.quantization.fake_quant_with_min_max_args(
                in_data[0], min=-100, max=100, name="inq_0"
            )
        ]
        input_range = {"inq_0": (-100, 100)}
        # NOTE(review): the whole list (not inq_data[0]) is passed to math_op,
        # so TF packs it into a tensor with a leading length-1 axis —
        # presumably intentional; confirm against the converter's output.
        out = math_op(inq_data, data[1], keep_dims)
        out = tf.quantization.fake_quant_with_min_max_args(out, min=-200, max=200, name="out")
        compare_tflite_with_tvm(
            [data[0]], ["inq_0:0"], [inq_data[0]], [out], quantized=True, input_range=input_range
        )
#######################################################################
# Reduce_min
# ----------
def _test_reduce_min(data, keep_dims=None):
    """Compare one REDUCE_MIN graph between TFLite and TVM."""
    reduce_op = math_ops.reduce_min
    return _test_reduce(reduce_op, data, keep_dims)
#######################################################################
# Reduce_max
# ----------
def _test_reduce_max(data, keep_dims=None):
    """Compare one REDUCE_MAX graph between TFLite and TVM."""
    reduce_op = math_ops.reduce_max
    return _test_reduce(reduce_op, data, keep_dims)
#######################################################################
# Reduce_mean
# -----------
def _test_reduce_mean(data, keep_dims=None, quantized=False):
    """Compare one MEAN graph (optionally quantized) between TFLite and TVM."""
    runner = _test_reduce_quantize if quantized else _test_reduce
    return runner(math_ops.reduce_mean, data, keep_dims)
#######################################################################
# Reduce_prod
# -----------
def _test_reduce_prod(data, keep_dims=None):
    """Compare one REDUCE_PROD graph between TFLite and TVM."""
    reduce_op = math_ops.reduce_prod
    return _test_reduce(reduce_op, data, keep_dims)
#######################################################################
# Reduce_sum
# -----------
def _test_reduce_sum(data, keep_dims=None):
    """Compare one SUM graph between TFLite and TVM."""
    reduce_op = math_ops.reduce_sum
    return _test_reduce(reduce_op, data, keep_dims)
#######################################################################
# Reduce_any
# ----------
def _test_reduce_any(data, keep_dims=None):
    """Compare one REDUCE_ANY graph (boolean input) between TFLite and TVM."""
    reduce_op = math_ops.reduce_any
    return _test_reduce(reduce_op, data, keep_dims)
def _test_forward_reduce(testop, dtype="float32"):
    """Drive *testop* over a fixed 4-D input with several axis choices.

    Axes exercised: all axes (None), a scalar axis, and a pair of axes —
    each with keep_dims unspecified, False, and True.
    """
    shape = (16, 16, 16, 16)

    def make_input():
        if dtype == "bool":
            return np.random.choice(a=[False, True], size=shape).astype(dtype)
        return np.random.rand(*shape).astype(dtype)

    axes = (None, np.array(1, dtype=np.int32), np.array([1, 2], dtype=np.int32))
    for axis in axes:
        data = [make_input(), axis]
        testop(data)
        testop(data, keep_dims=False)
        testop(data, keep_dims=True)
def _test_forward_reduce_quantized(testop):
    """Drive *testop* in quantized mode over a uint8 (3, 6) tensor, axes [1, 2]."""
    data = [
        np.array(np.random.uniform(0, 255, (3, 6)), dtype=np.uint8),
        np.array([1, 2], dtype=np.int32),
    ]
    testop(data, quantized=True)
    for keep in (False, True):
        testop(data, keep_dims=keep, quantized=True)
def test_all_reduce():
    """All supported reduce operators (min/max/mean/prod/sum/any)."""
    for reducer in (_test_reduce_min, _test_reduce_max, _test_reduce_mean):
        _test_forward_reduce(reducer)
    # quantized reduce is exercised only for MEAN
    _test_forward_reduce_quantized(_test_reduce_mean)
    for reducer in (_test_reduce_prod, _test_reduce_sum):
        _test_forward_reduce(reducer)
    # REDUCE_ANY needs boolean tensors, unsupported before TFLite 1.15.0
    if package_version.parse(tf.VERSION) >= package_version.parse("1.15.0"):
        _test_forward_reduce(_test_reduce_any, dtype="bool")
#######################################################################
# Arg_min_max
# -----------
def _test_arg_min_max(math_op, data, axis, quantized=False):
    """One iteration of arg_min_max.

    When *quantized*, the input is fake-quantized to (qmin, qmax) first;
    the argmax/argmin result is an index tensor and is not fake-quantized.
    """
    with tf.Graph().as_default():
        t_name = "in"
        in_data = array_ops.placeholder(shape=data.shape, dtype=np.float32, name=t_name)
        input_range = None
        qmin, qmax = -100, 102
        if quantized:
            inq_data = tf.quantization.fake_quant_with_min_max_args(
                in_data, min=qmin, max=qmax, name="q" + t_name
            )
            # input_range key is the fake-quant node name without the ":0" suffix
            input_range = {inq_data.name.split(":")[0]: (qmin, qmax)}
            out = math_op(input=inq_data, axis=axis)
            compare_tflite_with_tvm(
                [data], [inq_data.name], [inq_data], [out], quantized=True, input_range=input_range
            )
        else:
            out = math_op(input=in_data, axis=axis)
            compare_tflite_with_tvm([data], [in_data.name], [in_data], [out])
def test_forward_arg_min_max():
    """ARG_MIN / ARG_MAX over quantized and float inputs."""
    axes = (None, 0, 1, -1)
    # quantized path: there is no quantized version of ArgMin
    quant_data = np.array(np.random.uniform(-100, 100, (3, 4)), dtype=np.uint8)
    for axis in axes:
        _test_arg_min_max(math_ops.argmax, quant_data, axis, True)
    float_data = np.array(np.random.uniform(-100, 100, (3, 4)), dtype=np.float32)
    for axis in axes:
        _test_arg_min_max(math_ops.argmax, float_data, axis)
        _test_arg_min_max(math_ops.argmin, float_data, axis)
#######################################################################
# Select, Where
# -------------
def test_forward_select():
    """SELECT/WHERE: pick elementwise from two int32 tensors by a comparison mask."""
    with tf.Graph().as_default():
        with tf.Session() as _:
            input1 = tf.placeholder(tf.int32, shape=[1, 4, 4, 3], name="input1")
            input2 = tf.placeholder(tf.int32, shape=[1, 4, 4, 3], name="input2")
            mask = input1 > input2
            # distinct branch expressions keep the converter from folding the select
            out = tf.where(mask, input1 + 1, input2 * 2)
            in_data1 = np.random.uniform(0, 10, size=(1, 4, 4, 3)).astype("int32")
            in_data2 = np.random.uniform(0, 10, size=(1, 4, 4, 3)).astype("int32")
            compare_tflite_with_tvm(
                [in_data1, in_data2], ["input1:0", "input2:0"], [input1, input2], [out]
            )
@pytest.mark.parametrize("quant_bits", [2, 4, 8, 16])
@pytest.mark.parametrize(
    "value, min_value, max_value",
    [[-10.11, -6, 6], [-3.55, -6, 6], [0, -6, 6], [3.55, -6, 6], [10.11, -6, 6]],
)
def test_forward_fake_quant(value, min_value, max_value, quant_bits):
    """FAKE_QUANT of a scalar at several bit widths.

    Values outside [min_value, max_value] exercise the clamping path.
    """
    with tf.Graph().as_default():
        with tf.Session() as _:
            input_placeholder = tf.placeholder(tf.float32, shape=[1], name="input")
            out = tf.quantization.fake_quant_with_min_max_args(
                input_placeholder, min=min_value, max=max_value, num_bits=quant_bits, name=None
            )
            in_data = np.float32(value)
            compare_tflite_with_tvm([in_data], ["input:0"], [input_placeholder], [out])
# Squeeze
# -------
def _test_squeeze(data, squeeze_dims=None):
    """One iteration of squeeze; squeezes all singleton dims when none given."""
    dims = [] if squeeze_dims is None else squeeze_dims
    with tf.Graph().as_default():
        placeholder = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
        squeezed = array_ops.squeeze(placeholder, dims) if dims else array_ops.squeeze(placeholder)
        compare_tflite_with_tvm(data, "Placeholder:0", [placeholder], [squeezed])
def test_forward_squeeze():
    """SQUEEZE selected singleton dimensions."""
    for shape, dims in (((1, 2, 1, 3), [0, 2]), ((2, 1, 3, 1), [1, 3])):
        _test_squeeze(np.arange(6).reshape(shape), dims)
#######################################################################
# Quantize/DeQuantize
# -------------------
def _test_quantize_dequantize(data):
    """One iteration of quantize and dequantize.

    Builds a small Keras model (ReLU -> Add -> Concat) with placeholder
    inputs, post-training quantizes it, and compares TFLite and TVM output.
    """
    # Keras model to force TFLite converter to insert 2 TFLite quantize ops.
    # First TFLite quantize op converts float32 tensor to int8 tensor - Qnn quantize.
    # Second TFLite quantize op converts int8 tensor to int8 tensor - Qnn requantize.
    data_in = tf.keras.layers.Input(shape=data.shape[1:])
    relu = tf.keras.layers.ReLU()(data_in)
    add = tf.keras.layers.Add()([data_in, relu])
    concat = tf.keras.layers.Concatenate(axis=0)([relu, add])
    keras_model = tf.keras.models.Model(inputs=data_in, outputs=concat)
    # To create quantized values with dynamic range of activations, needs representative dataset
    def representative_data_gen():
        for _ in range(1):
            yield [data]
    tflite_model_quant = _quantize_keras_model(keras_model, representative_data_gen, True, True)
    tflite_output = run_tflite_graph(tflite_model_quant, data)
    # Input tensor naming scheme changed with the TF 2.9 converter.
    if tf.__version__ < LooseVersion("2.9"):
        in_node = data_in.name.split(":")[0]
    else:
        in_node = "serving_default_" + data_in.name + ":0"
    tvm_output = run_tvm_graph(tflite_model_quant, data, in_node)
    tvm.testing.assert_allclose(
        np.squeeze(tvm_output[0]), np.squeeze(tflite_output[0]), rtol=1e-5, atol=1e-2
    )
def _test_quantize_dequantize_const(data):
    """One iteration of quantize and dequantize with a constant operand.

    Same model as ``_test_quantize_dequantize`` except the Add takes the raw
    numpy *data* (a graph constant) instead of the input layer, so the
    converter must quantize a constant tensor as well.
    """
    # Keras model to force TFLite converter to insert 2 TFLite quantize ops.
    # First TFLite quantize op converts float32 tensor to int8 tensor - Qnn quantize.
    # Second TFLite quantize op converts int8 tensor to int8 tensor - Qnn requantize.
    data_in = tf.keras.layers.Input(shape=data.shape[1:])
    relu = tf.keras.layers.ReLU()(data_in)
    add = tf.keras.layers.Add()([data, relu])
    concat = tf.keras.layers.Concatenate(axis=0)([relu, add])
    keras_model = tf.keras.models.Model(inputs=data_in, outputs=concat)
    # To create quantized values with dynamic range of activations, needs representative dataset
    def representative_data_gen():
        for _ in range(1):
            yield [data]
    tflite_model_quant = _quantize_keras_model(keras_model, representative_data_gen, True, True)
    tflite_output = run_tflite_graph(tflite_model_quant, data)
    # Input tensor naming scheme changed with the TF 2.9 converter.
    if tf.__version__ < LooseVersion("2.9"):
        in_node = data_in.name.split(":")[0]
    else:
        in_node = "serving_default_" + data_in.name + ":0"
    tvm_output = run_tvm_graph(tflite_model_quant, data, in_node)
    tvm.testing.assert_allclose(
        np.squeeze(tvm_output[0]), np.squeeze(tflite_output[0]), rtol=1e-5, atol=1e-2
    )
def test_forward_quantize_dequantize():
    """Quantize/Dequantize roundtrip (requires TF >= 2.1.0)."""
    data = np.random.uniform(0, 1, (1, 4, 4, 3)).astype("float32")
    if package_version.parse(tf.VERSION) >= package_version.parse("2.1.0"):
        for runner in (_test_quantize_dequantize, _test_quantize_dequantize_const):
            runner(data)
#######################################################################
# Pad
# ---
def _test_pad(data, mode="CONSTANT", quantized=False):
    """One iteration of PAD.

    data[0] is the input tensor and data[1] the paddings matrix (a graph
    constant). The quantized path fake-quantizes the input to (-100, 100).
    """
    assert len(data) == 2
    # Test with tensor and constant
    with tf.Graph().as_default():
        in_data = [array_ops.placeholder(shape=data[0].shape, dtype="float32", name="in")]
        if quantized:
            # fake_quant will keep the tensors in float32 until the conversion in the session
            input_range = {"inq_0": (-100, 100)}
            inq_data = [
                tf.quantization.fake_quant_with_min_max_args(
                    in_data[0], min=-100, max=100, name="inq_0"
                )
            ]
            out = array_ops.pad(
                inq_data[0], ops.convert_to_tensor(data[1], dtype=data[1].dtype), mode=mode
            )
            compare_tflite_with_tvm(
                [data[0]], ["inq_0:0"], inq_data, [out], quantized=True, input_range=input_range
            )
        else:
            out = array_ops.pad(
                in_data[0], ops.convert_to_tensor(data[1], dtype=data[1].dtype), mode=mode
            )
            compare_tflite_with_tvm([data[0]], ["in:0"], in_data, [out])
def test_forward_pad():
    """PAD in CONSTANT/REFLECT/SYMMETRIC modes, plus a quantized case."""
    # CONSTANT-mode cases: (input tensor, paddings)
    constant_cases = [
        (
            np.arange(1.0, 7.0, dtype=np.float32).reshape((2, 1, 1, 3)),
            np.array([[1, 1], [2, 2], [1, 1], [2, 2]], dtype=np.int32),
        ),
        (
            np.arange(1.0, 7.0, dtype=np.float32).reshape((2, 1, 3)),
            np.array([[2, 2], [1, 1], [1, 1]], dtype=np.int32),
        ),
        (
            np.arange(1.0, 7.0, dtype=np.float32).reshape((2, 3)),
            np.array([[1, 1], [2, 2]], dtype=np.int32),
        ),
        (
            np.arange(1.0, 4.0, dtype=np.float32).reshape((1, 3)),
            np.array([[1, 1], [2, 2]], dtype=np.int32),
        ),
    ]
    for tensor, paddings in constant_cases:
        _test_pad([tensor, paddings])
    # REFLECT/SYMMETRIC modes, with both int32 and int64 padding dtypes
    for pad_dtype in (np.int32, np.int64):
        for mode in ("REFLECT", "SYMMETRIC"):
            _test_pad(
                [
                    np.arange(1.0, 7.0, dtype=np.float32).reshape((2, 3)),
                    np.array([[1, 1], [2, 2]], dtype=pad_dtype),
                ],
                mode=mode,
            )
    # quantized CONSTANT-mode pad over the full uint8 range
    _test_pad(
        [
            np.arange(0, 256, dtype=np.uint8).reshape((1, 256)),
            np.array([[1, 1], [2, 2]], dtype=np.int32),
        ],
        quantized=True,
    )
#######################################################################
# PADV2
# -----
def _test_padv2(data, mode="CONSTANT", quantized=False):
    """One iteration of PADV2.

    data[0] is the input tensor, data[1] the paddings matrix, and the
    optional data[2] the constant fill value. In the quantized path both the
    input and the constant value are fake-quantized to (-100, 100).
    """
    assert len(data) == 2 or len(data) == 3
    with_constant_values = len(data) == 3
    # Test with tensor and constant
    with tf.Graph().as_default():
        in_data = [array_ops.placeholder(shape=data[0].shape, dtype="float32", name="in")]
        if quantized:
            # fake_quant will keep the tensors in float32 until the conversion in the session
            input_range = {"inq_0": (-100, 100)}
            inq_data = [
                tf.quantization.fake_quant_with_min_max_args(
                    in_data[0], min=-100, max=100, name="inq_0"
                )
            ]
            if with_constant_values:
                in_constant_values = constant_op.constant(
                    data[2], shape=data[2].shape, dtype="float32", name="in_constant_values"
                )
                inq_constant_values = tf.quantization.fake_quant_with_min_max_args(
                    in_constant_values, min=-100, max=100, name="inq_constant_values"
                )
                out = array_ops.pad_v2(
                    inq_data[0],
                    ops.convert_to_tensor(data[1], dtype=data[1].dtype),
                    constant_values=inq_constant_values,
                    mode=mode,
                )
                out = tf.quantization.fake_quant_with_min_max_args(
                    out, min=-100, max=100, name="out"
                )
            else:
                out = array_ops.pad_v2(
                    inq_data[0], ops.convert_to_tensor(data[1], dtype=data[1].dtype), mode=mode
                )
            compare_tflite_with_tvm(
                [data[0]], ["inq_0:0"], inq_data, [out], quantized=True, input_range=input_range
            )
        else:
            if with_constant_values:
                out = array_ops.pad_v2(
                    in_data[0],
                    ops.convert_to_tensor(data[1], dtype=data[1].dtype),
                    constant_values=ops.convert_to_tensor(data[2], dtype=data[2].dtype),
                    mode=mode,
                )
            else:
                out = array_ops.pad_v2(
                    in_data[0], ops.convert_to_tensor(data[1], dtype=data[1].dtype), mode=mode
                )
            compare_tflite_with_tvm([data[0]], ["in:0"], in_data, [out])
def test_forward_padv2():
    """PADV2: cases without constant_values, with tensor constant_values,
    and with scalar constant_values; quantized cases are gated on TF version."""
    # Tests without Constant_values
    _test_padv2(
        [
            np.arange(1.0, 7.0, dtype=np.float32).reshape((2, 1, 1, 3)),
            np.array([[1, 1], [2, 2], [1, 1], [2, 2]], dtype=np.int32),
        ]
    )
    _test_padv2(
        [
            np.arange(1.0, 7.0, dtype=np.float32).reshape((2, 1, 3)),
            np.array([[2, 2], [1, 1], [1, 1]], dtype=np.int32),
        ]
    )
    _test_padv2(
        [
            np.arange(1.0, 7.0, dtype=np.float32).reshape((2, 3)),
            np.array([[1, 1], [2, 2]], dtype=np.int32),
        ]
    )
    _test_padv2(
        [
            np.arange(1.0, 4.0, dtype=np.float32).reshape((1, 3)),
            np.array([[1, 1], [2, 2]], dtype=np.int32),
        ]
    )
    _test_padv2(
        [
            np.arange(1.0, 7.0, dtype=np.float32).reshape((2, 3)),
            np.array([[1, 1], [2, 2]], dtype=np.int32),
        ],
        mode="REFLECT",
    )
    _test_padv2(
        [
            np.arange(1.0, 7.0, dtype=np.float32).reshape((2, 3)),
            np.array([[1, 1], [2, 2]], dtype=np.int32),
        ],
        mode="SYMMETRIC",
    )
    _test_padv2(
        [
            np.arange(0, 256, dtype=np.uint8).reshape((1, 256)),
            np.array([[1, 1], [2, 2]], dtype=np.int32),
        ],
        quantized=True,
    )
    # Tests with Constant_values
    _test_padv2(
        [
            np.arange(1.0, 7.0, dtype=np.float32).reshape((2, 1, 1, 3)),
            np.array([[1, 1], [2, 2], [1, 1], [2, 2]], dtype=np.int32),
            np.array([2], dtype=np.float32),
        ]
    )
    _test_padv2(
        [
            np.arange(1.0, 7.0, dtype=np.float32).reshape((2, 1, 3)),
            np.array([[2, 2], [1, 1], [1, 1]], dtype=np.int32),
            np.array([1], dtype=np.float32),
        ]
    )
    _test_padv2(
        [
            np.arange(1.0, 7.0, dtype=np.float32).reshape((2, 3)),
            np.array([[1, 1], [2, 2]], dtype=np.int32),
            np.array([-1], dtype=np.float32),
        ]
    )
    _test_padv2(
        [
            np.arange(1.0, 4.0, dtype=np.float32).reshape((1, 3)),
            np.array([[1, 1], [2, 2]], dtype=np.int32),
            np.array([2], dtype=np.float32),
        ]
    )
    # NOTE: In versions > 2.1.0, there is a bug in Tensorflow package for this scenario.
    # Hence, it is disabled temporarily for TF version > 2.1.0 .
    if package_version.parse(tf.VERSION) <= package_version.parse("2.1.0"):
        _test_padv2(
            [
                np.arange(0, 256, dtype=np.uint8).reshape((1, 256)),
                np.array([[1, 1], [2, 2]], dtype=np.int32),
                np.array([2], dtype=np.float32),
            ],
            quantized=True,
        )
    # Constant Values input can be scalar
    _test_padv2(
        [
            np.arange(1.0, 7.0, dtype=np.float32).reshape((2, 1, 1, 3)),
            np.array([[1, 1], [2, 2], [1, 1], [2, 2]], dtype=np.int32),
            np.float32(2),
        ]
    )
    # NOTE: In versions > 2.1.0, there is a bug in Tensorflow package for this scenario.
    # Hence, it is disabled temporarily for TF versions > 2.1.0.
    if package_version.parse(tf.VERSION) <= package_version.parse("2.1.0"):
        _test_padv2(
            [
                np.arange(0, 256, dtype=np.uint8).reshape((1, 256)),
                np.array([[1, 1], [2, 2]], dtype=np.int32),
                np.uint8(10),
            ],
            quantized=True,
        )
#######################################################################
# EXPAND_DIMS
# -----------
def _test_expand_dims(input_shape, input_type, axis, quantized=False):
    """One iteration of EXPAND_DIMS.

    *axis* may be a numpy scalar or a 1-element array; it is converted to a
    constant tensor. The quantized path ignores *input_type* and uses uint8
    data through a fake-quantized float placeholder.
    """
    with tf.Graph().as_default():
        axis = ops.convert_to_tensor(axis, dtype=axis.dtype)
        if quantized:
            # ignoring input_type as quantized requires uint8
            input_array = np.random.uniform(0, 256, input_shape).astype("uint8")
            in_input = tf.placeholder(dtype="float32", shape=input_array.shape, name="input")
            input_range = {"q_input": (-100, 100)}
            inq_input = tf.quantization.fake_quant_with_min_max_args(
                in_input, min=-100, max=100, name="q_input"
            )
            out = array_ops.expand_dims(inq_input, axis=axis)
            out = tf.quantization.fake_quant_with_min_max_args(out, min=-100, max=100, name="out")
            compare_tflite_with_tvm(
                [input_array],
                ["q_input"],
                [inq_input],
                [out],
                quantized=True,
                input_range=input_range,
            )
        else:
            input_array = np.random.uniform(-100, 100, input_shape).astype(input_type)
            in_input = tf.placeholder(
                dtype=input_array.dtype, shape=input_array.shape, name="input"
            )
            out = array_ops.expand_dims(in_input, axis=axis)
            compare_tflite_with_tvm([input_array], ["input"], [in_input], [out])
def test_forward_expand_dims():
    """EXPAND_DIMS over several shapes, axes and dtypes, float and quantized."""
    cases = (
        ((6, 2, 7, 5), "float32", np.int32(0)),
        ((1, 2, 3), "int32", np.int32(-2)),
        ((2, 4, 5), "float32", np.array([1], dtype=np.int32)),
    )
    for quantized in (False, True):
        for shape, dtype, axis in cases:
            _test_expand_dims(shape, dtype, axis, quantized=quantized)
#######################################################################
# ONE_HOT
# -------
def _test_one_hot(indices, depth, on_value, off_value, axis=None):
    """One iteration of ONE_HOT.

    *depth* becomes a graph constant; indices, on_value and off_value are
    fed as placeholders so they remain runtime inputs.
    """
    with tf.Graph().as_default():
        in_indices = tf.placeholder(dtype=indices.dtype, shape=indices.shape, name="indices")
        in_depth = ops.convert_to_tensor(depth, dtype=depth.dtype)
        in_on_value = tf.placeholder(dtype=on_value.dtype, shape=on_value.shape, name="on_value")
        in_off_value = tf.placeholder(
            dtype=off_value.dtype, shape=off_value.shape, name="off_value"
        )
        if axis is not None:
            out = array_ops.one_hot(in_indices, in_depth, in_on_value, in_off_value, axis=axis)
        else:
            out = array_ops.one_hot(in_indices, in_depth, in_on_value, in_off_value)
        compare_tflite_with_tvm(
            [indices, on_value, off_value],
            ["indices", "on_value", "off_value"],
            [in_indices, in_on_value, in_off_value],
            [out],
        )
def test_forward_one_hot():
    """ONE_HOT with scalar and vector indices, int and float on/off values."""
    _test_one_hot(np.int32(2), np.int32(8), np.int32(1), np.int32(0))
    _test_one_hot(np.int32(4), np.int32(8), np.float32(1), np.float32(0))
    vector_indices = np.array([1, 2, 3], dtype=np.int32)
    _test_one_hot(vector_indices, np.int32(8), np.int32(3), np.int32(-1))
    _test_one_hot(vector_indices, np.int32(8), np.int32(3), np.int32(-1), axis=0)
#######################################################################
# Pack
# ----
def _test_pack(data, is_var, axis, quantized=False):
    """One iteration of pack.

    *is_var* selects, per element of *data*, whether it is fed through a
    placeholder (True) or baked in as a graph constant (False). The
    quantized path fake-quantizes every input to (-100, 100).
    """
    assert len(data) >= 1
    assert len(data) == len(is_var)
    if quantized:
        with tf.Graph().as_default():
            in_data = [
                array_ops.placeholder(shape=d.shape, dtype="float32", name="in_" + str(idx))
                if is_var[idx]
                else constant_op.constant(
                    d, shape=d.shape, dtype="float32", name="in_constant_" + str(idx)
                )
                for idx, d in enumerate(data)
            ]
            inq_data = [
                tf.quantization.fake_quant_with_min_max_args(
                    i_data, min=-100, max=100, name=f"inq_{idx}"
                )
                for idx, i_data in enumerate(in_data)
            ]
            input_range = {}
            for i in range(len(data)):
                input_range[f"inq_{i}"] = (-100, 100)
            out = array_ops.pack(inq_data, axis=axis)
            out = tf.quantization.fake_quant_with_min_max_args(out, min=-100, max=100, name="out")
            name = [f"inq_{idx}:0" for idx in range(len(data))]
            compare_tflite_with_tvm(
                data, name, inq_data, [out], quantized=True, input_range=input_range
            )
    else:
        with tf.Graph().as_default():
            in_data = [
                array_ops.placeholder(shape=d.shape, dtype=d.dtype, name="in_" + str(idx))
                if is_var[idx]
                else constant_op.constant(
                    d, shape=d.shape, dtype=d.dtype, name="in_constant_" + str(idx)
                )
                for idx, d in enumerate(data)
            ]
            out = array_ops.pack(in_data, axis=axis)
            name = [_.name for _ in in_data]
            compare_tflite_with_tvm(data, name, in_data, [out], experimental_new_converter=True)
def test_forward_pack():
    """PACK with mixed placeholder/constant inputs, plus a quantized case."""
    cases = [
        ([np.int32(1), np.int32(5)], [False, False], 0, False),
        (
            [np.array([1, 4]), np.array([2, 5]), np.array([3, 6])],
            [True, False, False],
            0,
            False,
        ),
        (
            [np.arange(6).reshape((1, 2, 1, 3)), np.arange(6).reshape((1, 2, 1, 3))],
            [True, True],
            1,
            False,
        ),
        (
            [np.arange(6).reshape((3, 2)), np.arange(6).reshape((3, 2))],
            [True, True],
            1,
            False,
        ),
        (
            [np.arange(6).reshape((2, 1, 1, 3)) for _ in range(3)],
            [True, True, True],
            1,
            False,
        ),
        (
            [np.arange(6, dtype=np.uint8).reshape((2, 1, 1, 3)) for _ in range(3)],
            [True, True, True],
            1,
            True,
        ),
    ]
    for data, is_var, axis, quantized in cases:
        _test_pack(data, is_var, axis, quantized=quantized)
#######################################################################
# Unpack
# ------
def _test_unpack(data, axis, num_unpacks):
    """One iteration of UNPACK"""
    with tf.Graph().as_default():
        placeholder = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
        outputs = gen_array_ops.unpack(placeholder, num=num_unpacks, axis=axis, name="unpack")
        names = [f"out_{n}:0" for n in range(num_unpacks)]
        compare_tflite_with_tvm([data], "Placeholder:0", [placeholder], outputs, out_names=names)
def test_forward_unpack():
    """UNPACK with positive and (TF >= 1.14) negative axes."""
    for shape, dtype, axis, count in (
        ((3, 1), np.int32, 1, 1),
        ((3, 4), np.float32, 0, 3),
        ((3, 1, 2), np.float32, 0, 3),
    ):
        _test_unpack(
            np.array(np.random.uniform(0, 5, shape), dtype=dtype), axis=axis, num_unpacks=count
        )
    # tflite 1.13 doesn't accept negative axis
    if package_version.parse(tf.VERSION) >= package_version.parse("1.14.0"):
        for shape, axis, count in (((3, 6), -2, 3), ((2, 3, 4), -3, 2)):
            _test_unpack(
                np.array(np.random.uniform(0, 5, shape), dtype=np.int32),
                axis=axis,
                num_unpacks=count,
            )
#######################################################################
# Local response normalization
# ----------------------------
def _test_local_response_normalization(data, depth_radius, bias, alpha, beta):
    """One iteration of LOCAL_RESPONSE_NORMALIZATION"""
    with tf.Graph().as_default():
        placeholder = array_ops.placeholder(shape=data.shape, dtype="float32", name="in_0")
        normalized = nn_ops.local_response_normalization(
            placeholder, depth_radius=depth_radius, bias=bias, alpha=alpha, beta=beta
        )
        compare_tflite_with_tvm(data, "in_0:0", [placeholder], [normalized])
def test_forward_local_response_normalization():
    """LOCAL_RESPONSE_NORMALIZATION (TFLite >= 1.14.0 fbs schema)."""
    data = np.random.uniform(size=(1, 6, 4, 3)).astype("float32")
    if package_version.parse(tf.VERSION) >= package_version.parse("1.14.0"):
        _test_local_response_normalization(data, depth_radius=5, bias=1, alpha=1, beta=0.5)
#######################################################################
# L2 normalization
# ----------------
def _test_l2_normalization(data, axis, fused_activation_function=None):
    """One iteration of L2_NORMALIZATION, optionally with a fused activation."""
    with tf.Graph().as_default():
        placeholder = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
        normalized = nn_impl.l2_normalize(placeholder, axis)
        normalized = with_fused_activation_function(normalized, fused_activation_function)
        compare_tflite_with_tvm(data, "Placeholder:0", [placeholder], [normalized])
def test_forward_l2_normalization():
    """L2_NORMALIZATION with and without a fused RELU."""
    data = np.random.uniform(size=(3, 6, 4)).astype("float32")
    for activation in (None, "RELU"):
        _test_l2_normalization(data, axis=2, fused_activation_function=activation)
#######################################################################
# Logistic
# --------
def _test_logistic(data, quantized=False):
    """One iteration of LOGISTIC (sigmoid).

    The quantized path fake-quantizes the input to (-5, 5) and the output
    to sigmoid's natural (0, 1) range.
    """
    with tf.Graph().as_default():
        in_data = array_ops.placeholder(shape=data.shape, dtype="float32", name="in_0")
        if quantized:
            inq_data = tf.quantization.fake_quant_with_min_max_args(
                in_data, min=-5, max=5, name="inq_0"
            )
            input_range = {"inq_0": (-5, 5)}
            out = math_ops.sigmoid(inq_data)
            out = tf.quantization.fake_quant_with_min_max_args(out, min=0, max=1, name="out")
            compare_tflite_with_tvm(
                data, "inq_0:0", [inq_data], [out], quantized=True, input_range=input_range
            )
        else:
            out = math_ops.sigmoid(in_data)
            compare_tflite_with_tvm(data, "in_0:0", [in_data], [out])
def test_forward_logistic():
    """LOGISTIC in float32 and quantized uint8 form."""
    float_input = np.arange(6.0, dtype=np.float32).reshape((1, 6))
    _test_logistic(float_input)
    quant_input = np.random.uniform(0, 255, (3, 6)).astype(np.uint8)
    _test_logistic(quant_input, quantized=True)
#######################################################################
# Softmax
# -------
def _test_softmax(data):
    """One iteration of softmax"""
    with tf.Graph().as_default():
        placeholder = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
        softmax_out = nn_ops.softmax(placeholder)
        compare_tflite_with_tvm(data, "Placeholder:0", [placeholder], [softmax_out])
def test_forward_softmax():
    """SOFTMAX on 2-D and 3-D float32 inputs."""
    for shape in ((1, 6), (1, 2, 3)):
        _test_softmax(np.arange(6.0, dtype=np.float32).reshape(shape))
######################################################################
# Log_softmax
# -----------
def _test_log_softmax(data, quantized=False):
    """One iteration of LOG_SOFTMAX.

    The quantized path fake-quantizes the input to (-10, 10) and the output
    to (-20, 0); log-softmax outputs are always <= 0.
    """
    with tf.Graph().as_default():
        in_data = array_ops.placeholder(shape=data.shape, dtype="float32", name="in_0")
        if quantized:
            inq_data = tf.quantization.fake_quant_with_min_max_args(
                in_data, min=-10, max=10, name="inq_0"
            )
            input_range = {"inq_0": (-10, 10)}
            # tflite log_softmax supports only the case when axis is not specified
            out = nn_ops.log_softmax(inq_data)
            out = tf.quantization.fake_quant_with_min_max_args(out, min=-20, max=0, name="out")
            compare_tflite_with_tvm(
                data, "inq_0:0", [inq_data], [out], quantized=True, input_range=input_range
            )
        else:
            out = nn_ops.log_softmax(in_data)
            compare_tflite_with_tvm(data, "in_0:0", [in_data], [out])
def test_forward_log_softmax():
    """LOG_SOFTMAX in float32 and quantized uint8 form."""
    float_input = np.random.uniform(-10, 10, size=(3, 6)).astype(np.float32)
    _test_log_softmax(float_input)
    quant_input = np.random.uniform(0, 255, (3, 6)).astype(np.uint8)
    _test_log_softmax(quant_input, quantized=True)
#######################################################################
# Tanh
# ----
def _test_tanh(data, quantized=False):
    """One iteration of TANH.

    The quantized path fake-quantizes the input to (-3, 3) and the output
    to tanh's natural (-1, 1) range.
    """
    with tf.Graph().as_default():
        in_data = array_ops.placeholder(shape=data.shape, dtype="float32", name="in_0")
        if quantized:
            inq_data = tf.quantization.fake_quant_with_min_max_args(
                in_data, min=-3, max=3, name="inq_0"
            )
            input_range = {"inq_0": (-3, 3)}
            out = math_ops.tanh(inq_data)
            out = tf.quantization.fake_quant_with_min_max_args(out, min=-1, max=1, name="out")
            compare_tflite_with_tvm(
                data, "inq_0:0", [inq_data], [out], quantized=True, input_range=input_range
            )
        else:
            out = math_ops.tanh(in_data)
            compare_tflite_with_tvm(data, "in_0:0", [in_data], [out])
def test_forward_tanh():
    """TANH in float32 and quantized uint8 form."""
    float_input = np.arange(6.0, dtype=np.float32).reshape((1, 6))
    _test_tanh(float_input, quantized=False)
    quant_input = np.arange(0, 256, 30, dtype=np.uint8)
    _test_tanh(quant_input, quantized=True)
#######################################################################
# ReLu
# ----
def _test_relu(data, quantized=False):
    """One iteration of ReLU.

    The quantized path fake-quantizes the input to (-10, 10).
    """
    with tf.Graph().as_default():
        in_data = array_ops.placeholder(shape=data.shape, dtype="float32", name="in_0")
        if quantized:
            inq_data = tf.quantization.fake_quant_with_min_max_args(
                in_data, min=-10, max=10, name="inq_0"
            )
            input_range = {"inq_0": (-10, 10)}
            out = nn_ops.relu(inq_data)
            # NOTE(review): output fake-quant range (0, 6) mirrors the ReLU6
            # test even though plain ReLU is unbounded above — presumably a
            # deliberate bound on the quantized output; confirm.
            out = tf.quantization.fake_quant_with_min_max_args(out, min=0, max=6, name="out")
            compare_tflite_with_tvm(
                data, "inq_0:0", [inq_data], [out], quantized=True, input_range=input_range
            )
        else:
            out = nn_ops.relu(in_data)
            compare_tflite_with_tvm(data, "in_0:0", [in_data], [out])
def test_forward_relu():
    """RELU in float32 and quantized uint8 form."""
    float_input = np.arange(6.0, dtype=np.float32).reshape((1, 6))
    _test_relu(float_input)
    quant_input = np.random.uniform(0, 255, (3, 6)).astype(np.uint8)
    _test_relu(quant_input, quantized=True)
#######################################################################
# ReLU6
# -----
def _test_relu6(data, quantized=False):
    """Build a single ReLU6 graph and compare the TVM result against TFLite."""
    with tf.Graph().as_default():
        in_data = array_ops.placeholder(shape=data.shape, dtype="float32", name="in_0")
        if not quantized:
            result = nn_ops.relu6(in_data)
            compare_tflite_with_tvm(data, "in_0:0", [in_data], [result])
            return
        # Quantized path: fake-quantize input (symmetric +-10) and the
        # [0, 6] output of relu6.
        inq_data = tf.quantization.fake_quant_with_min_max_args(
            in_data, min=-10, max=10, name="inq_0"
        )
        result = nn_ops.relu6(inq_data)
        result = tf.quantization.fake_quant_with_min_max_args(result, min=0, max=6, name="out")
        compare_tflite_with_tvm(
            data,
            "inq_0:0",
            [inq_data],
            [result],
            quantized=True,
            input_range={"inq_0": (-10, 10)},
        )
def test_forward_relu6():
    """ReLU6"""
    float_input = np.random.uniform(-10, 10, size=(3, 6)).astype(np.float32)
    _test_relu6(float_input)
    uint8_input = np.random.uniform(0, 255, (3, 6)).astype(np.uint8)
    _test_relu6(uint8_input, quantized=True)
#######################################################################
# Leaky_ReLU
# ----------
def _test_leaky_relu(data, alpha, quantized=False):
    """Build a single Leaky_ReLU graph and compare the TVM result against TFLite."""
    with tf.Graph().as_default():
        in_data = array_ops.placeholder(shape=data.shape, dtype="float32", name="in_0")
        if not quantized:
            result = nn_ops.leaky_relu(in_data, alpha)
            compare_tflite_with_tvm(data, "in_0:0", [in_data], [result])
            return
        # Quantized path: the same [-3, 2] range is used for both the
        # fake-quantized input and output.
        inq_data = tf.quantization.fake_quant_with_min_max_args(
            in_data, min=-3, max=2, name="inq_0"
        )
        result = nn_ops.leaky_relu(inq_data, alpha)
        result = tf.quantization.fake_quant_with_min_max_args(result, min=-3, max=2, name="out")
        compare_tflite_with_tvm(
            data,
            "inq_0:0",
            [inq_data],
            [result],
            quantized=True,
            input_range={"inq_0": (-3, 2)},
        )
def test_forward_leaky_relu():
    """Leaky_ReLU"""
    float_input = np.random.uniform(-5, 5, (1, 6)).astype(np.float32)
    _test_leaky_relu(float_input, alpha=0.2)
    # The quantized pattern requires a TFLite >= 1.14 converter.
    if package_version.parse(tf.VERSION) >= package_version.parse("1.14.0"):
        uint8_input = np.random.uniform(0, 255, (2, 3)).astype(np.uint8)
        _test_leaky_relu(uint8_input, alpha=0.3, quantized=True)
#######################################################################
# ReLU_n1_to_1
# ------------
def _test_relu_n1_to_1(data, quantized=False):
    """Build the max(-1, min(x, 1)) clamp that TFLite rewrites to RELU_N1_TO_1."""
    with tf.Graph().as_default():
        in_data = array_ops.placeholder(shape=data.shape, dtype="float32", name="in_0")
        if not quantized:
            clamped = math_ops.maximum(-1.0, math_ops.minimum(in_data, 1.0))
            compare_tflite_with_tvm(data, "in_0:0", [in_data], [clamped])
            return
        inq_data = tf.quantization.fake_quant_with_min_max_args(
            in_data, min=-3, max=3, name="inq_0"
        )
        # There is no direct tf op for RELU_N1_TO_1; this exact clamp
        # pattern is replaced by the TFLite converter.
        clamped = math_ops.maximum(-1.0, math_ops.minimum(inq_data, 1.0))
        clamped = tf.quantization.fake_quant_with_min_max_args(clamped, min=-1, max=1, name="out")
        compare_tflite_with_tvm(
            data,
            "inq_0:0",
            [inq_data],
            [clamped],
            quantized=True,
            input_range={"inq_0": (-3, 3)},
        )
def test_forward_relu_n1_to_1():
    """ReLU_n1_to_1"""
    float_input = np.random.uniform(-3, 3, (1, 6)).astype(np.float32)
    _test_relu_n1_to_1(float_input)
    if package_version.parse(tf.VERSION) >= package_version.parse("1.14.0"):
        uint8_input = np.random.uniform(0, 255, (3, 6)).astype(np.uint8)
        _test_relu_n1_to_1(uint8_input, quantized=True)
#######################################################################
# PReLU
# -----
def _test_prelu(data, alpha):
    """Build the relu(x) - alpha*relu(-x) pattern that TFLite fuses into PRELU."""
    with tf.Graph().as_default():
        placeholder = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
        # This specific composition is recognized and replaced by PRelu in
        # the TFLite converter.
        result = nn_ops.relu(placeholder) + (-alpha * nn_ops.relu(-placeholder))
        compare_tflite_with_tvm(data, "Placeholder:0", [placeholder], [result])
def test_forward_prelu():
    """PReLU"""
    # Alpha shapes that must broadcast against a (1, 32, 32, 3) NHWC input.
    for alpha_shape in [
        (3,),
        (1, 3),
        (1, 1, 3),
        (1, 1, 1, 3),
        (32, 3),
        (32, 32, 3),
        (1, 32, 1, 3),
    ]:
        _test_prelu(
            np.random.uniform(-5, 5, size=(1, 32, 32, 3)).astype("float32"),
            np.full(alpha_shape, 0.2, dtype="float32"),
        )
    # Lower-rank inputs.
    for input_shape, alpha_shape in [
        ((1, 1, 3), (3,)),
        ((1, 32, 3), (32, 3)),
        ((32, 3), (3,)),
    ]:
        _test_prelu(
            np.random.uniform(-5, 5, size=input_shape).astype("float32"),
            np.full(alpha_shape, 0.2, dtype="float32"),
        )
#######################################################################
# DepthToSpace
# ------------
def _test_depthtospace(data, block_size):
    """One iteration of DEPTH_TO_SPACE with the given input and block size."""
    with tf.Graph().as_default():
        placeholder = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
        result = array_ops.depth_to_space(placeholder, block_size)
        compare_tflite_with_tvm(data, "Placeholder:0", [placeholder], [result])
def test_forward_depthtospace():
    """DEPTH_TO_SPACE"""
    # DEPTH_TO_SPACE comes with TFLite >= 1.15.0 fbs schema
    if package_version.parse(tf.VERSION) < package_version.parse("1.15.0"):
        return
    for shape, block_size in [([1, 32, 32, 4], 2), ([1, 16, 8, 32], 4)]:
        _test_depthtospace(np.random.normal(size=shape).astype("float32"), block_size)
#######################################################################
# SpaceToDepth
# ------------
def _test_spacetodepth(data, block_size):
    """One iteration of SPACE_TO_DEPTH with the given input and block size."""
    with tf.Graph().as_default():
        placeholder = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
        result = array_ops.space_to_depth(placeholder, block_size)
        compare_tflite_with_tvm(data, "Placeholder:0", [placeholder], [result])
def test_forward_spacetodepth():
    """SPACE_TO_DEPTH"""
    for shape, block_size in [([1, 32, 32, 4], 2), ([1, 16, 8, 32], 4)]:
        _test_spacetodepth(np.random.normal(size=shape).astype("float32"), block_size)
#######################################################################
# ReverseSequence
# ---------------
def _test_reverse_sequence(shape, dtype, seq_lengths, batch_axis, seq_axis):
    """One iteration of REVERSE_SEQUENCE with the given shape and axes."""
    input_values = np.random.uniform(0, 100, size=shape).astype(dtype)
    with tf.Graph().as_default():
        placeholder = array_ops.placeholder(dtype=dtype, name="input", shape=shape)
        reversed_out = tf.reverse_sequence(
            placeholder, seq_lengths=seq_lengths, batch_axis=batch_axis, seq_axis=seq_axis
        )
        compare_tflite_with_tvm(input_values, "input", [placeholder], [reversed_out])
def test_forward_reverse_sequence():
    """REVERSE_SEQUENCE"""
    if package_version.parse(tf.VERSION) < package_version.parse("1.14.0"):
        return
    for shape, lengths, batch_axis, seq_axis in [
        ([4, 3], [3, 2, 1], 1, 0),
        ([4, 3], [3, 2, 1, 3], 0, 1),
        ([2, 3, 3, 3], [2, 3, 2], 2, 1),
        ([2, 4, 6, 4, 5], [5, 3], 0, 2),
        ([2, 4, 6, 4, 5], [5, 3, 1, 4], 3, 2),
    ]:
        _test_reverse_sequence(shape, "float32", lengths, batch_axis, seq_axis)
#######################################################################
# Sparse To Dense
# ---------------
def _test_sparse_to_dense(sparse_indices, sparse_values, default_value, output_shape):
    """One iteration of SPARSE_TO_DENSE: scatter values into a dense tensor.

    ``default_value`` may be None, in which case sparse_to_dense's built-in
    fill value is used; otherwise it is fed through a scalar placeholder.
    """
    # tflite 1.13 convert method does not accept empty shapes
    if package_version.parse(tf.VERSION) >= package_version.parse("1.14.0"):
        with tf.Graph().as_default():
            indices = tf.placeholder(
                shape=sparse_indices.shape, dtype=str(sparse_indices.dtype), name="indices"
            )
            values = tf.placeholder(
                shape=sparse_values.shape, dtype=str(sparse_values.dtype), name="values"
            )
            # The dense output shape is baked into the graph as a constant.
            oshape = tf.constant(
                output_shape, shape=output_shape.shape, dtype=str(output_shape.dtype)
            )
            if default_value is None:
                output = tf.sparse_to_dense(indices, oshape, values)
                compare_tflite_with_tvm(
                    [sparse_indices, sparse_values],
                    ["indices", "values"],
                    [indices, values],
                    [output],
                )
            else:
                # Feed the fill value as an extra scalar input.
                dv_placeholder = tf.placeholder(
                    shape=(), dtype=str(default_value.dtype), name="default_value"
                )
                output = tf.sparse_to_dense(indices, oshape, values, dv_placeholder)
                compare_tflite_with_tvm(
                    [sparse_indices, sparse_values, default_value],
                    ["indices", "values", "default_value"],
                    [indices, values, dv_placeholder],
                    [output],
                )
def test_forward_sparse_to_dense():
    """
    Works in tvm/topi/tensorflow. But tflite converter breaks this test case
    _test_sparse_to_dense(
        np.int32(1),
        np.int32(3),
        np.int32(0),
        np.array([5]).astype("int32")
    )
    """
    cases = [
        # vector
        (
            np.array([0, 1, 4]).astype("int32"),
            np.array([3, 3, 3]).astype("int32"),
            np.int32(0),
            np.array([5]).astype("int32"),
        ),
        # vector nXd
        (
            np.array([[0, 0], [1, 2]]).astype("int32"),
            np.array([1, 2]).astype("int32"),
            np.int32(0),
            np.array([3, 4]).astype("int32"),
        ),
        (
            np.array([[0, 0, 0], [1, 2, 3]]).astype("int32"),
            np.array([1, 2]).astype("int32"),
            np.int32(4),
            np.array([2, 3, 4]).astype("int32"),
        ),
        # floats
        (
            np.array([0, 1, 4]).astype("int32"),
            np.array([3.1, 3.1, 3.1]).astype("float32"),
            np.float32(3.5),
            np.array([5]).astype("int32"),
        ),
        # default value not specified
        (
            np.array([0, 1, 4]).astype("int32"),
            np.array([3.1, 3.1, 3.1]).astype("float32"),
            None,
            np.array([5]).astype("int32"),
        ),
    ]
    for indices, values, default_value, shape in cases:
        _test_sparse_to_dense(indices, values, default_value, shape)
#######################################################################
# Fully Connected
# ---------------
def _test_fully_connected(
    tensor_in_sizes,
    const_input,
    filter_in_sizes,
    bias_in_size=None,
    quantized=False,
    fp16_quantized=False,
):
    """One iteration of fully connected.

    Parameters
    ----------
    tensor_in_sizes : list
        Input shape (N, ...); everything past the batch dim is flattened
        before the matmul.
    const_input : bool
        Feed the input as a graph constant instead of a placeholder.
    filter_in_sizes : list
        Weight shape [K, M]; K must equal the flattened input size.
    bias_in_size : list, optional
        Shape of the optional bias; must be [filter_in_sizes[1]].
    quantized : bool
        Build a fake-quantized (uint8) graph.
    fp16_quantized : bool
        Use post-training fp16 quantization of the float graph instead.
    """
    total_size_1 = np.prod(tensor_in_sizes)
    total_size_2 = np.prod(filter_in_sizes)
    assert (
        int(total_size_1 / tensor_in_sizes[0]) == filter_in_sizes[0]
    ), "input size and filter size are mismatched"
    # Initializes the input tensor with array containing incrementing
    # numbers from 1.
    data_array = np.arange(
        1, total_size_1 + 1, dtype=np.uint8 if quantized and not fp16_quantized else np.float32
    )
    filter_array = np.arange(
        1, total_size_2 + 1, dtype=np.uint8 if quantized and not fp16_quantized else np.float32
    )
    in_name = "input"
    with tf.Graph().as_default():
        in_data = (
            constant_op.constant(data_array, shape=tensor_in_sizes, dtype=np.float32, name=in_name)
            if const_input
            else array_ops.placeholder(shape=tensor_in_sizes, dtype=np.float32, name=in_name)
        )
        in_filter = constant_op.constant(filter_array, shape=filter_in_sizes, dtype=np.float32)
        data_array = np.reshape(data_array, tensor_in_sizes)
        # if we have bias
        if bias_in_size:
            assert bias_in_size[0] == filter_in_sizes[1], "bias and filter size are mismatched"
            # NOTE(review): unlike data/filter above, this picks uint8 for any
            # quantized run (fp16 included); the constant below casts to
            # float32 anyway, so the result is unaffected — confirm intent.
            bias_array = np.arange(
                1, bias_in_size[0] + 1, dtype=np.uint8 if quantized else np.float32
            )
            in_bias = constant_op.constant(bias_array, shape=bias_in_size, dtype=np.float32)
        if quantized and not fp16_quantized:
            inq_data = tf.quantization.fake_quant_with_min_max_args(
                in_data, min=-100, max=100, name="inq_0"
            )
            inq_filter = tf.quantization.fake_quant_with_min_max_args(
                in_filter, min=-100, max=100, name="inq_1"
            )
            # Ranges for both fake-quantized tensors. (A previously dead
            # assignment that covered only "inq_0" has been removed.)
            input_range = {"inq_0": (-100, 100), "inq_1": (-100, 100)}
            # reshape N H W C into N H*W*C
            inq_data_reshape = array_ops.reshape(inq_data, [tensor_in_sizes[0], -1])
            out = math_ops.mat_mul(inq_data_reshape, inq_filter)
            out = tf.quantization.fake_quant_with_min_max_args(out, min=-100, max=100, name="out")
            # if we have bias
            if bias_in_size:
                out = nn_ops.bias_add(out, in_bias)
            compare_tflite_with_tvm(
                data_array,
                inq_data.name,
                [inq_data],
                [out],
                quantized=True,
                input_range=input_range,
                experimental_new_converter=True,
            )
        else:
            # reshape N H W C into N H*W*C
            in_data_reshape = array_ops.reshape(in_data, [tensor_in_sizes[0], -1])
            out = math_ops.mat_mul(in_data_reshape, in_filter)
            # TODO : Need to construct a fc op with (keep_num_dims == True)
            # if we have bias
            if bias_in_size:
                out = nn_ops.bias_add(out, in_bias)
            compare_tflite_with_tvm(
                data_array,
                in_data.name,
                [in_data],
                [out],
                experimental_new_converter=True,
                fp16_quantized=fp16_quantized,
            )
def test_forward_fully_connected():
    """Fully Connected"""
    shape_cases = [
        ([1, 4], [4, 4], None),
        ([1, 4], [4, 4], [4]),
        ([1, 1, 1, 5], [5, 5], None),
        ([1, 1, 10], [10, 103], None),
        ([1, 1, 1, 150], [150, 100], None),
        ([1, 1, 1, 150], [150, 100], None),
        ([1, 1, 1, 150], [150, 100], [100]),
        ([5, 1, 1, 150], [150, 100], None),
        ([5, 1, 1, 150], [150, 100], [100]),
    ]
    # Exercise every combination of constant input / quantization flags.
    flag_cases = [
        (const_input, quantized, fp16_quantized)
        for const_input in (False, True)
        for quantized in (False, True)
        for fp16_quantized in (False, True)
    ]
    for input_shape, weight_shape, bias_shape in shape_cases:
        for const_input, quantized, fp16_quantized in flag_cases:
            _test_fully_connected(
                input_shape,
                const_input,
                weight_shape,
                bias_shape,
                quantized,
                fp16_quantized,
            )
#######################################################################
# REVERSE_V2
# ----------
def _test_reverse_v2(input_shape, axis, dtype):
    """One iteration of REVERSE_V2 along the given axis tensor."""
    with tf.Graph().as_default():
        input_values = np.random.randint(0, 100, size=input_shape).astype(dtype)
        placeholder = tf.placeholder(
            dtype=input_values.dtype, shape=input_values.shape, name="input"
        )
        axis_tensor = ops.convert_to_tensor(axis, dtype=axis.dtype)
        result = array_ops.reverse(placeholder, axis_tensor)
        compare_tflite_with_tvm([input_values], ["input"], [placeholder], [result])
def test_forward_reverse_v2():
    """REVERSE_V2"""
    for dtype in ["float32", "int32"]:
        for shape, axis in [((5), [0]), ((5, 6, 4, 2), [2])]:
            _test_reverse_v2(shape, np.array(axis, dtype="int32"), dtype)
#######################################################################
# MATRIX_SET_DIAG
# ---------------
def _test_matrix_set_diag(input_shape, input_type, quantized=False):
    """One iteration of MATRIX_SET_DIAG: replace the main diagonal(s) of a
    batched matrix with a supplied diagonal tensor."""
    with tf.Graph().as_default():
        # Diagonal shape is the batch dims plus min(rows, cols) of the
        # trailing 2-D matrix.
        diagonal_shape = list(input_shape[:-2])
        diagonal_shape.append(min(input_shape[-2], input_shape[-1]))
        if quantized:
            # ignoring input_type as quantized requires uint8
            input_array = np.random.uniform(0, 256, input_shape).astype("uint8")
            in_input = tf.placeholder(dtype="float32", shape=input_array.shape, name="input")
            inq_input = tf.quantization.fake_quant_with_min_max_args(
                in_input, min=-100, max=100, name="q_input"
            )
            diagonal = np.random.uniform(0, 256, diagonal_shape).astype("uint8")
            in_diagonal = tf.placeholder(dtype="float32", shape=diagonal.shape, name="diagonal")
            inq_diagonal = tf.quantization.fake_quant_with_min_max_args(
                in_diagonal, min=-100, max=100, name="q_diagonal"
            )
            # Both fake-quantized inputs share the same [-100, 100] range.
            input_range = {"q_input": (-100, 100), "q_diagonal": (-100, 100)}
            out = array_ops.matrix_set_diag(inq_input, inq_diagonal)
            out = tf.quantization.fake_quant_with_min_max_args(out, min=-100, max=100, name="out")
            compare_tflite_with_tvm(
                [input_array, diagonal],
                ["q_input", "q_diagonal"],
                [inq_input, inq_diagonal],
                [out],
                quantized=True,
                input_range=input_range,
            )
        else:
            # Float/int path: feed both tensors through plain placeholders.
            input_array = np.random.uniform(0, 100, input_shape).astype(input_type)
            diagonal = np.random.uniform(0, 100, diagonal_shape).astype(input_type)
            in_input = tf.placeholder(
                dtype=input_array.dtype, shape=input_array.shape, name="input"
            )
            in_diagonal = tf.placeholder(
                dtype=diagonal.dtype, shape=diagonal.shape, name="diagonal"
            )
            out = array_ops.matrix_set_diag(in_input, in_diagonal)
            compare_tflite_with_tvm(
                [input_array, diagonal], ["input", "diagonal"], [in_input, in_diagonal], [out]
            )
def test_forward_matrix_set_diag():
    """MATRIX_SET_DIAG"""
    shapes = [(4, 4), (5, 4, 3, 4), (4, 4, 2)]
    for dtype in [np.float32, np.int32]:
        for shape in shapes:
            _test_matrix_set_diag(shape, dtype)
    # Quantized variant forces uint8 regardless of the dtype argument.
    for shape in shapes:
        _test_matrix_set_diag(shape, np.uint8, quantized=True)
#######################################################################
# MATRIX_DIAG
# -----------
def _test_matrix_diag(diagonal_shape, dtype):
    """One iteration of MATRIX_DIAG: build a matrix from its diagonal."""
    with tf.Graph().as_default():
        diag_values = np.random.uniform(0, 100, diagonal_shape).astype(dtype)
        diag_placeholder = tf.placeholder(
            dtype=diag_values.dtype, shape=diag_values.shape, name="diagonal"
        )
        result = array_ops.matrix_diag(diag_placeholder)
        compare_tflite_with_tvm(
            [diag_values],
            ["diagonal"],
            [diag_placeholder],
            [result],
            experimental_new_converter=True,
        )
def test_forward_matrix_diag():
    """MATRIX_DIAG"""
    for dtype in [np.float32, np.int32]:
        for diag_shape in [(4), (5, 4, 3), (2, 3)]:
            _test_matrix_diag(diag_shape, dtype)
#######################################################################
# Custom Operators
# ----------------
def _test_detection_postprocess(tf_model_file, box_encodings_size, class_predictions_size):
    """One iteration of detection postProcess with given model and shapes.

    Converts a frozen graph ending in the custom TFLite_Detection_PostProcess
    op, then compares TVM's four outputs (boxes, classes, scores, valid count)
    against the TFLite interpreter's for the valid detections only.
    """
    converter = tf.lite.TFLiteConverter.from_frozen_graph(
        tf_model_file,
        input_arrays=["raw_outputs/box_encodings", "raw_outputs/class_predictions"],
        output_arrays=[
            "TFLite_Detection_PostProcess",
            "TFLite_Detection_PostProcess:1",
            "TFLite_Detection_PostProcess:2",
            "TFLite_Detection_PostProcess:3",
        ],
        input_shapes={
            "raw_outputs/box_encodings": box_encodings_size,
            "raw_outputs/class_predictions": class_predictions_size,
        },
    )
    # The postprocess op is a TFLite custom op, so custom ops must be allowed.
    converter.allow_custom_ops = True
    converter.inference_type = tf.lite.constants.FLOAT
    tflite_model = converter.convert()
    np.random.seed(0)  # fixed seed so both runtimes see identical inputs
    box_encodings = np.random.uniform(size=box_encodings_size).astype("float32")
    class_predictions = np.random.uniform(size=class_predictions_size).astype("float32")
    tflite_output = run_tflite_graph(tflite_model, [box_encodings, class_predictions])
    tvm_output = run_tvm_graph(
        tflite_model,
        [box_encodings, class_predictions],
        ["raw_outputs/box_encodings", "raw_outputs/class_predictions"],
        num_output=4,
    )
    # Check all output shapes are equal
    assert all(
        list(
            tvm_tensor.shape == tflite_tensor.shape
            for (tvm_tensor, tflite_tensor) in zip(tvm_output, tflite_output)
        )
    )
    # Check valid count is the same
    # NOTE(review): this compares numpy arrays with ==; it only works while
    # the valid-count output has a single element — confirm if shapes change.
    assert tvm_output[3] == tflite_output[3]
    valid_count = tvm_output[3][0]
    # For boxes that do not have any detections, TFLite puts random values. Therefore, we compare
    # tflite and tvm tensors for only valid boxes.
    for i in range(0, valid_count):
        # Check bounding box co-ords
        tvm.testing.assert_allclose(
            np.squeeze(tvm_output[0][0][i]),
            np.squeeze(tflite_output[0][0][i]),
            rtol=1e-5,
            atol=1e-5,
        )
        # Check the class
        # Stricter check to ensure class remains same
        np.testing.assert_equal(np.squeeze(tvm_output[1][0][i]), np.squeeze(tflite_output[1][0][i]))
        # Check the score
        tvm.testing.assert_allclose(
            np.squeeze(tvm_output[2][0][i]),
            np.squeeze(tflite_output[2][0][i]),
            rtol=1e-5,
            atol=1e-5,
        )
def test_detection_postprocess():
    """Detection PostProcess"""
    # SSD MobileNet V2 quantized graph: 1917 anchors, 91 classes.
    model_path = tf_testing.get_workload_official(
        "http://download.tensorflow.org/models/object_detection/"
        "ssd_mobilenet_v2_quantized_300x300_coco_2019_01_03.tar.gz",
        "ssd_mobilenet_v2_quantized_300x300_coco_2019_01_03/tflite_graph.pb",
    )
    _test_detection_postprocess(model_path, (1, 1917, 4), (1, 1917, 91))
    # A second graph with the postprocess op and 2034 anchors.
    model_path = download_testdata(
        "https://github.com/czh978/models_for_tvm_test/raw/main/tflite_graph_with_postprocess.pb",
        "tflite_graph_with_postprocess.pb",
    )
    _test_detection_postprocess(model_path, (1, 2034, 4), (1, 2034, 91))
#######################################################################
# Custom Converter
# ----------------
def test_custom_op_converter():
    """Test case for user-defined operator converter in TFLite frontend"""
    class DummyOperatorConverter(relay.frontend.tflite.OperatorConverter):
        """Operator Converter for converting TFLite ops to relay ops"""
        def __init__(self, model, subgraph, exp_tab):
            super().__init__(model, subgraph, exp_tab)
            self.allow_custom_ops = True
            # Route SUB through the custom lowering below instead of the
            # frontend's built-in converter.
            convert_map_overwrite = {"SUB": self.convert_sub_dummy}
            self.convert_map.update(convert_map_overwrite)
        def convert_sub_dummy(self, op):
            """Convert TFLite SUB"""
            input_tensors = self.get_input_tensors(op)
            assert len(input_tensors) == 2, "input tensors length should be 2"
            lhs_tensor = input_tensors[0]
            rhs_tensor = input_tensors[1]
            lhs_expr = self.get_expr(lhs_tensor.tensor_idx)
            rhs_expr = self.get_expr(rhs_tensor.tensor_idx)
            # Lower a - b as a + (-b): mathematically identical, so the two
            # conversions below must produce matching results.
            temp_expr = relay.op.negative(rhs_expr)
            out = relay.op.add(lhs_expr, temp_expr)
            return out
    with tf.Graph().as_default():
        # Generate TFLite model for single addition
        data = [
            np.arange(6.0, dtype=np.float32).reshape((2, 1, 1, 3)),
            np.arange(1.0, 7.0, dtype=np.float32).reshape((2, 1, 1, 3)),
        ]
        in_data = [
            array_ops.placeholder(shape=data[0].shape, dtype="float32", name="in_0"),
            array_ops.placeholder(shape=data[1].shape, dtype="float32", name="in_1"),
        ]
        out = math_ops.subtract(in_data[0], in_data[1])
        in_name = [x[1] for x in zip(in_data, ("in_0:0", "in_1:0"))]
        input_tensors = in_data
        output_tensors = [out]
        # Strip the ":0" output-slot suffix to get plain node names.
        in_node = [0] * len(in_name)
        for i, _ in enumerate(in_name):
            in_node[i] = in_name[i].split(":")[0]
        with tf.Session() as sess:
            converter = tf.lite.TFLiteConverter.from_session(sess, input_tensors, output_tensors)
            tflite_model_buf = converter.convert()
        in_data = [x[1] for x in zip(in_data, data)]
        # Run once with the stock converter and once with the dummy one.
        tvm_output_orig = run_tvm_graph(tflite_model_buf, in_data, in_node)
        tvm_output_dummy = run_tvm_graph(
            tflite_model_buf, in_data, in_node, op_converter=DummyOperatorConverter
        )
        tvm.testing.assert_allclose(
            np.squeeze(tvm_output_orig[0]), np.squeeze(tvm_output_dummy[0]), rtol=1e-5, atol=1e-5
        )
#######################################################################
# Mobilenet
# ---------
def test_forward_mobilenet_v1():
    """Test the Mobilenet V1 TF Lite model."""
    model_path = tf_testing.get_workload_official(
        "http://download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224.tgz",
        "mobilenet_v1_1.0_224.tflite",
    )
    with open(model_path, "rb") as model_file:
        model_buf = model_file.read()
    input_data = np.random.uniform(size=(1, 224, 224, 3)).astype("float32")
    reference = run_tflite_graph(model_buf, input_data)
    output = run_tvm_graph(model_buf, input_data, "input")
    tvm.testing.assert_allclose(
        np.squeeze(output[0]), np.squeeze(reference[0]), rtol=1e-5, atol=1e-5
    )
def test_forward_mobilenet_v2():
    """Test the Mobilenet V2 TF Lite model."""
    model_path = tf_testing.get_workload_official(
        "http://download.tensorflow.org/models/tflite_11_05_08/mobilenet_v2_1.0_224.tgz",
        "mobilenet_v2_1.0_224.tflite",
    )
    with open(model_path, "rb") as model_file:
        model_buf = model_file.read()
    input_data = np.random.uniform(size=(1, 224, 224, 3)).astype("float32")
    reference = run_tflite_graph(model_buf, input_data)
    output = run_tvm_graph(model_buf, input_data, "input")
    tvm.testing.assert_allclose(
        np.squeeze(output[0]), np.squeeze(reference[0]), rtol=1e-5, atol=1e-5
    )
#######################################################################
# Mobilenet V3
# ------------
def test_forward_mobilenet_v3():
    """Test the Mobilenet V3 TF Lite model."""
    # In MobilenetV3, some ops are not supported before tf 1.15 fbs schema
    if package_version.parse(tf.VERSION) < package_version.parse("1.15.0"):
        return
    model_path = tf_testing.get_workload_official(
        "https://storage.googleapis.com/mobilenet_v3/checkpoints/v3-large_224_1.0_float.tgz",
        "v3-large_224_1.0_float/v3-large_224_1.0_float.tflite",
    )
    with open(model_path, "rb") as model_file:
        model_buf = model_file.read()
    input_data = np.random.uniform(size=(1, 224, 224, 3)).astype("float32")
    reference = run_tflite_graph(model_buf, input_data)
    output = run_tvm_graph(model_buf, input_data, "input")
    tvm.testing.assert_allclose(
        np.squeeze(output[0]), np.squeeze(reference[0]), rtol=1e-5, atol=1e-5
    )
#######################################################################
# Mobilenet V1 Sparse
# -----------------
def test_forward_sparse_mobilenet_v1():
    """Test the Sparse version of Mobilenet V1 TF Lite model."""
    model_path = download_testdata(
        "https://storage.googleapis.com/fast-convnets/tflite-models/mbv1_140_90_12b4_720.tflite",
        "mbv1_140_90_12b4_720.tflite",
    )
    with open(model_path, "rb") as model_file:
        model_buf = model_file.read()
    input_data = np.random.uniform(size=(1, 224, 224, 3)).astype("float32")
    reference = run_tflite_graph(model_buf, input_data)
    output = run_tvm_graph(model_buf, input_data, "float_image_input")
    tvm.testing.assert_allclose(
        np.squeeze(output[0]), np.squeeze(reference[0]), rtol=1e-5, atol=1e-5
    )
#######################################################################
# Mobilenet V2 Sparse
# -----------------
def test_forward_sparse_mobilenet_v2():
    """Test the Sparse version of Mobilenet V2 TF Lite model."""
    model_path = download_testdata(
        "https://storage.googleapis.com/fast-convnets/tflite-models/mbv2_200_85_11-16b2_744.tflite",
        "mbv2_200_85_11-16b2_744.tflite",
    )
    with open(model_path, "rb") as model_file:
        model_buf = model_file.read()
    input_data = np.random.uniform(size=(1, 224, 224, 3)).astype("float32")
    reference = run_tflite_graph(model_buf, input_data)
    output = run_tvm_graph(model_buf, input_data, "float_image_input")
    tvm.testing.assert_allclose(
        np.squeeze(output[0]), np.squeeze(reference[0]), rtol=1e-5, atol=1e-5
    )
#######################################################################
# Inception
# ---------
def test_forward_inception_v3_net():
    """Test the Inception V3 TF Lite model."""
    model_path = tf_testing.get_workload_official(
        "https://storage.googleapis.com/download.tensorflow.org/models/tflite/model_zoo/"
        "upload_20180427/inception_v3_2018_04_27.tgz",
        "inception_v3.tflite",
    )
    with open(model_path, "rb") as model_file:
        model_buf = model_file.read()
    input_data = np.random.uniform(size=(1, 299, 299, 3)).astype("float32")
    reference = run_tflite_graph(model_buf, input_data)
    output = run_tvm_graph(model_buf, input_data, "input")
    tvm.testing.assert_allclose(
        np.squeeze(output[0]), np.squeeze(reference[0]), rtol=1e-5, atol=1e-5
    )
def test_forward_inception_v4_net():
    """Test the Inception V4 TF Lite model."""
    model_path = tf_testing.get_workload_official(
        "https://storage.googleapis.com/download.tensorflow.org/models/"
        "tflite/model_zoo/upload_20180427/"
        "inception_v4_2018_04_27.tgz",
        "inception_v4.tflite",
    )
    with open(model_path, "rb") as model_file:
        model_buf = model_file.read()
    input_data = np.random.uniform(size=(1, 299, 299, 3)).astype("float32")
    reference = run_tflite_graph(model_buf, input_data)
    output = run_tvm_graph(model_buf, input_data, "input")
    tvm.testing.assert_allclose(
        np.squeeze(output[0]), np.squeeze(reference[0]), rtol=1e-5, atol=1e-5
    )
def test_forward_inception_v4_net_batched():
    """Test the Inception V4 TF Lite model with a batch of 4 inputs."""
    model_path = tf_testing.get_workload_official(
        "https://storage.googleapis.com/download.tensorflow.org/models/"
        "tflite/model_zoo/upload_20180427/"
        "inception_v4_2018_04_27.tgz",
        "inception_v4.tflite",
    )
    with open(model_path, "rb") as model_file:
        model_buf = model_file.read()
    input_data = np.random.uniform(size=(4, 299, 299, 3)).astype("float32")
    reference = run_tflite_graph(model_buf, input_data)
    output = run_tvm_graph(model_buf, input_data, "input")
    tvm.testing.assert_allclose(
        np.squeeze(output[0]), np.squeeze(reference[0]), rtol=1e-5, atol=1e-5
    )
def test_forward_qnn_inception_v1_net():
    """Test the Quantized TFLite Inception model."""
    model_path = tf_testing.get_workload_official(
        "https://storage.googleapis.com/download.tensorflow.org/models/"
        "inception_v1_224_quant_20181026.tgz",
        "inception_v1_224_quant.tflite",
    )
    with open(model_path, "rb") as model_file:
        model_buf = model_file.read()
    # Compare top-3 labels rather than raw scores: the requantize
    # implementations in TFLite and Relay differ, so exact outputs mismatch.
    # A real image is used instead of random data for the same reason.
    input_data = get_real_image(224, 224)
    reference = np.squeeze(run_tflite_graph(model_buf, input_data))
    output = np.squeeze(run_tvm_graph(model_buf, input_data, "input"))
    tvm.testing.assert_allclose(output.argsort()[-3:][::-1], reference.argsort()[-3:][::-1])
def test_forward_qnn_mobilenet_v1_net():
    """Test the Quantized TFLite Mobilenet V1 model."""
    model_path = tf_testing.get_workload_official(
        "https://storage.googleapis.com/download.tensorflow.org/models/mobilenet_v1_2018_08_02/"
        "mobilenet_v1_1.0_224_quant.tgz",
        "mobilenet_v1_1.0_224_quant.tflite",
    )
    with open(model_path, "rb") as model_file:
        model_buf = model_file.read()
    # Compare top-3 labels rather than raw scores: the requantize
    # implementations in TFLite and Relay differ, so exact outputs mismatch.
    # A real image is used instead of random data for the same reason.
    input_data = get_real_image(224, 224)
    reference = np.squeeze(run_tflite_graph(model_buf, input_data))
    output = np.squeeze(run_tvm_graph(model_buf, input_data, "input"))
    tvm.testing.assert_allclose(output.argsort()[-3:][::-1], reference.argsort()[-3:][::-1])
def test_forward_qnn_mobilenet_v2_net():
    """Test the Quantized TFLite Mobilenet V2 model."""
    model_path = tf_testing.get_workload_official(
        "https://storage.googleapis.com/download.tensorflow.org/models/tflite_11_05_08/"
        "mobilenet_v2_1.0_224_quant.tgz",
        "mobilenet_v2_1.0_224_quant.tflite",
    )
    with open(model_path, "rb") as model_file:
        model_buf = model_file.read()
    # Compare top-3 labels rather than raw scores: the requantize
    # implementations in TFLite and Relay differ, so exact outputs mismatch.
    # A real image is used instead of random data for the same reason.
    input_data = get_real_image(224, 224)
    reference = np.squeeze(run_tflite_graph(model_buf, input_data))
    output = np.squeeze(run_tvm_graph(model_buf, input_data, "input"))
    tvm.testing.assert_allclose(output.argsort()[-3:][::-1], reference.argsort()[-3:][::-1])
#######################################################################
# Mobilenet V3 Quantized
# ----------------------
def test_forward_qnn_mobilenet_v3_net():
    """Test the Quantized TFLite Mobilenet V3 model."""
    # In MobilenetV3, some ops are not supported before tf 1.15 fbs schema
    if package_version.parse(tf.VERSION) < package_version.parse("1.15.0"):
        pytest.skip("Unsupported in tflite < 1.15.0")
    else:
        pytest.skip("This segfaults with tensorflow 1.15.2 and above")
    # NOTE: everything below is currently unreachable — both branches above
    # skip — but is kept so the test can be re-enabled.
    model_path = tf_testing.get_workload_official(
        "https://storage.googleapis.com/mobilenet_v3/checkpoints/v3-large_224_1.0_uint8.tgz",
        "v3-large_224_1.0_uint8/v3-large_224_1.0_uint8.tflite",
    )
    with open(model_path, "rb") as model_file:
        model_buf = model_file.read()
    # Compare top-3 labels rather than raw scores: the requantize
    # implementations in TFLite and Relay differ, so exact outputs mismatch.
    # A real image is used instead of random data for the same reason.
    input_data = get_real_image(224, 224)
    reference = np.squeeze(run_tflite_graph(model_buf, input_data))
    output = np.squeeze(run_tvm_graph(model_buf, input_data, "input"))
    tvm.testing.assert_allclose(output.argsort()[-3:][::-1], reference.argsort()[-3:][::-1])
def test_forward_tflite2_qnn_resnet50():
    """Test the Quantized TFLite version 2.1.0 Resnet50 model."""
    # The model was produced with the TF 2.x converter, so only run on tf >= 2.1.
    if package_version.parse(tf.VERSION) >= package_version.parse("2.1.0"):
        model_path = download_testdata(
            "https://raw.githubusercontent.com/dmlc/web-data/main/tensorflow/models/Quantized/"
            "resnet_50_quantized.tflite",
            "resnet_50_quantized.tflite",
        )
        with open(model_path, "rb") as model_file:
            model_buf = model_file.read()

        # Compare top-3 labels only; requantize differences make exact values differ.
        image = pre_processed_image(224, 224)
        ref_top3 = np.squeeze(run_tflite_graph(model_buf, image)).argsort()[-3:][::-1]
        tvm_out = run_tvm_graph(model_buf, np.array(image), "input_1")
        tvm_top3 = np.squeeze(tvm_out).argsort()[-3:][::-1]
        tvm.testing.assert_allclose(tvm_top3, ref_top3)
def test_forward_tflite2_qnn_inception_v1():
    """Test the Quantized TFLite version 2.1.0 Inception V1 model."""
    # The model was produced with the TF 2.x converter, so only run on tf >= 2.1.
    if package_version.parse(tf.VERSION) >= package_version.parse("2.1.0"):
        model_path = download_testdata(
            "https://raw.githubusercontent.com/dmlc/web-data/main/tensorflow/models/Quantized/"
            "inception_v1_quantized.tflite",
            "inception_v1_quantized.tflite",
        )
        with open(model_path, "rb") as model_file:
            model_buf = model_file.read()

        # Compare top-3 labels only; requantize differences make exact values differ.
        image = pre_processed_image(224, 224)
        ref_top3 = np.squeeze(run_tflite_graph(model_buf, image)).argsort()[-3:][::-1]
        tvm_out = run_tvm_graph(model_buf, np.array(image), "input_1")
        tvm_top3 = np.squeeze(tvm_out).argsort()[-3:][::-1]
        tvm.testing.assert_allclose(tvm_top3, ref_top3)
def test_forward_tflite2_qnn_mobilenet_v2():
    """Test the Quantized TFLite version 2.1.0 Mobilenet V2 model."""
    # The model was produced with the TF 2.x converter, so only run on tf >= 2.1.
    if package_version.parse(tf.VERSION) >= package_version.parse("2.1.0"):
        model_path = download_testdata(
            "https://raw.githubusercontent.com/dmlc/web-data/main/tensorflow/models/Quantized/"
            "mobilenet_v2_quantized.tflite",
            "mobilenet_v2_quantized.tflite",
        )
        with open(model_path, "rb") as model_file:
            model_buf = model_file.read()

        # Compare top-3 labels only; requantize differences make exact values differ.
        image = pre_processed_image(224, 224)
        ref_top3 = np.squeeze(run_tflite_graph(model_buf, image)).argsort()[-3:][::-1]
        tvm_out = run_tvm_graph(model_buf, np.array(image), "input_1")
        tvm_top3 = np.squeeze(tvm_out).argsort()[-3:][::-1]
        tvm.testing.assert_allclose(tvm_top3, ref_top3)
def test_forward_tflite_float16():
    """Test float16 quantized model.

    Converts the MobilenetV1 (0.25 depth, 128x128) frozen graph to a TFLite
    model with float16 weight quantization, then compares TVM's top-3
    predicted labels against TFLite's on a real image.
    """
    # MobilenetV1 0.25/128 frozen graph (note: V1, not V2 — see URL below).
    tflite_model_file = tf_testing.get_workload_official(
        "https://storage.googleapis.com/download.tensorflow.org/models/mobilenet_v1_2018_02_22/"
        "mobilenet_v1_0.25_128.tgz",
        "mobilenet_v1_0.25_128_frozen.pb",
    )
    # Convert the frozen graph with float16 declared as a supported weight type.
    converter = tf.lite.TFLiteConverter.from_frozen_graph(
        tflite_model_file, ["input"], ["MobilenetV1/Predictions/Reshape_1"]
    )
    converter.optimizations = [tf.lite.Optimize.DEFAULT]
    converter.target_spec.supported_types = [tf.float16]
    tflite_model_buf = converter.convert()
    # Test image. Checking the labels because the requantize implementation is different between
    # TFLite and Relay. This causes final output numbers to mismatch. So, testing accuracy via
    # labels. Also, giving a real image, instead of random inputs.
    data = get_real_image(128, 128, quantized=False)
    tflite_output = run_tflite_graph(tflite_model_buf, data)
    tflite_predictions = np.squeeze(tflite_output)
    tflite_sorted_labels = tflite_predictions.argsort()[-3:][::-1]
    tvm_output = run_tvm_graph(tflite_model_buf, data, "input")
    tvm_predictions = np.squeeze(tvm_output)
    tvm_sorted_labels = tvm_predictions.argsort()[-3:][::-1]
    # Only the top-3 label ordering must match, not the raw probabilities.
    tvm.testing.assert_allclose(tvm_sorted_labels, tflite_sorted_labels)
def test_forward_mobilenet_int16():
    """Test int16 quantized model.

    Quantizes the MobilenetV1 (0.25 depth, 128x128) frozen graph with the
    TFLite 16x8 scheme (int16 activations, int8 weights) and compares TVM's
    top-3 predicted labels against TFLite's on a real image.
    """
    # MobilenetV1 0.25/128 frozen graph (note: V1, not V2 — see URL below).
    model_file = tf_testing.get_workload_official(
        "https://storage.googleapis.com/download.tensorflow.org/models/mobilenet_v1_2018_02_22/"
        "mobilenet_v1_0.25_128.tgz",
        "mobilenet_v1_0.25_128_frozen.pb",
    )
    # Test image. Checking the labels because the requantize implementation is different between
    # TFLite and Relay. This causes final output numbers to mismatch. So, testing accuracy via
    # labels. Also, giving a real image, instead of random inputs.
    #
    # According to TFLite documentation, despite the quantization being done to make this model
    # use int16 types, inputs and outputs are kept float32 by default.
    # https://www.tensorflow.org/lite/performance/post_training_integer_quant_16x8
    data = get_real_image(128, 128, quantized=False)
    converter = tf.lite.TFLiteConverter.from_frozen_graph(
        model_file, ["input"], ["MobilenetV1/Predictions/Reshape_1"]
    )

    # Single-sample calibration dataset required by the 16x8 quantizer.
    def representative_dataset():
        for _ in range(1):
            yield [data]

    converter.optimizations = [tf.lite.Optimize.DEFAULT]
    converter.target_spec.supported_ops = [
        tf.lite.OpsSet.EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8
    ]
    converter.representative_dataset = representative_dataset
    tflite_model_buf = converter.convert()
    tflite_output = run_tflite_graph(tflite_model_buf, data)
    tflite_predictions = np.squeeze(tflite_output)
    tflite_sorted_labels = tflite_predictions.argsort()[-3:][::-1]
    tvm_output = run_tvm_graph(tflite_model_buf, data, "input")
    tvm_predictions = np.squeeze(tvm_output)
    tvm_sorted_labels = tvm_predictions.argsort()[-3:][::-1]
    # Only the top-3 label ordering must match, not the raw probabilities.
    tvm.testing.assert_allclose(tvm_sorted_labels, tflite_sorted_labels)
def test_forward_ds_cnn_int16():
    """Test DS_CNN int16 quantized model"""
    model_path = download_testdata(
        "https://github.com/ARM-software/ML-zoo/blob/48f458af1e9065d9aad2ad94d24b58d6e7c00817/"
        "models/keyword_spotting/ds_cnn_small/tflite_int16/ds_cnn_quantized.tflite?raw=true",
        "ds_cnn_quantized_int16.tflite",
    )
    with open(model_path, "rb") as model_file:
        model_buf = model_file.read()

    # Random int16 features suffice here: only the top-3 label ordering is compared.
    features = np.random.uniform(size=(1, 490)).astype("int16")
    ref_top3 = np.squeeze(run_tflite_graph(model_buf, features)).argsort()[-3:][::-1]
    tvm_out = run_tvm_graph(model_buf, features, "serving_default_input:0")
    tvm_top3 = np.squeeze(tvm_out).argsort()[-3:][::-1]
    tvm.testing.assert_allclose(tvm_top3, ref_top3)
#######################################################################
# Unidirectional Sequence LSTM
# ----------------------------
def test_forward_unidirectional_sequence_lstm():
    """Test the UnidirectionalSequenceLSTM TFLite"""
    # The model needs the TF 2.x converter, so only run on tf >= 2.1.
    if package_version.parse(tf.VERSION) >= package_version.parse("2.1.0"):
        model_path = download_testdata(
            "https://github.com/SebastianBoblestETAS/nn_models/blob/"
            "ce49c5de64889493161ca4194a20e0fd5eb707e6/lstm_1_in_3_out_2_ts_4.tflite?raw=true",
            "lstm_1_in_3_out_2_ts_4.tflite",
        )
        with open(model_path, "rb") as model_file:
            model_buf = model_file.read()

        # Fixed 4-timestep, 3-feature sequence so results are reproducible.
        sequence = np.array(
            [
                [
                    [0.5488135, 0.71518934, 0.60276335],
                    [0.5448832, 0.4236548, 0.6458941],
                    [0.4375872, 0.891773, 0.96366274],
                    [0.3834415, 0.79172504, 0.5288949],
                ]
            ],
            dtype="float32",
        )
        ref_out = run_tflite_graph(model_buf, sequence)
        tvm_out = run_tvm_graph(model_buf, sequence, "serving_default_input_1:0")
        tvm.testing.assert_allclose(ref_out, tvm_out)
#######################################################################
# Quantized SSD Mobilenet
# -----------------------
def test_forward_qnn_coco_ssd_mobilenet_v1():
    """Test the quantized Coco SSD Mobilenet V1 TF Lite model.

    Currently always skipped because of an LLVM vectorizer bug; the body is
    kept intact so the test can be re-enabled once the bug is resolved.
    """
    # Fix: the suggested workaround flag is "-mcpu" (message previously said "-mpcu").
    pytest.skip(
        "LLVM bug - getExtendedVectorNumElements - "
        + "https://discuss.tvm.apache.org/t/segfault-in-llvm/3567. The workaround is to use a "
        + "specific target, for example, llvm -mcpu=core-avx2"
    )

    tflite_model_file = tf_testing.get_workload_official(
        "https://storage.googleapis.com/download.tensorflow.org/models/tflite/"
        "coco_ssd_mobilenet_v1_1.0_quant_2018_06_29.zip",
        "detect.tflite",
    )
    with open(tflite_model_file, "rb") as f:
        tflite_model_buf = f.read()
    data = get_real_image_object_detection(300, 300)
    tflite_output = run_tflite_graph(tflite_model_buf, data)
    # Outputs: [boxes, classes, scores, valid_count].
    tvm_output = run_tvm_graph(
        tflite_model_buf, data, "normalized_input_image_tensor", num_output=4
    )

    # Check all output shapes are equal
    assert all(
        tvm_tensor.shape == tflite_tensor.shape
        for (tvm_tensor, tflite_tensor) in zip(tvm_output, tflite_output)
    )
    # Check valid count is the same
    assert tvm_output[3] == tflite_output[3]
    valid_count = tvm_output[3][0]

    # For boxes that do not have any detections, TFLite puts random values. Therefore, we compare
    # tflite and tvm tensors for only valid boxes.
    for i in range(0, valid_count):
        # We compare the bounding boxes whose prediction score is above 60%. This is typical in end
        # to end application where a low prediction score is discarded. This is also needed because
        # multiple low score bounding boxes can have same score and TFlite and TVM can have
        # different orderings for same score bounding boxes. Another reason for minor differences in
        # low score bounding boxes is the difference between TVM and TFLite for requantize operator.
        if tvm_output[2][0][i] > 0.6:
            # Check bounding box co-ords. The tolerances have to be adjusted, from 1e-5 to 1e-2,
            # because of differences in the requantize operator in TFLite and TVM.
            tvm.testing.assert_allclose(
                np.squeeze(tvm_output[0][0][i]),
                np.squeeze(tflite_output[0][0][i]),
                rtol=1e-2,
                atol=1e-2,
            )
            # Check the class
            # Stricter check to ensure class remains same
            np.testing.assert_equal(
                np.squeeze(tvm_output[1][0][i]), np.squeeze(tflite_output[1][0][i])
            )
            # Check the score
            tvm.testing.assert_allclose(
                np.squeeze(tvm_output[2][0][i]),
                np.squeeze(tflite_output[2][0][i]),
                rtol=1e-5,
                atol=1e-5,
            )
#######################################################################
# SSD Mobilenet
# -------------
def test_forward_coco_ssd_mobilenet_v1():
    """Test the FP32 Coco SSD Mobilenet V1 TF Lite model.

    Compares boxes, classes, scores and valid-detection count between TFLite
    and TVM on a seeded random input.
    """
    tflite_model_file = tf_testing.get_workload_official(
        "https://raw.githubusercontent.com/dmlc/web-data/main/tensorflow/models/object_detection/"
        "ssd_mobilenet_v1_coco_2018_01_28.tgz",
        "ssd_mobilenet_v1_coco_2018_01_28.tflite",
    )
    with open(tflite_model_file, "rb") as f:
        tflite_model_buf = f.read()
    # Seed so the random input — and hence the detections — are reproducible.
    np.random.seed(0)
    data = np.random.uniform(size=(1, 300, 300, 3)).astype("float32")
    tflite_output = run_tflite_graph(tflite_model_buf, data)
    # Outputs: [boxes, classes, scores, valid_count].
    tvm_output = run_tvm_graph(
        tflite_model_buf, data, "normalized_input_image_tensor", num_output=4
    )
    # Check all output shapes are equal
    assert all(
        list(
            tvm_tensor.shape == tflite_tensor.shape
            for (tvm_tensor, tflite_tensor) in zip(tvm_output, tflite_output)
        )
    )
    # Check valid count is the same
    assert tvm_output[3] == tflite_output[3]
    valid_count = tvm_output[3][0]
    # For boxes that do not have any detections, TFLite puts random values. Therefore, we compare
    # tflite and tvm tensors for only valid boxes.
    for i in range(0, valid_count):
        # Check bounding box co-ords
        tvm.testing.assert_allclose(
            np.squeeze(tvm_output[0][0][i]),
            np.squeeze(tflite_output[0][0][i]),
            rtol=1e-5,
            atol=1e-5,
        )
        # Check the class
        np.testing.assert_equal(np.squeeze(tvm_output[1][0][i]), np.squeeze(tflite_output[1][0][i]))
        # Check the score
        tvm.testing.assert_allclose(
            np.squeeze(tvm_output[2][0][i]),
            np.squeeze(tflite_output[2][0][i]),
            rtol=1e-5,
            atol=1e-5,
        )
#######################################################################
# MediaPipe
# -------------
def test_forward_mediapipe_hand_landmark():
    """Test MediaPipe 2D hand landmark TF Lite model."""
    # MediaPipe 2D hand landmark TF
    model_path = download_testdata(
        "https://github.com/google/mediapipe/raw/v0.7.4/mediapipe/models/hand_landmark.tflite",
        "hand_landmark.tflite",
    )
    with open(model_path, "rb") as model_file:
        model_buf = model_file.read()

    image = np.random.uniform(size=(1, 256, 256, 3)).astype("float32")
    ref_outputs = run_tflite_graph(model_buf, image)
    tvm_outputs = run_tvm_graph(model_buf, image, "input_1", num_output=2)
    # The model produces two outputs; both must match TFLite.
    for idx in range(2):
        tvm.testing.assert_allclose(
            np.squeeze(tvm_outputs[idx]), np.squeeze(ref_outputs[idx]), rtol=1e-5, atol=1e-5
        )
#######################################################################
# Test check for Tensorflow "dynamic range quantization" optimization
# --------------
def test_prevent_tensorflow_dynamic_range():
    """
    Should prevent running "dynamic range quantization" optimized TFLite graph
    """
    activations = np.random.randint(0, 2, (1, 1024, 1024)).astype(dtype=np.float32)
    weights = np.random.randint(0, 2, (1024, 1024)).astype(dtype=np.float32)
    # Build a one-layer Dense Keras model and quantize it with default
    # optimizations, which yields a dynamic-range-quantized graph.
    inp = tf.keras.layers.Input(shape=activations.shape[1:])
    dense = tf.keras.layers.Dense(units=weights.shape[-1], use_bias=False)(inp)
    keras_model = tf.keras.models.Model(inp, dense)
    keras_model.layers[1].set_weights([weights])
    converter = interpreter_wrapper.TFLiteConverter.from_keras_model(keras_model)
    converter.optimizations = [tf.lite.Optimize.DEFAULT]
    tflite_model = converter.convert()
    # The frontend must reject such graphs with an explicit error.
    with pytest.raises(tvm.error.OpNotImplemented):
        _ = run_tvm_graph(tflite_model, activations, inp.name.replace(":0", ""))
def _test_nms_v5(
    bx_shape, score_shape, iou_threshold, score_threshold, max_output_size, dtype="float32"
):
    """One iteration of nms_v5 with given attributes"""
    box_data = np.random.uniform(0, 10, size=bx_shape).astype(dtype)
    score_data = np.random.uniform(size=score_shape).astype(dtype)
    tf.reset_default_graph()
    tf.compat.v1.disable_eager_execution()
    boxes_ph = array_ops.placeholder(dtype, box_data.shape, name="in_data_1")
    scores_ph = array_ops.placeholder(dtype, score_data.shape, name="in_data_2")
    nms = image_ops.non_max_suppression_with_scores(
        boxes=boxes_ph,
        scores=scores_ph,
        max_output_size=max_output_size,
        iou_threshold=iou_threshold,
        score_threshold=score_threshold,
        name="nms",
    )
    # Compare both NMS outputs (selected indices and selected scores).
    compare_tflite_with_tvm(
        [box_data, score_data],
        ["in_data_1:0", "in_data_2:0"],
        [boxes_ph, scores_ph],
        [nms[0], nms[1]],
        out_names=[nms[0].name, nms[1].name],
        experimental_new_converter=True,
    )
def test_forward_nms_v5():
    """test nms_v5"""
    # (box shape, score shape, iou threshold, score threshold, max outputs)
    for boxes, scores, iou_thr, score_thr, max_out in (
        ((10000, 4), (10000,), 0.5, 0.4, 100),
        ((1000, 4), (1000,), 0.7, 0.3, 50),
    ):
        _test_nms_v5(boxes, scores, iou_thr, score_thr, max_out)
#######################################################################
# Test structural_equal and span of a model
# --------------------------------------
def test_structure_and_span():
    """Test Structure and span of frequently-used models.

    Each sub-test converts a tiny TF graph through TFLite into Relay and
    checks (a) span filling does not change structure and (b) the span-filled
    module matches a hand-built golden Relay function.
    """

    def _verify(res_fptr, golden_fptr):
        # Build the module twice — with and without span filling — then check
        # structural equality and the exact spans against the golden module.
        with tvm.testing.enable_span_filling():
            with_span = res_fptr()
        with tvm.testing.disable_span_filling():
            without_span = res_fptr()
        assert tvm.ir.structural_equal(with_span, without_span)
        _verify_structural_equal_with_span(with_span, golden_fptr())

    def _tf_to_tflite(
        input_tensors, output_tensors, init_global_variables=False, experimental_new_converter=False
    ):
        # Convert a live TF session graph into a parsed tflite Model object.
        with tf.Session() as sess:
            if init_global_variables:
                sess.run(variables.global_variables_initializer())
            converter = tf.lite.TFLiteConverter.from_session(sess, input_tensors, output_tensors)
            converter.experimental_new_converter = experimental_new_converter
            tflite_model_buffer = converter.convert()
        # The flatbuffer entry point moved between tflite package versions;
        # try the new layout first, then the old one.
        try:
            import tflite.Model

            tflite_model = tflite.Model.Model.GetRootAsModel(tflite_model_buffer, 0)
        except AttributeError:
            import tflite

            tflite_model = tflite.Model.GetRootAsModel(tflite_model_buffer, 0)
        except ImportError:
            raise ImportError("The tflite package must be installed")
        return tflite_model

    def _test_conv2d_bias_add_span():
        # conv2d is fused with its (implicit, zero) bias by the converter, so
        # both conv2d and bias_add carry the "conv2d" span.
        def _res():
            in_shape = (1, 5, 5, 1)
            kernel_shpae = (2, 2, 1, 2)  # NOTE: typo in local name, harmless
            kernel_in = np.ones(kernel_shpae)
            with tf.Graph().as_default():
                x = array_ops.placeholder(shape=in_shape, dtype="float32", name="input")
                kernel = tf.constant(kernel_in, dtype=tf.float32, name="filter_weight")
                tf_model = tf.nn.conv2d(
                    x, kernel, strides=[1, 1, 1, 1], padding="VALID", name="conv2d"
                )
                tflite_model = _tf_to_tflite([x], [tf_model])
                mod, _ = relay.frontend.from_tflite(
                    tflite_model,
                    shape_dict={"input": in_shape},
                    dtype_dict={"input": "float32"},
                    op_converter=relay.frontend.tflite.OperatorConverter,
                )
            return mod["main"]

        def _golden():
            # Hand-built Relay equivalent with the expected spans attached.
            in_input = relay.var(
                "input", relay.TensorType([1, 5, 5, 1]), span=_create_span("input")
            )
            weight = relay.var(
                "_param_1", relay.TensorType([2, 2, 1, 2]), span=_create_span("filter_weight")
            )
            bias = relay.var("_param_2", relay.TensorType([2]), span=_create_span("conv2d_bias"))
            conv2d = _set_span(
                relay.nn.conv2d(
                    in_input,
                    weight,
                    channels=2,
                    kernel_size=[2, 2],
                    data_layout="NHWC",
                    kernel_layout="HWIO",
                ),
                "conv2d",
            )
            bias_add = _set_span(relay.nn.bias_add(conv2d, bias, axis=3), "conv2d")
            attrs = ir.make_node("DictAttrs", **{"output_tensor_names": ["conv2d"]})
            func = relay.Function([in_input, weight, bias], bias_add, attrs=attrs)
            mod = ir.IRModule.from_expr(func)
            return mod["main"]

        _verify(_res, _golden)

    def _test_fully_connected_bias_add_span():
        # mat_mul becomes reshape + dense + bias_add, all spanning "dense".
        def _res():
            in_shape = (1, 10)
            kernel_shpae = (10, 10)  # NOTE: typo in local name, harmless
            kernel_in = np.ones(kernel_shpae)
            with tf.Graph().as_default():
                x = array_ops.placeholder(shape=in_shape, dtype="float32", name="input")
                weight = tf.constant(kernel_in, dtype=tf.float32, name="filter_weight")
                tf_model = math_ops.mat_mul(x, weight, name="dense")
                tflite_model = _tf_to_tflite([x], [tf_model])
                mod, _ = relay.frontend.from_tflite(
                    tflite_model,
                    shape_dict={"input": in_shape},
                    dtype_dict={"input": "float32"},
                    op_converter=relay.frontend.tflite.OperatorConverter,
                )
            return mod["main"]

        def _golden():
            in_input = relay.var("input", relay.TensorType([1, 10]), span=_create_span("input"))
            weight = relay.var(
                "_param_1", relay.TensorType([10, 10]), span=_create_span("filter_weight/transpose")
            )
            bias = relay.var("_param_2", relay.TensorType([10]), span=_create_span("dense_bias"))
            reshape = _set_span(relay.reshape(in_input, [-1, 10]), "dense")
            dense = _set_span(relay.nn.dense(reshape, weight, units=10), "dense")
            bias_add = _set_span(relay.nn.bias_add(dense, bias), "dense")
            attrs = ir.make_node("DictAttrs", **{"output_tensor_names": ["dense"]})
            func = relay.Function([in_input, weight, bias], bias_add, attrs=attrs)
            mod = ir.IRModule.from_expr(func)
            return mod["main"]

        _verify(_res, _golden)

    def _test_reshape_span():
        # Simple reshape; the single op carries the "reshape" span.
        def _res():
            in_shape = (1, 10)
            output_shape = (2, 5)
            with tf.Graph().as_default():
                x = array_ops.placeholder(shape=in_shape, dtype="float32", name="input")
                tf_model = array_ops.reshape(x, output_shape, "reshape")
                tflite_model = _tf_to_tflite([x], [tf_model])
                mod, _ = relay.frontend.from_tflite(
                    tflite_model,
                    shape_dict={"input": in_shape},
                    dtype_dict={"input": "float32"},
                    op_converter=relay.frontend.tflite.OperatorConverter,
                )
            return mod["main"]

        def _golden():
            in_input = relay.var("input", relay.TensorType([1, 10]), span=_create_span("input"))
            reshape = _set_span(relay.reshape(in_input, [2, 5]), "reshape")
            attrs = ir.make_node("DictAttrs", **{"output_tensor_names": ["reshape"]})
            func = relay.Function([in_input], reshape, attrs=attrs)
            mod = ir.IRModule.from_expr(func)
            return mod["main"]

        _verify(_res, _golden)

    _test_conv2d_bias_add_span()
    _test_fully_connected_bias_add_span()
    _test_reshape_span()
class TestConv2d:
    """Import Conv2d operator from TFLite, build with Relay and test."""

    # Parameterized over (input_shape NHWC, kernel_shape HWIO, padding); the
    # last two cases use a depth-1 kernel to exercise grouped convolution.
    input_shape, kernel_shape, padding = tvm.testing.parameters(
        ((1, 128, 256, 6), (5, 5, 6, 10), "SAME"),
        ((1, 128, 256, 6), (5, 5, 6, 10), "VALID"),
        # conv2d_group cases
        ((1, 30, 40, 6), (5, 5, 1, 6), "SAME"),
        ((1, 30, 40, 6), (5, 5, 1, 6), "VALID"),
    )

    def test_conv2d(self, input_shape: tuple, kernel_shape: tuple, padding: str):
        """Build one TF conv2d, convert to TFLite and compare against TVM."""
        dtype = tf.float32
        # All-ones kernel; the random input still exercises the data path.
        kernel_in = np.ones(kernel_shape)
        with tf.Graph().as_default():
            x = array_ops.placeholder(shape=input_shape, dtype=dtype.name, name="input")
            kernel = tf.constant(kernel_in, dtype=dtype, name="filter_weight")
            out = tf.nn.conv2d(x, kernel, strides=[1, 1, 1, 1], padding=padding, name="conv2d")

            input_data = np.random.randn(*input_shape).astype(dtype.name)
            compare_tflite_with_tvm(
                [input_data],
                ["input"],
                [x],
                [out],
            )
# Run every test in this file through pytest when executed as a script.
if __name__ == "__main__":
    tvm.testing.main()
| 197,489 | 34.822601 | 100 | py |
tvm | tvm-main/tests/python/frontend/coreml/test_forward.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
CoreML testcases
====================
This article is a test script to test CoreML operator with Relay.
"""
from os import path
from enum import Enum
import tempfile
import numpy as np
import model_zoo
import coremltools as cm
from coremltools.models.neural_network import NeuralNetworkBuilder
from coremltools.models import datatypes
from tensorflow import keras
import tvm
import tvm.topi.testing
import tvm.testing
from tvm.contrib import graph_executor
from tvm.topi.testing import conv2d_nchw_python
from tvm import relay
def get_tvm_output(
    func, x, params, target, device, out_shape=(1, 1000), input_name="image", dtype="float32"
):
    """Build *func* with Relay, run it on *device*, and return output 0 as numpy."""
    with tvm.transform.PassContext(opt_level=3):
        lib = relay.build(func, target, params=params)
    module = graph_executor.GraphModule(lib["default"](device))
    # Feed the single input, execute, and fetch the first output.
    module.set_input(input_name, tvm.nd.array(x.astype(dtype)))
    module.run()
    return module.get_output(0, tvm.nd.empty(out_shape, dtype)).numpy()
def run_model_checkonly(model_file, model_name="", input_name="image"):
    """Smoke-test a CoreML model on every enabled target, printing the top-1 id."""
    model = cm.models.MLModel(model_file)
    image = model_zoo.get_cat_image()
    shape_dict = {input_name: image.shape}
    # Some Relay passes change operators on the fly. Ensuring that we generate
    # new graph for each target.
    for target, dev in tvm.testing.enabled_targets():
        mod, params = relay.frontend.from_coreml(model, shape_dict)
        prediction = get_tvm_output(mod["main"], image, params, target, dev)
        print(target, dev, model_name, "prediction id: ", np.argmax(prediction.flat))
@tvm.testing.uses_gpu
def test_mobilenet_checkonly():
    """Smoke-test the Mobilenet CoreML model."""
    run_model_checkonly(model_zoo.get_mobilenet(), "mobilenet")
@tvm.testing.uses_gpu
def test_resnet50_checkonly():
    """Smoke-test the Resnet50 CoreML model."""
    run_model_checkonly(model_zoo.get_resnet50(), "resnet50")
def run_tvm_graph(
    coreml_model, target, device, input_data, input_name, output_shape, output_dtype="float32"
):
    """Generic function to compile on relay and execute on tvm.

    ``input_data``/``input_name`` may be a single array/name or parallel lists;
    ``output_shape``/``output_dtype`` may likewise be lists for multi-output models.
    """
    multi_input = isinstance(input_data, list)
    if multi_input:
        shape_dict = {}
        dtype_dict = {}
        for name, arr in zip(input_name, input_data):
            shape_dict[name] = arr.shape
            dtype_dict[name] = arr.dtype
    else:
        shape_dict = {input_name: input_data.shape}
        dtype_dict = {input_name: input_data.dtype}
    mod, params = relay.frontend.from_coreml(coreml_model, shape_dict)
    with tvm.transform.PassContext(opt_level=3):
        lib = relay.build(mod, target, params=params)
    module = graph_executor.GraphModule(lib["default"](device))
    # Bind inputs (single or multiple), then execute.
    if multi_input:
        for name, arr in zip(input_name, input_data):
            module.set_input(name, tvm.nd.array(arr.astype(arr.dtype)))
    else:
        module.set_input(input_name, tvm.nd.array(input_data.astype(input_data.dtype)))
    module.run()
    # Gather outputs: a list per shape/dtype pair, or the single first output.
    if isinstance(output_shape, list) and isinstance(output_dtype, list):
        return [
            module.get_output(i, tvm.nd.empty(shape, output_dtype[i])).numpy()
            for i, shape in enumerate(output_shape)
        ]
    if not output_shape:
        return module.get_output(0).numpy()
    return module.get_output(0, tvm.nd.empty(output_shape, output_dtype)).numpy()
def verify_add_layer_params(input_dim, alpha=2):
    """Verify an element-wise ADD layer (with scalar alpha) against numpy."""
    dtype = "float32"
    lhs = np.random.uniform(size=input_dim).astype(dtype)
    rhs = np.random.uniform(size=input_dim).astype(dtype)
    expected = np.add(lhs, rhs) + alpha
    inputs = [("input1", datatypes.Array(*input_dim)), ("input2", datatypes.Array(*input_dim))]
    outputs = [("output", datatypes.Array(*expected.shape))]
    builder = NeuralNetworkBuilder(inputs, outputs)
    builder.add_elementwise(
        name="Add", alpha=alpha, input_names=["input1", "input2"], output_name="output", mode="ADD"
    )
    model = cm.models.MLModel(builder.spec)
    for target, dev in tvm.testing.enabled_targets():
        result = run_tvm_graph(
            model, target, dev, [lhs, rhs], ["input1", "input2"], expected.shape, dtype
        )
        tvm.testing.assert_allclose(result, expected, rtol=1e-5)
@tvm.testing.uses_gpu
def test_forward_add_layer_params():
    """Exercise ADD with several shapes and alpha values."""
    for shape, alpha in (((1, 2, 2), 0), ((1, 2, 2), 1), ((1, 3, 3), 2)):
        verify_add_layer_params(shape, alpha)
def verify_multiply_layer_params(input_dim, alpha):
    """Verify an element-wise MULTIPLY layer (with scalar alpha) against numpy."""
    dtype = "float32"
    lhs = np.random.uniform(size=input_dim).astype(dtype)
    rhs = np.random.uniform(size=input_dim).astype(dtype)
    expected = np.multiply(lhs, rhs) * alpha
    inputs = [("input1", datatypes.Array(*input_dim)), ("input2", datatypes.Array(*input_dim))]
    outputs = [("output", datatypes.Array(*expected.shape))]
    builder = NeuralNetworkBuilder(inputs, outputs)
    builder.add_elementwise(
        name="Mul",
        alpha=alpha,
        input_names=["input1", "input2"],
        output_name="output",
        mode="MULTIPLY",
    )
    model = cm.models.MLModel(builder.spec)
    for target, dev in tvm.testing.enabled_targets():
        result = run_tvm_graph(
            model, target, dev, [lhs, rhs], ["input1", "input2"], expected.shape, dtype
        )
        tvm.testing.assert_allclose(result, expected, rtol=1e-5)
@tvm.testing.uses_gpu
def test_forward_multiply_layer_params():
    """Exercise MULTIPLY with several shapes and alpha values."""
    for shape, alpha in (((1, 2, 2), 0), ((1, 2, 2), 1), ((1, 3, 3), 2)):
        verify_multiply_layer_params(shape, alpha)
def verify_concat_layer_params(input1_dim, input2_dim):
    """Verify a CONCAT layer (channel axis) against numpy concatenate."""
    dtype = "float32"
    lhs = np.random.uniform(size=input1_dim).astype(dtype)
    rhs = np.random.uniform(size=input2_dim).astype(dtype)
    expected = np.concatenate((lhs, rhs), axis=1)
    inputs = [("input1", datatypes.Array(*input1_dim)), ("input2", datatypes.Array(*input2_dim))]
    outputs = [("output", datatypes.Array(*expected.shape))]  # pylint:disable=not-an-iterable
    builder = NeuralNetworkBuilder(inputs, outputs)
    builder.add_elementwise(
        name="Concate", input_names=["input1", "input2"], output_name="output", mode="CONCAT"
    )
    model = cm.models.MLModel(builder.spec)
    for target, dev in tvm.testing.enabled_targets():
        result = run_tvm_graph(
            model, target, dev, [lhs, rhs], ["input1", "input2"], expected.shape, dtype
        )
        tvm.testing.assert_allclose(result, expected, rtol=1e-5)
@tvm.testing.uses_gpu
def test_forward_concat_layer_params():
    """Exercise CONCAT with two shape pairs along the channel axis."""
    for lhs_shape, rhs_shape in (((1, 1, 2, 2), (1, 2, 2, 2)), ((1, 2, 4, 4), (1, 3, 4, 4))):
        verify_concat_layer_params(lhs_shape, rhs_shape)
def _verify_upsample_layer_params(input_dim, scale, mode):
    """Verify an Upsample layer against topi's resize2d python reference."""
    dtype = "float32"
    src = np.full(input_dim, 1, dtype=dtype)
    # Map the CoreML mode onto the resize2d method/coordinate transform.
    if mode == "NN":
        method, coord_trans = "nearest_neighbor", "asymmetric"
    else:
        method, coord_trans = "linear", "align_corners"
    expected = tvm.topi.testing.resize2d_python(src, (scale, scale), "NCHW", method, coord_trans)
    inputs = [("input", datatypes.Array(*input_dim))]
    outputs = [("output", datatypes.Array(*expected.shape))]
    builder = NeuralNetworkBuilder(inputs, outputs)
    builder.add_upsample(
        name="Upsample",
        scaling_factor_h=scale,
        scaling_factor_w=scale,
        mode=mode,
        input_name="input",
        output_name="output",
    )
    model = cm.models.MLModel(builder.spec)
    for target, dev in tvm.testing.enabled_targets():
        result = run_tvm_graph(model, target, dev, src, "input", expected.shape, dtype)
        tvm.testing.assert_allclose(result, expected, rtol=1e-5)
@tvm.testing.uses_gpu
def test_forward_upsample_layer_params():
    """Upsample Layer Params"""
    for shape, scale, mode in (((1, 16, 32, 32), 2, "NN"), ((1, 4, 6, 6), 3, "BILINEAR")):
        _verify_upsample_layer_params(shape, scale, mode)
def _verify_l2_normalize(input_dim, eps):
    """Verify an L2-normalize layer against topi's python reference."""
    dtype = "float32"
    src = np.random.uniform(size=input_dim).astype(dtype)
    expected = tvm.topi.testing.l2_normalize_python(src, eps, 1)
    inputs = [("input", datatypes.Array(*input_dim))]
    outputs = [("output", datatypes.Array(*expected.shape))]
    builder = NeuralNetworkBuilder(inputs, outputs)
    builder.add_l2_normalize(name="L2", epsilon=eps, input_name="input", output_name="output")
    model = cm.models.MLModel(builder.spec)
    for target, dev in tvm.testing.enabled_targets():
        result = run_tvm_graph(model, target, dev, src, "input", expected.shape, dtype)
        tvm.testing.assert_allclose(result, expected, rtol=1e-5)
@tvm.testing.uses_gpu
def test_forward_l2_normalize():
    """Exercise L2 normalization with a small epsilon."""
    _verify_l2_normalize((1, 3, 20, 20), 0.001)
def _verify_lrn(input_dim, size, bias, alpha, beta):
    """Verify a local-response-normalization layer against topi's reference."""
    dtype = "float32"
    # CoreML LRN normalizes across channels (axis 1 in NCHW).
    axis = 1
    src = np.random.uniform(size=input_dim).astype(dtype)
    expected = tvm.topi.testing.lrn_python(src, size, axis, bias, alpha, beta)
    inputs = [("input", datatypes.Array(*input_dim))]
    outputs = [("output", datatypes.Array(*expected.shape))]
    builder = NeuralNetworkBuilder(inputs, outputs)
    builder.add_lrn(
        name="LRN",
        input_name="input",
        output_name="output",
        alpha=alpha,
        beta=beta,
        k=bias,
        local_size=size,
    )
    model = cm.models.MLModel(builder.spec)
    for target, dev in tvm.testing.enabled_targets():
        result = run_tvm_graph(model, target, dev, src, "input", expected.shape, dtype)
        tvm.testing.assert_allclose(result, expected, rtol=1e-5)
@tvm.testing.uses_gpu
def test_forward_lrn():
    """Exercise LRN with size=3 and default-ish parameters."""
    _verify_lrn((1, 3, 10, 20), 3, 1.0, 1.0, 0.5)
def _verify_average(input_dim1, input_dim2, axis=0):
    """Verify an element-wise AVE layer (with broadcasting) against np.mean."""
    dtype = "float32"
    lhs = np.random.uniform(size=input_dim1).astype(dtype)
    rhs = np.random.uniform(size=input_dim2).astype(dtype)
    expected = np.mean((lhs, rhs), axis=axis)
    inputs = [("input1", datatypes.Array(*input_dim1)), ("input2", datatypes.Array(*input_dim2))]
    outputs = [("output", datatypes.Array(*expected.shape))]
    builder = NeuralNetworkBuilder(inputs, outputs)
    builder.add_elementwise(
        name="MEAN", input_names=["input1", "input2"], output_name="output", mode="AVE"
    )
    model = cm.models.MLModel(builder.spec)
    for target, dev in tvm.testing.enabled_targets():
        result = run_tvm_graph(
            model, target, dev, [lhs, rhs], ["input1", "input2"], expected.shape, dtype
        )
        tvm.testing.assert_allclose(result, expected, rtol=1e-5)
@tvm.testing.uses_gpu
def test_forward_average():
    """Exercise AVE with equal and broadcast-compatible shapes."""
    for lhs_shape in ((1, 3, 20, 20), (3, 20, 20), (20, 20)):
        _verify_average(lhs_shape, (1, 3, 20, 20))
def _verify_max(input_dim):
    """Verify an element-wise MAX layer over three inputs against np.max."""
    dtype = "float32"
    operands = [np.random.uniform(size=input_dim).astype(dtype) for _ in range(3)]
    expected = np.max(operands, axis=0)
    names = ["input1", "input2", "input3"]
    inputs = [(name, datatypes.Array(*input_dim)) for name in names]
    outputs = [("output", datatypes.Array(*expected.shape))]
    builder = NeuralNetworkBuilder(inputs, outputs)
    builder.add_elementwise(
        name="Max", input_names=["input1", "input2", "input3"], output_name="output", mode="MAX"
    )
    model = cm.models.MLModel(builder.spec)
    for target, dev in tvm.testing.enabled_targets():
        result = run_tvm_graph(
            model,
            target,
            dev,
            operands,
            names,
            expected.shape,
            dtype,
        )
        tvm.testing.assert_allclose(result, expected, rtol=1e-5)
@tvm.testing.uses_gpu
def test_forward_max():
    """Exercise element-wise MAX on 4-D and 2-D shapes."""
    for shape in ((1, 3, 20, 20), (20, 20)):
        _verify_max(shape)
def _verify_min(input_dim):
    """Verify an element-wise MIN layer over three inputs against np.min."""
    dtype = "float32"
    operands = [np.random.uniform(size=input_dim).astype(dtype) for _ in range(3)]
    expected = np.min(operands, axis=0)
    names = ["input1", "input2", "input3"]
    inputs = [(name, datatypes.Array(*input_dim)) for name in names]
    outputs = [("output", datatypes.Array(*expected.shape))]
    builder = NeuralNetworkBuilder(inputs, outputs)
    builder.add_elementwise(
        name="Min", input_names=["input1", "input2", "input3"], output_name="output", mode="MIN"
    )
    model = cm.models.MLModel(builder.spec)
    for target, dev in tvm.testing.enabled_targets():
        result = run_tvm_graph(
            model,
            target,
            dev,
            operands,
            names,
            expected.shape,
            dtype,
        )
        tvm.testing.assert_allclose(result, expected, rtol=1e-5)
@tvm.testing.uses_gpu
def test_forward_min():
    """Elementwise MIN."""
    for shape in [(1, 3, 20, 20), (20, 20)]:
        _verify_min(shape)
def verify_unary_sqrt(input_dim):
    """Check the CoreML unary sqrt op against the NumPy reference."""
    data = np.random.uniform(size=input_dim).astype("float32")
    expected = np.sqrt(data)
    nn_builder = NeuralNetworkBuilder(
        [("input", datatypes.Array(*input_dim))],
        [("output", datatypes.Array(*expected.shape))],
    )
    nn_builder.add_unary(name="sqrt", input_name="input", output_name="output", mode="sqrt")
    mlmodel = cm.models.MLModel(nn_builder.spec)
    for target, dev in tvm.testing.enabled_targets():
        result = run_tvm_graph(mlmodel, target, dev, [data], ["input"], expected.shape, "float32")
        tvm.testing.assert_allclose(result, expected, rtol=1e-5)
def verify_unary_rsqrt(input_dim, epsilon=0):
    """Check the CoreML unary rsqrt op (with optional epsilon) against NumPy."""
    data = np.random.uniform(size=input_dim).astype("float32")
    expected = 1 / np.sqrt(data + epsilon)
    nn_builder = NeuralNetworkBuilder(
        [("input", datatypes.Array(*input_dim))],
        [("output", datatypes.Array(*expected.shape))],
    )
    nn_builder.add_unary(
        name="rsqrt", input_name="input", output_name="output", mode="rsqrt", epsilon=epsilon
    )
    mlmodel = cm.models.MLModel(nn_builder.spec)
    for target, dev in tvm.testing.enabled_targets():
        result = run_tvm_graph(mlmodel, target, dev, [data], ["input"], expected.shape, "float32")
        tvm.testing.assert_allclose(result, expected, rtol=1e-5)
def verify_unary_inverse(input_dim, epsilon=0):
    """Check the CoreML unary inverse op (with optional epsilon) against NumPy."""
    data = np.random.uniform(size=input_dim).astype("float32")
    expected = 1 / (data + epsilon)
    nn_builder = NeuralNetworkBuilder(
        [("input", datatypes.Array(*input_dim))],
        [("output", datatypes.Array(*expected.shape))],
    )
    nn_builder.add_unary(
        name="inverse", input_name="input", output_name="output", mode="inverse", epsilon=epsilon
    )
    mlmodel = cm.models.MLModel(nn_builder.spec)
    for target, dev in tvm.testing.enabled_targets():
        result = run_tvm_graph(mlmodel, target, dev, [data], ["input"], expected.shape, "float32")
        tvm.testing.assert_allclose(result, expected, rtol=1e-5)
def verify_unary_power(input_dim, alpha):
    """Check the CoreML unary power op (x**alpha) against NumPy."""
    data = np.random.uniform(size=input_dim).astype("float32")
    expected = np.power(data, alpha)
    nn_builder = NeuralNetworkBuilder(
        [("input", datatypes.Array(*input_dim))],
        [("output", datatypes.Array(*expected.shape))],
    )
    nn_builder.add_unary(
        name="power", input_name="input", output_name="output", mode="power", alpha=alpha
    )
    mlmodel = cm.models.MLModel(nn_builder.spec)
    for target, dev in tvm.testing.enabled_targets():
        result = run_tvm_graph(mlmodel, target, dev, [data], ["input"], expected.shape, "float32")
        tvm.testing.assert_allclose(result, expected, rtol=1e-5)
def verify_unary_exp(input_dim):
    """Check the CoreML unary exp op against the NumPy reference."""
    data = np.random.uniform(size=input_dim).astype("float32")
    expected = np.exp(data)
    nn_builder = NeuralNetworkBuilder(
        [("input", datatypes.Array(*input_dim))],
        [("output", datatypes.Array(*expected.shape))],
    )
    nn_builder.add_unary(name="exp", input_name="input", output_name="output", mode="exp")
    mlmodel = cm.models.MLModel(nn_builder.spec)
    for target, dev in tvm.testing.enabled_targets():
        result = run_tvm_graph(mlmodel, target, dev, [data], ["input"], expected.shape, "float32")
        tvm.testing.assert_allclose(result, expected, rtol=1e-5)
def verify_unary_log(input_dim):
    """Check the CoreML unary log op against the NumPy reference."""
    data = np.random.uniform(size=input_dim).astype("float32")
    expected = np.log(data)
    nn_builder = NeuralNetworkBuilder(
        [("input", datatypes.Array(*input_dim))],
        [("output", datatypes.Array(*expected.shape))],
    )
    nn_builder.add_unary(name="log", input_name="input", output_name="output", mode="log")
    mlmodel = cm.models.MLModel(nn_builder.spec)
    for target, dev in tvm.testing.enabled_targets():
        result = run_tvm_graph(mlmodel, target, dev, [data], ["input"], expected.shape, "float32")
        tvm.testing.assert_allclose(result, expected, rtol=1e-5)
def verify_unary_abs(input_dim):
    """Check the CoreML unary abs op on signed data against NumPy."""
    data = np.random.uniform(-100.0, 100.0, size=input_dim).astype("float32")
    expected = np.abs(data)
    nn_builder = NeuralNetworkBuilder(
        [("input", datatypes.Array(*input_dim))],
        [("output", datatypes.Array(*expected.shape))],
    )
    nn_builder.add_unary(name="abs", input_name="input", output_name="output", mode="abs")
    mlmodel = cm.models.MLModel(nn_builder.spec)
    for target, dev in tvm.testing.enabled_targets():
        result = run_tvm_graph(mlmodel, target, dev, [data], ["input"], expected.shape, "float32")
        tvm.testing.assert_allclose(result, expected, rtol=1e-5)
def verify_unary_threshold(input_dim, alpha):
    """Check the CoreML unary threshold op (clamp below alpha) against NumPy."""
    data = np.random.uniform(-100.0, 100.0, size=input_dim).astype("float32")
    expected = np.maximum(data, alpha)
    nn_builder = NeuralNetworkBuilder(
        [("input", datatypes.Array(*input_dim))],
        [("output", datatypes.Array(*expected.shape))],
    )
    nn_builder.add_unary(
        name="threshold", input_name="input", output_name="output", mode="threshold", alpha=alpha
    )
    mlmodel = cm.models.MLModel(nn_builder.spec)
    for target, dev in tvm.testing.enabled_targets():
        result = run_tvm_graph(mlmodel, target, dev, [data], ["input"], expected.shape, "float32")
        tvm.testing.assert_allclose(result, expected, rtol=1e-5)
@tvm.testing.uses_gpu
def test_forward_unary():
    """Exercise every supported CoreML unary mode on one shape."""
    shape = (1, 3, 20, 20)
    verify_unary_sqrt(shape)
    verify_unary_rsqrt(shape)
    verify_unary_rsqrt(shape, epsilon=1e-6)
    verify_unary_inverse(shape)
    verify_unary_inverse(shape, epsilon=1e-6)
    for exponent in (0.5, 4):
        verify_unary_power(shape, alpha=exponent)
    verify_unary_exp(shape)
    verify_unary_log(shape)
    verify_unary_abs(shape)
    for cutoff in (-6.0, 5.0):
        verify_unary_threshold(shape, alpha=cutoff)
@tvm.testing.uses_gpu
def test_forward_reduce():
    """Reduce ops (sum/avg/prod/min/max/argmax) over all CoreML axis modes.

    Fixes: removes a leftover debug ``print`` and guards against an unbound
    ``np_axis`` if the axis enum is ever extended without updating the
    translation below.
    """

    class ReduceAxis(Enum):
        # pylint: disable=invalid-name
        CHW = 0
        HW = 1
        C = 2
        H = 3
        W = 4

    def _verify_reduce(input_dim, mode, axis, ref_func, dtype="float32"):
        a_np = np.random.uniform(size=input_dim).astype(dtype)
        # translate to axis from coreml format; negative axes count from the
        # innermost dim so the same enum works for rank 2/3/4 inputs
        if axis == ReduceAxis.CHW:
            np_axis = (-3, -2, -1)
        elif axis == ReduceAxis.HW:
            np_axis = (-2, -1)
        elif axis == ReduceAxis.C:
            np_axis = -3
        elif axis == ReduceAxis.H:
            np_axis = -2
        elif axis == ReduceAxis.W:
            np_axis = -1
        else:
            raise ValueError(f"Unsupported reduce axis {axis}")
        if ref_func is np.argmax:
            # argmax drops the reduced axis; re-insert it to emulate keepdims
            ref_val = np.expand_dims(ref_func(a_np, np_axis), np_axis).astype(dtype)
        else:
            ref_val = ref_func(a_np, np_axis, keepdims=True)
        inputs = [("input", datatypes.Array(*input_dim))]
        output = [("output", datatypes.Array(*ref_val.shape))]
        builder = NeuralNetworkBuilder(inputs, output)
        builder.add_reduce(
            name=mode, input_name="input", output_name="output", axis=axis.name, mode=mode
        )
        model = cm.models.MLModel(builder.spec)
        for target, dev in tvm.testing.enabled_targets():
            out = run_tvm_graph(model, target, dev, [a_np], ["input"], ref_val.shape, dtype)
            tvm.testing.assert_allclose(out, ref_val, rtol=1e-5, atol=1e-5)

    dshapes = [[10, 10], [1, 10, 10], [1, 3, 10, 10]]
    for dshape in dshapes:
        for axis in ReduceAxis:
            if len(dshape) < 3 and axis in [ReduceAxis.CHW, ReduceAxis.C]:
                # input must have rank at least 3
                continue
            _verify_reduce(dshape, "sum", axis, np.sum)
            _verify_reduce(dshape, "avg", axis, np.mean)
            _verify_reduce(dshape, "prod", axis, np.prod)
            _verify_reduce(dshape, "min", axis, np.min)
            _verify_reduce(dshape, "max", axis, np.max)
            if axis in [ReduceAxis.C, ReduceAxis.H, ReduceAxis.W]:
                # For mode ArgMax, axis must be [-1] or [-2] or [-3]
                _verify_reduce(dshape, "argmax", axis, np.argmax, dtype="int32")
def verify_reshape(input_dim, target_shape, mode):
    """Check the CoreML reshape layer (given mode) against np.reshape."""
    data = np.random.uniform(-100.0, 100.0, size=input_dim).astype("float32")
    expected = np.reshape(data, target_shape)
    nn_builder = NeuralNetworkBuilder(
        [("input", datatypes.Array(*input_dim))],
        [("output", datatypes.Array(*expected.shape))],
    )
    nn_builder.add_reshape(
        name="reshape",
        input_name="input",
        output_name="output",
        target_shape=target_shape,
        mode=mode,
    )
    mlmodel = cm.models.MLModel(nn_builder.spec)
    for target, dev in tvm.testing.enabled_targets():
        result = run_tvm_graph(mlmodel, target, dev, [data], ["input"], expected.shape, "float32")
        tvm.testing.assert_allclose(result, expected, rtol=1e-5)
def test_forward_reshape():
    """Reshape, exercising both CoreML reshape modes."""
    for mode in (0, 1):
        for src_shape, dst_shape in [((20,), (1, 2, 2, 5)), ((1, 3, 20, 20), (1, 12, 10, 10))]:
            verify_reshape(src_shape, dst_shape, mode)
def _verify_split(input_dim, out_nums):
    """Split along the channel axis into equal parts; compare each output."""
    data = np.random.uniform(-100.0, 100.0, size=input_dim).astype("float32")
    expected = np.split(data, out_nums, axis=-3)
    out_names = ["output" + str(idx) for idx in range(len(expected))]
    out_shapes = [part.shape for part in expected]
    nn_builder = NeuralNetworkBuilder(
        [("input", datatypes.Array(*input_dim))],
        [(name, datatypes.Array(*part.shape)) for name, part in zip(out_names, expected)],
    )
    nn_builder.add_split(name="split", input_name="input", output_names=out_names)
    mlmodel = cm.models.MLModel(nn_builder.spec)
    for target, dev in tvm.testing.enabled_targets():
        result = run_tvm_graph(
            mlmodel, target, dev, [data], ["input"], out_shapes, ["float32"] * len(out_shapes)
        )
        tvm.testing.assert_allclose(result, expected, rtol=1e-5)
def test_forward_split():
    """Split"""
    for shape, parts in [((1, 4, 4, 4), 2), ((1, 3, 30, 20), 3)]:
        _verify_split(shape, parts)
def verify_image_scaler(input_dim, blue_bias=0.0, green_bias=0.0, red_bias=0.0, image_scale=1.0):
    """Image-scaler preprocessing applied to input1, fused with an Add layer."""
    data = np.random.uniform(size=input_dim).astype("float32")
    # preprocessing is only defined for CHW images with exactly 3 channels
    assert len(data.shape) == 3 and data.shape[0] == 3
    expected = np.zeros(data.shape, dtype="float32")
    for channel, bias in enumerate((blue_bias, green_bias, red_bias)):
        expected[channel, :, :] = image_scale * data[channel, :, :] + bias
    # the Add layer sums the raw second input with the preprocessed first one
    expected = np.add(data, expected)
    nn_builder = NeuralNetworkBuilder(
        [("input1", datatypes.Array(*input_dim)), ("input2", datatypes.Array(*input_dim))],
        [("output", datatypes.Array(*expected.shape))],
    )
    nn_builder.set_pre_processing_parameters(
        image_input_names=["input1"],
        is_bgr=True,
        blue_bias=blue_bias,
        green_bias=green_bias,
        red_bias=red_bias,
        image_scale=image_scale,
    )
    # add one add layer to make CoreML model format valid
    # add layer has been tested before.
    nn_builder.add_elementwise(
        name="add", input_names=["input1", "input2"], output_name="output", alpha=0, mode="ADD"
    )
    mlmodel = cm.models.MLModel(nn_builder.spec)
    for target, dev in tvm.testing.enabled_targets():
        result = run_tvm_graph(
            mlmodel, target, dev, [data, data], ["input1", "input2"], expected.shape, "float32"
        )
        tvm.testing.assert_allclose(result, expected, rtol=1e-5)
@tvm.testing.uses_gpu
def test_forward_image_scaler():
    """Image scaler, with and without per-channel biases."""
    for kwargs in (
        {"image_scale": 0.17},
        {
            "blue_bias": -1.7669800519943237,
            "green_bias": -1.985260009765625,
            "red_bias": -2.102560043334961,
            "image_scale": 0.379,
        },
    ):
        verify_image_scaler((3, 224, 224), **kwargs)
def verify_convolution(input_dim, filter_, padding):
    """Verify a CoreML 2D convolution against the topi NCHW reference.

    Parameters
    ----------
    input_dim : tuple
        NCHW input shape; CoreML is fed the (C, H, W) part.
    filter_ : tuple
        OIHW kernel shape.
    padding : str
        "VALID" or "SAME".

    Fixes: ``kernel_channels`` was hard-coded to 3 even though the input
    channel count ``c`` is available, which would silently break any caller
    using a non-3-channel input.
    """
    dtype = "float32"
    _, c, h, width = input_dim
    out_c, _, kernel_h, kernel_w = filter_
    a_np = np.random.uniform(size=input_dim).astype(dtype)
    w_np = np.random.uniform(size=(out_c, c, kernel_h, kernel_w)).astype(dtype)
    # CoreML stores convolution weights in HWIO layout
    w_np_cm = np.transpose(w_np, axes=(2, 3, 1, 0))
    b_np = conv2d_nchw_python(a_np, w_np, [1, 1], padding)
    inputs = [("input1", datatypes.Array(c, h, width))]
    output = [("output", datatypes.Array(*b_np.shape))]  # pylint:disable=not-an-iterable
    builder = NeuralNetworkBuilder(inputs, output)
    builder.add_convolution(
        name="conv",
        kernel_channels=c,  # was hard-coded to 3
        output_channels=out_c,
        height=kernel_h,
        width=kernel_w,
        stride_height=1,
        stride_width=1,
        border_mode=padding.lower(),
        groups=1,
        W=w_np_cm,
        b=None,
        has_bias=False,
        is_deconv=False,
        input_name="input1",
        output_name="output",
    )
    model = cm.models.MLModel(builder.spec)
    for target, dev in tvm.testing.enabled_targets():
        out = run_tvm_graph(model, target, dev, [a_np], ["input1"], output_shape=None)
        tvm.testing.assert_allclose(out, b_np, rtol=1e-5)
@tvm.testing.uses_gpu
def test_forward_convolution():
    """Convolution with both padding modes."""
    for pad in ("VALID", "SAME"):
        verify_convolution((1, 3, 224, 224), filter_=(32, 3, 3, 3), padding=pad)
def test_can_build_keras_to_coreml_to_relay():
    """Test multiple conversion paths and importing from a saved file.

    Keras -> saved .h5 -> coremltools -> saved .mlmodel -> Relay build.

    Fixes: removes dead ``desc``/``iname``/``ishape`` assignments that were
    immediately overwritten by the shape-collection loop.
    """
    model = keras.models.Sequential()
    model.add(
        keras.layers.Conv2D(
            filters=6,
            kernel_size=(1, 1),
            activation="relu",
            padding="same",
            input_shape=(3, 3, 1),
            data_format="channels_first",
        )
    )
    with tempfile.TemporaryDirectory() as tmpdir:
        kmodel_fn = path.join(tmpdir, "c1mdl.h5")
        model.save(kmodel_fn)

        mdl = cm.convert(kmodel_fn)
        model_file = path.join(tmpdir, "c1.mlmodel")
        mdl.save(model_file)

        # reload from disk to exercise the file-import path
        mdl = cm.models.MLModel(model_file)
        # collect every input name/shape from the saved spec
        shape_dict = {}
        for i in mdl.get_spec().description.input:
            shape_dict[i.name] = i.type.multiArrayType.shape

        mod, params = relay.frontend.from_coreml(mdl, shape_dict)
        with tvm.transform.PassContext(opt_level=3):
            relay.build(mod, "llvm", params=params)
# Run the full test suite when this file is executed directly as a script.
if __name__ == "__main__":
    tvm.testing.main()
| 29,386 | 33.695396 | 99 | py |
tvm | tvm-main/tests/python/frontend/onnx/test_forward.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-argument
"""
ONNX testcases
================
This article is a test script to test ONNX operator with Relay.
"""
import glob
import os
import platform
import re
import copy
import tempfile
import pytest
import scipy
import numpy as np
import tvm
import tvm.testing
import tvm.topi.testing
from tvm import relay
from tvm.contrib import graph_executor, utils
from tvm.relay.frontend.common import infer_type
from tvm.relay.build_module import bind_params_by_name
from relay.utils.tag_span import _create_span, _set_span, _verify_structural_equal_with_span
import onnx
import onnxruntime.backend
from onnx import TensorProto, helper, mapping, numpy_helper
from onnxruntime.quantization import CalibrationDataReader, quantize_static
import torch
import torchvision
from torch.nn import Linear, Module, Sequential
def get_input_data_shape_dict(graph_def, input_data):
"""Get input data shape"""
if isinstance(input_data, list):
input_names = {}
shape_dict = {}
for i, _ in enumerate(input_data):
input_names[i] = graph_def.graph.input[i].name
input_ = input_data[i]
if input_ is None or not hasattr(input_, "shape") or input_.shape == ():
# Skip adding input shape data when the input data is None;
# This is to enable optional arguments for onnx operators.
continue
elif isinstance(input_, list):
shape_dict[input_names[i]] = (len(input_),)
else:
shape_dict[input_names[i]] = input_.shape
else:
input_names = graph_def.graph.input[0].name
shape_dict = {input_names: input_data.shape}
return input_names, shape_dict
def get_tvm_output_with_vm(
    graph_def,
    input_data,
    target,
    dev,
    opset=None,
    freeze_params=False,
    convert_config=None,
    validate_structural_equal=True,
):
    """Generic function to execute and get tvm output with vm executor.

    Imports ``graph_def`` (an ONNX model) into Relay, evaluates it on the VM
    executor with ``input_data``, and returns the output(s) as numpy arrays.
    When ``validate_structural_equal`` is True the model is imported a second
    time with span filling enabled and the two modules are asserted to be
    structurally equal.
    """
    if not isinstance(input_data, list):
        input_data = [input_data]
    _, shape_dict = get_input_data_shape_dict(graph_def, input_data)
    # First import with span filling disabled; this module is the one executed.
    with tvm.testing.disable_span_filling():
        mod, params = relay.frontend.from_onnx(
            graph_def,
            shape_dict,
            opset=opset,
            freeze_params=freeze_params,
            convert_config=convert_config,
        )
    # handle the bfloat16 so we explicitly allocate
    # bfloat16 arrays as input
    for i, param in enumerate(mod["main"].params):
        if param.type_annotation.dtype == "bfloat16":
            input_data[i] = tvm.nd.empty(input_data[i].shape, "bfloat16").copyfrom(
                input_data[i]
            )
    if validate_structural_equal:
        # Re-import with spans attached and verify span filling does not
        # change the structure of the converted module.
        with tvm.testing.enable_span_filling():
            mod_with_span, _ = relay.frontend.from_onnx(
                graph_def,
                shape_dict,
                opset=opset,
                freeze_params=freeze_params,
                convert_config=convert_config,
            )
        assert tvm.ir.structural_equal(mod, mod_with_span)

    result = relay.create_executor("vm", mod=mod, device=dev, target=target).evaluate()(
        *input_data, **params
    )
    if isinstance(result, tvm.runtime.NDArray):
        return result.numpy()
    # Multi-output models return a sequence of NDArrays.
    return [r.numpy() for r in result]
def get_tvm_output(
    graph_def,
    input_data,
    target,
    dev,
    output_shape=None,
    output_dtype="float32",
    opset=None,
    opt_level=1,
    convert_config=None,
):
    """Generic function to execute and get tvm output.

    Builds ``graph_def`` with the graph executor and runs it on ``input_data``.
    ``output_shape`` being a list signals a multi-output model; all outputs
    are then returned as a list of numpy arrays.
    """
    # TODO: Resolve the issues and remove the following lines
    input_names, shape_dict = get_input_data_shape_dict(graph_def, input_data)

    mod, params = relay.frontend.from_onnx(
        graph_def, shape_dict, opset=opset, convert_config=convert_config
    )

    with tvm.transform.PassContext(opt_level=opt_level):
        graph, lib, params = relay.build(mod, target, params=params)

    m = graph_executor.create(graph, lib, dev)
    # set inputs
    if isinstance(input_data, list):
        for i, _ in enumerate(input_names):
            # Its possible for some onnx inputs to not be needed in the tvm
            # module, confirm its present before setting.
            # pylint: disable=unnecessary-list-index-lookup
            m.set_input(input_names[i], tvm.nd.array(input_data[i].astype(input_data[i].dtype)))
    else:
        m.set_input(input_names, tvm.nd.array(input_data.astype(input_data.dtype)))

    m.set_input(**params)
    # execute
    m.run()
    # get outputs
    if isinstance(output_shape, list):
        # multi-output model: collect every output in order
        tvm_output_list = []
        for i, _ in enumerate(output_shape):
            tvm_output = m.get_output(i)
            tvm_output_list.append(tvm_output.numpy())
        return tvm_output_list
    else:
        tvm_output = m.get_output(0)
        return tvm_output.numpy()
def get_onnxruntime_output(model, inputs):
    """Run the model on the onnxruntime CPU backend and return its output."""
    backend_rep = onnxruntime.backend.prepare(model.SerializeToString(), "CPU")
    # A one-element input list is fed as the bare value.
    feed = inputs[0] if isinstance(inputs, list) and len(inputs) == 1 else inputs
    output = backend_rep.run(feed)
    # Unwrap singleton result lists for convenience.
    return output[0] if len(output) == 1 else output
def verify_with_ort_with_inputs(
    model,
    inputs,
    out_shape=None,
    target=None,
    dev=None,
    use_vm=False,
    opset=None,
    freeze_params=False,
    dtype="float32",
    rtol=1e-5,
    atol=1e-5,
    apply_softmax=False,
    opt_level=1,
    convert_config=None,
):
    """verify_with_ort_with_inputs.

    Runs ``model`` on the given ``inputs`` through both onnxruntime and TVM
    (VM executor when ``use_vm`` is True, graph executor otherwise) and
    asserts that every output pair matches in value and dtype.
    """
    if opset is not None:
        # Force the requested opset so both backends convert the same way.
        model.opset_import[0].version = opset

    ort_out = get_onnxruntime_output(model, inputs)
    if use_vm:
        tvm_out = get_tvm_output_with_vm(
            model,
            inputs,
            target,
            dev,
            opset=opset,
            freeze_params=freeze_params,
            convert_config=convert_config,
        )
    else:
        tvm_out = get_tvm_output(
            model,
            inputs,
            target,
            dev,
            out_shape,
            dtype,
            opset=opset,
            opt_level=opt_level,
            convert_config=convert_config,
        )
    # Normalize both results to lists so single- and multi-output models
    # are compared the same way.
    if not isinstance(tvm_out, list):
        tvm_out = [tvm_out]
    if not isinstance(ort_out, list):
        ort_out = [ort_out]
    for tvm_val, ort_val in zip(tvm_out, ort_out):
        if apply_softmax:
            ort_val = scipy.special.softmax(ort_val)
            tvm_val = scipy.special.softmax(tvm_val)
        tvm.testing.assert_allclose(ort_val, tvm_val, rtol=rtol, atol=atol)
        assert ort_val.dtype == tvm_val.dtype
def verify_with_ort(
    model,
    input_shapes,
    out_shape=None,
    target=None,
    dev=None,
    use_vm=False,
    opset=None,
    freeze_params=False,
    dtype="float32",
    rtol=1e-5,
    atol=1e-5,
):
    """Generate uniform random inputs for ``input_shapes`` and compare the
    model's TVM output against onnxruntime via verify_with_ort_with_inputs."""
    random_inputs = [np.random.uniform(size=shape).astype(dtype) for shape in input_shapes]
    verify_with_ort_with_inputs(
        model,
        random_inputs,
        out_shape=out_shape,
        target=target,
        dev=dev,
        use_vm=use_vm,
        opset=opset,
        freeze_params=freeze_params,
        dtype=dtype,
        rtol=rtol,
        atol=atol,
    )
def quantize_and_verify_with_ort(
    onnx_model, input_names, input_shapes, target, dev, rtol=1e-5, atol=1e-5
):
    """quantize_and_verify_with_ort.

    Statically quantizes ``onnx_model`` with random calibration data, then
    compares TVM's output for the quantized model against onnxruntime.
    """
    input_arrays = [np.random.random(shape).astype("float32") for shape in input_shapes]

    class RandomDataReader(CalibrationDataReader):
        # pylint: disable=missing-class-docstring
        def __init__(self, n=10):
            # n batches of random calibration samples, one dict per batch
            input_dict = dict(zip(input_names, input_shapes))
            self.data = iter(
                [
                    {
                        name: np.random.random(shape).astype("float32")
                        for name, shape in input_dict.items()
                    }
                    for _ in range(n)
                ]
            )

        def get_next(self):
            # Returns None when the calibration data is exhausted.
            return next(self.data, None)

    t_dir = tvm.contrib.utils.tempdir()
    model_fp32 = os.path.join(t_dir.temp_dir, "model.onnx")
    onnx.save_model(onnx_model, model_fp32)
    model_quant = os.path.join(t_dir.temp_dir, "model.quant.onnx")
    _ = quantize_static(  # pylint: disable=assignment-from-no-return
        model_fp32, model_quant, RandomDataReader()
    )
    # opt_level=1 will cause error with qnn lowering
    model = onnx.load(model_quant)
    verify_with_ort_with_inputs(
        model, input_arrays, opt_level=2, target=target, dev=dev, use_vm=True, rtol=rtol, atol=atol
    )
def make_constant_node(name, data_type, dims, vals):
    """Build an ONNX Constant node holding the given tensor value."""
    tensor = helper.make_tensor(name=name, data_type=data_type, dims=dims, vals=vals)
    return helper.make_node("Constant", inputs=[], outputs=[name], value=tensor)
def is_version_greater_than(ver):
    """Return True if the installed onnx version is strictly newer than ``ver``.

    Versions are compared numerically, component by component.  The previous
    implementation compared the joined version strings lexicographically,
    which mis-orders multi-digit components (e.g. "1.12.0" < "1.9.0" as
    strings), and its regex captured only the first digit of the patch level.
    """

    def _to_tuple(version_string):
        # Keep only the leading "major.minor.patch" digits; ignore any
        # suffix such as "rc1" or local build metadata.
        major, minor, patch = re.findall(r"(\d+)\.(\d+)\.(\d+)", version_string)[0]
        return (int(major), int(minor), int(patch))

    return _to_tuple(onnx.__version__) > _to_tuple(ver)
@tvm.testing.parametrize_targets
def test_reshape(target, dev):
    """Reshape via a constant shape tensor."""
    in_shape = (4, 3, 3, 4)
    new_shape = (6, 2, 4, 3)
    shape_arr = np.array(new_shape)
    const_node = onnx.helper.make_node(
        "Constant",
        inputs=[],
        outputs=["ref_in"],
        value=onnx.helper.make_tensor(
            name="const_tensor",
            data_type=onnx.TensorProto.INT32,
            dims=shape_arr.shape,
            vals=shape_arr.flatten().astype(int),
        ),
    )
    reshape = helper.make_node("Reshape", ["in", "ref_in"], ["out"])
    graph = helper.make_graph(
        [const_node, reshape],
        "reshape_test",
        inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT, list(in_shape))],
        outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(new_shape))],
    )
    model = helper.make_model(graph, producer_name="reshape_test")
    data = np.random.uniform(size=in_shape).astype("int32")
    tvm_out = get_tvm_output(model, data, target, dev, new_shape, "float32")
    tvm.testing.assert_allclose(new_shape, tvm_out.shape)
@tvm.testing.parametrize_targets
def test_double_reshape(target, dev):
    """Two Reshape consumers sharing one constant shape tensor."""
    in_shape = (4, 3, 3, 4)
    new_shape = (6, 2, 4, 3)
    shape_arr = np.array(new_shape)
    const_node = onnx.helper.make_node(
        "Constant",
        inputs=[],
        outputs=["ref_in"],
        value=onnx.helper.make_tensor(
            name="const_tensor",
            data_type=onnx.TensorProto.INT32,
            dims=shape_arr.shape,
            vals=shape_arr.flatten().astype(int),
        ),
    )
    reshapes = [
        helper.make_node("Reshape", ["in", "ref_in"], ["out1"]),
        helper.make_node("Reshape", ["in", "ref_in"], ["out2"]),
    ]
    add_node = helper.make_node("Add", ["out1", "out2"], ["out"])
    graph = helper.make_graph(
        [const_node] + reshapes + [add_node],
        "reshape_test",
        inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT, list(in_shape))],
        outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(new_shape))],
    )
    model = helper.make_model(graph, producer_name="reshape_test")
    data = np.random.uniform(size=in_shape).astype("int32")
    tvm_out = get_tvm_output(model, data, target, dev, new_shape, "float32")
    tvm.testing.assert_allclose(new_shape, tvm_out.shape)
@tvm.testing.parametrize_targets
def test_expand(target, dev):
    """Expand with unchanged, larger, and smaller target ranks."""

    def _test_expand(name, data, shape, ref_data, dtype="int32"):
        # the constant shape tensor is built with the requested integer dtype
        if dtype == "int32":
            onnx_dtype = onnx.TensorProto.INT32
        elif dtype == "int64":
            onnx_dtype = onnx.TensorProto.INT64
        else:
            raise TypeError("Invalid dtype")
        shape_array = np.array(shape)
        shape_node = onnx.helper.make_node(
            "Constant",
            inputs=[],
            outputs=["shape"],
            value=onnx.helper.make_tensor(
                name="const_tensor",
                data_type=onnx_dtype,
                dims=shape_array.shape,
                vals=shape_array.flatten().astype(dtype),
            ),
        )
        expand_node = helper.make_node("Expand", ["in", "shape"], ["out"])
        graph = helper.make_graph(
            [shape_node, expand_node],
            "expand_test",
            inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT, list(data.shape))],
            outputs=[
                helper.make_tensor_value_info("out", TensorProto.FLOAT, list(ref_data.shape))
            ],
        )
        model = helper.make_model(graph, producer_name=name)
        tvm_out = get_tvm_output_with_vm(model, data, target, dev, freeze_params=True)
        tvm.testing.assert_allclose(ref_data, tvm_out)

    # same rank: broadcast along an existing dimension
    data = np.random.uniform(size=(3, 1)).astype(np.float32)
    ref_data = np.tile(data, 4)
    for dtype in ("int32", "int64"):
        _test_expand("expand_with_dim_unchanged_test", data, (3, 4), ref_data, dtype)

    # target shape has a higher rank
    data = np.random.uniform(size=(3, 1)).astype(np.float32)
    ref_data = data * np.ones((2, 1, 6), dtype=np.float32)
    for dtype in ("int32", "int64"):
        _test_expand("expand_larger_target_shape_test", data, (2, 1, 6), ref_data, dtype)

    # target shape has a lower rank
    data = np.random.uniform(size=(1, 1)).astype(np.float32)
    ref_data = data * np.ones((3,), dtype=np.float32)
    for dtype in ("int32", "int64"):
        _test_expand("expand_smaller_target_shape_test", data, (3,), ref_data, dtype)
@tvm.testing.parametrize_targets
def test_depth_to_space(target, dev):
    """test_depth_to_space"""

    def verify_depth_to_space(src_shape, dst_shape, mode, block_size):
        # `mode` is accepted but unused: see the note below about OpSet-1.
        node = onnx.helper.make_node(
            "DepthToSpace", inputs=["x"], outputs=["y"], blocksize=block_size
        )
        graph = helper.make_graph(
            [node],
            "depth_to_space_test",
            inputs=[helper.make_tensor_value_info("x", TensorProto.FLOAT, list(src_shape))],
            outputs=[helper.make_tensor_value_info("y", TensorProto.FLOAT, list(dst_shape))],
        )
        model = helper.make_model(graph, producer_name="depth_to_space_test")
        verify_with_ort(model, [src_shape], [dst_shape], target, dev)

    # current onnx.checker use OpSet-1 version of DepthToSpace, which doesn't have a mode argument.
    # TO-DO, we can add mode argument to test CRD mode and DCR mode
    # in the future when we update to a newer onnx version.
    verify_depth_to_space((1, 8, 2, 3), (1, 2, 4, 6), mode="CRD", block_size=2)
@tvm.testing.parametrize_targets
def test_space_to_depth(target, dev):
    """test_space_to_depth"""

    def verify_space_to_depth(src_shape, dst_shape, block_size):
        node = onnx.helper.make_node(
            "SpaceToDepth", inputs=["x"], outputs=["y"], blocksize=block_size
        )
        graph = helper.make_graph(
            [node],
            "space_to_depth_test",
            inputs=[helper.make_tensor_value_info("x", TensorProto.FLOAT, list(src_shape))],
            outputs=[helper.make_tensor_value_info("y", TensorProto.FLOAT, list(dst_shape))],
        )
        model = helper.make_model(graph, producer_name="space_to_depth_test")
        verify_with_ort(model, [src_shape], [dst_shape], target, dev)

    verify_space_to_depth((1, 1, 4, 6), (1, 4, 2, 3), 2)
@tvm.testing.parametrize_targets
def test_shape(target, dev):
    """Shape of a reshaped tensor."""
    in_shape = (4, 3, 3, 4)
    new_shape = (6, 2, 4, 3)
    shape_arr = np.array(new_shape)
    const_node = onnx.helper.make_node(
        "Constant",
        inputs=[],
        outputs=["ref_in"],
        value=onnx.helper.make_tensor(
            name="const_tensor",
            data_type=onnx.TensorProto.INT32,
            dims=shape_arr.shape,
            vals=shape_arr.flatten().astype(int),
        ),
    )
    reshape_node = helper.make_node("Reshape", ["in", "ref_in"], ["out"])
    shape_node = helper.make_node("Shape", ["out"], ["final_out"])
    graph = helper.make_graph(
        [const_node, reshape_node, shape_node],
        "shape_test",
        inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT, list(in_shape))],
        outputs=[helper.make_tensor_value_info("final_out", TensorProto.FLOAT, list(new_shape))],
    )
    model = helper.make_model(graph, producer_name="shape_test")
    data = np.random.uniform(size=in_shape).astype("int32")
    tvm_out = get_tvm_output(model, data, target, dev, new_shape, "int32")
    tvm.testing.assert_allclose(new_shape, tvm_out)
@tvm.testing.parametrize_targets
def test_power(target, dev):
    """Pow with equal and broadcast operand shapes."""

    def _check_pow(x_shape, y_shape):
        if isinstance(y_shape, int):
            y_shape = [y_shape]
        base = np.random.uniform(size=x_shape).astype(np.float32)
        exponent = np.random.uniform(size=y_shape).astype(np.float32)
        expected = np.power(base, exponent).astype(np.float32)
        pow_node = helper.make_node("Pow", ["x", "y"], ["out"])
        graph = helper.make_graph(
            [pow_node],
            "power_test",
            inputs=[
                helper.make_tensor_value_info("x", TensorProto.FLOAT, list(x_shape)),
                helper.make_tensor_value_info("y", TensorProto.FLOAT, list(y_shape)),
            ],
            outputs=[
                helper.make_tensor_value_info("out", TensorProto.FLOAT, list(expected.shape))
            ],
        )
        model = helper.make_model(graph, producer_name="power_test")
        tvm_out = get_tvm_output(model, [base, exponent], target, dev, expected.shape)
        tvm.testing.assert_allclose(expected, tvm_out, rtol=1e-5, atol=1e-5)

    _check_pow((1, 3), (1))
    _check_pow((2, 3), (2, 3))
    _check_pow((2, 3), (1, 3))
@tvm.testing.parametrize_targets
def test_range(target, dev):
    """Range over several dtypes and step directions."""

    def verify_range(start, limit, delta, dtype):
        onnx_dtype = {
            "float32": TensorProto.FLOAT,
            "int32": TensorProto.INT32,
            "int64": TensorProto.INT64,
        }[dtype]
        range_node = helper.make_node("Range", ["start", "limit", "delta"], ["output"])
        # np.arange defines the expected output length/shape
        out_shape = np.arange(start, limit, delta).shape
        graph = helper.make_graph(
            [range_node],
            "range_test",
            inputs=[
                helper.make_tensor_value_info(name, onnx_dtype, [])
                for name in ("start", "limit", "delta")
            ],
            outputs=[helper.make_tensor_value_info("output", onnx_dtype, out_shape)],
        )
        model = helper.make_model(graph, producer_name="range_test")
        inputs = [np.array(v).astype(dtype) for v in (start, limit, delta)]
        verify_with_ort_with_inputs(model, inputs, target=target, dev=dev, use_vm=True)

    for dtype in ("float32", "int32", "int64"):
        verify_range(0, 10, 1, dtype)
        verify_range(2, 8, 2, dtype)
        verify_range(-3, 6, 4, dtype)
        verify_range(-2, -7, -1, dtype)
@tvm.testing.parametrize_targets
def test_squeeze(target, dev):
    """Squeeze with explicit axes, no axes, and a scalar input."""

    def _run_case(in_shape, out_shape, axes=None):
        squeeze = helper.make_node("Squeeze", ["in"], ["out"], axes=axes)
        graph = helper.make_graph(
            [squeeze],
            "squeeze_test",
            inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT, list(in_shape))],
            outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(out_shape))],
        )
        model = helper.make_model(graph, producer_name="squeeze_test")
        data = np.random.uniform(size=in_shape).astype("float32")
        verify_with_ort_with_inputs(model, [data], [out_shape], target=target, dev=dev, opset=11)

    _run_case((1, 3, 1, 3, 1, 1), (3, 3), [0, 2, 4, 5])
    _run_case((1, 3, 1, 3, 1, 1), (3, 3))  # empty axis.
    _run_case((), ())  # scalar testing.
@tvm.testing.parametrize_targets
def test_flatten(target, dev):
    """Flatten at a given axis."""

    def _check_flatten(in_shape, axis, ref_shape):
        node = helper.make_node("Flatten", ["in"], ["out"], axis=axis)
        graph = helper.make_graph(
            [node],
            "flatten_test",
            inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT, list(in_shape))],
            outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(ref_shape))],
        )
        model = helper.make_model(graph, producer_name="flatten_test")
        verify_with_ort(model, [in_shape], target=target, dev=dev)

    _check_flatten((1, 3, 4, 4), 1, (1, 48))
    _check_flatten((1,), 1, (1, 1))
@tvm.testing.parametrize_targets
def test_unsqueeze(target, dev):
    """Unsqueeze with a static axes attribute."""
    in_shape = (3, 3)
    out_shape = (1, 3, 3, 1, 1)
    node = helper.make_node("Unsqueeze", ["in"], ["out"], axes=[0, 3, 4])
    graph = helper.make_graph(
        [node],
        "squeeze_test",
        inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT, list(in_shape))],
        outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(out_shape))],
    )
    model = helper.make_model(graph, producer_name="squeeze_test")
    verify_with_ort(model, [in_shape], target=target, dev=dev, opset=11)
@tvm.testing.parametrize_targets
def test_unsqueeze_with_neg_axes(target, dev):
    """Unsqueeze with negative axes, as attribute (opset < 13) or input (opset >= 13)."""

    def verify_unsqueeze_with_neg_axes(opset=11):
        in_shape = (2, 3, 4)
        # Negative axes index from the end of the output rank.
        axis = (-2, -1)
        out_shape = (2, 3, 4, 1, 1)
        if opset < 13:
            # Up to opset 12 the axes are carried as a node attribute.
            y = helper.make_node("Unsqueeze", ["in"], ["out"], axes=list(axis))
            nodes = [y]
        else:
            # From opset 13 on, axes are a tensor input, supplied here by a Constant node.
            axes = np.array(list(axis)).astype(np.int64)
            axes = helper.make_node(
                "Constant",
                inputs=[],
                outputs=["axes"],
                value=onnx.helper.make_tensor(
                    name="const_axes",
                    data_type=onnx.TensorProto.INT64,
                    dims=axes.shape,
                    vals=axes.flatten().astype(int),
                ),
            )
            y = helper.make_node("Unsqueeze", ["in", "axes"], ["out"])
            nodes = [axes, y]
        graph = helper.make_graph(
            nodes,
            "squeeze_test",
            inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT, list(in_shape))],
            outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(out_shape))],
        )
        model = helper.make_model(graph, producer_name="squeeze_test")
        verify_with_ort(model, [in_shape], target=target, dev=dev, opset=opset)

    verify_unsqueeze_with_neg_axes()
    verify_unsqueeze_with_neg_axes(opset=13)
@tvm.testing.parametrize_targets
def test_gather(target, dev):
    """Gather over several ranks, dtypes, nested index lists and a negative axis."""

    def verify_gather(in_shape, indices, axis, dtype):
        x = np.random.uniform(size=in_shape).astype(dtype)
        indices = np.array(indices, dtype="int64")
        # np.take with an explicit axis matches ONNX Gather semantics.
        out_np = np.take(x, indices, axis=axis)
        y = helper.make_node("Gather", ["in", "indices"], ["out"], axis=axis)
        graph = helper.make_graph(
            [y],
            "gather_test",
            inputs=[
                helper.make_tensor_value_info(
                    "in", mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(dtype)], list(in_shape)
                ),
                helper.make_tensor_value_info("indices", TensorProto.INT64, list(indices.shape)),
            ],
            outputs=[
                helper.make_tensor_value_info(
                    "out", mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(dtype)], list(out_np.shape)
                )
            ],
        )
        model = helper.make_model(graph, producer_name="gather_test")
        verify_with_ort_with_inputs(model, [x, indices], target=target, dev=dev, dtype=dtype)

    verify_gather((4,), [1], 0, "int32")
    verify_gather((1, 4), [0], 0, "int32")
    verify_gather((4,), [[[1, 0], [0, 1]]], 0, "float32")
    verify_gather((2, 2), [[[1, 0], [0, 1]]], 1, "int32")
    verify_gather((3, 3, 3), [[[1, 0]]], -1, "int32")
    verify_gather((4, 3, 5, 6), [[2, 1, 0, 0]], 0, "float32")
@tvm.testing.parametrize_targets
def test_dynamic_gather(target, dev):
    """Gather with fully dynamic input dims ("?"), run through the Relay VM."""
    dtype = "float32"
    in_shape = [2, 2]
    indices = 1
    axis = 1
    x = np.random.uniform(size=in_shape).astype(dtype)
    # NOTE: `indices` is deliberately rebound below: int -> np scalar (for the
    # numpy reference) -> Constant node (for the graph).
    indices = np.array(indices, dtype="int64")
    out_np = np.take(x, indices, axis=axis)
    indices = helper.make_node(
        "Constant",
        inputs=[],
        outputs=["indices"],
        value=onnx.helper.make_tensor(
            name="const_indices",
            data_type=onnx.TensorProto.INT64,
            dims=[],
            vals=[1],
        ),
    )
    y = helper.make_node("Gather", ["in", "indices"], ["out"], axis=axis)
    graph = helper.make_graph(
        [indices, y],
        "gather_test",
        inputs=[
            helper.make_tensor_value_info(
                # "?" marks each dim as symbolic/unknown so the frontend must
                # handle dynamic shapes.
                "in", mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(dtype)], ["?", "?"]
            ),
        ],
        outputs=[
            helper.make_tensor_value_info(
                "out", mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(dtype)], ["?"] * len(out_np.shape)
            )
        ],
    )
    model = helper.make_model(graph, producer_name="dynamic_gather_test")
    # Dynamic shapes require the VM executor; compare against the numpy reference.
    mod, params = relay.frontend.from_onnx(model)
    result = relay.create_executor("vm", mod=mod, device=dev, target=target).evaluate()(x, **params)
    tvm.testing.assert_allclose(out_np, result.numpy(), rtol=1e-5, atol=1e-5)
@tvm.testing.parametrize_targets
def test_gatherelements(target, dev):
    """GatherElements over several ranks, axes and index patterns."""

    def verify_gatherelements(in_shape, indices, axis):
        x = np.random.uniform(size=in_shape).astype("float32")
        indices = np.array(indices, dtype="int32")
        y = helper.make_node("GatherElements", ["data", "indices"], ["output"], axis=axis)
        graph = helper.make_graph(
            [y],
            "gather_elements_test",
            inputs=[
                helper.make_tensor_value_info("data", TensorProto.FLOAT, list(in_shape)),
                helper.make_tensor_value_info("indices", TensorProto.INT32, list(indices.shape)),
            ],
            # Output shape equals the indices shape; every case below uses
            # indices with the same shape as the data, so in_shape is correct here.
            outputs=[helper.make_tensor_value_info("output", TensorProto.FLOAT, list(in_shape))],
        )
        model = helper.make_model(graph, producer_name="gather_elements_test")
        verify_with_ort_with_inputs(model, [x, indices], target=target, dev=dev)

    verify_gatherelements((4,), [3, 0, 2, 1], 0)
    verify_gatherelements((2, 2), [[1, 0], [0, 1]], 0)
    verify_gatherelements((2, 2), [[0, 0], [1, 0]], 1)
    verify_gatherelements((2, 2), [[1, 0], [0, 1]], 1)
    indices = [
        [[1, 0, 0], [1, 0, 1], [0, 1, 1]],
        [[1, 1, 1], [1, 2, 1], [1, 0, 1]],
        [[1, 2, 1], [1, 2, 1], [1, 2, 1]],
    ]
    verify_gatherelements((3, 3, 3), indices, 2)
@tvm.testing.parametrize_targets
def test_scatter(target, dev):
    """Deprecated Scatter op (opset 9-10) over several ranks, axes and indices."""

    def verify_scatter(in_shape, indices, axis):
        x = np.random.uniform(size=in_shape).astype("float32")
        indices = np.array(indices, dtype="int32")
        # Updates must have the same shape as indices.
        updates = np.random.uniform(size=indices.shape).astype("float32")
        y = helper.make_node("Scatter", ["data", "indices", "updates"], ["output"], axis=axis)
        graph = helper.make_graph(
            [y],
            "scatter_test",
            inputs=[
                helper.make_tensor_value_info("data", TensorProto.FLOAT, list(in_shape)),
                helper.make_tensor_value_info("indices", TensorProto.INT32, list(indices.shape)),
                helper.make_tensor_value_info("updates", TensorProto.FLOAT, list(indices.shape)),
            ],
            outputs=[helper.make_tensor_value_info("output", TensorProto.FLOAT, list(in_shape))],
        )
        model = helper.make_model(graph, producer_name="scatter_test")
        # Scatter operator has been supported from version 9 and
        # deprecated since version 11 of the default ONNX operator set
        verify_with_ort_with_inputs(model, [x, indices, updates], target=target, dev=dev, opset=9)

    verify_scatter((4,), [1], 0)
    verify_scatter((1, 4), [[0]], 0)
    verify_scatter((4,), [2, 3], 0)
    verify_scatter((2, 2), [[1, 0], [0, 1]], 1)
    verify_scatter((3, 3, 3), [[[-1, -3]]], -1)
    verify_scatter((4, 3, 5, 6), [[[[2, 1, 0, 0]]]], 0)
@tvm.testing.parametrize_targets
def test_scatter_elements(target, dev):
    """ScatterElements with update/add/mul reductions over several ranks and axes."""

    def verify_scatter_elements(in_shape, indices, axis=0, reduction="update"):
        x = np.random.uniform(size=in_shape).astype("float32")
        indices = np.array(indices, dtype="int32")
        # Updates must match the indices shape.
        updates = np.random.uniform(size=indices.shape).astype("float32")
        scatter_elements_node = helper.make_node(
            "ScatterElements",
            ["data", "indices", "updates"],
            ["output"],
            axis=axis,
            reduction=reduction,
        )
        graph = helper.make_graph(
            [scatter_elements_node],
            "scatter_elements_test",
            inputs=[
                helper.make_tensor_value_info("data", TensorProto.FLOAT, list(in_shape)),
                helper.make_tensor_value_info("indices", TensorProto.INT32, list(indices.shape)),
                helper.make_tensor_value_info("updates", TensorProto.FLOAT, list(indices.shape)),
            ],
            outputs=[helper.make_tensor_value_info("output", TensorProto.FLOAT, list(in_shape))],
        )
        model = helper.make_model(graph, producer_name="scatter_elements_test")
        verify_with_ort_with_inputs(model, [x, indices, updates], target=target, dev=dev)

    # Usual scatter for 1d input
    verify_scatter_elements((4,), [2, 3])
    # Usual scatter with specified positive axis
    verify_scatter_elements((2, 2), [[1, 0], [0, 1]], 1)
    # Usual scatter for 3d input with spicified negative indices and axis
    verify_scatter_elements((3, 3, 3), [[[-1, -3]]], -1)
    # Usual scatter for 4d input
    verify_scatter_elements((4, 3, 5, 6), [[[[2, 1, 0, 0]]]])
    # Scatter elements with addition reduction of duplicates
    verify_scatter_elements(
        (3, 3, 3),
        [[[0, 2, 1], [1, 1, 1], [2, 1, 0]], [[0, 2, 1], [1, 1, 1], [2, 1, 0]]],
        0,
        "add",
    )
    # Scatter elements with reduction and specified axis
    verify_scatter_elements((3, 3, 3), [[[2, 2, 2], [1, 1, 1], [0, 0, 0]]], 2, "add")
    # Scatter elements with multiplication reduction of duplicates
    verify_scatter_elements(
        (3, 3, 3),
        [[[0, 2, 1], [1, 1, 1], [2, 1, 0]], [[0, 2, 1], [1, 1, 1], [2, 1, 0]]],
        0,
        "mul",
    )
    # TODO(vvchernov): min and max options are supported from 18 version, but CI supports 17 only
    # # Scatter elements with min reduction of duplicates
    # verify_scatter_elements(
    #     (3, 3, 3),
    #     [[[0, 2, 1], [1, 1, 1], [2, 1, 0]], [[0, 2, 1], [1, 1, 1], [2, 1, 0]]],
    #     0,
    #     "min",
    # )
    # # Scatter elements with max reduction of duplicates
    # verify_scatter_elements(
    #     (3, 3, 3),
    #     [[[0, 2, 1], [1, 1, 1], [2, 1, 0]], [[0, 2, 1], [1, 1, 1], [2, 1, 0]]],
    #     0,
    #     "max",
    # )
@tvm.testing.parametrize_targets
def test_slice(target, dev):
    """Slice in both its opset-1 (attributes) and opset-10 (tensor inputs) forms.

    The opset-10 helper optionally routes starts/ends/axes/steps through a pair
    of no-op Reshape nodes (``add_noop_to_input_attrs``) so the frontend must
    handle those inputs arriving as graph computations rather than initializers.
    """

    def _test_slice_iteration_v1(indata, outdata, starts, ends, axes=None):
        # Opset-1 Slice carries starts/ends/axes as node attributes.
        if axes:
            y = helper.make_node("Slice", ["in"], ["out"], axes=axes, starts=starts, ends=ends)
        else:
            y = helper.make_node("Slice", ["in"], ["out"], starts=starts, ends=ends)
        graph = helper.make_graph(
            [y],
            "slice_test",
            inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT, list(indata.shape))],
            outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(outdata.shape))],
        )
        model = helper.make_model(graph, producer_name="slice_test")
        verify_with_ort_with_inputs(
            model, [indata], [outdata.shape], opset=1, target=target, dev=dev
        )

    def _test_slice_iteration_v10(indata, outdata, **attrs):
        # Opset-10 Slice takes starts/ends (and optional axes/steps) as inputs,
        # provided here as initializers.
        starts = attrs["starts"]
        ends = attrs["ends"]
        axes = None if "axes" not in attrs else attrs["axes"]
        steps = None if "steps" not in attrs else attrs["steps"]
        starts = np.asarray(starts)
        ends = np.asarray(ends)
        inputs = [
            helper.make_tensor_value_info("data", TensorProto.FLOAT, list(indata.shape)),
            helper.make_tensor_value_info("starts", TensorProto.INT64, list(starts.shape)),
            helper.make_tensor_value_info("ends", TensorProto.INT64, list(ends.shape)),
        ]
        initializer = [
            helper.make_tensor("starts", TensorProto.INT64, list(starts.shape), starts),
            helper.make_tensor("ends", TensorProto.INT64, list(ends.shape), ends),
        ]
        nodes = []
        if "add_noop_to_input_attrs" in attrs:

            def add_noop_to_input_attr(attr_name, attr):
                # Route `attr_name` through Reshape(+1 leading dim) then
                # Reshape(back), a no-op chain that forces the input to be a
                # computed value instead of a plain initializer.
                output_name = attr_name + "_output"
                ref_shape = list(np.array(attr).shape)
                ref_shape.insert(0, 1)
                ref_shape = tuple(ref_shape)
                ref_array = np.array(ref_shape)
                ref_node = onnx.helper.make_node(
                    "Constant",
                    inputs=[],
                    outputs=["ref_in_" + attr_name],
                    value=onnx.helper.make_tensor(
                        name="const_tensor__1_" + attr_name,
                        data_type=onnx.TensorProto.INT64,
                        dims=ref_array.shape,
                        vals=ref_array.flatten().astype(int),
                    ),
                )
                in_shape = np.array(attr).shape
                in_array = np.array(in_shape)
                ref_node2 = onnx.helper.make_node(
                    "Constant",
                    inputs=[],
                    outputs=["input_shape_" + attr_name],
                    value=onnx.helper.make_tensor(
                        name="const_tensor__2_" + attr_name,
                        data_type=onnx.TensorProto.INT64,
                        dims=in_array.shape,
                        vals=in_array.flatten().astype(int),
                    ),
                )
                reshape1_node = helper.make_node(
                    "Reshape", [attr_name, "ref_in_" + attr_name], ["reshape_" + attr_name]
                )
                reshape2_node = helper.make_node(
                    "Reshape", ["reshape_" + attr_name, "input_shape_" + attr_name], [output_name]
                )
                return [ref_node, ref_node2, reshape1_node, reshape2_node]

        slice_inputs = []
        for attr_name in ["starts", "ends", "axes", "steps"]:
            if attr_name not in attrs:
                continue
            if "add_noop_to_input_attrs" in attrs and attr_name in attrs["add_noop_to_input_attrs"]:
                nodes.extend(add_noop_to_input_attr(attr_name, attrs[attr_name]))
                slice_inputs.append(attr_name + "_output")
            else:
                slice_inputs.append(attr_name)
        if axes:
            axes = np.asarray(axes)
            inputs.append(
                helper.make_tensor_value_info("axes", TensorProto.INT64, list(axes.shape))
            )
            initializer.append(
                helper.make_tensor("axes", TensorProto.INT64, list(axes.shape), axes)
            )
        if steps:
            assert axes is not None and len(axes) == len(steps)
            steps = np.asarray(steps)
            # Use steps.shape here (the original used axes.shape, which only
            # worked because of the length assert above).
            inputs.append(
                helper.make_tensor_value_info("steps", TensorProto.INT64, list(steps.shape))
            )
            initializer.append(
                helper.make_tensor("steps", TensorProto.INT64, list(steps.shape), steps)
            )
        y = helper.make_node("Slice", ["data", *slice_inputs], ["out"])
        nodes.append(y)
        graph = helper.make_graph(
            nodes,
            "slice_test",
            inputs=inputs,
            outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(outdata.shape))],
            initializer=initializer,
        )
        model = helper.make_model(graph, producer_name="slice_test")
        verify_with_ort_with_inputs(
            model, [indata], opset=10, freeze_params=True, use_vm=True, target=target, dev=dev
        )

    x = np.random.randn(20, 10, 5).astype(np.float32)
    _test_slice_iteration_v1(x, x[0:3, 0:10], starts=(0, 0), ends=(3, 10), axes=(0, 1))
    _test_slice_iteration_v1(x, x[0:3, 0:10], starts=(0, 0), ends=(10, 3), axes=(1, 0))
    _test_slice_iteration_v1(x, x[:, :, 3:4], starts=(0, 0, 3), ends=(20, 10, 4))
    _test_slice_iteration_v1(x, x[:, 1:1000], starts=(1,), ends=(1000,), axes=(1,))
    _test_slice_iteration_v1(x, x[:, 0:-1], starts=(0,), ends=(-1,), axes=(1,))
    _test_slice_iteration_v10(x, x[0:3, 0:10], starts=(0, 0), ends=(3, 10), axes=(0, 1))
    _test_slice_iteration_v10(x, x[0:3, 0:10], starts=(0, 0), ends=(10, 3), axes=(1, 0))
    _test_slice_iteration_v10(x, x[:, :, 3:4], starts=(0, 0, 3), ends=(20, 10, 4))
    _test_slice_iteration_v10(x, x[:, 1:1000], starts=(1,), ends=(1000,), axes=(1,))
    _test_slice_iteration_v10(x, x[:, 0:-1], starts=(0,), ends=(-1,), axes=(1,))
    _test_slice_iteration_v10(x, x[:, 0:-1], starts=(0,), ends=(-1,), axes=(-1,))
    _test_slice_iteration_v10(
        x,
        x[0:3, 0:10],
        starts=(0, 0),
        ends=(3, 10),
        axes=(0, 1),
        add_noop_to_input_attrs=["starts"],
    )
    _test_slice_iteration_v10(
        x, x[:, :, 3:4], starts=(0, 0, 3), ends=(20, 10, 4), add_noop_to_input_attrs=["ends"]
    )
    _test_slice_iteration_v10(
        x, x[:, 1:1000], starts=(1,), ends=(1000,), axes=(1,), add_noop_to_input_attrs=["axes"]
    )
    _test_slice_iteration_v10(
        x,
        x[:, 0:-1],
        starts=(0,),
        ends=(-1,),
        axes=(1,),
        add_noop_to_input_attrs=["starts", "ends"],
    )
    _test_slice_iteration_v10(
        x,
        x[0:3, 0:10],
        starts=(0, 0),
        ends=(3, 10),
        axes=(0, 1),
        add_noop_to_input_attrs=["ends", "axes"],
    )
    _test_slice_iteration_v10(
        x,
        x[:, :, 3:4],
        starts=(0, 0, 3),
        ends=(20, 10, 4),
        add_noop_to_input_attrs=["starts", "axes"],
    )
    _test_slice_iteration_v10(
        x,
        x[:, 1:1000],
        starts=(1,),
        ends=(1000,),
        axes=(1,),
        add_noop_to_input_attrs=["starts", "ends", "axes"],
    )
    # INT64_MAX as "end" means "to the end of the axis".
    x = np.random.randn(1, 1, 1, 128).astype(np.float32)
    _test_slice_iteration_v10(
        x, x, starts=(0, 0), ends=(9223372036854775807, 9223372036854775807), axes=(0, 3)
    )
    x = np.random.randn(4, 4).astype(np.float32)
    _test_slice_iteration_v10(
        x, x[:, 1::2], starts=(1,), ends=(9223372036854775807,), axes=(1,), steps=(2,)
    )
    _test_slice_iteration_v10(
        x,
        x[0::1, 1::2],
        starts=(0, 1),
        ends=(4, 4),
        axes=(0, 1),
        steps=(1, 2),
    )
def _test_onnx_op_elementwise(
    target, dev, inshape, outfunc, npargs, dtype, opname, kwargs, opset=None, verify=True
):
    """Build a one-node elementwise model and check it against a numpy reference.

    When ``verify`` is True the model is compared through onnxruntime;
    otherwise only TVM compilation and execution are exercised.
    """
    data = np.random.uniform(-1, 1, size=inshape).astype(dtype)
    expected = outfunc(data, **npargs)
    node = helper.make_node(opname, ["in"], ["out"], **kwargs)
    onnx_dtype = mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(dtype)]
    graph = helper.make_graph(
        [node],
        opname + "_test",
        inputs=[helper.make_tensor_value_info("in", onnx_dtype, list(data.shape))],
        outputs=[helper.make_tensor_value_info("out", onnx_dtype, list(expected.shape))],
    )
    model = helper.make_model(graph, producer_name=opname + "_test")
    if not verify:
        # Run through TVM only (e.g. dtypes onnxruntime cannot check yet).
        get_tvm_output(
            model,
            [data],
            target,
            dev,
            [expected.shape],
            dtype,
            opset=opset,
            opt_level=3,
        )
        return
    verify_with_ort_with_inputs(
        model, [data], [expected.shape], opset=opset, dtype=dtype, target=target, dev=dev
    )
@tvm.testing.parametrize_targets
def test_floor(target, dev):
    """Elementwise Floor, checked against np.floor."""
    _test_onnx_op_elementwise(target, dev, (2, 4, 5, 6), np.floor, {}, "float32", "Floor", {})
@tvm.testing.parametrize_targets
def test_ceil(target, dev):
    """Elementwise Ceil, checked against np.ceil."""
    _test_onnx_op_elementwise(target, dev, (2, 4, 5, 6), np.ceil, {}, "float32", "Ceil", {})
@tvm.testing.parametrize_targets
def test_clip(target, dev):
    """Clip (opset 6) with both bounds, only an upper bound, and only a lower bound."""
    # (numpy clip kwargs, ONNX Clip attributes) per case; a missing ONNX
    # attribute corresponds to an unbounded side (+/- inf) on the numpy side.
    cases = [
        ({"a_min": -1.0, "a_max": 1.0}, {"min": -1.0, "max": 1.0}),
        ({"a_min": -np.inf, "a_max": 1.0}, {"max": 1.0}),
        ({"a_min": -1.0, "a_max": np.inf}, {"min": -1.0}),
    ]
    for np_args, onnx_attrs in cases:
        _test_onnx_op_elementwise(
            target,
            dev,
            (2, 4, 5, 6),
            np.clip,
            np_args,
            "float32",
            "Clip",
            onnx_attrs,
            opset=6,
        )
@tvm.testing.parametrize_targets
def test_clip_min_max_as_inputs(target, dev):
    """Clip (opset >= 11) where min/max arrive as graph inputs, not attributes."""
    shape = (2, 4, 5, 6)
    # Feed the bounds through Constant nodes named after the Clip inputs.
    min_node = make_constant_node("min", onnx.TensorProto.FLOAT, (), [0.0])
    max_node = make_constant_node("max", onnx.TensorProto.FLOAT, (), [6.0])
    clip_node = helper.make_node("Clip", inputs=["in", "min", "max"], outputs=["out"])
    graph = helper.make_graph(
        [min_node, max_node, clip_node],
        "clip_test",
        inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT, list(shape))],
        outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(shape))],
    )
    model = helper.make_model(graph, producer_name="clip_test")
    verify_with_ort(model, [shape], out_shape=[shape], target=target, dev=dev)
@tvm.testing.parametrize_targets
def test_round(target, dev):
    """Elementwise Round for float32 (ORT-verified) and float64 (TVM-only)."""
    _test_onnx_op_elementwise(target, dev, (2, 4, 5, 6), np.round, {}, "float32", "Round", {})
    # TODO: enable verification once ORT supports float64
    _test_onnx_op_elementwise(
        target, dev, (2, 4, 5, 6), np.round, {}, "float64", "Round", {}, verify=False
    )
def _test_finite_ops(target, dev, inshape, outfunc, npargs, dtype, opname, kwargs):
    """Check a finiteness predicate op (IsInf/IsNaN) on data seeded with nan/inf.

    Builds a single-node model with a boolean output, computes the reference
    with ``outfunc`` and compares through onnxruntime.
    """
    indata = np.random.choice(a=[np.nan, np.inf, -np.inf, 0.5, 1.0, 0], size=inshape).astype(dtype)
    outdata = outfunc(indata, **npargs)
    y = helper.make_node(opname, ["in"], ["out"], **kwargs)
    # Declare the graph input with the caller's dtype instead of hard-coding
    # float32, so the helper also works for other floating dtypes.
    onnx_dtype = mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(dtype)]
    graph = helper.make_graph(
        [y],
        opname + "_test",
        inputs=[helper.make_tensor_value_info("in", onnx_dtype, list(indata.shape))],
        outputs=[helper.make_tensor_value_info("out", TensorProto.BOOL, list(outdata.shape))],
    )
    model = helper.make_model(graph, producer_name=opname + "_test")
    verify_with_ort_with_inputs(
        model, [indata], [outdata.shape], dtype=dtype, target=target, dev=dev
    )
@tvm.testing.parametrize_targets
def test_isinf(target, dev):
    """IsInf on data containing inf/-inf/nan values."""
    _test_finite_ops(target, dev, (2, 4, 5, 6), np.isinf, {}, "float32", "IsInf", {})
@tvm.testing.parametrize_targets
def test_isnan(target, dev):
    """IsNaN on data containing inf/-inf/nan values."""
    _test_finite_ops(target, dev, (2, 4, 5, 6), np.isnan, {}, "float32", "IsNaN", {})
@tvm.testing.parametrize_targets
def test_gather_nd(target, dev):
    """GatherND with and without batch_dims (batch_dims requires opset >= 12)."""

    def verify_gather_nd(in_shape, indices, out_shape, dtype="float32", batch_dims=0, opset=11):
        x = np.random.uniform(size=in_shape).astype(dtype)
        indices = np.array(indices, dtype="int64")
        y = helper.make_node("GatherND", ["in", "indices"], ["out"])
        if opset >= 12:
            # The batch_dims attribute only exists from opset 12 onwards.
            batch_dims_attr = helper.make_attribute("batch_dims", batch_dims)
            y.attribute.append(batch_dims_attr)
        graph = helper.make_graph(
            [y],
            "gather_test",
            inputs=[
                helper.make_tensor_value_info(
                    "in", mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(dtype)], list(in_shape)
                ),
                helper.make_tensor_value_info("indices", TensorProto.INT64, list(indices.shape)),
            ],
            outputs=[
                helper.make_tensor_value_info(
                    "out", mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(dtype)], list(out_shape)
                )
            ],
        )
        model = helper.make_model(graph, producer_name="gather_test")
        verify_with_ort_with_inputs(
            model, [x, indices], [out_shape], opset=opset, target=target, dev=dev
        )

    verify_gather_nd([2, 2], [[0, 0], [1, 1]], [2], "int32")
    verify_gather_nd([2, 2], [[1], [0]], [2, 2])
    verify_gather_nd([2, 2, 2], [[0, 1], [1, 0]], [2, 2])
    verify_gather_nd([2, 2, 2], [[[0, 1]], [[1, 0]]], [2, 1, 2])
    # batch_dims cases depend on onnx >= 1.6.0 semantics.
    if is_version_greater_than("1.6.0"):
        verify_gather_nd([2, 2, 2], [[1], [0]], [2, 2], batch_dims=1, opset=12)
        verify_gather_nd(
            (3, 2, 2, 3, 4),
            np.random.randint(low=0, high=2, size=(3, 2, 3), dtype="int64"),
            (3, 2),
            batch_dims=2,
            opset=12,
        )
@tvm.testing.parametrize_targets
def test_onehot(target, dev):
    """OneHot on int32 indices, compared against a np.eye reference."""
    indices_shape = [10]
    indices_array = np.random.randint(low=0, high=9, size=indices_shape, dtype="int32")
    depth = 10
    # values = [off_value, on_value]
    values = np.asarray([0, 1]).astype("int32")
    # np.eye(depth) rows are exactly the one-hot encodings of 0..depth-1.
    out_np = np.eye(depth)[indices_array.reshape(-1)]
    onehot_node = helper.make_node("OneHot", ["indices", "depth", "values"], ["out"])
    graph = helper.make_graph(
        [onehot_node],
        "onehot_test",
        inputs=[
            helper.make_tensor_value_info("indices", TensorProto.INT32, indices_shape),
            helper.make_tensor_value_info("depth", TensorProto.INT32, [1]),
            helper.make_tensor_value_info("values", TensorProto.INT32, values.shape),
        ],
        outputs=[helper.make_tensor_value_info("out", TensorProto.INT32, out_np.shape)],
    )
    model = helper.make_model(graph, producer_name="onehot_test")
    # TODO(jwfromm): Replace test against np with test against onnxrt once we update versions.
    tvm_out = get_tvm_output_with_vm(
        model, [indices_array, np.array([depth]).astype("int32"), values], target, dev
    )
    tvm.testing.assert_allclose(out_np, tvm_out, rtol=1e-5, atol=1e-5)
@tvm.testing.parametrize_targets
def test_gemm(target, dev):
    """Gemm with and without the optional bias input ``c``, incl. float16."""

    def verify_gemm(a_shape, b_shape, c_shape=None, freeze_params=False, dtype="float32"):
        out_shape = [a_shape[0], b_shape[1]]
        a_array = np.random.uniform(size=a_shape).astype(dtype)
        b_array = np.random.uniform(size=b_shape).astype(dtype)
        input_names = ["a", "b"]
        ONNX_DTYPE = mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(dtype)]
        input_nodes = [
            helper.make_tensor_value_info("a", ONNX_DTYPE, list(a_shape)),
            helper.make_tensor_value_info("b", ONNX_DTYPE, list(b_shape)),
        ]
        input_values = [a_array, b_array]
        if c_shape is not None:
            # Gemm's third input (bias) is optional; only add it when requested.
            c_array = np.random.uniform(size=c_shape).astype(dtype)
            input_names.append("c")
            input_nodes.append(helper.make_tensor_value_info("c", ONNX_DTYPE, list(c_shape)))
            input_values.append(c_array)
        gemm_node = helper.make_node("Gemm", input_names, ["out"])
        graph = helper.make_graph(
            [gemm_node],
            "gemm_test",
            inputs=input_nodes,
            outputs=[helper.make_tensor_value_info("out", ONNX_DTYPE, list(out_shape))],
        )
        model = helper.make_model(graph, producer_name="gemm_test")
        # Loosen tolerances for half precision.
        atol = 1e-5
        rtol = 1e-5
        if dtype == "float16":
            atol = 1e-3
            rtol = 1e-3
        verify_with_ort_with_inputs(
            model,
            input_values,
            freeze_params=freeze_params,
            dtype=dtype,
            atol=atol,
            rtol=rtol,
            target=target,
            dev=dev,
        )

    verify_gemm(a_shape=(4, 3), b_shape=(3, 4))
    verify_gemm(a_shape=(4, 3), b_shape=(3, 4), c_shape=(4,))
    verify_gemm(a_shape=(4, 3), b_shape=(3, 4), c_shape=(4,), freeze_params=True)
    verify_gemm(a_shape=(4, 3), b_shape=(3, 4), c_shape=(4,), freeze_params=True, dtype="float16")
@tvm.testing.parametrize_targets
def test_matmul(target, dev):
    """MatMul on 2-D/1-D operand combinations."""

    def _check(a_shape, b_shape):
        # Derive the expected output shape from numpy's matmul broadcasting rules.
        expected_shape = np.matmul(np.zeros(a_shape), np.zeros(b_shape)).shape
        lhs = np.random.uniform(size=a_shape).astype("float32")
        rhs = np.random.uniform(size=b_shape).astype("float32")
        node = helper.make_node("MatMul", ["a", "b"], ["out"])
        graph = helper.make_graph(
            [node],
            "matmul_test",
            inputs=[
                helper.make_tensor_value_info("a", TensorProto.FLOAT, list(a_shape)),
                helper.make_tensor_value_info("b", TensorProto.FLOAT, list(b_shape)),
            ],
            outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(expected_shape))],
        )
        model = helper.make_model(graph, producer_name="matmul_test")
        verify_with_ort_with_inputs(model, [lhs, rhs], target=target, dev=dev)

    for shapes in [((4, 3), (3, 4)), ((3,), (3, 1)), ((1, 3), (3,)), ((3,), (3,))]:
        _check(*shapes)
@tvm.testing.parametrize_targets
def test_batch_matmul(target, dev):
    """Batched MatMul, including implicit broadcasting and the NT/NN convert flag."""

    def verify_batch_matmul(a_shape, b_shape, out_shape, convert_config=None):
        a_array = np.random.uniform(size=a_shape).astype("float32")
        b_array = np.random.uniform(size=b_shape).astype("float32")
        mul_node = helper.make_node("MatMul", ["a", "b"], ["out"])
        graph = helper.make_graph(
            [mul_node],
            "matmul_test",
            inputs=[
                helper.make_tensor_value_info("a", TensorProto.FLOAT, list(a_shape)),
                helper.make_tensor_value_info("b", TensorProto.FLOAT, list(b_shape)),
            ],
            outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, out_shape)],
        )
        model = helper.make_model(graph, producer_name="matmul_test")
        verify_with_ort_with_inputs(
            model,
            [a_array, b_array],
            use_vm=True,
            target=target,
            dev=dev,
            convert_config=convert_config,
        )

    verify_batch_matmul((2, 3, 4, 3), (2, 3, 3, 4), (2, 3, 4, 4))
    verify_batch_matmul((2, 4, 3), (3, 4), (2, 4, 4))
    verify_batch_matmul((2, 3, 4, 3), (3, 4), (2, 3, 4, 4))
    # Test implicit broadcasting.
    verify_batch_matmul((4, 3), (2, 3, 4), (2, 4, 4))
    verify_batch_matmul((2, 4, 3), (1, 3, 4), (2, 4, 4))
    verify_batch_matmul((1, 4, 3), (2, 3, 4), (2, 4, 4))
    verify_batch_matmul((4, 32, 16), (16, 32), (4, 32, 32))
    verify_batch_matmul((4, 32, 16, 32), (32, 16), (4, 32, 16, 16))
    verify_batch_matmul((4, 32, 16, 32), (1, 32, 32, 16), (4, 32, 16, 16))
    verify_batch_matmul((4, 1, 16, 32), (1, 32, 32, 16), (4, 32, 16, 16))
    # Test transb=False
    verify_batch_matmul(
        (2, 3, 4, 3),
        (2, 3, 3, 4),
        (2, 3, 4, 4),
        convert_config={"use_nt_batch_matmul": False},
    )
@tvm.testing.parametrize_targets
def test_use_nt_batch_matmul(target, dev):
    """Check the use_nt_batch_matmul convert flag changes the lowered Relay IR."""
    a_shape = (2, 3, 4)
    b_shape = (2, 4, 3)
    out_shape = [2, 3, 3]
    a_array = np.random.uniform(size=a_shape).astype("float32")
    b_array = np.random.uniform(size=b_shape).astype("float32")
    for use_nt_batch_matmul in [True, False]:
        mul_node = helper.make_node("MatMul", ["a", "b"], ["out"])
        graph = helper.make_graph(
            [mul_node],
            "matmul_test",
            inputs=[
                helper.make_tensor_value_info("a", TensorProto.FLOAT, list(a_shape)),
                helper.make_tensor_value_info("b", TensorProto.FLOAT, list(b_shape)),
            ],
            outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(out_shape))],
        )
        model = helper.make_model(graph, producer_name="matmul_test")
        _, shape_dict = get_input_data_shape_dict(model, [a_array, b_array])
        mod, _ = relay.frontend.from_onnx(
            model, shape_dict, convert_config={"use_nt_batch_matmul": use_nt_batch_matmul}
        )
        # Inspect the printed IR rather than executing: the flag only changes
        # which Relay ops the frontend emits.
        has_transpose_op = "transpose" in str(mod)
        # use_nt_batch_matmul implies, TVM converts qualified onnx `matmul`
        # to `transpose(weight) + nn.batch_matmul_NT`, otherwise to `nn.batch_matmul`
        assert has_transpose_op == use_nt_batch_matmul
@tvm.testing.parametrize_targets
def test_matmulinteger16(target, dev):
    """MatMulInteger16 (com.microsoft contrib op): int16 inputs, int32 output."""

    def verify_matmulinteger16(a_shape, b_shape, out_shape):
        a_dtype = "int16"
        b_dtype = "int16"
        # Exercise the full int16 value range.
        low = np.iinfo(np.int16).min
        high = np.iinfo(np.int16).max
        a_proto = TensorProto.INT16
        b_proto = TensorProto.INT16
        out_proto = TensorProto.INT32
        a_array = np.random.randint(low, high, size=a_shape).astype(a_dtype)
        b_array = np.random.randint(low, high, size=b_shape).astype(b_dtype)
        # This op lives in the com.microsoft domain, not the default ONNX opset.
        mul_node = helper.make_node("MatMulInteger16", ["a", "b"], ["out"], domain="com.microsoft")
        graph = helper.make_graph(
            [mul_node],
            "matmuli16_test",
            inputs=[
                helper.make_tensor_value_info("a", a_proto, list(a_shape)),
                helper.make_tensor_value_info("b", b_proto, list(b_shape)),
            ],
            outputs=[helper.make_tensor_value_info("out", out_proto, list(out_shape))],
        )
        model = helper.make_model(graph, producer_name="matmuli16_test")
        verify_with_ort_with_inputs(model, [a_array, b_array], target=target, dev=dev)

    # 2D computation to verify matmul op
    verify_matmulinteger16((4, 3), (3, 4), (4, 4))
    verify_matmulinteger16((5, 7), (7, 8), (5, 8))
    # Verify 3D matmul using batch_matmul op
    verify_matmulinteger16((2, 4, 3), (1, 3, 4), (2, 4, 4))
    verify_matmulinteger16((1, 4, 3), (2, 3, 4), (2, 4, 4))
    # Test implicit broadcasting
    verify_matmulinteger16((2, 3, 5, 3), (2, 3, 3, 5), (2, 3, 5, 5))
    verify_matmulinteger16((2, 7, 3), (3, 7), (2, 7, 7))
    verify_matmulinteger16((2, 3, 4, 3), (3, 4), (2, 3, 4, 4))
def verify_simple_dynamic_model(a_shape, b_shape, target, dev):
    """Compile MatMul+Relu once with fully dynamic dims, then run it at several sizes."""

    def verify_model(model, a_shape, b_shape):
        # `model` here is the compiled VM executor, reused across shapes.
        a_array = np.random.uniform(size=a_shape).astype("float32")
        b_array = np.random.uniform(size=b_shape).astype("float32")
        # matmul
        out_np = np.matmul(a_array, b_array)
        # relu
        out_np[out_np < 0] = 0
        tvm_out = model(a_array, b_array).numpy()
        tvm.testing.assert_allclose(out_np, tvm_out, rtol=1e-5, atol=1e-5)

    mul_node = helper.make_node("MatMul", ["a", "b"], ["out"])
    relu_node = helper.make_node("Relu", ["out"], ["relu"])
    a_array = np.random.uniform(size=a_shape).astype("float32")
    b_array = np.random.uniform(size=b_shape).astype("float32")
    # matmul
    out_np = np.matmul(a_array, b_array)
    graph = helper.make_graph(
        [mul_node, relu_node],
        "matmul_test",
        inputs=[
            helper.make_tensor_value_info("a", TensorProto.FLOAT, list(a_shape)),
            helper.make_tensor_value_info("b", TensorProto.FLOAT, list(b_shape)),
        ],
        outputs=[helper.make_tensor_value_info("relu", TensorProto.FLOAT, list(out_np.shape))],
    )
    model = helper.make_model(graph, producer_name="matmul_test")
    # Import with every dim marked relay.Any() so one compiled module must
    # serve inputs of different concrete sizes.
    a_anys = [relay.Any()] * len(a_shape)
    b_anys = [relay.Any()] * len(b_shape)
    mod, _ = relay.frontend.from_onnx(model, {"a": a_anys, "b": b_anys})
    model = relay.create_executor("vm", mod=mod, device=dev, target=target).evaluate()
    verify_model(model, a_shape, b_shape)
    verify_model(model, [a * 2 for a in a_shape], [b * 2 for b in b_shape])
    verify_model(model, [a * 3 for a in a_shape], [b * 3 for b in b_shape])
# TODO(mbrookhart, electriclilies): Add CUDA as a target once batch matmul is fixed
@tvm.testing.parametrize_targets("llvm")
def test_batch_matmul_dynamic_model(target, dev):
    """Dynamic-shape batched MatMul through the VM executor (llvm only)."""
    shape_pairs = [
        ((2, 3, 4, 3), (2, 3, 3, 4)),
        ((2, 4, 3), (3, 4)),
        ((2, 3, 4, 3), (3, 4)),
    ]
    for a_shape, b_shape in shape_pairs:
        verify_simple_dynamic_model(a_shape, b_shape, target, dev)
@tvm.testing.parametrize_targets
def test_lrn(target, dev):
    """LRN with default and explicit alpha/beta/bias parameters."""

    def verify_lrn(shape, nsize, dtype, alpha=None, beta=None, bias=None):
        in_array = np.random.uniform(size=shape).astype(dtype)
        if alpha is None and beta is None and bias is None:
            # Record the ONNX defaults but omit them from the node so the
            # frontend's own default handling is exercised.
            alpha = 0.0001
            beta = 0.75
            bias = 1.0
            node = onnx.helper.make_node("LRN", inputs=["in"], outputs=["out"], size=nsize)
        else:
            node = onnx.helper.make_node(
                "LRN", inputs=["in"], outputs=["out"], alpha=alpha, beta=beta, bias=bias, size=nsize
            )
        graph = helper.make_graph(
            [node],
            "lrn_test",
            inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT, list(shape))],
            outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(shape))],
        )
        model = helper.make_model(graph, producer_name="lrn_test")
        verify_with_ort_with_inputs(model, [in_array], target=target, dev=dev)

    verify_lrn((5, 5, 5, 5), 3, "float32")
    verify_lrn((5, 5, 5, 5), 3, "float32", alpha=0.0002, beta=0.5, bias=2.0)
@tvm.testing.parametrize_targets
def test_instance_norm(target, dev):
    """InstanceNormalization over 3-D, 4-D and 5-D inputs."""

    # NOTE: the previous version declared an unused `axis=1` parameter; it has
    # been removed. gamma/beta are always per-channel, i.e. sized shape[1].
    def verify_instance_norm(shape):
        x = np.random.randn(*shape).astype(np.float32)
        gamma = np.random.randn(shape[1]).astype(np.float32)
        beta = np.random.randn(shape[1]).astype(np.float32)
        epsilon = 1e-5
        node = onnx.helper.make_node(
            "InstanceNormalization",
            inputs=["x", "gamma", "beta"],
            outputs=["y"],
            epsilon=epsilon,
        )
        graph = helper.make_graph(
            [node],
            "instance_norm_test",
            inputs=[
                helper.make_tensor_value_info("x", TensorProto.FLOAT, list(shape)),
                helper.make_tensor_value_info("gamma", TensorProto.FLOAT, (shape[1],)),
                helper.make_tensor_value_info("beta", TensorProto.FLOAT, (shape[1],)),
            ],
            outputs=[helper.make_tensor_value_info("y", TensorProto.FLOAT, list(shape))],
        )
        model = helper.make_model(graph, producer_name="instance_norm_test")
        verify_with_ort_with_inputs(
            model, [x, gamma, beta], out_shape=[shape], target=target, dev=dev
        )

    verify_instance_norm((2, 3, 4, 5))
    verify_instance_norm((32, 64, 80, 64))
    verify_instance_norm((8, 6, 5))
    verify_instance_norm((8, 7, 6, 5, 4))
@tvm.testing.parametrize_targets
def test_upsample_nearest(target, dev):
    """Upsample (nearest, opset 7) doubling both spatial dims of a 4-D input."""
    factor = 2
    input_shape = (1, 1, 3, 3)
    output_shape = (1, 1, 3 * factor, 3 * factor)
    node = helper.make_node(
        "Upsample", ["in"], ["out"], mode="nearest", scales=[1.0, 1.0, 2.0, 2.0]
    )
    data = np.random.uniform(size=input_shape).astype(np.float32)
    graph = helper.make_graph(
        [node],
        "upsample_nearest_test",
        inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT, list(input_shape))],
        outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(output_shape))],
    )
    model = helper.make_model(graph, producer_name="upsample_nearest_test")
    verify_with_ort_with_inputs(model, [data], [output_shape], opset=7, target=target, dev=dev)
@tvm.testing.parametrize_targets
def test_upsample3d_nearest(target, dev):
    """Upsample (nearest, opset 7) doubling all three spatial dims of a 5-D input."""
    scale = 2
    in_shape = (1, 1, 3, 3, 3)
    out_shape = (1, 1, 3 * scale, 3 * scale, 3 * scale)
    y = helper.make_node(
        "Upsample", ["in"], ["out"], mode="nearest", scales=[1.0, 1.0, 2.0, 2.0, 2.0]
    )
    in_array = np.random.uniform(size=in_shape).astype(np.float32)
    graph = helper.make_graph(
        [y],
        "upsample_nearest_test",
        inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT, list(in_shape))],
        outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(out_shape))],
    )
    model = helper.make_model(graph, producer_name="upsample_nearest_test")
    # Upsample is deprecated after opset 9
    verify_with_ort_with_inputs(model, [in_array], [out_shape], opset=7, target=target, dev=dev)
@tvm.testing.parametrize_targets
def test_upsample_bilinear(target, dev):
    """Bilinear 2-D Upsample (opset 7) with a fixed 2x spatial scale."""
    scale = 2
    in_shape = (1, 1, 3, 3)
    out_shape = (1, 1, 3 * scale, 3 * scale)
    data = np.random.uniform(size=in_shape).astype(np.float32)
    node = helper.make_node(
        "Upsample", ["in"], ["out"], mode="linear", scales=[1.0, 1.0, 2.0, 2.0]
    )
    graph = helper.make_graph(
        [node],
        "upsample_bilinear_test",
        inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT, list(in_shape))],
        outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(out_shape))],
    )
    model = helper.make_model(graph, producer_name="upsample_bilinear_test")
    verify_with_ort_with_inputs(model, [data], [out_shape], opset=7, target=target, dev=dev)
@tvm.testing.parametrize_targets
def test_upsample3d_trilinear(target, dev):
    """Trilinear 3-D Upsample with scales supplied via a Constant node."""
    scale = 2
    in_shape = (1, 1, 3, 3, 3)
    out_shape = (1, 1, 3 * scale, 3 * scale, 3 * scale)
    scales = [1.0, 1.0, 2.0, 2.0, 2.0]
    upsample_node = helper.make_node("Upsample", ["in", "scales"], ["out"], mode="linear")
    data = np.random.uniform(size=in_shape).astype(np.float32)
    # Reference result computed with topi's python resize implementation.
    expected = tvm.topi.testing.resize3d_python(
        data,
        (scale, scale, scale),
        "NCDHW",
        "linear",
        coordinate_transformation_mode="asymmetric",
    )
    scales_np = np.array(scales)
    scales_node = helper.make_node(
        "Constant",
        inputs=[],
        outputs=["scales"],
        value=onnx.helper.make_tensor(
            name="const_tensor",
            data_type=TensorProto.FLOAT,
            dims=scales_np.shape,
            vals=scales_np.flatten().astype(float),
        ),
    )
    graph = helper.make_graph(
        [scales_node, upsample_node],
        "upsample_trilinear_test",
        inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT, list(in_shape))],
        outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(out_shape))],
    )
    model = helper.make_model(graph, producer_name="upsample_trilinear_test")
    # TODO(jwfromm): Trilinear upsampling not supported in 1.0.0 onnxruntime.
    # Replace topi comparison with verify_with_ort once we update.
    tvm_out = get_tvm_output(model, data, target, dev, out_shape, "float32")
    tvm.testing.assert_allclose(expected, tvm_out, rtol=1e-5, atol=1e-5)
# TODO: Fix softmax with dynamic input on cuda and enable this test
@tvm.testing.known_failing_targets("cuda")
@tvm.testing.parametrize_targets
def test_softmax(target, dev):
    """Softmax over several ranks and axes, optionally behind a dynamic Reshape."""
    def verify_softmax(inshape, axis, opset=None, dynamic=False):
        opname = "Softmax"
        outshape = inshape
        nodes = []
        graph_inputs = [helper.make_tensor_value_info("in", TensorProto.FLOAT, list(inshape))]
        graph_outputs = [
            helper.make_tensor_value_info("out", TensorProto.FLOAT, list(outshape))
        ]
        feed = [np.random.uniform(size=inshape).astype(np.float32)]
        softmax_inputs = ["in"]
        if dynamic:
            # Route the input through a Reshape so Softmax sees a shape that is
            # only known at runtime.
            graph_inputs.append(
                helper.make_tensor_value_info("shape", TensorProto.INT64, [len(inshape)])
            )
            feed.append(np.asarray(inshape))
            nodes.append(helper.make_node("Reshape", ["in", "shape"], ["dynamic_in"]))
            softmax_inputs = ["dynamic_in"]
        softmax_node = helper.make_node(opname, softmax_inputs, ["out"])
        if axis is not None:
            softmax_node.attribute.append(helper.make_attribute("axis", axis))
        nodes.append(softmax_node)
        graph = helper.make_graph(
            nodes,
            opname + "_test",
            inputs=graph_inputs,
            outputs=graph_outputs,
        )
        model = helper.make_model(graph, producer_name=opname + "_test")
        verify_with_ort_with_inputs(
            model, feed, use_vm=True, opset=opset, target=target, dev=dev
        )
    verify_softmax((1, 10), None)
    verify_softmax((1, 10), 1)
    verify_softmax((1, 2, 3, 10), 0)
    verify_softmax((1, 2, 3, 10), 2)
    verify_softmax((1, 2, 3, 4, 10), 3)
    verify_softmax((1, 2, 3, 4, 10), 4)
    verify_softmax((1, 10), -1, dynamic=True)
    verify_softmax((1, 2, 3, 10), -1, dynamic=True)
    verify_softmax((1, 10), -1, opset=8, dynamic=True)
    verify_softmax((1, 2, 3, 10), -1, opset=8, dynamic=True)
@tvm.testing.parametrize_targets
def test_forward_min(target, dev):
    """Variadic Min over three same-shape float32 inputs."""
    def verify_min(input_dim):
        dtype = "float32"
        names = ["a_np1", "a_np2", "a_np3"]
        arrays = [np.random.uniform(size=input_dim).astype(dtype) for _ in names]
        min_node = helper.make_node("Min", names, ["out"])
        graph = helper.make_graph(
            [min_node],
            "Min_test",
            inputs=[
                helper.make_tensor_value_info(name, TensorProto.FLOAT, list(input_dim))
                for name in names
            ],
            outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(input_dim))],
        )
        model = helper.make_model(graph, producer_name="Min_test")
        verify_with_ort_with_inputs(model, arrays, target=target, dev=dev)
    verify_min((1, 3, 20, 20))
    verify_min((20, 20))
@tvm.testing.parametrize_targets
def test_forward_max(target, dev):
    """Variadic Max over three same-shape float32 inputs."""
    def verify_max(input_dim):
        dtype = "float32"
        names = ["a_np1", "a_np2", "a_np3"]
        arrays = [np.random.uniform(size=input_dim).astype(dtype) for _ in names]
        max_node = helper.make_node("Max", names, ["out"])
        graph = helper.make_graph(
            [max_node],
            "Max_test",
            inputs=[
                helper.make_tensor_value_info(name, TensorProto.FLOAT, list(input_dim))
                for name in names
            ],
            outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(input_dim))],
        )
        model = helper.make_model(graph, producer_name="Max_test")
        verify_with_ort_with_inputs(model, arrays, target=target, dev=dev)
    verify_max((1, 3, 20, 20))
    verify_max((20, 20))
@tvm.testing.parametrize_targets
def test_forward_mean(target, dev):
    """Variadic Mean over three same-shape float32 inputs."""
    def verify_mean(input_dim):
        dtype = "float32"
        names = ["a_np1", "a_np2", "a_np3"]
        arrays = [np.random.uniform(size=input_dim).astype(dtype) for _ in names]
        mean_node = helper.make_node("Mean", names, ["out"])
        graph = helper.make_graph(
            [mean_node],
            "Mean_test",
            inputs=[
                helper.make_tensor_value_info(name, TensorProto.FLOAT, list(input_dim))
                for name in names
            ],
            outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(input_dim))],
        )
        model = helper.make_model(graph, producer_name="Mean_test")
        verify_with_ort_with_inputs(model, arrays, target=target, dev=dev)
    verify_mean((1, 3, 20, 20))
    verify_mean((20, 20))
@tvm.testing.parametrize_targets
def test_forward_hardsigmoid(target, dev):
    """HardSigmoid with explicit alpha/beta attributes."""
    def verify_hardsigmoid(input_dim, alpha, beta):
        data = np.random.uniform(size=input_dim).astype("float32")
        node = helper.make_node("HardSigmoid", ["a_np1"], ["out"], alpha=alpha, beta=beta)
        graph = helper.make_graph(
            [node],
            "HardSigmoid_test",
            inputs=[helper.make_tensor_value_info("a_np1", TensorProto.FLOAT, list(input_dim))],
            outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(input_dim))],
        )
        model = helper.make_model(graph, producer_name="HardSigmoid_test")
        verify_with_ort_with_inputs(model, [data], target=target, dev=dev)
    verify_hardsigmoid((1, 3, 20, 20), 0.5, 0.6)
    verify_hardsigmoid((20, 20), 0.3, 0.4)
# TODO (mbrookhart, electriclilies) Fix argmin on GPU and enable this test
@tvm.testing.known_failing_targets("cuda")
@tvm.testing.parametrize_targets
def test_forward_arg_min_max(target, dev):
    """ArgMin/ArgMax over int32 data for various axis/keepdims combinations."""
    def verify_argreduce(input_dim, op_name, axis=None, keepdims=None):
        data = np.random.uniform(-10, 10, input_dim).astype(np.int32)
        # Expected output shape follows ONNX semantics: the reduced axis is
        # kept as size 1 unless keepdims is explicitly 0/False.
        out_shape = list(data.shape)
        reduce_axis = 0 if axis is None else axis
        if keepdims is None or keepdims == 1:
            out_shape[reduce_axis] = 1
        else:
            out_shape.pop(reduce_axis)
        node = onnx.helper.make_node(op_name, inputs=["a_np1"], outputs=["out"])
        if keepdims is not None:
            node.attribute.append(helper.make_attribute("keepdims", keepdims))
        if axis is not None:
            node.attribute.append(helper.make_attribute("axis", axis))
        graph = helper.make_graph(
            [node],
            "argreduce_test",
            inputs=[helper.make_tensor_value_info("a_np1", TensorProto.INT32, list(data.shape))],
            outputs=[helper.make_tensor_value_info("out", TensorProto.INT64, list(out_shape))],
        )
        model = helper.make_model(graph, producer_name="argreduce_test")
        verify_with_ort_with_inputs(model, [data], target=target, dev=dev)
    # Verify argmin and argmax
    verify_argreduce([3, 4, 4], "ArgMin")
    verify_argreduce([3, 4, 4], "ArgMax")
    verify_argreduce([3, 4, 4], "ArgMin", axis=1)
    verify_argreduce([3, 4, 4], "ArgMax", axis=0)
    verify_argreduce([3, 4, 4], "ArgMin", keepdims=0)
    verify_argreduce([3, 4, 4], "ArgMax", keepdims=1)
    for axis in [None, 0, 1, 2]:
        for keepdims in [None, True, False]:
            verify_argreduce([3, 4, 4], "ArgMin", axis, keepdims)
            verify_argreduce([3, 4, 4], "ArgMax", axis, keepdims)
@tvm.testing.parametrize_targets
def test_constantofshape(target, dev):
    """ConstantOfShape with several fill values and output dtypes."""
    def verify_constantofshape(input_dim, value, dtype):
        onnx_dtype = mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(dtype)]
        node = helper.make_node(
            "ConstantOfShape",
            ["input"],
            ["output"],
            value=helper.make_tensor("value", onnx_dtype, (1,), (value,)),
        )
        graph = helper.make_graph(
            [node],
            "fill_test",
            [helper.make_tensor_value_info("input", TensorProto.INT64, [len(input_dim)])],
            outputs=[helper.make_tensor_value_info("output", onnx_dtype, input_dim)],
        )
        model = helper.make_model(graph, producer_name="fill_test")
        # The requested output shape is fed in as a runtime int64 tensor.
        shape_tensor = np.array(input_dim).astype("int64")
        verify_with_ort_with_inputs(model, [shape_tensor], use_vm=True, target=target, dev=dev)
    verify_constantofshape((2, 3, 4, 5), 10, "float32")
    verify_constantofshape((3, 3), 0, "int32")
    verify_constantofshape((1, 2, 3), -1, "float32")
@tvm.testing.parametrize_targets
def test_pad(target, dev):
    """test_pad"""
    def verify_pad(indata, pads, mode="constant", value=0.0):
        # Opset-2 style Pad: `pads` and `value` are node *attributes*.
        indata = np.array(indata).astype(np.float32)
        # numpy expect result
        # ONNX `pads` is [before_0, ..., before_n, after_0, ..., after_n];
        # np.pad wants per-dimension (before, after) pairs.
        len_dim = len(pads) // 2
        np_pads = [(pads[i], pads[i + len_dim]) for i in range(len_dim)]
        # onnx graph
        if mode in ["edge", "reflect"]:
            outdata = np.pad(indata, pad_width=np_pads, mode=mode)
            node = helper.make_node(
                "Pad",
                inputs=["input"],
                outputs=["output"],
                mode=mode,
                pads=pads,
            )
        else:
            # "constant" mode additionally carries the fill value.
            outdata = np.pad(indata, pad_width=np_pads, mode="constant", constant_values=value)
            node = helper.make_node(
                "Pad", inputs=["input"], outputs=["output"], mode="constant", pads=pads, value=value
            )
        graph = helper.make_graph(
            [node],
            "pad_test",
            inputs=[helper.make_tensor_value_info("input", TensorProto.FLOAT, list(indata.shape))],
            outputs=[
                helper.make_tensor_value_info("output", TensorProto.FLOAT, list(outdata.shape))
            ],
        )
        model = helper.make_model(graph, producer_name="pad_test")
        verify_with_ort_with_inputs(
            model, [indata], [outdata.shape], dtype="float32", opset=2, target=target, dev=dev
        )
    def verify_pad_v11(indata, pads, mode="constant", value=0.0):
        # Opset-11 style Pad: `pads` (and `constant_value`) are tensor *inputs*,
        # supplied here as initializers so the graph stays static.
        indata = np.array(indata).astype(np.float32)
        # numpy expect result
        len_dim = len(pads) // 2
        np_pads = [(pads[i], pads[i + len_dim]) for i in range(len_dim)]
        pads = np.array(pads)
        # onnx graph
        if mode in ["edge", "reflect"]:
            inputs = [indata]
            outdata = np.pad(indata, pad_width=np_pads, mode=mode)
            node = helper.make_node("Pad", inputs=["input", "pads"], outputs=["output"], mode=mode)
            graph = helper.make_graph(
                [node],
                "pad_test",
                inputs=[
                    helper.make_tensor_value_info("input", TensorProto.FLOAT, list(indata.shape)),
                    helper.make_tensor_value_info("pads", TensorProto.INT64, (len(pads),)),
                ],
                initializer=[helper.make_tensor("pads", TensorProto.INT64, (len(pads),), pads)],
                outputs=[
                    helper.make_tensor_value_info("output", TensorProto.FLOAT, list(outdata.shape))
                ],
            )
        else:
            inputs = [indata]
            outdata = np.pad(indata, pad_width=np_pads, mode="constant", constant_values=value)
            node = helper.make_node(
                "Pad",
                inputs=["input", "pads", "constant_value"],
                outputs=["output"],
                mode="constant",
            )
            graph = helper.make_graph(
                [node],
                "pad_test",
                inputs=[
                    helper.make_tensor_value_info("input", TensorProto.FLOAT, list(indata.shape)),
                    helper.make_tensor_value_info("pads", TensorProto.INT64, (len(pads),)),
                    helper.make_tensor_value_info("constant_value", TensorProto.FLOAT, (1,)),
                ],
                initializer=[
                    helper.make_tensor("pads", TensorProto.INT64, (len(pads),), pads),
                    helper.make_tensor("constant_value", TensorProto.FLOAT, (1,), [value]),
                ],
                outputs=[
                    helper.make_tensor_value_info("output", TensorProto.FLOAT, list(outdata.shape))
                ],
            )
        model = helper.make_model(graph, producer_name="pad_test")
        verify_with_ort_with_inputs(model, inputs, opset=11, use_vm=True, target=target, dev=dev)
    verify_pad(np.random.randn(2, 2).astype(np.float32), [0, 1, 0, 0], "constant", 0.0)
    verify_pad(np.random.randn(2, 3).astype(np.float32), [1, 0, 0, 1], "constant", 0.0)
    verify_pad(np.random.randn(3, 2).astype(np.float32), [0, 0, 1, 0], "constant", 5.0)
    verify_pad(np.random.randn(1, 3, 4, 5).astype(np.float32), [0, 0, 1, 1, 0, 0, 1, 1], "edge")
    verify_pad(np.random.randn(1, 3, 4, 5).astype(np.float32), [0, 0, 1, 1, 0, 0, 1, 1], "reflect")
    verify_pad_v11(np.random.randn(2, 2).astype(np.float32), [0, 1, 0, 0], "constant", 0.0)
    verify_pad_v11(np.random.randn(2, 3).astype(np.float32), [1, 0, 0, 1], "constant", 0.0)
    verify_pad_v11(np.random.randn(3, 2).astype(np.float32), [0, 0, 1, 0], "constant", 5.0)
    verify_pad_v11(np.random.randn(1, 3, 4, 5).astype(np.float32), [0, 0, 1, 1, 0, 0, 1, 1], "edge")
    verify_pad_v11(
        np.random.randn(1, 3, 4, 5).astype(np.float32), [0, 0, 1, 1, 0, 0, 1, 1], "reflect"
    )
@tvm.testing.parametrize_targets
def test_all_reduce_funcs(target, dev):
    """Coverage for the Reduce* operator family over assorted shapes and axes."""
    def verify_reduce_func(func, data, axis, keepdims):
        inshape = data.shape
        # np.sum is used only to derive the expected *output shape*.
        outshape = np.sum(data, axis=axis, keepdims=keepdims == 1).shape
        attrs = {"keepdims": keepdims}
        if axis:
            attrs["axes"] = axis
        node = onnx.helper.make_node(func, inputs=["x"], outputs=["y"], **attrs)
        graph = helper.make_graph(
            [node],
            "reduce_test",
            inputs=[helper.make_tensor_value_info("x", TensorProto.FLOAT, list(inshape))],
            outputs=[helper.make_tensor_value_info("y", TensorProto.FLOAT, list(outshape))],
        )
        model = helper.make_model(graph, producer_name="reduce_test")
        verify_with_ort_with_inputs(
            model,
            [data],
            [outshape],
            opset=11,
            target=target,
            dev=dev,
            rtol=1e-4,
            atol=1e-4,
        )
    funcs = [
        "ReduceMax",
        "ReduceMean",
        "ReduceMin",
        "ReduceProd",
        "ReduceSum",
        "ReduceSumSquare",
        "ReduceLogSum",
        "ReduceLogSumExp",
        "ReduceL1",
        "ReduceL2",
    ]
    for func in funcs:
        # Rank-0 (scalar) input.
        verify_reduce_func(func, np.array(1.0).astype(np.float32), axis=None, keepdims=False)
        for keepdims in [True, False]:
            verify_reduce_func(
                func, np.random.randn(3, 2, 2).astype(np.float32), axis=None, keepdims=keepdims
            )
            verify_reduce_func(
                func, np.random.randn(3, 2, 3).astype(np.float32), axis=None, keepdims=keepdims
            )
            verify_reduce_func(
                func, np.random.randn(3, 3, 3).astype(np.float32), axis=(1,), keepdims=keepdims
            )
            verify_reduce_func(
                func, np.random.randn(3, 3, 3, 1).astype(np.float32), axis=(1, 2), keepdims=keepdims
            )
            verify_reduce_func(
                func, np.random.randn(3, 3, 3, 1).astype(np.float32), axis=(1,), keepdims=keepdims
            )
            verify_reduce_func(
                func, np.random.randn(1, 3, 4, 1).astype(np.float32), axis=(1,), keepdims=keepdims
            )
@tvm.testing.parametrize_targets
def test_split(target, dev):
    """test_split"""
    def verify_split(indata, outdatas, split, axis=0, pass_split=True, opset=11):
        # Build a Split node over `indata` along `axis` and compare each output
        # against the precomputed `outdatas` chunks.
        indata = np.array(indata).astype(np.float32)
        outdatas = [np.array(o).astype(np.float32) for o in outdatas]
        inputs = [helper.make_tensor_value_info("input", TensorProto.FLOAT, list(indata.shape))]
        input_names = ["input"]
        initializer = []
        # Number of outputs: taken from `split` when given, else from `outdatas`.
        if split:
            split_index = range(len(split))
        else:
            split_index = range(len(outdatas))
        if pass_split:
            if opset >= 13:
                # From opset 13 `split` is a tensor input rather than an attribute.
                input_names.append("split")
                np_split = np.array(split).astype(np.int64)
                inputs.append(
                    helper.make_tensor_value_info("split", TensorProto.INT64, list(np_split.shape))
                )
                # TODO(mbrookhart): Support dynamic split, edit this test case to remove split from
                # the initializer and add it back to the input data
                indata = [indata]  # , np_split]
                initializer.append(
                    helper.make_tensor("split", TensorProto.INT64, list(np_split.shape), np_split)
                )
        node = helper.make_node(
            "Split",
            inputs=input_names,
            outputs=[f"output_{i}" for i in range(len(split_index))],
            axis=axis,
        )
        if pass_split and opset < 13:
            # Pre-opset-13: `split` is a node attribute.
            split_attr = helper.make_attribute("split", split)
            node.attribute.append(split_attr)
        graph = helper.make_graph(
            [node],
            "split_test",
            inputs=inputs,
            initializer=initializer,
            outputs=[
                helper.make_tensor_value_info(
                    f"output_{i}", TensorProto.FLOAT, list(outdatas[i].shape)
                )
                for i in range(len(split_index))
            ],
        )
        model = helper.make_model(graph, producer_name="split_test")
        verify_with_ort_with_inputs(
            model,
            indata,
            out_shape=list(range(len(split_index))),
            opset=opset,
            target=target,
            dev=dev,
            use_vm=True,
            freeze_params=(opset >= 13),
        )
    # 1D
    verify_split([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], [[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], [2, 2, 2], 0)
    verify_split(
        [1.0, 2.0, 3.0, 4.0, 5.0, 6.0], [[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], [2, 2, 2], 0, False
    )
    verify_split([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], [[1.0, 2.0], [3.0], [4.0, 5.0, 6.0]], [2, 1, 3], 0)
    verify_split(
        [1.0, 2.0, 3.0, 4.0, 5.0, 6.0], [[1.0, 2.0], [3.0], [4.0, 5.0, 6.0]], [2, 1, 3], 0, opset=13
    )
    # 2D
    verify_split(
        [[1.0, 2.0, 3.0, 4.0], [7.0, 8.0, 9.0, 10.0]],
        [[[1.0, 2.0], [7.0, 8.0]], [[3.0, 4.0], [9.0, 10.0]]],
        [2, 2],
        1,
    )
    verify_split(
        [[1.0, 2.0, 3.0, 4.0], [7.0, 8.0, 9.0, 10.0]],
        [[[1.0, 2.0], [7.0, 8.0]], [[3.0, 4.0], [9.0, 10.0]]],
        [2, 2],
        1,
        opset=13,
    )
    # Split evenly (unstack)
    verify_split([1, 2, 3], [[1], [2], [3]], False, 0, False)
    # Split a single value to a single value
    verify_split([1], [[1]], [1], pass_split=True)
    # Test that the default case modifies nothing when split list has length one
    verify_split([[1.0, 2.0]], [[1.0, 2.0]], [2], 1)
    verify_split([[1.0, 2.0]], [[1.0, 2.0]], [1], 0)
@tvm.testing.parametrize_targets
def test_binary_ops(target, dev):
    """Binary/variadic arithmetic and comparison ops on float32 inputs."""
    in_shape = (1, 2, 3, 3)
    dtype = "float32"
    out_shape = in_shape
    def verify_binary_ops(op, x, y, out_type="float32"):
        node = helper.make_node(op, ["in1", "in2"], ["out"])
        out_tensor_type = mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(out_type)]
        graph = helper.make_graph(
            [node],
            "_test",
            inputs=[
                helper.make_tensor_value_info("in1", TensorProto.FLOAT, x.shape),
                helper.make_tensor_value_info("in2", TensorProto.FLOAT, y.shape),
            ],
            outputs=[helper.make_tensor_value_info("out", out_tensor_type, list(out_shape))],
        )
        model = helper.make_model(graph, producer_name="_test")
        verify_with_ort_with_inputs(model, [x, y], target=target, dev=dev)
    x = np.random.uniform(size=in_shape).astype(dtype)
    y = np.random.uniform(size=in_shape).astype(dtype)
    z_array = np.random.uniform(size=(3,)).astype(dtype)
    # Each op runs once with same-shape operands and once broadcasting against
    # a rank-1 operand.
    for op in ["Add", "Sub", "Mul", "Div", "Sum"]:
        verify_binary_ops(op, x, y)
        verify_binary_ops(op, x, z_array)
    for op in ["Greater", "GreaterOrEqual", "Less", "LessOrEqual", "Equal"]:
        verify_binary_ops(op, x, y, "bool")
        verify_binary_ops(op, x, z_array, "bool")
@tvm.testing.parametrize_targets
def test_unary_ops(target, dev):
    """Element-wise unary operator coverage (Neg, Abs, trig, hyperbolic, etc.).
    Fixes over the previous version: the duplicated "Log" case is run once,
    and the dead `_ = "float32"` assignment is removed.
    """
    in_shape = (1, 2, 3, 3)
    out_shape = in_shape
    def verify_unary_ops(op, x, rtol=1e-5, atol=1e-5, dtype="float32"):
        # Build a single-node graph for `op` and compare TVM against ORT.
        x = x.astype(dtype)
        onnx_dtype = mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(dtype)]
        node = helper.make_node(op, ["in1"], ["out"])
        graph = helper.make_graph(
            [node],
            "_test",
            inputs=[
                helper.make_tensor_value_info("in1", onnx_dtype, list(in_shape)),
            ],
            outputs=[helper.make_tensor_value_info("out", onnx_dtype, list(out_shape))],
        )
        model = helper.make_model(graph, producer_name="_test")
        verify_with_ort_with_inputs(model, [x], rtol=rtol, atol=atol, target=target, dev=dev)
    # Values in [0, 1) keep domain-restricted ops (Acosh, Atanh, Log, Sqrt) valid.
    x = np.random.uniform(size=in_shape)
    verify_unary_ops("Neg", x)
    verify_unary_ops("Abs", x)
    verify_unary_ops("Reciprocal", x)
    verify_unary_ops("Reciprocal", x, dtype="float16")
    verify_unary_ops("Sqrt", x)
    verify_unary_ops("Relu", x)
    verify_unary_ops("Exp", x)
    verify_unary_ops("Log", x)
    verify_unary_ops("Acos", x)
    verify_unary_ops("Acosh", x)
    verify_unary_ops("Asin", x)
    verify_unary_ops("Asinh", x)
    verify_unary_ops("Atan", x)
    verify_unary_ops("Atanh", x)
    verify_unary_ops("Cos", x)
    verify_unary_ops("Cosh", x)
    verify_unary_ops("Sin", x)
    verify_unary_ops("Sinh", x)
    verify_unary_ops("Tan", x)
    verify_unary_ops("Tanh", x)
    verify_unary_ops("Sigmoid", x)
    verify_unary_ops("Softsign", x)
@tvm.testing.parametrize_targets
def test_leaky_relu(target, dev):
    """LeakyRelu against a numpy reference."""
    def leaky_relu_ref(x, alpha):
        # Identity for non-negative values, scaled by alpha otherwise.
        return np.where(x >= 0, x, x * alpha)
    _test_onnx_op_elementwise(
        target,
        dev,
        (2, 4, 5, 6),
        leaky_relu_ref,
        {"alpha": 0.25},
        "float32",
        "LeakyRelu",
        {"alpha": 0.25},
    )
@tvm.testing.parametrize_targets
def test_elu(target, dev):
    """Elu against a numpy reference."""
    def elu_ref(x, alpha):
        # alpha * (e^x - 1) on the negative side, identity otherwise.
        return np.where(x > 0, x, alpha * (np.exp(x) - 1.0))
    _test_onnx_op_elementwise(
        target, dev, (2, 4, 5, 6), elu_ref, {"alpha": 0.25}, "float32", "Elu", {"alpha": 0.25}
    )
@tvm.testing.parametrize_targets
def test_selu(target, dev):
    """Selu against a numpy reference."""
    def selu_ref(x, alpha, gamma):
        # Scaled ELU: gamma * (x if x > 0 else alpha * (e^x - 1)).
        return gamma * np.where(x > 0, x, alpha * (np.exp(x) - 1.0))
    _test_onnx_op_elementwise(
        target,
        dev,
        (2, 4, 5, 6),
        selu_ref,
        {"alpha": 0.25, "gamma": 0.3},
        "float32",
        "Selu",
        {"alpha": 0.25, "gamma": 0.3},
    )
@tvm.testing.parametrize_targets
def test_prelu(target, dev):
    """PRelu with per-channel, broadcast, and non-NCHW slope tensors."""
    def verify_prelu(x_shape, a_shape):
        node = helper.make_node("PRelu", inputs=["X", "slope"], outputs=["Y"])
        graph = helper.make_graph(
            [node],
            "prelu_test",
            inputs=[
                helper.make_tensor_value_info("X", TensorProto.FLOAT, list(x_shape)),
                helper.make_tensor_value_info("slope", TensorProto.FLOAT, list(a_shape)),
            ],
            outputs=[helper.make_tensor_value_info("Y", TensorProto.FLOAT, list(x_shape))],
        )
        model = helper.make_model(graph, producer_name="prelu_test")
        verify_with_ort(
            model,
            [x_shape, a_shape],
            out_shape=[list(x_shape)],
            use_vm=True,
            target=target,
            dev=dev,
        )
    # (input shape, slope shape) cases.
    for x_shape, a_shape in [
        ([3, 4, 5, 6], [1, 4, 1, 1]),
        ([1, 8, 5, 6], [1, 8, 1, 1]),
        ([2, 12, 16, 16], [1, 12, 1, 1]),
        ([2, 12, 16, 16], [1]),  # Test alpha broadcasting.
        ([3, 1], [3, 1]),  # Test non NCHW workload.
    ]:
        verify_prelu(x_shape, a_shape)
@tvm.testing.parametrize_targets
def test_thresholded_relu(target, dev):
    """ThresholdedRelu against a numpy reference."""
    def thresholded_relu_ref(x, alpha):
        # Keep values strictly above the threshold; zero everything else.
        clipped = np.clip(x, alpha, np.inf)
        clipped[clipped == alpha] = 0
        return clipped
    _test_onnx_op_elementwise(
        target,
        dev,
        (2, 4, 5, 6),
        thresholded_relu_ref,
        {"alpha": 0.25},
        "float32",
        "ThresholdedRelu",
        {"alpha": 0.25},
    )
@tvm.testing.parametrize_targets
def test_logsoftmax(target, dev):
    """LogSoftmax on a rank-2 input (axis=1), checked against topi's reference."""
    _test_onnx_op_elementwise(
        target,
        dev,
        (1, 4),
        tvm.topi.testing.log_softmax_python,
        {},
        "float32",
        "LogSoftmax",
        {"axis": 1},
    )
def check_torch_conversion(model, input_size, target, dev):
    """Export a torch model class to ONNX, import it, and compare with ORT."""
    sample = torch.randn(*input_size)
    onnx_path = f"{model.__name__}.onnx"
    # Set verbose=True for more output
    torch.onnx.export(model(), sample, onnx_path, export_params=True, verbose=False)
    onnx_model = onnx.load(onnx_path)
    input_data = np.random.uniform(size=input_size).astype("float32")
    verify_with_ort_with_inputs(
        onnx_model, [input_data], apply_softmax=True, target=target, dev=dev
    )
@tvm.testing.parametrize_targets
def test_resnet(target, dev):
    """Round-trip torchvision ResNet-18 through ONNX and compare with ORT."""
    check_torch_conversion(torchvision.models.resnet18, (1, 3, 224, 224), target, dev)
    # check_torch_conversion(torchvision.models.resnet101, (1,3,224,224))
# def test_alexnet():
# Torch's ONNX export does not support the adaptive pooling used by AlexNet?
# check_torch_conversion(torchvision.models.alexnet, (1,3,224,224))
# Torch's ONNX export does not support the adaptive pooling used by vgg16?
# def test_vgg16():
# check_torch_conversion(torchvision.models.vgg16, (1,3,224,224))
# TODO(@jroesch): Update Torch + ONNX to support this import.
# def test_squeezenet():
#     # Torch's ONNX export does not support the max pooling used by Squezenet
#     check_torch_conversion(torchvision.models.squeezenet1_0, (1,3,224,224))
@tvm.testing.parametrize_targets
def test_densenet(target, dev):
    """Round-trip torchvision DenseNet-161 through ONNX and compare with ORT."""
    check_torch_conversion(torchvision.models.densenet161, (1, 3, 224, 224), target, dev)
@tvm.testing.parametrize_targets
def test_inception(target, dev):
    """Round-trip torchvision Inception-v3 through ONNX and compare with ORT."""
    check_torch_conversion(torchvision.models.inception_v3, (1, 3, 224, 224), target, dev)
# TODO(@jroesch): Update Torch + ONNX to support this import.
# def test_googlenet():
#     check_torch_conversion(torchvision.models.googlenet, (1,3,224,224))
# TODO(@jroesch): Update Torch + ONNX to support this import.
# def test_shufflenetv2():
#     check_torch_conversion(torchvision.models.shufflenetv2, (1,3,224,224))
@tvm.testing.parametrize_targets
def test_sign(target, dev):
    """Element-wise Sign against a numpy reference."""
    def sign_ref(x):
        return np.sign(x)
    _test_onnx_op_elementwise(target, dev, (3, 4, 5, 6), sign_ref, {}, "float32", "Sign", {})
@tvm.testing.parametrize_targets
def test_not(target, dev):
    """Logical Not on boolean tensors of rank 2 through 4."""
    def verify_not(indata, dtype):
        x = indata.astype(dtype)
        node = helper.make_node("Not", inputs=["in"], outputs=["out"])
        graph = helper.make_graph(
            [node],
            "not_test",
            inputs=[helper.make_tensor_value_info("in", TensorProto.BOOL, list(x.shape))],
            outputs=[helper.make_tensor_value_info("out", TensorProto.BOOL, list(x.shape))],
        )
        model = helper.make_model(graph, producer_name="not_test")
        verify_with_ort_with_inputs(model, [x], target=target, dev=dev)
    # 2d, 3d, and 4d boolean inputs.
    for shape in [(3, 4), (3, 4, 5), (3, 4, 5, 6)]:
        verify_not(indata=(np.random.randn(*shape) > 0), dtype=bool)
@tvm.testing.parametrize_targets
def test_and(target, dev):
    """Logical And, including rank-mismatched broadcast cases."""
    def verify_and(indata, dtype):
        x = indata[0].astype(dtype)
        y = indata[1].astype(dtype)
        expected = np.logical_and(x, y)
        node = helper.make_node("And", inputs=["in1", "in2"], outputs=["out"])
        graph = helper.make_graph(
            [node],
            "and_test",
            inputs=[
                helper.make_tensor_value_info("in1", TensorProto.BOOL, list(x.shape)),
                helper.make_tensor_value_info("in2", TensorProto.BOOL, list(y.shape)),
            ],
            outputs=[helper.make_tensor_value_info("out", TensorProto.BOOL, list(expected.shape))],
        )
        model = helper.make_model(graph, producer_name="and_test")
        verify_with_ort_with_inputs(model, [x, y], [expected.shape], target=target, dev=dev)
    # (x shape, y shape) pairs: same-rank cases plus two broadcasts.
    for x_shape, y_shape in [
        ((3, 4), (3, 4)),
        ((3, 4, 5), (3, 4, 5)),
        ((3, 4, 5, 6), (3, 4, 5, 6)),
        ((3, 4, 5), (5,)),
        ((3, 4, 5), (4, 5)),
    ]:
        verify_and(
            indata=[np.random.randn(*x_shape) > 0, np.random.randn(*y_shape) > 0], dtype=bool
        )
@tvm.testing.parametrize_targets
def test_tile(target, dev):
    """Tile (opset 6) with random repeat counts per dimension."""
    def verify_tile_v6(indata, repeats, outdata):
        node = helper.make_node("Tile", inputs=["input", "repeats"], outputs=["out"])
        graph = helper.make_graph(
            [node],
            "tile_test",
            inputs=[
                helper.make_tensor_value_info("input", TensorProto.FLOAT, list(indata.shape)),
                helper.make_tensor_value_info("repeats", TensorProto.INT64, list(repeats.shape)),
            ],
            outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(outdata.shape))],
        )
        model = helper.make_model(graph, producer_name="tile_test")
        verify_with_ort_with_inputs(
            model, [indata, repeats], use_vm=True, opset=6, target=target, dev=dev
        )
    data = np.random.rand(2, 3, 4, 5).astype(np.float32)
    reps = np.random.randint(low=1, high=10, size=(np.ndim(data),)).astype(np.int64)
    verify_tile_v6(data, reps, np.tile(data, reps))
@tvm.testing.parametrize_targets
def test_erf(target, dev):
    """Erf checked against scipy.special.erf."""
    def verify_erf(indata, outdata):
        node = helper.make_node("Erf", inputs=["in"], outputs=["out"])
        graph = helper.make_graph(
            [node],
            "erf_test",
            inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT, list(indata.shape))],
            outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(outdata.shape))],
        )
        model = helper.make_model(graph, producer_name="erf_test")
        verify_with_ort_with_inputs(model, [indata], [outdata.shape], target=target, dev=dev)
    data = np.random.rand(2, 3, 4, 6).astype(np.float32)
    verify_erf(data, scipy.special.erf(data))
@tvm.testing.parametrize_targets
def test_where(target, dev):
    """Where with broadcasting between condition, x, and y of assorted ranks."""
    def verify_where(condition, x, y, dtype, outdata, dynamic=False):
        nodes = []
        where_inputs = ["condition", "x", "y"]
        if dynamic:
            # Feed `x` through Shape + Reshape so Where receives a dynamically
            # shaped operand.
            nodes.append(helper.make_node("Shape", ["x"], ["shape"]))
            nodes.append(helper.make_node("Reshape", ["x", "shape"], ["X"]))
            where_inputs[1] = "X"
        nodes.append(helper.make_node("Where", inputs=where_inputs, outputs=["out"]))
        graph = helper.make_graph(
            nodes,
            "where_test",
            inputs=[
                helper.make_tensor_value_info("condition", TensorProto.BOOL, list(condition.shape)),
                helper.make_tensor_value_info("x", dtype, list(x.shape)),
                helper.make_tensor_value_info("y", dtype, list(y.shape)),
            ],
            outputs=[helper.make_tensor_value_info("out", dtype, list(outdata.shape))],
        )
        model = helper.make_model(graph, producer_name="where_test")
        verify_with_ort_with_inputs(
            model, [condition, x, y], [outdata.shape], use_vm=True, target=target, dev=dev
        )
    # Matching-shape int64 operands.
    condition = np.array([[1, 0], [1, 1]], dtype=bool)
    x = np.array([[1, 2], [3, 4]], dtype=np.int64)
    y = np.array([[9, 8], [7, 6]], dtype=np.int64)
    verify_where(condition, x, y, TensorProto.INT64, np.where(condition, x, y))
    # Matching-shape float32 operands.
    x = np.array([[1, 2], [3, 4]], dtype=np.float32)
    y = np.array([[9, 8], [7, 6]], dtype=np.float32)
    verify_where(condition, x, y, TensorProto.FLOAT, np.where(condition, x, y))
    # Scalar x broadcast against rank-1 y, and vice versa.
    x = np.array(1, dtype=np.float32)
    y = np.array([2], dtype=np.float32)
    verify_where(condition, x, y, TensorProto.FLOAT, np.where(condition, x, y))
    x = np.array([2], dtype=np.float32)
    y = np.array(1, dtype=np.float32)
    verify_where(condition, x, y, TensorProto.FLOAT, np.where(condition, x, y))
    # Scalar condition.
    condition = np.array(1, dtype=bool)
    x = np.array([[1, 2], [3, 4]], dtype=np.float32)
    y = np.array([[5, 6], [7, 8]], dtype=np.float32)
    verify_where(condition, x, y, TensorProto.FLOAT, np.where(condition, x, y))
    # Column-broadcast y; also exercised through the dynamic-reshape path.
    x = np.array([[1, 2], [3, 4]], dtype=np.float32)
    y = np.array([[1], [7]], dtype=np.float32)
    outdata = np.where(condition, x, y)
    verify_where(condition, x, y, TensorProto.FLOAT, outdata)
    verify_where(condition, x, y, TensorProto.FLOAT, outdata, dynamic=True)
    # Random boolean condition broadcast against rank-1 operands.
    condition = np.random.uniform(size=(3, 1)) < 0.5
    x = np.random.uniform(size=2).astype(np.float32)
    y = np.random.uniform(size=2).astype(np.float32)
    verify_where(condition, x, y, TensorProto.FLOAT, np.where(condition, x, y))
@tvm.testing.parametrize_targets
def test_or(target, dev):
    """Logical Or, including rank-mismatched broadcast cases."""
    def verify_or(indata, dtype):
        x = indata[0].astype(dtype)
        y = indata[1].astype(dtype)
        expected = np.logical_or(x, y)
        node = helper.make_node("Or", inputs=["in1", "in2"], outputs=["out"])
        graph = helper.make_graph(
            [node],
            "or_test",
            inputs=[
                helper.make_tensor_value_info("in1", TensorProto.BOOL, list(x.shape)),
                helper.make_tensor_value_info("in2", TensorProto.BOOL, list(y.shape)),
            ],
            outputs=[helper.make_tensor_value_info("out", TensorProto.BOOL, list(expected.shape))],
        )
        model = helper.make_model(graph, producer_name="or_test")
        verify_with_ort_with_inputs(model, [x, y], [expected.shape], target=target, dev=dev)
    # (x shape, y shape) pairs: same-rank cases plus two broadcasts.
    for x_shape, y_shape in [
        ((3, 4), (3, 4)),
        ((3, 4, 5), (3, 4, 5)),
        ((3, 4, 5, 6), (3, 4, 5, 6)),
        ((3, 4, 5), (5,)),
        ((3, 4, 5), (4, 5)),
    ]:
        verify_or(
            indata=[np.random.randn(*x_shape) > 0, np.random.randn(*y_shape) > 0], dtype=bool
        )
@tvm.testing.parametrize_targets
def test_batch_norm(target, dev):
    """Check the ONNX ``BatchNormalization`` operator for a range of NCHW shapes."""

    def check_batch_norm(in_shape):
        channels = in_shape[1]
        bn_node = onnx.helper.make_node(
            "BatchNormalization", inputs=["x", "scale", "B", "mean", "var"], outputs=["Y"]
        )
        # scale/B/mean/var are all per-channel vectors.
        per_channel_inputs = [
            helper.make_tensor_value_info(name, TensorProto.FLOAT, [channels])
            for name in ("scale", "B", "mean", "var")
        ]
        graph = helper.make_graph(
            [bn_node],
            "batchnorm_test",
            inputs=[helper.make_tensor_value_info("x", TensorProto.FLOAT, list(in_shape))]
            + per_channel_inputs,
            outputs=[helper.make_tensor_value_info("Y", TensorProto.FLOAT, list(in_shape))],
        )
        model = helper.make_model(graph, producer_name="batchnorm_test")
        # Input shapes in graph order: X, scale, B, mean, var.
        verify_with_ort(
            model,
            [in_shape, channels, channels, channels, channels],
            out_shape=[in_shape],
            target=target,
            dev=dev,
        )

    for shape in (
        [1, 3, 224, 224],
        [1, 3, 24, 24],
        [16, 3, 24, 24],
        [16, 16, 24, 24],
        [16, 16, 10, 10],
    ):
        check_batch_norm(shape)
@tvm.testing.parametrize_targets
def test_batch_norm_dynamic_subgraph(target, dev):
    """Check BatchNormalization whose runtime output shape feeds a dynamic Reshape."""

    def check_dynamic_subgraph(in_shape, o_shape):
        channels = in_shape[1]
        bn_node = onnx.helper.make_node(
            "BatchNormalization", inputs=["x", "scale", "B", "mean", "var"], outputs=["Y"]
        )
        # Reshape "in" using the shape of the batch-norm output, computed at runtime.
        shape_node = helper.make_node("Shape", ["Y"], ["shape"])
        reshape_node = helper.make_node("Reshape", ["in", "shape"], ["out"])
        graph = helper.make_graph(
            [bn_node, shape_node, reshape_node],
            "batchnorm_test",
            inputs=[
                helper.make_tensor_value_info("x", TensorProto.FLOAT, list(in_shape)),
                helper.make_tensor_value_info("in", TensorProto.FLOAT, list(o_shape)),
            ]
            + [
                helper.make_tensor_value_info(name, TensorProto.FLOAT, [channels])
                for name in ("scale", "B", "mean", "var")
            ],
            outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(in_shape))],
        )
        model = helper.make_model(graph, producer_name="batchnorm_test")
        # Input shapes in graph order: X, in, scale, B, mean, var.
        verify_with_ort(
            model,
            [in_shape, o_shape, channels, channels, channels, channels],
            out_shape=[in_shape],
            use_vm=True,
            target=target,
            dev=dev,
        )

    check_dynamic_subgraph([16, 16, 10, 10], [160, 160])
@tvm.testing.parametrize_targets
def test_conv(target, dev):
    """Check the ONNX ``Conv`` operator: pad modes, strides, dilation and groups."""

    def verify_conv(
        x_shape,
        w_shape,
        y_shape,
        padding,
        kernel_shape,
        strides,
        dilations,
        group=1,
        auto_pad="NOTSET",
        unset_pad=False,
    ):
        """Build a single-Conv model and compare TVM against onnxruntime.

        ``padding`` is an explicit ``pads`` list, or ``None`` to exercise the
        ``auto_pad`` attribute. ``unset_pad`` omits padding attributes entirely
        so the ONNX defaults (zero padding) apply.
        """
        if unset_pad:
            # No pads/auto_pad attribute at all: ONNX defaults to zero padding.
            node = helper.make_node(
                "Conv",
                inputs=["x", "W"],
                outputs=["y"],
                kernel_shape=kernel_shape,
                # Default values for other attributes:
                strides=strides,
                dilations=dilations,
                group=group,
            )
        elif padding is None:
            ## autopadding with unset default attributes
            kwargs = {}
            if not all(list(s == 1 for s in strides)):
                kwargs["strides"] = strides
            if not all(list(d == 1 for d in dilations)):
                kwargs["dilations"] = dilations
            node = helper.make_node(
                "Conv",
                inputs=["x", "W"],
                outputs=["y"],
                # Default values for other attributes:
                auto_pad=auto_pad,
                group=group,
                **kwargs,
            )
        else:
            node = helper.make_node(
                "Conv",
                inputs=["x", "W"],
                outputs=["y"],
                kernel_shape=kernel_shape,
                # Default values for other attributes:
                strides=strides,
                dilations=dilations,
                group=group,
                pads=padding,
            )

        graph = helper.make_graph(
            [node],
            "conv_test",
            inputs=[
                helper.make_tensor_value_info("x", TensorProto.FLOAT, list(x_shape)),
                helper.make_tensor_value_info("W", TensorProto.FLOAT, list(w_shape)),
            ],
            outputs=[helper.make_tensor_value_info("y", TensorProto.FLOAT, list(y_shape))],
        )

        model = helper.make_model(graph, producer_name="conv_test")

        verify_with_ort(
            model,
            [x_shape, w_shape],
            [y_shape],
            use_vm=True,
            target=target,
            dev=dev,
        )

    def repeat(num, dims):
        """Return ``num`` repeated ``dims`` times, e.g. repeat(3, 2) -> (3, 3)."""
        return tuple(num for _ in range(dims))

    for dims in [1, 2, 3]:
        # Convolution with padding
        verify_conv(
            (1, 1) + repeat(5, dims),
            (1, 1) + repeat(3, dims),
            (1, 1) + repeat(5, dims),
            2 * repeat(1, dims),
            repeat(3, dims),
            repeat(1, dims),
            repeat(1, dims),
        )
        # Convolution with asymmetric padding
        verify_conv(
            (1, 1) + repeat(5, dims),
            (1, 1) + repeat(3, dims),
            (1, 1) + repeat(4, dims),
            repeat(0, dims) + repeat(1, dims),
            repeat(3, dims),
            repeat(1, dims),
            repeat(1, dims),
        )
        # Convolution without padding
        verify_conv(
            (1, 1) + repeat(5, dims),
            (1, 1) + repeat(3, dims),
            (1, 1) + repeat(3, dims),
            2 * repeat(0, dims),
            repeat(3, dims),
            repeat(1, dims),
            repeat(1, dims),
        )
        # Convolution with autopadding
        verify_conv(
            (1, 1) + repeat(5, dims),
            (1, 1) + repeat(3, dims),
            (1, 1) + repeat(5, dims),
            None,
            repeat(3, dims),
            repeat(1, dims),
            repeat(1, dims),
            auto_pad="SAME_UPPER",
        )
        # Convolution with valid autopadding
        verify_conv(
            (1, 1) + repeat(5, dims),
            (1, 1) + repeat(3, dims),
            (1, 1) + repeat(3, dims),
            None,
            repeat(3, dims),
            repeat(1, dims),
            repeat(1, dims),
            auto_pad="VALID",
        )
        # Convolution with unset padding
        verify_conv(
            (1, 1) + repeat(5, dims),
            (1, 1) + repeat(3, dims),
            (1, 1) + repeat(3, dims),
            2 * repeat(0, dims),
            repeat(3, dims),
            repeat(1, dims),
            repeat(1, dims),
            # BUGFIX: `True` was previously passed positionally and silently
            # bound to `group`, so the unset-pad code path was never exercised.
            unset_pad=True,
        )
        # Convolution with non uniform stride
        verify_conv(
            (1, 1) + repeat(5, dims),
            (1, 1) + repeat(3, dims),
            (1, 1) + repeat(3, dims),
            None,
            repeat(3, dims),
            repeat(2, dims),
            repeat(1, dims),
            auto_pad="SAME_UPPER",
        )
        # Convolution with dilation
        verify_conv(
            (1, 1) + repeat(5, dims),
            (1, 1) + repeat(3, dims),
            (1, 1) + repeat(5, dims),
            2 * repeat(2, dims),
            repeat(3, dims),
            repeat(1, dims),
            repeat(2, dims),
        )

    # TODO(jwfromm): Merge with other tests once group_conv3d is supported.
    for dims in [1, 2, 3]:
        # Group Convolution (depthwise: one filter per input channel)
        verify_conv(
            (1, 8) + repeat(5, dims),
            (8, 1) + repeat(3, dims),
            (1, 8) + repeat(5, dims),
            2 * repeat(1, dims),
            repeat(3, dims),
            repeat(1, dims),
            repeat(1, dims),
            group=8,
        )
        verify_conv(
            (1, 12) + repeat(5, dims),
            (30, 4) + repeat(3, dims),
            (1, 30) + repeat(5, dims),
            2 * repeat(1, dims),
            repeat(3, dims),
            repeat(1, dims),
            repeat(1, dims),
            group=3,
        )
@tvm.testing.parametrize_targets
def test_convtranspose(target, dev):
    """Check the ONNX ``ConvTranspose`` operator: pads, auto_pad, output_shape, groups."""

    def verify_convtranspose_with_output_shape(
        x_shape,
        w_shape,
        output_shape,
        kernel_shape,
        strides,
        dilations,
        auto_pad="SAME_UPPER",
        group=1,
    ):
        """Build a ConvTranspose that fixes its spatial result via ``output_shape``."""
        node = helper.make_node(
            "ConvTranspose",
            inputs=["x", "W"],
            outputs=["y"],
            kernel_shape=kernel_shape,
            # Default values for other attributes:
            strides=strides,
            dilations=dilations,
            output_shape=output_shape,
            auto_pad=auto_pad,
        )
        # group=None exercises the operator's default group handling.
        if group is not None:
            group_attr = helper.make_attribute("group", group)
            node.attribute.append(group_attr)

        graph = helper.make_graph(
            [node],
            "ConvTranspose_with_output_shape_test",
            inputs=[
                helper.make_tensor_value_info("x", TensorProto.FLOAT, list(x_shape)),
                helper.make_tensor_value_info("W", TensorProto.FLOAT, list(w_shape)),
            ],
            outputs=[
                helper.make_tensor_value_info("y", TensorProto.FLOAT, [1, 1] + list(output_shape))
            ],
        )

        model = helper.make_model(graph, producer_name="convtranspose_output_shape_test")
        verify_with_ort(model, [x_shape, w_shape], use_vm=True, target=target, dev=dev)

    def verify_convtranspose_with_padding(
        x_shape,
        w_shape,
        padding,
        kernel_shape,
        strides,
        dilations,
        auto_pad="NOTSET",
        unset_pad=False,
        group=1,
    ):
        """Build a ConvTranspose with explicit pads, auto_pad, or no padding attrs."""
        node = helper.make_node(
            "ConvTranspose",
            inputs=["x", "W"],
            outputs=["y"],
            kernel_shape=kernel_shape,
            # Default values for other attributes:
            strides=strides,
            dilations=dilations,
        )
        # unset_pad leaves all padding attributes off so ONNX defaults apply.
        if not unset_pad:
            if padding is None:
                pad_attr = helper.make_attribute("auto_pad", auto_pad)
            else:
                pad_attr = helper.make_attribute("pads", padding)
            node.attribute.append(pad_attr)
        if group is not None:
            group_attr = helper.make_attribute("group", group)
            node.attribute.append(group_attr)

        graph = helper.make_graph(
            [node],
            "convtranspose_test",
            inputs=[
                helper.make_tensor_value_info("x", TensorProto.FLOAT, list(x_shape)),
                helper.make_tensor_value_info("W", TensorProto.FLOAT, list(w_shape)),
            ],
            # Output shape is left dynamic ("?") and inferred at runtime.
            outputs=[helper.make_tensor_value_info("y", TensorProto.FLOAT, ["?"] * len(x_shape))],
        )

        model = helper.make_model(graph, producer_name="convtranspose_pad_test")
        verify_with_ort(model, [x_shape, w_shape], use_vm=True, target=target, dev=dev)

    def verify_convtranspose(x_shape, w_shape, y_shape, p, group=1):
        """Fixed-attribute ConvTranspose (strides [3, 2], 3x3 kernel, pads ``p``)."""
        node = onnx.helper.make_node(
            "ConvTranspose",
            inputs=["x", "W"],
            outputs=["y"],
            strides=[3, 2],
            kernel_shape=[3, 3],
            pads=p,
        )
        if group is not None:
            group_attr = helper.make_attribute("group", group)
            node.attribute.append(group_attr)

        graph = helper.make_graph(
            [node],
            "verify_convtranspose_test",
            inputs=[
                helper.make_tensor_value_info("x", TensorProto.FLOAT, list(x_shape)),
                helper.make_tensor_value_info("W", TensorProto.FLOAT, list(w_shape)),
            ],
            outputs=[helper.make_tensor_value_info("y", TensorProto.FLOAT, list(y_shape))],
        )

        model = helper.make_model(graph, producer_name="convtranspose_test")
        verify_with_ort(model, [x_shape, w_shape], y_shape, opset=11, target=target, dev=dev)

    # Convolution Transpose with padding
    # (1, 1, 3, 3) input tensor
    # (1, 2, 3, 3) tensor for convolution weights
    # (1, 2, 7, 3) output tensor
    # [1, 2, 1, 2] list for pads
    verify_convtranspose((1, 1, 3, 3), (1, 2, 3, 3), (1, 2, 7, 3), [1, 2, 1, 2])
    # Test undefined groups.
    verify_convtranspose((1, 1, 3, 3), (1, 2, 3, 3), (1, 2, 7, 3), [1, 2, 1, 2], group=None)

    if "llvm" in target:
        # GPU does not support groups != 1 for convtranspose, so only test llvm
        # Test depthwise-convolution
        verify_convtranspose((1, 10, 3, 3), (10, 1, 3, 3), (1, 10, 7, 3), [1, 2, 1, 2], group=10)
        # Test grouped-convolution
        verify_convtranspose((1, 10, 3, 3), (10, 1, 3, 3), (1, 5, 7, 3), [1, 2, 1, 2], group=5)

    def repeat(num, dims):
        """Return ``num`` repeated ``dims`` times, e.g. repeat(3, 2) -> (3, 3)."""
        return tuple(num for _ in range(dims))

    # Once onnxruntime update is complete
    for dims in [1, 2, 3]:
        # Convolution with padding
        verify_convtranspose_with_padding(
            (1, 1) + repeat(5, dims),
            (1, 1) + repeat(3, dims),
            2 * repeat(1, dims),
            repeat(3, dims),
            repeat(1, dims),
            repeat(1, dims),
        )
        # Convolution without padding
        verify_convtranspose_with_padding(
            (1, 1) + repeat(5, dims),
            (1, 1) + repeat(3, dims),
            2 * repeat(0, dims),
            repeat(3, dims),
            repeat(1, dims),
            repeat(1, dims),
        )
        # Convolution with unset padding
        verify_convtranspose_with_padding(
            (1, 1) + repeat(5, dims),
            (1, 1) + repeat(3, dims),
            2 * repeat(0, dims),
            repeat(3, dims),
            repeat(1, dims),
            repeat(1, dims),
            # BUGFIX: `True` was previously passed positionally and silently
            # bound to `auto_pad`, so the unset-pad code path was never taken.
            unset_pad=True,
        )
        # Convolution with autopadding
        verify_convtranspose_with_padding(
            (1, 1) + repeat(5, dims),
            (1, 1) + repeat(3, dims),
            None,
            repeat(3, dims),
            repeat(1, dims),
            repeat(1, dims),
            auto_pad="SAME_UPPER",
        )
        # Convolution with valid autopadding
        verify_convtranspose_with_padding(
            (1, 1) + repeat(5, dims),
            (1, 1) + repeat(3, dims),
            None,
            repeat(3, dims),
            repeat(1, dims),
            repeat(1, dims),
            auto_pad="VALID",
        )
        # Convolution with non uniform stride
        verify_convtranspose_with_padding(
            (1, 1) + repeat(5, dims),
            (1, 1) + repeat(3, dims),
            None,
            repeat(3, dims),
            repeat(2, dims),
            repeat(1, dims),
            auto_pad="SAME_UPPER",
        )
        # Convolution with dilation
        # TODO(mbrookhart): Relay doesn't currently support convtranspose with dilation
        # verify_convtranspose_with_padding(
        #     (1, 1) + repeat(5, D),
        #     (1, 1) + repeat(3, D),
        #     2 * repeat(2, D),
        #     repeat(3, D),
        #     repeat(1, D),
        #     repeat(2, D),
        # )

    # Convolution with output_shape
    for dims in [1, 2, 3]:
        for num in range(60, 66):
            verify_convtranspose_with_output_shape(
                (1, 1) + repeat(32, dims),
                (1, 1) + repeat(4, dims),
                repeat(num, dims),
                repeat(4, dims),
                repeat(2, dims),
                repeat(1, dims),
            )

            verify_convtranspose_with_output_shape(
                (1, 1) + repeat(32, dims),
                (1, 1) + repeat(4, dims),
                repeat(num, dims),
                repeat(4, dims),
                repeat(2, dims),
                repeat(1, dims),
                auto_pad="SAME_LOWER",
            )
@tvm.testing.parametrize_targets
def test_unsqueeze_constant(target, dev):
    """Ensure a torch-exported model with constant Unsqueeze nodes imports cleanly."""

    class Flatten(Module):
        def forward(self, input_):
            # Collapse every dimension after the batch axis.
            return input_.view(input_.size(0), -1)

    input_size = (1, 16, 32, 32)
    with tempfile.NamedTemporaryFile() as tmp:
        layer = Sequential(Flatten(), Linear(16 * 32 * 32, 64))
        dummy_input = torch.randn(*input_size)
        torch.onnx.export(layer, dummy_input, tmp.name, export_params=True)

        onnx_model = onnx.load(tmp.name)
        relay.frontend.from_onnx(onnx_model, {"onnx::Reshape_0": input_size})
@tvm.testing.parametrize_targets
def test_pooling(target, dev):
    """Check MaxPool/AveragePool over 1D/2D/3D inputs, strides and pad modes."""

    def verify_pooling(x_shape, kernel_shape, strides, pads, out_shape, mode, auto_pad="NOTSET"):
        """Build a single pooling node and compare TVM against onnxruntime.

        ``pads`` is an explicit pads list, or ``None`` to use ``auto_pad``.
        """
        # NOTE: a previously-present `_ = np.random.uniform(...)` dead statement
        # was removed; inputs are generated inside verify_with_ort.
        if mode == "max":
            node_type = "MaxPool"
        elif mode == "average":
            node_type = "AveragePool"
        else:
            raise ValueError(f"Pool method {mode} is not supported.")

        pool_node = helper.make_node(
            node_type, inputs=["x"], outputs=["y"], kernel_shape=kernel_shape, strides=strides
        )

        # Either explicit pads or an auto_pad mode, never both.
        if pads is None:
            pad_attr = helper.make_attribute("auto_pad", auto_pad)
        else:
            pad_attr = helper.make_attribute("pads", pads)
        pool_node.attribute.append(pad_attr)

        if mode == "max":
            storage_attr = helper.make_attribute("storage_order", 0)
            pool_node.attribute.append(storage_attr)

        graph = helper.make_graph(
            [pool_node],
            "pooling_test",
            inputs=[helper.make_tensor_value_info("x", TensorProto.FLOAT, list(x_shape))],
            outputs=[helper.make_tensor_value_info("y", TensorProto.FLOAT, list(out_shape))],
        )

        model = helper.make_model(graph, producer_name="pooling_test")
        verify_with_ort(
            model,
            [x_shape],
            [out_shape],
            use_vm=False,
            target=target,
            dev=dev,
        )

    for mode in ["max", "average"]:
        # Pool1D
        verify_pooling(
            x_shape=[1, 1, 32],
            kernel_shape=[3],
            strides=[1],
            pads=[1, 1],
            out_shape=[1, 1, 32],
            mode=mode,
        )
        # Pool2D
        verify_pooling(
            x_shape=[1, 1, 32, 32],
            kernel_shape=[3, 3],
            strides=[1, 1],
            pads=[1, 1, 1, 1],
            out_shape=[1, 1, 32, 32],
            mode=mode,
        )
        # Pool1D with stride
        verify_pooling(
            x_shape=[1, 1, 32],
            kernel_shape=[3],
            strides=[2],
            pads=[1, 1],
            out_shape=[1, 1, 16],
            mode=mode,
        )
        # Pool2D with stride
        verify_pooling(
            x_shape=[1, 1, 32, 32],
            kernel_shape=[3, 3],
            strides=[2, 2],
            pads=[1, 1, 1, 1],
            out_shape=[1, 1, 16, 16],
            mode=mode,
        )
        # Pool1D with stride and autopadding
        verify_pooling(
            x_shape=[1, 1, 32],
            kernel_shape=[3],
            strides=[2],
            pads=None,
            out_shape=[1, 1, 16],
            mode=mode,
            auto_pad="SAME_UPPER",
        )
        # Pool2D with stride and autopadding
        verify_pooling(
            x_shape=[1, 1, 32, 32],
            kernel_shape=[3, 3],
            strides=[2, 2],
            pads=None,
            out_shape=[1, 1, 16, 16],
            mode=mode,
            auto_pad="SAME_UPPER",
        )
        # Pool3D with stride
        verify_pooling(
            x_shape=[1, 1, 32, 32, 32],
            kernel_shape=[3, 3, 3],
            strides=[2, 2, 2],
            pads=[1, 1, 1, 1, 1, 1],
            out_shape=[1, 1, 16, 16, 16],
            mode=mode,
        )
        # Pool3D with stride and autopadding
        verify_pooling(
            x_shape=[1, 1, 32, 32, 32],
            kernel_shape=[3, 3, 3],
            strides=[2, 2, 2],
            pads=None,
            out_shape=[1, 1, 16, 16, 16],
            mode=mode,
            auto_pad="SAME_UPPER",
        )
@tvm.testing.parametrize_targets
def test_global_pooling(target, dev):
    """Check GlobalMaxPool/GlobalAveragePool over 1D/2D/3D inputs."""

    def check_global_pooling(x_shape, mode):
        # Global pooling collapses every spatial axis to size 1.
        out_shape = x_shape[:2] + [1] * (len(x_shape) - 2)
        if mode == "max":
            node_type = "GlobalMaxPool"
        elif mode == "average":
            node_type = "GlobalAveragePool"
        else:
            raise ValueError(f"Pool method {mode} is not supported.")

        pool_node = helper.make_node(node_type, inputs=["x"], outputs=["y"])
        graph = helper.make_graph(
            [pool_node],
            "global_pooling_test",
            inputs=[helper.make_tensor_value_info("x", TensorProto.FLOAT, list(x_shape))],
            outputs=[helper.make_tensor_value_info("y", TensorProto.FLOAT, list(out_shape))],
        )
        model = helper.make_model(graph, producer_name="global_pooling_test")
        verify_with_ort(
            model,
            [x_shape],
            [out_shape],
            use_vm=False,
            target=target,
            dev=dev,
        )

    # Test each pooling mode across all N-D inputs.
    shapes = [
        [1, 8, 8],  # 1D Pooling (NCW)
        [4, 1, 4],
        [1, 8, 8, 8],  # 2D Pooling (NCHW)
        [4, 1, 6, 4],
        [1, 8, 6, 8, 8],  # 3D Pooling (NCDHW)
        [4, 1, 2, 6, 4],
    ]
    for mode in ["average", "max"]:
        for shape in shapes:
            check_global_pooling(shape, mode)
@pytest.mark.skip("flaky")
@tvm.testing.parametrize_targets
def test_qlinear_average_pool(target, dev):
    """Quantize an AveragePool model and compare TVM against onnxruntime."""

    def check_qlinear_average_pool(x_shape, kernel_shape, strides, pads, out_shape, auto_pad):
        pool_node = helper.make_node(
            "AveragePool",
            inputs=["X"],
            outputs=["Y"],
            kernel_shape=kernel_shape,
            strides=strides,
        )
        # Either explicit pads or an auto_pad mode, never both.
        if pads is None:
            pool_node.attribute.append(helper.make_attribute("auto_pad", auto_pad))
        else:
            pool_node.attribute.append(helper.make_attribute("pads", pads))

        graph = helper.make_graph(
            [pool_node],
            "qlinear_average_pool_test",
            inputs=[helper.make_tensor_value_info("X", TensorProto.FLOAT, list(x_shape))],
            outputs=[helper.make_tensor_value_info("Y", TensorProto.FLOAT, list(out_shape))],
        )
        model = helper.make_model(graph, producer_name="qlinear_average_pool_Test")
        quantize_and_verify_with_ort(model, ["X"], [x_shape], target, dev)

    # (x_shape, kernel_shape, strides, pads, out_shape, auto_pad)
    cases = [
        # Pool1D
        ([1, 1, 32], [3], [1], [1, 1], [1, 1, 32], "NOTSET"),
        # Pool2D
        ([1, 1, 32, 32], [3, 3], [1, 1], [1, 1, 1, 1], [1, 1, 32, 32], "NOTSET"),
        # Pool1D with stride
        ([1, 1, 32], [3], [2], [1, 1], [1, 1, 16], "NOTSET"),
        # Pool2D with stride
        ([1, 1, 32, 32], [3, 3], [2, 2], [1, 1, 1, 1], [1, 1, 16, 16], "NOTSET"),
        # Pool1D with stride and autopadding
        ([1, 1, 32], [3], [2], None, [1, 1, 16], "SAME_UPPER"),
        # Pool2D with stride and autopadding
        ([1, 1, 32, 32], [3, 3], [2, 2], None, [1, 1, 16, 16], "SAME_UPPER"),
        # Pool3D with stride
        ([1, 1, 32, 32, 32], [3, 3, 3], [2, 2, 2], [1, 1, 1, 1, 1, 1], [1, 1, 16, 16, 16], "NOTSET"),
        # Pool3D with stride and autopadding
        ([1, 1, 32, 32, 32], [3, 3, 3], [2, 2, 2], None, [1, 1, 16, 16, 16], "SAME_UPPER"),
    ]
    for x_shape, kernel_shape, strides, pads, out_shape, auto_pad in cases:
        check_qlinear_average_pool(x_shape, kernel_shape, strides, pads, out_shape, auto_pad)
@tvm.testing.parametrize_targets
def test_qlinear_global_average_pool(target, dev):
    """Quantize a GlobalAveragePool model and compare TVM against onnxruntime."""

    def check_qlinear_global_average_pool(x_shape):
        # Spatial dimensions all reduce to 1.
        out_shape = x_shape[:2] + [1] * (len(x_shape) - 2)
        pool_node = helper.make_node("GlobalAveragePool", inputs=["X"], outputs=["Y"])
        graph = helper.make_graph(
            [pool_node],
            "qlinear_global_average_pool_test",
            inputs=[helper.make_tensor_value_info("X", TensorProto.FLOAT, list(x_shape))],
            outputs=[helper.make_tensor_value_info("Y", TensorProto.FLOAT, list(out_shape))],
        )
        model = helper.make_model(graph, producer_name="qlinear_global_average_pool_test")
        quantize_and_verify_with_ort(model, ["X"], [x_shape], target, dev)

    shapes = [
        [1, 8, 8],  # 1D Pooling (NCW)
        [4, 1, 4],
        [1, 8, 8, 8],  # 2D Pooling (NCHW)
        [4, 1, 6, 4],
        [1, 8, 6, 8, 8],  # 3D Pooling (NCDHW)
        [4, 1, 2, 6, 4],
    ]
    for shape in shapes:
        check_qlinear_global_average_pool(shape)
@tvm.testing.parametrize_targets
def test_mod(target, dev):
    """Check the ONNX ``Mod`` operator for integer mod (fmod=0) and fmod (fmod=1)."""

    def check_mod(x_shape, y_shape, fmod, out_shape, dtype="float32"):
        lhs = np.random.uniform(-100.0, 100.0, x_shape).astype(dtype)
        rhs = np.random.uniform(-100.0, 100.0, y_shape).astype(dtype)
        rhs = np.where(rhs == 0, 1, rhs)  # remove 0's to avoid division by zero error

        mod_node = helper.make_node("Mod", inputs=["x", "y"], outputs=["z"], fmod=fmod)
        onnx_dtype = TensorProto.FLOAT if dtype == "float32" else TensorProto.INT32
        graph = helper.make_graph(
            [mod_node],
            "mod_test",
            inputs=[
                helper.make_tensor_value_info("x", onnx_dtype, list(x_shape)),
                helper.make_tensor_value_info("y", onnx_dtype, list(y_shape)),
            ],
            outputs=[helper.make_tensor_value_info("z", onnx_dtype, list(out_shape))],
        )
        model = helper.make_model(graph, producer_name="mod_test")
        verify_with_ort_with_inputs(model, [lhs, rhs], [out_shape], target=target, dev=dev)

    # Integer mod, including a broadcast case.
    check_mod(x_shape=[1, 32, 32], y_shape=[1, 1, 32], fmod=0, out_shape=(1, 32, 32), dtype="int32")
    check_mod(
        x_shape=[1, 32, 32, 32],
        y_shape=[1, 32, 32, 32],
        fmod=0,
        out_shape=(1, 32, 32, 32),
        dtype="int32",
    )
    # fmod=1 cases, with broadcasting in both directions.
    check_mod(x_shape=[1, 32, 32], y_shape=[1, 32, 32], fmod=1, out_shape=(1, 32, 32), dtype="int32")
    check_mod(x_shape=[1, 1, 32, 32], y_shape=[1, 32, 32, 32], fmod=1, out_shape=(1, 32, 32, 32))
    check_mod(x_shape=[1, 32, 32, 32], y_shape=[1, 1, 32, 32], fmod=1, out_shape=(1, 32, 32, 32))
    check_mod(
        x_shape=[1, 32, 32, 32],
        y_shape=[1, 32, 32, 32],
        fmod=1,
        out_shape=(1, 32, 32, 32),
        dtype="int32",
    )
    check_mod(x_shape=[1, 32, 32, 32], y_shape=[1, 32, 32, 32], fmod=1, out_shape=(1, 32, 32, 32))
@tvm.testing.parametrize_targets
def test_xor(target, dev):
    """Check the ONNX ``Xor`` operator, including broadcasting."""

    def check_xor(x_shape, y_shape):
        lhs = np.random.choice(a=[False, True], size=x_shape).astype("bool")
        rhs = np.random.choice(a=[False, True], size=y_shape).astype("bool")
        # numpy reference determines the (possibly broadcast) output shape.
        out_shape = np.logical_xor(lhs, rhs).shape

        xor_node = helper.make_node("Xor", inputs=["x", "y"], outputs=["z"])
        graph = helper.make_graph(
            [xor_node],
            "xor_test",
            inputs=[
                helper.make_tensor_value_info("x", TensorProto.BOOL, list(x_shape)),
                helper.make_tensor_value_info("y", TensorProto.BOOL, list(y_shape)),
            ],
            outputs=[helper.make_tensor_value_info("z", TensorProto.BOOL, list(out_shape))],
        )
        model = helper.make_model(graph, producer_name="xor_test")
        verify_with_ort_with_inputs(model, [lhs, rhs], [out_shape], target=target, dev=dev)

    # Same shape.
    check_xor(x_shape=[1, 32, 32], y_shape=[1, 32, 32])
    # Broadcast.
    check_xor(x_shape=[1, 32, 32], y_shape=[1, 1, 32])
@tvm.testing.parametrize_targets
def test_max_roi_pool(target, dev):
    """Check the ONNX ``MaxRoiPool`` operator, with and without spatial_scale."""

    def check_max_roi_pool(x_shape, rois_shape, pooled_shape, spatial_scale, out_shape):
        # Attach spatial_scale only when given so the operator default is exercised.
        extra_attrs = {} if spatial_scale is None else {"spatial_scale": spatial_scale}
        pool_node = helper.make_node(
            "MaxRoiPool",
            inputs=["x", "rois"],
            outputs=["y"],
            pooled_shape=pooled_shape,
            **extra_attrs,
        )
        graph = helper.make_graph(
            [pool_node],
            "pool_test",
            inputs=[
                helper.make_tensor_value_info("x", TensorProto.FLOAT, list(x_shape)),
                helper.make_tensor_value_info("rois", TensorProto.FLOAT, list(rois_shape)),
            ],
            outputs=[helper.make_tensor_value_info("y", TensorProto.FLOAT, list(out_shape))],
        )
        model = helper.make_model(graph, producer_name="pool_test")
        verify_with_ort(model, [x_shape, rois_shape], [out_shape], target=target, dev=dev)

    # Default spatial scale.
    check_max_roi_pool(
        x_shape=[1, 3, 6, 6],
        rois_shape=[3, 5],
        pooled_shape=[1, 1],
        spatial_scale=None,
        out_shape=[3, 3, 1, 1],
    )
    # Explicit spatial scale.
    check_max_roi_pool(
        x_shape=[1, 3, 10, 10],
        rois_shape=[4, 5],
        pooled_shape=[2, 2],
        spatial_scale=2.0,
        out_shape=[4, 3, 2, 2],
    )
@tvm.testing.parametrize_targets
def test_lppool(target, dev):
    """Check the ONNX ``LpPool`` operator over 1D/2D/3D inputs and pad modes."""

    def check_lppool(x_shape, kernel_shape, p, strides, pads, out_shape, auto_pad):
        # Build the attribute set once; onnx.helper drops None-valued attributes,
        # so a None `strides` leaves the attribute unset (operator default).
        attrs = {"kernel_shape": kernel_shape, "strides": strides}
        if p is not None:
            attrs["p"] = p
        if pads is None:
            attrs["auto_pad"] = auto_pad
        else:
            attrs["pads"] = pads
        pool_node = helper.make_node("LpPool", inputs=["x"], outputs=["y"], **attrs)

        graph = helper.make_graph(
            [pool_node],
            "lppool_test",
            inputs=[helper.make_tensor_value_info("x", TensorProto.FLOAT, list(x_shape))],
            outputs=[helper.make_tensor_value_info("y", TensorProto.FLOAT, list(out_shape))],
        )
        model = helper.make_model(graph, producer_name="lppool_test")
        verify_with_ort(
            model,
            [x_shape],
            [out_shape],
            use_vm=True,
            target=target,
            dev=dev,
        )

    # (x_shape, kernel_shape, p, strides, pads, out_shape, auto_pad)
    cases = [
        # Pool1D
        ([1, 1, 32], [3], 2, [1], [1, 1], [1, 1, 32], "NOTSET"),
        # Pool2D
        ([1, 1, 32, 32], [3, 3], 2, [1, 1], [1, 1, 1, 1], [1, 1, 32, 32], "NOTSET"),
        # Pool1D with stride
        ([1, 1, 32], [3], 2, [2], [1, 1], [1, 1, 16], "NOTSET"),
        # Pool2D with stride
        ([1, 1, 32, 32], [3, 3], 2, [2, 2], [1, 1, 1, 1], [1, 1, 16, 16], "NOTSET"),
        # Pool1D with stride and autopadding
        ([1, 1, 32], [3], 2, [2], None, [1, 1, 16], "SAME_UPPER"),
        # Pool2D with stride and autopadding
        ([1, 1, 32, 32], [3, 3], 2, [2, 2], None, [1, 1, 16, 16], "SAME_UPPER"),
        # Pool2D with empty stride
        ([1, 3, 32, 32], [2, 2], 4, None, None, [1, 3, 32, 32], "SAME_LOWER"),
        # Pool3D with stride
        ([1, 1, 32, 32, 32], [3, 3, 3], 2, [2, 2, 2], [1, 1, 1, 1, 1, 1], [1, 1, 16, 16, 16], "NOTSET"),
        # Pool3D with stride and autopadding
        ([1, 1, 32, 32, 32], [3, 3, 3], 2, [2, 2, 2], None, [1, 1, 16, 16, 16], "SAME_UPPER"),
        # Pool2D with empty p (operator default)
        ([1, 1, 32, 32], [3, 3], None, [1, 1], [1, 1, 1, 1], [1, 1, 32, 32], "NOTSET"),
    ]
    for x_shape, kernel_shape, p, strides, pads, out_shape, auto_pad in cases:
        check_lppool(x_shape, kernel_shape, p, strides, pads, out_shape, auto_pad)
def verify_global_lppool(x_shape, p, out_shape, target, dev):
    """Build a single-GlobalLpPool model and compare TVM against onnxruntime."""
    node = helper.make_node(
        "GlobalLpPool",
        inputs=["x"],
        outputs=["y"],
        p=p,
    )
    graph = helper.make_graph(
        [node],
        "global_lppool_test",
        inputs=[helper.make_tensor_value_info("x", TensorProto.FLOAT, list(x_shape))],
        outputs=[helper.make_tensor_value_info("y", TensorProto.FLOAT, list(out_shape))],
    )
    model = helper.make_model(graph, producer_name="global_lppool_test")
    verify_with_ort(model, [x_shape], out_shape, use_vm=True, target=target, dev=dev)
@tvm.testing.parametrize_targets
def test_global_lppool(target, dev):
    """Check GlobalLpPool over 1D/2D/3D inputs and different p norms."""
    # (x_shape, p, out_shape): every spatial axis collapses to size 1.
    cases = [
        ([1, 15, 16], 2, [1, 15, 1]),  # LpPool1D
        ([1, 15, 32, 32], 2, [1, 15, 1, 1]),  # LpPool2D
        ([1, 15, 32, 32], 3, [1, 15, 1, 1]),  # LpPool2D, p=3
        ([1, 15, 3, 32, 32], 2, [1, 15, 1, 1, 1]),  # LpPool3D
    ]
    for x_shape, p, out_shape in cases:
        verify_global_lppool(x_shape=x_shape, p=p, out_shape=out_shape, target=target, dev=dev)
def verify_rnn(
    seq_length,
    batch_size,
    input_size,
    hidden_size,
    rnn_type="LSTM",
    use_bias=False,
    activations=None,
    alphas=None,
    betas=None,
    use_initial_state=False,
    use_peep=False,
    linear_before_reset=False,
    directions=1,
    layout=0,
    rtol=1e-5,
    atol=1e-5,
    target=None,
    dev=None,
    use_sequence_lens=False,
):
    """Construct an ONNX RNN/LSTM/GRU model and compare TVM against onnxruntime.

    Parameters mirror the ONNX operator attributes/inputs: ``layout=1`` puts the
    batch dimension first, ``directions=2`` builds a bidirectional cell, and the
    optional inputs (bias, sequence_lens, initial states, peepholes) are enabled
    via the ``use_*`` flags.
    """
    # Gate count per RNN flavor: W/R/B are stacked along (multiplier * hidden_size).
    if rnn_type == "RNN":
        multiplier = 1
    elif rnn_type == "LSTM":
        multiplier = 4
    elif rnn_type == "GRU":
        multiplier = 3
    else:
        raise NotImplementedError(f"{rnn_type} RNNs not yet supported.")

    if directions not in [1, 2]:
        # (fixed: was an f-string with no placeholders)
        raise ValueError("Direction should be either 1 or 2 (for bidirectional LSTMs)")

    def get_inputs():
        """Create random inputs for every enabled operator input, in ONNX order."""
        input_names = []
        input_values = []
        input_tensors = []

        def register(np_arr, name, shape=None):
            input_values.append(np_arr)
            input_names.append(name)

            # Map of numpy dtypes to the protobuf equivalent
            dtype_map = {
                "float32": TensorProto.FLOAT,
                "int32": TensorProto.INT32,
                "int8": TensorProto.INT8,
            }

            if np_arr.dtype.name not in dtype_map:
                # BUGFIX: message previously interpolated `np.dtype.name` (the
                # class attribute descriptor) instead of the array's dtype name.
                raise ValueError(f"Unknown dtype we don't know how to handle {np_arr.dtype.name}")
            if shape is None:
                shape = list(np_arr.shape)
            proto_type = dtype_map[np_arr.dtype.name]
            input_tensors.append(helper.make_tensor_value_info(name, proto_type, shape))

        if layout == 1:
            x_np = np.random.uniform(size=(batch_size, seq_length, input_size)).astype("float32")
        else:
            x_np = np.random.uniform(size=(seq_length, batch_size, input_size)).astype("float32")
        w_np = np.random.uniform(size=(directions, multiplier * hidden_size, input_size)).astype(
            "float32"
        )
        r_np = np.random.uniform(size=(directions, multiplier * hidden_size, hidden_size)).astype(
            "float32"
        )
        register(x_np, "X")
        register(w_np, "W")
        register(r_np, "R")

        if use_bias:
            b_np = np.random.uniform(size=(directions, multiplier * 2 * hidden_size)).astype(
                "float32"
            )
            register(b_np, "B")

        if use_sequence_lens:
            sequence_np = np.random.uniform(0, seq_length, size=(batch_size)).astype("int32")
            register(sequence_np, "sequence_lens")

        if use_initial_state:
            assert use_bias is True, "Initial states must have bias specified."
            if not use_sequence_lens:
                sequence_np = np.repeat(seq_length, batch_size).astype("int32")
                register(sequence_np, "sequence_lens")

            if layout == 1:
                initial_h_np = np.random.uniform(size=(batch_size, directions, hidden_size)).astype(
                    "float32"
                )
            else:
                initial_h_np = np.random.uniform(size=(directions, batch_size, hidden_size)).astype(
                    "float32"
                )
            register(initial_h_np, "initial_h")

            if rnn_type == "LSTM":
                if layout == 1:
                    initial_c_np = np.random.uniform(
                        size=(batch_size, directions, hidden_size)
                    ).astype("float32")
                else:
                    initial_c_np = np.random.uniform(
                        size=(directions, batch_size, hidden_size)
                    ).astype("float32")
                register(initial_c_np, "initial_c")

        if use_peep and rnn_type == "LSTM":
            assert use_initial_state is True, "Peepholes require initial state to be specified."
            p_np = np.random.uniform(size=(directions, 3 * hidden_size)).astype("float32")
            register(p_np, "P")

        return input_names, input_tensors, input_values

    input_names, input_tensors, input_values = get_inputs()

    def get_outputs():
        """Declare the operator outputs (Y, Y_h and, for LSTM, Y_c)."""
        output_names = []
        graph_outputs = []
        output_shapes = []

        def register(name, shape, proto_type):
            output_names.append(name)
            graph_outputs.append(helper.make_tensor_value_info(name, proto_type, list(shape)))
            output_shapes.append(list(shape))

        if layout == 1:
            register("Y", [directions, seq_length, batch_size, hidden_size], TensorProto.FLOAT)
            register("Y_h", [batch_size, directions, hidden_size], TensorProto.FLOAT)
        else:
            register("Y", [seq_length, directions, batch_size, hidden_size], TensorProto.FLOAT)
            register("Y_h", [directions, batch_size, hidden_size], TensorProto.FLOAT)

        if rnn_type == "LSTM":
            if layout == 1:
                register("Y_c", [batch_size, directions, hidden_size], TensorProto.FLOAT)
            else:
                register("Y_c", [directions, batch_size, hidden_size], TensorProto.FLOAT)

        return output_names, graph_outputs, output_shapes

    output_names, graph_outputs, output_shapes = get_outputs()

    rnn_node = helper.make_node(
        rnn_type, inputs=input_names, outputs=output_names, hidden_size=hidden_size
    )
    # Optional attributes are appended only when requested so defaults stay covered.
    if activations is not None:
        activations_attr = helper.make_attribute("activations", activations)
        rnn_node.attribute.append(activations_attr)
    if directions == 2:
        direction_attr = helper.make_attribute("direction", "bidirectional")
        rnn_node.attribute.append(direction_attr)
    if alphas is not None:
        alphas_attr = helper.make_attribute("activation_alpha", alphas)
        rnn_node.attribute.append(alphas_attr)
    if betas is not None:
        betas_attr = helper.make_attribute("activation_beta", betas)
        rnn_node.attribute.append(betas_attr)
    if linear_before_reset and rnn_type == "GRU":
        lbr_attr = helper.make_attribute("linear_before_reset", 1)
        rnn_node.attribute.append(lbr_attr)
    if layout == 1:
        layout_attr = helper.make_attribute("layout", 1)
        rnn_node.attribute.append(layout_attr)

    graph = helper.make_graph([rnn_node], "rnn_test", inputs=input_tensors, outputs=graph_outputs)
    model = helper.make_model(graph, producer_name="rnn_test")
    verify_with_ort_with_inputs(
        model, input_values, output_shapes, atol=atol, rtol=rtol, target=target, dev=dev
    )
def verify_rnn_helper(target, dev, rnn_type):
    """Run the shared battery of recurrent-cell test cases through ``verify_rnn``.

    The number of activation functions per direction depends on the cell
    type: plain RNN uses 1, GRU uses 2, and LSTM uses 3.
    """
    num_activations = {"GRU": 2, "LSTM": 3}.get(rnn_type, 1)
    for directions in [1, 2]:
        # Keyword arguments shared by every case below.
        common = {
            "rnn_type": rnn_type,
            "directions": directions,
            "target": target,
            "dev": dev,
        }
        # No bias.
        verify_rnn(
            seq_length=2, batch_size=1, input_size=16, hidden_size=32, use_bias=False, **common
        )
        # large batch.
        verify_rnn(
            seq_length=4, batch_size=8, input_size=16, hidden_size=32, use_bias=True, **common
        )
        # Non power of two.
        verify_rnn(
            seq_length=3, batch_size=3, input_size=16, hidden_size=40, use_bias=True, **common
        )
        # Long sequence.
        verify_rnn(
            seq_length=8, batch_size=1, input_size=16, hidden_size=32, use_bias=True, **common
        )
        # Large hidden.
        verify_rnn(
            seq_length=2, batch_size=1, input_size=16, hidden_size=128, use_bias=True, **common
        )
        # Large input.
        verify_rnn(
            seq_length=2, batch_size=1, input_size=64, hidden_size=32, use_bias=True, **common
        )

        # Different activation testing.
        # Default value hardsigmoid.
        # TODO: onnxruntime <= v1.12.0 has wrong default value of all activation functions
        if rnn_type != "RNN":
            activations = ["HardSigmoid", "Tanh", "Tanh"][0:num_activations] * directions
            verify_rnn(
                seq_length=2,
                batch_size=1,
                input_size=16,
                hidden_size=32,
                use_bias=False,
                activations=activations,
                **common,
            )
            # Multiple parametrized activations.
            activations = ["HardSigmoid", "LeakyRelu", "Tanh"][0:num_activations] * directions
            alphas = [2.0, 0.5, 0.0][0:num_activations] * directions
            betas = [0.3, 0.0, 0.0][0:num_activations] * directions
            verify_rnn(
                seq_length=2,
                batch_size=1,
                input_size=16,
                hidden_size=32,
                use_bias=False,
                activations=activations,
                alphas=alphas,
                betas=betas,
                **common,
            )
            # All parametrized with new Affine activation.
            activations = ["Affine", "LeakyRelu", "HardSigmoid"][0:num_activations] * directions
            alphas = [0.8, 2.0, 0.5][0:num_activations] * directions
            betas = [0.0, 0.3, 0.0][0:num_activations] * directions
            verify_rnn(
                seq_length=2,
                batch_size=1,
                input_size=16,
                hidden_size=32,
                use_bias=False,
                activations=activations,
                alphas=alphas,
                betas=betas,
                **common,
            )

        # Testing with initial state
        verify_rnn(
            seq_length=2,
            batch_size=1,
            input_size=16,
            hidden_size=32,
            use_bias=True,
            use_initial_state=True,
            **common,
        )

        # Testing layout
        # TODO: onnxruntime <= 1.12.0 doesn't support layout == 1, so the
        # layout=1 variant is intentionally not exercised here.

        # Testing with initial state and explicit sequence lengths (GRU only).
        if rnn_type == "GRU":
            verify_rnn(
                seq_length=2,
                batch_size=1,
                input_size=16,
                hidden_size=32,
                use_bias=True,
                use_initial_state=True,
                use_sequence_lens=True,
                **common,
            )
            verify_rnn(
                seq_length=8,
                batch_size=8,
                input_size=16,
                hidden_size=32,
                use_bias=True,
                use_initial_state=True,
                use_sequence_lens=True,
                **common,
            )

        # Testing with peepholes (LSTM only).
        if rnn_type == "LSTM":
            verify_rnn(
                seq_length=2,
                batch_size=1,
                input_size=16,
                hidden_size=32,
                use_bias=True,
                use_initial_state=True,
                use_peep=True,
                **common,
            )
@tvm.testing.parametrize_targets
def test_rnn(target, dev):
    """Exercise the ONNX RNN operator through the shared recurrent-cell helper."""
    verify_rnn_helper(target, dev, "RNN")
@tvm.testing.parametrize_targets
def test_lstm(target, dev):
    """Exercise the ONNX LSTM operator through the shared recurrent-cell helper."""
    verify_rnn_helper(target, dev, "LSTM")
@tvm.testing.parametrize_targets
def test_gru(target, dev):
    """Exercise the ONNX GRU operator through the shared recurrent-cell helper."""
    verify_rnn_helper(target, dev, "GRU")
@tvm.testing.parametrize_targets
def test_resize(target, dev):
    """test_resize

    Covers the opset-11 Resize operator (nearest/linear/cubic modes, various
    coordinate transformation modes, sizes vs. scales inputs) plus the older
    opset-10 form.
    """

    def verify(ishape, oshape, scales, mode, coord_trans="asymmetric", alpha=0.5, exclude=False):
        # Opset-11 Resize takes inputs (X, roi, scales[, sizes]).  `roi` is
        # always an empty constant here; either an explicit `oshape` (sizes)
        # or `scales` determines the output resolution.
        nodes = [
            make_constant_node("roi", onnx.TensorProto.FLOAT, (0,), []),
            make_constant_node("scales", onnx.TensorProto.FLOAT, (len(scales),), scales),
        ]
        input_names = ["X", "roi", "scales"]
        if oshape != []:
            # Explicit output sizes requested: add the optional `sizes` input.
            nodes.append(
                make_constant_node("sizes", onnx.TensorProto.INT64, (len(oshape),), oshape)
            )
            input_names.append("sizes")
        nodes.append(
            helper.make_node(
                "Resize",
                inputs=input_names,
                outputs=["Y"],
                mode=mode,
                coordinate_transformation_mode=coord_trans,
                cubic_coeff_a=alpha,
                exclude_outside=exclude,
            )
        )
        if oshape == []:
            # Derive the expected output shape from the scale factors.
            oshape = [round(dim * scale) for (dim, scale) in zip(ishape, scales)]
        graph = helper.make_graph(
            nodes,
            "resize_test",
            inputs=[helper.make_tensor_value_info("X", TensorProto.FLOAT, ishape)],
            outputs=[helper.make_tensor_value_info("Y", TensorProto.FLOAT, oshape)],
        )
        model = helper.make_model(graph, producer_name="resize_test")
        verify_with_ort(
            model,
            [ishape],
            [oshape],
            use_vm=True,
            opset=11,
            freeze_params=True,
            target=target,
            dev=dev,
        )

    # Sweep over 1D/2D/3D spatial inputs (plus fixed [N, C] leading dims).
    for ndim in [1, 2, 3]:
        method = "nearest"
        for coord_trans in ["asymmetric", "align_corners", "half_pixel"]:
            # upsampling
            verify([1, 16] + [32] * ndim, [1, 16] + [64] * ndim, [], method, coord_trans)
            # downsampling
            verify([1, 16] + [32] * ndim, [1, 16] + [16] * ndim, [], method, coord_trans)
            # scales are specified instead of sizes
            verify([1, 16] + [32] * ndim, [], [1, 1] + [0.5] * ndim, method, coord_trans)
            verify([1, 16] + [32] * ndim, [], [1, 1] + [2] * ndim, method, coord_trans)

        method = "linear"
        # upsampling
        verify([1, 16] + [32] * ndim, [1, 16] + [64] * ndim, [], method)
        # downsampling
        verify([1, 16] + [32] * ndim, [1, 16] + [16] * ndim, [], method)
        # scales are specified instead of sizes
        verify([1, 16] + [32] * ndim, [], [1, 1] + [0.5] * ndim, method)
        verify([1, 16] + [32] * ndim, [], [1, 1] + [2] * ndim, method)

        if ndim == 2:
            # ONNX Runtime only supports cubic interpolation for 2D images
            method = "cubic"
            for alpha in [0.5, 0.75]:
                for exclude in [True, False]:
                    # upsampling
                    verify(
                        [1, 16] + [32] * ndim,
                        [1, 16] + [64] * ndim,
                        [],
                        method,
                        alpha=alpha,
                        exclude=exclude,
                    )
                    # downsampling
                    verify(
                        [1, 16] + [32] * ndim,
                        [1, 16] + [16] * ndim,
                        [],
                        method,
                        alpha=alpha,
                        exclude=exclude,
                    )
                    # scales are specified instead of sizes
                    verify(
                        [1, 16] + [32] * ndim,
                        [],
                        [1, 1] + [0.5] * ndim,
                        method,
                        alpha=alpha,
                        exclude=exclude,
                    )
                    verify(
                        [1, 16] + [32] * ndim,
                        [],
                        [1, 1] + [2] * ndim,
                        method,
                        alpha=alpha,
                        exclude=exclude,
                    )

    def verify_opset_10(ishape, scales, mode):
        # Opset-10 Resize only takes (X, scales) — no roi/sizes inputs and no
        # coordinate transformation attribute.
        nodes = [
            make_constant_node("scales", onnx.TensorProto.FLOAT, (len(scales),), scales),
        ]
        input_names = ["X", "scales"]
        nodes.append(
            helper.make_node(
                "Resize",
                inputs=input_names,
                outputs=["Y"],
                mode=mode,
            )
        )
        oshape = [round(dim * scale) for (dim, scale) in zip(ishape, scales)]
        graph = helper.make_graph(
            nodes,
            "resize_test",
            inputs=[helper.make_tensor_value_info("X", TensorProto.FLOAT, ishape)],
            outputs=[helper.make_tensor_value_info("Y", TensorProto.FLOAT, oshape)],
        )
        model = helper.make_model(graph, producer_name="resize_test")
        verify_with_ort(
            model,
            [ishape],
            [oshape],
            use_vm=True,
            freeze_params=True,
            opset=10,
            target=target,
            dev=dev,
        )

    verify_opset_10([1, 16, 32, 32], [1, 1, 2, 2], "nearest")
    verify_opset_10([1, 16, 32, 32], [1, 1, 0.5, 0.5], "linear")
@tvm.testing.parametrize_targets
def test_nonzero(target, dev):
    """test_nonzero"""

    def verify_nonzero(indata, outdata, dtype):
        # `outdata` only supplies the expected output shape; `dtype` is kept
        # for call-site compatibility (the ORT helper is passed "int64").
        node = helper.make_node(
            "NonZero",
            inputs=["X"],
            outputs=["Y"],
        )
        graph = helper.make_graph(
            [node],
            "nonzero_test",
            inputs=[helper.make_tensor_value_info("X", TensorProto.INT64, list(indata.shape))],
            outputs=[helper.make_tensor_value_info("Y", TensorProto.INT64, list(outdata.shape))],
        )
        model = helper.make_model(graph, producer_name="nonzero_test")
        verify_with_ort_with_inputs(
            model, [indata], dtype="int64", use_vm=True, opset=9, target=target, dev=dev
        )

    cases = [
        np.array([[1, 0], [1, 1]], dtype=np.int64),
        np.array([[3, 0, 0], [0, 4, 0], [5, 6, 0]], dtype=np.int64),
    ]
    for input_data in cases:
        # Reference result computed with numpy, e.g. [[0, 1, 1], [0, 0, 1]].
        expected = np.array(np.nonzero(input_data))
        verify_nonzero(input_data, expected, dtype=np.int64)
@tvm.testing.parametrize_targets
def test_topk(target, dev):
    """test_topk"""

    def verify_topk(input_dims, k, axis=-1):
        # Output shape matches the input except along `axis`, which becomes k.
        output_dims = list(input_dims)
        output_dims[axis] = k
        node = helper.make_node("TopK", inputs=["X", "K"], outputs=["Values", "Indices"], axis=axis)
        data_info = helper.make_tensor_value_info("X", TensorProto.FLOAT, list(input_dims))
        # K is supplied as a one-element int64 tensor input.
        k_info = helper.make_tensor_value_info("K", TensorProto.INT64, [1])
        graph = helper.make_graph(
            [node],
            "topk_test",
            inputs=[data_info, k_info],
            outputs=[
                helper.make_tensor_value_info("Values", TensorProto.FLOAT, output_dims),
                helper.make_tensor_value_info("Indices", TensorProto.INT64, output_dims),
            ],
        )
        model = helper.make_model(graph, producer_name="topk_test")
        indata = np.random.uniform(-10, 10, input_dims).astype(np.float32)
        verify_with_ort_with_inputs(
            model, [indata, np.array([k])], use_vm=True, target=target, dev=dev
        )

    for n in [12, 32]:
        # Default axis over 1-, 2-, and 3-dimensional inputs, several k values.
        for ndim in (1, 2, 3):
            for k in [1, 5, 10]:
                verify_topk([n] * ndim, k)
        # Explicit axis selection on a cubic input.
        for axis in (0, 1, 2):
            verify_topk([n, n, n], 5, axis)
@tvm.testing.parametrize_targets
def test_roi_align(target, dev):
    """test_roi_align

    Checks the RoiAlign operator against ONNX Runtime for a range of input
    shapes, ROI counts, output sizes, sampling ratios and spatial scales.
    """

    def verify_roi_align(
        input_dims,
        num_roi,
        output_height,
        output_width,
        sampling_ratio=0,
        spatial_scale=1.0,
        mode="avg",
    ):
        # RoiAlign output layout is [num_rois, channels, out_h, out_w].
        output_dims = [num_roi, input_dims[1], output_height, output_width]
        node = helper.make_node(
            "RoiAlign",
            coordinate_transformation_mode="output_half_pixel",
            inputs=["X", "rois", "batch_indices"],
            outputs=["Y"],
            mode=mode,
            output_height=output_height,
            output_width=output_width,
            sampling_ratio=sampling_ratio,
            spatial_scale=spatial_scale,
        )
        graph = helper.make_graph(
            [node],
            "roialign_test",
            inputs=[
                helper.make_tensor_value_info("X", TensorProto.FLOAT, list(input_dims)),
                # Each ROI is (x1, y1, x2, y2).
                helper.make_tensor_value_info("rois", TensorProto.FLOAT, [num_roi, 4]),
                helper.make_tensor_value_info(
                    "batch_indices",
                    TensorProto.INT64,
                    [
                        num_roi,
                    ],
                ),
            ],
            outputs=[helper.make_tensor_value_info("Y", TensorProto.FLOAT, output_dims)],
        )
        model = helper.make_model(graph, producer_name="roialign_test")
        np_data = np.random.uniform(size=input_dims).astype("float32")
        # Scale random [0, 1) coordinates up to roughly the spatial extent.
        np_rois = np.random.uniform(size=[num_roi, 4]).astype("float32") * input_dims[2]
        # Each ROI references a valid batch entry.
        np_batch_indices = np.random.randint(low=0, high=input_dims[0], size=num_roi)
        verify_with_ort_with_inputs(
            model,
            [np_data, np_rois, np_batch_indices],
            out_shape=[output_dims],
            target=target,
            dev=dev,
        )

    verify_roi_align((1, 4, 16, 16), 32, 7, 7, sampling_ratio=0, spatial_scale=1.0)
    verify_roi_align((4, 4, 16, 32), 32, 7, 7, sampling_ratio=0, spatial_scale=1.0)
    verify_roi_align((1, 8, 16, 16), 32, 7, 7, sampling_ratio=0, spatial_scale=1.0)
    verify_roi_align((1, 4, 8, 8), 32, 7, 7, sampling_ratio=0, spatial_scale=1.0)
    verify_roi_align((1, 4, 16, 16), 16, 5, 7, sampling_ratio=0, spatial_scale=1.0)
    verify_roi_align((1, 4, 16, 12), 8, 7, 3, sampling_ratio=0, spatial_scale=1.0)
    verify_roi_align((1, 4, 16, 16), 32, 7, 7, sampling_ratio=0, spatial_scale=0.5)
    verify_roi_align((3, 4, 12, 16), 32, 7, 7, sampling_ratio=0, spatial_scale=1.5)
    verify_roi_align((5, 4, 16, 14), 32, 7, 7, sampling_ratio=1, spatial_scale=1.0)
    verify_roi_align((1, 4, 16, 16), 32, 7, 7, sampling_ratio=2, spatial_scale=1.0)

    # ONNX implementation of roi_align with max mode is incorrect, so we don't compare outputs here.
@tvm.testing.parametrize_targets
def test_non_max_suppression(target, dev):
    """test_non_max_suppression

    Builds NonMaxSuppression graphs with and without the optional
    `score_threshold` input and compares against ONNX Runtime.
    """

    def verify_nms(
        boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold, output_dims
    ):
        input_names = ["boxes", "scores", "max_output_boxes_per_class", "iou_threshold"]
        input_nodes = [
            helper.make_tensor_value_info("boxes", TensorProto.FLOAT, boxes.shape),
            helper.make_tensor_value_info("scores", TensorProto.FLOAT, scores.shape),
            helper.make_tensor_value_info(
                "max_output_boxes_per_class", TensorProto.INT64, max_output_boxes_per_class.shape
            ),
            helper.make_tensor_value_info("iou_threshold", TensorProto.FLOAT, iou_threshold.shape),
        ]
        inputs = [boxes, scores, max_output_boxes_per_class, iou_threshold]
        if score_threshold is not None:
            # score_threshold is an optional trailing input.
            input_names.append("score_threshold")
            input_nodes.append(
                helper.make_tensor_value_info(
                    "score_threshold", TensorProto.FLOAT, score_threshold.shape
                )
            )
            inputs.append(score_threshold)
        node = helper.make_node(
            "NonMaxSuppression",
            inputs=input_names,
            outputs=["Y"],
            # 0 means boxes are given as [y1, x1, y2, x2] corner coordinates.
            center_point_box=0,
        )
        graph = helper.make_graph(
            [node],
            "nms_test",
            inputs=input_nodes,
            # Output is [num_selected, 3]: (batch_index, class_index, box_index).
            outputs=[helper.make_tensor_value_info("Y", TensorProto.INT64, output_dims)],
        )
        model = helper.make_model(graph, producer_name="nms_test")
        verify_with_ort_with_inputs(model, inputs, use_vm=True, target=target, dev=dev)

    # Case 1: two batches, two classes, no score threshold.
    boxes = np.array(
        [
            [
                [0.0, 0.0, 0.3, 0.3],
                [0.0, 0.0, 0.4, 0.4],
                [0.0, 0.0, 0.5, 0.5],
                [0.5, 0.5, 0.9, 0.9],
                [0.5, 0.5, 1.0, 1.0],
            ],
            [
                [0.0, 0.0, 0.3, 0.3],
                [0.0, 0.0, 0.4, 0.4],
                [0.5, 0.5, 0.95, 0.95],
                [0.5, 0.5, 0.96, 0.96],
                [0.5, 0.5, 1.0, 1.0],
            ],
        ]
    ).astype("float32")

    scores = np.array(
        [
            [[0.1, 0.2, 0.6, 0.3, 0.9], [0.1, 0.2, 0.6, 0.3, 0.9]],
            [[0.1, 0.2, 0.6, 0.3, 0.9], [0.1, 0.2, 0.6, 0.3, 0.9]],
        ]
    ).astype("float32")
    max_output_boxes_per_class = np.array(2).astype("int64")
    iou_threshold = np.array(0.8).astype("float32")
    output_dims = [8, 3]
    verify_nms(boxes, scores, max_output_boxes_per_class, iou_threshold, None, output_dims)

    # Case 2: single batch/class with a score threshold filtering weak boxes.
    boxes = np.array(
        [
            [
                [0.0, 0.0, 1.0, 1.0],
                [0.0, 0.1, 1.0, 1.1],
                [0.0, -0.1, 1.0, 0.9],
                [0.0, 10.0, 1.0, 11.0],
                [0.0, 10.1, 1.0, 11.1],
                [0.0, 100.0, 1.0, 101.0],
            ]
        ]
    ).astype(np.float32)

    scores = np.array([[[0.9, 0.75, 0.6, 0.95, 0.5, 0.3]]]).astype(np.float32)
    max_output_boxes_per_class = np.array([3]).astype(np.int64)
    iou_threshold = np.array([0.5]).astype(np.float32)
    score_threshold = np.array([0.4]).astype(np.float32)
    output_dims = [2, 3]
    verify_nms(
        boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold, output_dims
    )
@tvm.testing.parametrize_targets
def test_loop(target, dev):
    """test_loop

    Exercises the ONNX Loop operator: a condition-terminated loop, a fixed
    trip-count loop with scalar state, and a loop carrying a full tensor
    (with and without a declared scan output shape).
    """

    def verify_cond_loop():
        # Loop body state: y accumulates the iteration counter; the loop
        # terminates once y_out >= 5 (the `Less` comparison drives cond_out).
        y_in = helper.make_tensor_value_info("y_in", TensorProto.FLOAT, [1])
        y_out = helper.make_tensor_value_info("y_out", TensorProto.FLOAT, [1])
        scan_out = helper.make_tensor_value_info("scan_out", TensorProto.FLOAT, [1])
        cond_in = helper.make_tensor_value_info("cond_in", TensorProto.BOOL, [])
        cond_out = helper.make_tensor_value_info("cond_out", TensorProto.BOOL, [])
        iter_count = helper.make_tensor_value_info("iter_count", TensorProto.INT64, [])
        y = np.array([-2]).astype(np.float32)
        five_const_node = helper.make_node(
            "Constant",
            inputs=[],
            outputs=["five"],
            value=helper.make_tensor(
                name="const_tensor_five", data_type=TensorProto.FLOAT, dims=(), vals=[5]
            ),
        )
        iter_cast_node = helper.make_node(
            "Cast", inputs=["iter_count"], outputs=["iter_cast"], to=onnx.TensorProto.FLOAT
        )
        y_add_node = helper.make_node("Add", inputs=["y_in", "iter_cast"], outputs=["y_out"])
        less_node = helper.make_node("Less", inputs=["y_out", "five"], outputs=["cond_less"])
        squeeze_node = helper.make_node("Squeeze", inputs=["cond_less"], outputs=["cond_squeeze"])
        cond_cast_node = helper.make_node(
            "Cast", inputs=["cond_squeeze"], outputs=["cond_out"], to=onnx.TensorProto.BOOL
        )
        scan_identity_node = helper.make_node("Identity", inputs=["y_out"], outputs=["scan_out"])
        loop_body = helper.make_graph(
            [
                five_const_node,
                iter_cast_node,
                y_add_node,
                less_node,
                squeeze_node,
                cond_cast_node,
                scan_identity_node,
            ],
            "loop_body",
            [iter_count, cond_in, y_in],
            [cond_out, y_out, scan_out],
        )
        loop_node = helper.make_node(
            "Loop",
            inputs=["trip_count", "cond", "y"],
            outputs=["res_y", "res_scan"],
            body=loop_body,
        )
        trip_count = np.array(5).astype(np.int64)
        _ = np.array([13]).astype(np.float32)
        cond = np.array(1).astype(bool)
        loop_graph = onnx.helper.make_graph(
            [loop_node],
            "loop_outer",
            inputs=[
                onnx.helper.make_tensor_value_info("trip_count", onnx.TensorProto.INT64, []),
                onnx.helper.make_tensor_value_info("cond", onnx.TensorProto.BOOL, []),
                onnx.helper.make_tensor_value_info("y", onnx.TensorProto.FLOAT, [1]),
            ],
            outputs=[
                onnx.helper.make_tensor_value_info("res_y", onnx.TensorProto.FLOAT, [1]),
                onnx.helper.make_tensor_value_info("res_scan", onnx.TensorProto.FLOAT, [5, 1]),
            ],
        )
        loop_model = onnx.helper.make_model(loop_graph)

        # Set a high trip count so that condition trips first.
        trip_count = np.array(40).astype(np.int64)
        cond = np.array(1).astype(bool)
        input_vals = [trip_count, cond, y]
        verify_with_ort_with_inputs(
            loop_model,
            input_vals,
            use_vm=True,
            freeze_params=True,
            opset=11,
            target=target,
            dev=dev,
        )

    def verify_count_loop():
        # Pure trip-count loop: cond is passed through unchanged, and the loop
        # runs exactly `trip_count` iterations over scalar state.
        y_in = helper.make_tensor_value_info("y_in", TensorProto.FLOAT, [])
        y_out = helper.make_tensor_value_info("y_out", TensorProto.FLOAT, [])
        scan_out = helper.make_tensor_value_info("scan_out", TensorProto.FLOAT, [])
        cond_in = helper.make_tensor_value_info("cond_in", TensorProto.BOOL, [])
        cond_out = helper.make_tensor_value_info("cond_out", TensorProto.BOOL, [])
        iter_count = helper.make_tensor_value_info("iter_count", TensorProto.INT64, [])
        y = np.array(-2).astype(np.float32)
        iter_cast_node = helper.make_node(
            "Cast", inputs=["iter_count"], outputs=["iter_cast"], to=onnx.TensorProto.FLOAT
        )
        y_add_node = helper.make_node("Add", inputs=["y_in", "iter_cast"], outputs=["y_out"])
        identity_node = helper.make_node("Identity", inputs=["cond_in"], outputs=["cond_out"])
        scan_identity_node = helper.make_node("Identity", inputs=["y_out"], outputs=["scan_out"])
        loop_body = helper.make_graph(
            [identity_node, iter_cast_node, y_add_node, scan_identity_node],
            "loop_body",
            [iter_count, cond_in, y_in],
            [cond_out, y_out, scan_out],
        )
        loop_node = helper.make_node(
            "Loop",
            inputs=["trip_count", "cond", "y"],
            outputs=["res_y", "res_scan"],
            body=loop_body,
        )
        trip_count = np.array(5).astype(np.int64)
        _ = np.array([13]).astype(np.float32)
        cond = np.array(1).astype(bool)
        loop_graph = onnx.helper.make_graph(
            [loop_node],
            "loop_outer",
            inputs=[
                onnx.helper.make_tensor_value_info("trip_count", onnx.TensorProto.INT64, []),
                onnx.helper.make_tensor_value_info("cond", onnx.TensorProto.BOOL, []),
                onnx.helper.make_tensor_value_info("y", onnx.TensorProto.FLOAT, []),
            ],
            outputs=[
                onnx.helper.make_tensor_value_info("res_y", onnx.TensorProto.FLOAT, []),
                onnx.helper.make_tensor_value_info("res_scan", onnx.TensorProto.FLOAT, [5]),
            ],
        )
        loop_model = onnx.helper.make_model(loop_graph)

        trip_count = np.array(5).astype(np.int64)
        cond = np.array(1).astype(bool)
        input_vals = [trip_count, cond, y]
        verify_with_ort_with_inputs(
            loop_model,
            input_vals,
            use_vm=True,
            freeze_params=True,
            opset=11,
            target=target,
            dev=dev,
        )

    def verify_tensor_loop(shapeless_output=False):
        # Same fixed trip-count loop, but the carried state is a full
        # [3, 3, 3, 3] tensor; optionally the scan output shape is left
        # undeclared to mimic the malformed graphs pytorch exports.
        y_in = helper.make_tensor_value_info("y_in", TensorProto.FLOAT, [3, 3, 3, 3])
        y_out = helper.make_tensor_value_info("y_out", TensorProto.FLOAT, [3, 3, 3, 3])
        scan_out = helper.make_tensor_value_info("scan_out", TensorProto.FLOAT, [3, 3, 3, 3])
        cond_in = helper.make_tensor_value_info("cond_in", TensorProto.BOOL, [])
        cond_out = helper.make_tensor_value_info("cond_out", TensorProto.BOOL, [])
        iter_count = helper.make_tensor_value_info("iter_count", TensorProto.INT64, [])
        y = np.random.normal(size=[3, 3, 3, 3]).astype(np.float32)
        iter_cast_node = helper.make_node(
            "Cast", inputs=["iter_count"], outputs=["iter_cast"], to=onnx.TensorProto.FLOAT
        )
        y_add_node = helper.make_node("Add", inputs=["y_in", "iter_cast"], outputs=["y_out"])
        identity_node = helper.make_node("Identity", inputs=["cond_in"], outputs=["cond_out"])
        scan_identity_node = helper.make_node("Identity", inputs=["y_out"], outputs=["scan_out"])
        loop_body = helper.make_graph(
            [identity_node, iter_cast_node, y_add_node, scan_identity_node],
            "loop_body",
            [iter_count, cond_in, y_in],
            [cond_out, y_out, scan_out],
        )
        loop_node = helper.make_node(
            "Loop",
            inputs=["trip_count", "cond", "y"],
            outputs=["res_y", "res_scan"],
            body=loop_body,
        )
        trip_count = np.array(5).astype(np.int64)
        cond = np.array(1).astype(bool)

        # Allow testing of malformed nodes since pytorch likes to create these.
        if shapeless_output:
            scan_shape = None
        else:
            scan_shape = [5, 3, 3, 3, 3]

        loop_graph = onnx.helper.make_graph(
            [loop_node],
            "loop_outer",
            inputs=[
                onnx.helper.make_tensor_value_info("trip_count", onnx.TensorProto.INT64, []),
                onnx.helper.make_tensor_value_info("cond", onnx.TensorProto.BOOL, []),
                onnx.helper.make_tensor_value_info("y", onnx.TensorProto.FLOAT, [3, 3, 3, 3]),
            ],
            outputs=[
                onnx.helper.make_tensor_value_info("res_y", onnx.TensorProto.FLOAT, [3, 3, 3, 3]),
                onnx.helper.make_tensor_value_info("res_scan", onnx.TensorProto.FLOAT, scan_shape),
            ],
        )
        loop_model = onnx.helper.make_model(loop_graph)

        trip_count = np.array(5).astype(np.int64)
        cond = np.array(1).astype(bool)
        input_vals = [trip_count, cond, y]
        verify_with_ort_with_inputs(
            loop_model,
            input_vals,
            use_vm=True,
            freeze_params=True,
            opset=11,
            target=target,
            dev=dev,
        )

    # Test a loop that exits once a condition is met.
    verify_cond_loop()
    # Test a loop that exits after a fixed number of iterations with scalar outputs.
    verify_count_loop()
    # Test a loop that uses an array output.
    verify_tensor_loop()
    # Test a loop that is malformed and has no output shape defined.
    verify_tensor_loop(shapeless_output=True)
@tvm.testing.parametrize_targets
def test_if(target, dev):
    """test_if

    Checks the If operator with both scalar and 1-element-array conditions
    and with one or two outputs per branch.
    """

    def verify_if(cond_array, num_outputs):
        # Given a bool scalar input cond.
        # return constant tensor x if cond is True, otherwise return constant tensor y.

        def append_constant_nodes(nodes, outputs, expected, name):
            # Adds one random [5]-float Constant node to a branch and records
            # its value in `expected` for later comparison.
            outputs.append(onnx.helper.make_tensor_value_info(name, onnx.TensorProto.FLOAT, [5]))
            expected.append(np.random.randn(5).astype("float32"))
            nodes.append(
                onnx.helper.make_node(
                    "Constant",
                    inputs=[],
                    outputs=[name],
                    value=numpy_helper.from_array(expected[-1]),
                )
            )

        if_outputs = []
        graph_outputs = []

        then_nodes, then_outs, then_expected = [], [], []
        else_nodes, else_outs, else_expected = [], [], []

        for i in range(num_outputs):
            append_constant_nodes(then_nodes, then_outs, then_expected, f"then_out{i}")
            append_constant_nodes(else_nodes, else_outs, else_expected, f"else_out{i}")

            if_outputs.append(f"res{i}")
            graph_outputs.append(
                onnx.helper.make_tensor_value_info(f"res{i}", onnx.TensorProto.FLOAT, [5]),
            )

        then_body = onnx.helper.make_graph(then_nodes, "then_body", [], then_outs)
        else_body = onnx.helper.make_graph(else_nodes, "else_body", [], else_outs)

        if_node = onnx.helper.make_node(
            "If", inputs=["cond"], outputs=if_outputs, then_branch=then_body, else_branch=else_body
        )

        if_graph = onnx.helper.make_graph(
            [if_node],
            "if_outer",
            inputs=[
                onnx.helper.make_tensor_value_info("cond", onnx.TensorProto.BOOL, []),
            ],
            outputs=graph_outputs,
        )

        if_model = onnx.helper.make_model(if_graph)
        if cond_array:
            cond = np.array([1]).astype("bool")
        else:
            cond = np.array(1).astype("bool")
        # Both True variants are truthy, so the then-branch values are expected.
        correct_out = then_expected if cond else else_expected

        # TODO(jwfromm): Onnxruntime 1.0.0 is buggy with If statements. Replace this with
        # verify_with_ort once we update versions.
        tvm_out = get_tvm_output_with_vm(if_model, [cond], target, dev, freeze_params=True)
        if not isinstance(tvm_out, list):
            tvm_out = [tvm_out]
        for i, _ in enumerate(tvm_out):
            tvm.testing.assert_allclose(
                correct_out[i],
                tvm_out[i],  # pylint: disable=unnecessary-list-index-lookup
                rtol=1e-05,
                atol=1e-05,
            )

    # Confirm that if works with cond as an array or scalar.
    verify_if(cond_array=False, num_outputs=1)
    verify_if(cond_array=False, num_outputs=2)
    verify_if(cond_array=True, num_outputs=1)
    verify_if(cond_array=True, num_outputs=2)
@tvm.testing.parametrize_targets
def test_size(target, dev):
    """test_size"""

    def verify_size(indata):
        node = helper.make_node(
            "Size",
            inputs=["X"],
            outputs=["Y"],
        )
        data_info = helper.make_tensor_value_info("X", TensorProto.INT64, list(indata.shape))
        # Size produces a scalar element count, hence the empty output shape.
        result_info = helper.make_tensor_value_info("Y", TensorProto.INT64, [])
        graph = helper.make_graph([node], "size_test", inputs=[data_info], outputs=[result_info])
        model = helper.make_model(graph, producer_name="size_test")
        verify_with_ort_with_inputs(
            model, [indata], dtype="int64", use_vm=True, opset=11, target=target, dev=dev
        )

    for values in ([[1, 0], [1, 1]], [[3, 0, 0], [0, 4, 0], [5, 6, 0]]):
        verify_size(np.array(values, dtype=np.int64))
@tvm.testing.parametrize_targets
def test_maxunpool(target, dev):
    """test_maxunpool

    Checks MaxUnpool with explicit strides/pads and with the optional
    `output_shape` input.
    """

    def verify_maxunpool(data, indices, kernel_shape, strides, output_shape=None, pads=None):
        # MaxUnpool takes the pooled values (xT) and the flat indices (xI)
        # produced by a matching MaxPool.
        input_names = ["xT", "xI"]
        input_info = [
            helper.make_tensor_value_info("xT", TensorProto.FLOAT, list(data.shape)),
            helper.make_tensor_value_info("xI", TensorProto.INT64, list(indices.shape)),
        ]
        input_values = [data, indices]
        if output_shape is not None:
            # Caller supplied an explicit output shape via the optional input.
            input_names.append("output_shape")
            input_info.append(
                helper.make_tensor_value_info(
                    "output_shape", TensorProto.INT64, list(output_shape.shape)
                )
            )
            input_values.append(output_shape)
        else:
            # Compute expected output shape
            # (spatial dims follow the standard unpool formula:
            #  out = (in - 1) * stride + kernel - pad_sum; N and C unchanged).
            output_shape = np.asarray(([1, 1] + list(strides))) * np.asarray(list(data.shape))
            output_shape += np.asarray(([0, 0] + list(kernel_shape))) - np.asarray(
                ([0, 0] + list(strides))
            )
            if pads is not None:
                output_shape -= np.asarray(
                    [0, 0] + list(np.sum(np.reshape(list(pads), [-1, 2]), axis=-1))
                )
        output_shape = [int(i) for i in output_shape]
        node = helper.make_node(
            "MaxUnpool", inputs=input_names, outputs=["y"], kernel_shape=kernel_shape
        )
        if pads is not None:
            pad_attr = helper.make_attribute("pads", pads)
            node.attribute.append(pad_attr)
        if strides is not None:
            strides_attr = helper.make_attribute("strides", strides)
            node.attribute.append(strides_attr)
        graph = helper.make_graph(
            [node],
            "maxunpool_test",
            inputs=input_info,
            outputs=[helper.make_tensor_value_info("y", TensorProto.FLOAT, output_shape)],
        )
        model = helper.make_model(graph, producer_name="size_test")
        verify_with_ort_with_inputs(
            model, input_values, use_vm=True, opset=11, target=target, dev=dev
        )

    # Basic test
    x_t = np.array([[[[5, 6], [7, 8]]]], dtype=np.float32)
    x_i = np.array([[[[0, 7], [13, 15]]]], dtype=np.int64)
    verify_maxunpool(x_t, x_i, [2, 2], strides=[2, 2])
    # Small stride
    verify_maxunpool(x_t, x_i, [2, 2], strides=[1, 1])
    # Big kernel
    verify_maxunpool(x_t, x_i, [3, 3], strides=[2, 2])
    # With output shape
    output_shape = np.array((1, 1, 5, 5), dtype=np.int64)
    verify_maxunpool(x_t, x_i, [2, 2], strides=[2, 2], output_shape=output_shape)
    # With explicit reverse padding
    pads = np.asarray([1, 1, 1, 1]).astype(np.int64)
    verify_maxunpool(x_t, x_i, [2, 2], strides=[2, 2], pads=pads)
@tvm.testing.parametrize_targets
def test_softplus(target, dev):
    """test_softplus"""

    def verify_softplus(indata):
        in_info = helper.make_tensor_value_info("X", TensorProto.FLOAT, list(indata.shape))
        # Softplus is elementwise, so the output shape mirrors the input.
        out_info = helper.make_tensor_value_info("Y", TensorProto.FLOAT, list(indata.shape))
        node = helper.make_node(
            "Softplus",
            inputs=["X"],
            outputs=["Y"],
        )
        graph = helper.make_graph([node], "softplus_test", inputs=[in_info], outputs=[out_info])
        model = helper.make_model(graph, producer_name="softplus_test")
        verify_with_ort_with_inputs(
            model, [indata], dtype="float32", use_vm=True, opset=11, target=target, dev=dev
        )

    # Simple case with all signs.
    verify_softplus(np.array([[-1, 0, 1]], dtype=np.float32))
    # More fancy case.
    verify_softplus(np.random.randn(1, 32, 32, 3).astype("float32"))
@tvm.testing.parametrize_targets
def test_cumsum(target, dev):
    """test_cumsum

    Exercises CumSum over float32 and int32 inputs, with every combination
    of the `exclusive` and `reverse` attributes.
    """

    def verify_cumsum(indata, axis, exclusive=0, reverse=0, dtype="float32"):
        # The axis is supplied as an int32 constant input, per the CumSum spec.
        cumsum_node = onnx.helper.make_node(
            "CumSum",
            inputs=["X", "axis"],
            outputs=["Y"],
        )
        # Only attach the attributes when they differ from the spec defaults.
        if exclusive != 0:
            exclusive_attr = helper.make_attribute("exclusive", exclusive)
            cumsum_node.attribute.append(exclusive_attr)
        if reverse != 0:
            reverse_attr = helper.make_attribute("reverse", reverse)
            cumsum_node.attribute.append(reverse_attr)
        nodes = [
            make_constant_node("axis", onnx.TensorProto.INT32, [1], [axis]),
            cumsum_node,
        ]
        if dtype == "float32":
            tensor_type = TensorProto.FLOAT
        else:
            tensor_type = TensorProto.INT32
            dtype = "int32"

        graph = helper.make_graph(
            nodes,
            "cumsum_test",
            inputs=[
                helper.make_tensor_value_info("X", tensor_type, list(indata.shape)),
            ],
            outputs=[helper.make_tensor_value_info("Y", tensor_type, list(indata.shape))],
        )

        model = helper.make_model(graph, producer_name="cumsum_test")
        verify_with_ort_with_inputs(
            model, [indata], dtype=dtype, use_vm=True, opset=11, target=target, dev=dev
        )

    data = (
        np.array(
            [
                1.0,
                2.0,
                3.0,
                4.0,
                5.0,
                6.0,
                7.0,
                8.0,
                9.0,
                10.0,
                11.0,
                12.0,
            ]
        )
        .astype(np.float32)
        .reshape((3, 4))
    )

    # Float input: sweep axis / exclusive / reverse combinations.
    verify_cumsum(data, 0)
    verify_cumsum(data, 1)
    verify_cumsum(data, 0, 1, 0)
    verify_cumsum(data, 1, 1, 0)
    verify_cumsum(data, 0, 0, 1)
    verify_cumsum(data, 1, 0, 1)
    verify_cumsum(data, 1, 1, 1)
    data = np.random.randn(1, 32, 32, 3).astype("float32")
    verify_cumsum(data, 1)
    # Integer input: same sweep with int32 tensors.
    data = np.random.randn(1, 32, 32, 3).astype("int32")
    verify_cumsum(data, 0, dtype="int32")
    verify_cumsum(data, 1, dtype="int32")
    verify_cumsum(data, 0, 1, 0, dtype="int32")
    verify_cumsum(data, 1, 1, 0, dtype="int32")
    verify_cumsum(data, 0, 0, 1, dtype="int32")
    verify_cumsum(data, 1, 0, 1, dtype="int32")
    verify_cumsum(data, 1, 1, 1, dtype="int32")
@tvm.testing.parametrize_targets
def test_eyelike(target, dev):
    """test_eyelike"""

    def verify_eyelike(indata, dynamic=False):
        nodes = []
        eyelike_input = "X"
        graph_inputs = [
            helper.make_tensor_value_info("X", TensorProto.FLOAT, list(indata.shape))
        ]
        feed_values = [indata]
        if dynamic:
            # Route the input through a Reshape so EyeLike sees a
            # dynamically-shaped tensor.
            graph_inputs.append(
                helper.make_tensor_value_info("shape", TensorProto.INT64, [len(indata.shape)])
            )
            feed_values.append(np.asarray(indata.shape))
            nodes.append(helper.make_node("Reshape", ["X", "shape"], ["X_dyn"]))
            eyelike_input = "X_dyn"
        nodes.append(
            helper.make_node(
                "EyeLike",
                inputs=[eyelike_input],
                outputs=["Y"],
            )
        )
        graph = helper.make_graph(
            nodes,
            "eyelike_test",
            inputs=graph_inputs,
            outputs=[helper.make_tensor_value_info("Y", TensorProto.FLOAT, list(indata.shape))],
        )
        model = helper.make_model(graph, producer_name="eyelike_test")
        verify_with_ort_with_inputs(
            model, feed_values, dtype="float32", opset=9, target=target, dev=dev, use_vm=True
        )

    input_data = np.zeros((5, 5), dtype=np.float32)
    verify_eyelike(input_data)
    verify_eyelike(input_data, True)
# The following parametrized tests loads the tests that ONNX ships as
# serialized ONNX files, inputs, and outputs. The goal of this test
# is to ensure the ONNX importer is in line with the ONNX specification.
# To allow these tests to run in CI before all pass, a number of tests
# that are not yet supported are skipped.
# Directory inside the installed onnx package that holds the node-level
# conformance cases (one sub-directory per test case).
onnx_test_node_dir = os.path.join(os.path.dirname(onnx.__file__), "backend", "test", "data", "node")
# Every "test*" sub-directory is one conformance case; sorted so the pytest
# parametrization order is stable across runs.
onnx_test_folders = sorted(
    dirname
    for dirname in os.listdir(onnx_test_node_dir)
    if dirname.startswith("test") and os.path.isdir(os.path.join(onnx_test_node_dir, dirname))
)
# ONNX conformance cases that the TVM importer does not pass yet; test_onnx_nodes
# skips any case listed here regardless of target.
unsupported_onnx_tests = [
    "test_batchnorm_epsilon_training_mode",
    "test_batchnorm_example_training_mode",
    "test_bernoulli",
    "test_bernoulli_expanded",
    "test_bernoulli_double",
    "test_bernoulli_double_expanded",
    "test_bernoulli_seed",
    "test_bernoulli_seed_expanded",
    "test_blackmanwindow",
    "test_blackmanwindow_expanded",
    "test_blackmanwindow_symmetric",
    "test_blackmanwindow_symmetric_expanded",
    # the follow cast and castlike cases have lowering issues
    "test_cast_FLOAT_to_STRING",
    "test_cast_STRING_to_FLOAT",
    "test_castlike_FLOAT_to_STRING",
    "test_castlike_FLOAT_to_STRING_expanded",
    "test_castlike_STRING_to_FLOAT",
    "test_castlike_STRING_to_FLOAT_expanded",
    # the following cast and castlike cases segfault
    "test_cast_DOUBLE_to_FLOAT16",
    "test_castlike_DOUBLE_to_FLOAT16",
    "test_castlike_DOUBLE_to_FLOAT16_expanded",
    "test_convtranspose_autopad_same",
    "test_convtranspose_dilations",
    "test_cumsum_1d",
    "test_cumsum_1d_exclusive",
    "test_cumsum_1d_reverse",
    "test_cumsum_1d_reverse_exclusive",
    "test_cumsum_2d_axis_0",
    "test_cumsum_2d_axis_1",
    "test_cumsum_2d_negative_axis",
    "test_det_2d",
    "test_det_nd",
    "test_dropout_default",
    "test_dropout_default_mask",
    "test_dropout_default_mask_ratio",
    "test_dropout_default_ratio",
    "test_gru_batchwise",
    "test_hammingwindow",
    "test_hammingwindow_expanded",
    "test_hammingwindow_symmetric",
    "test_hammingwindow_symmetric_expanded",
    "test_hannwindow",
    "test_hannwindow_expanded",
    "test_hannwindow_symmetric",
    "test_hannwindow_symmetric_expanded",
    "test_identity_opt",
    "test_identity_sequence",
    "test_if_opt",
    "test_if_seq",
    "test_loop13_seq",
    "test_loop16_seq_none",
    "test_lstm_batchwise",
    "test_maxpool_with_argmax_2d_precomputed_pads",
    "test_maxpool_with_argmax_2d_precomputed_strides",
    "test_maxunpool_export_with_output_shape",
    "test_melweightmatrix",
    # This test fails llvm with a lowering error:
    "test_nllloss_NCd1d2d3_none_no_weight_negative_ii_expanded",
    "test_qlinearmatmul_3D",
    "test_range_float_type_positive_delta_expanded",
    "test_range_int32_type_negative_delta_expanded",
    "test_reduce_sum_do_not_keepdims_example",
    "test_reduce_sum_do_not_keepdims_random",
    "test_reduce_sum_keepdims_example",
    "test_reduce_sum_keepdims_random",
    "test_reduce_sum_negative_axes_keepdims_example",
    "test_reduce_sum_negative_axes_keepdims_random",
    "test_roialign_aligned_true",
    "test_sequence_insert_at_back",
    "test_sequence_insert_at_front",
    "test_sequence_map_add_1_sequence_1_tensor",
    "test_sequence_map_add_1_sequence_1_tensor_expanded",
    "test_sequence_map_add_2_sequences",
    "test_sequence_map_add_2_sequences_expanded",
    "test_sequence_map_extract_shapes",
    "test_sequence_map_extract_shapes_expanded",
    "test_sequence_map_identity_1_sequence",
    "test_sequence_map_identity_1_sequence_1_tensor",
    "test_sequence_map_identity_1_sequence_1_tensor_expanded",
    "test_sequence_map_identity_1_sequence_expanded",
    "test_sequence_map_identity_2_sequences",
    "test_sequence_map_identity_2_sequences_expanded",
    "test_simple_rnn_batchwise",
    "test_simple_rnn_defaults",
    "test_simple_rnn_with_initial_bias",
    "test_split_variable_parts_1d",
    "test_split_variable_parts_2d",
    "test_split_variable_parts_default_axis",
    "test_split_zero_size_splits",
    "test_stft",
    "test_stft_with_window",
    "test_strnormalizer_export_monday_casesensintive_lower",
    "test_strnormalizer_export_monday_casesensintive_nochangecase",
    "test_strnormalizer_export_monday_casesensintive_upper",
    "test_strnormalizer_export_monday_empty_output",
    "test_strnormalizer_export_monday_insensintive_upper_twodim",
    "test_strnormalizer_nostopwords_nochangecase",
    "test_tfidfvectorizer_tf_batch_onlybigrams_skip0",
    "test_tfidfvectorizer_tf_batch_onlybigrams_skip5",
    "test_tfidfvectorizer_tf_batch_uniandbigrams_skip5",
    "test_tfidfvectorizer_tf_only_bigrams_skip0",
    "test_tfidfvectorizer_tf_onlybigrams_levelempty",
    "test_tfidfvectorizer_tf_onlybigrams_skip5",
    "test_tfidfvectorizer_tf_uniandbigrams_skip5",
    "test_training_dropout",
    "test_training_dropout_default",
    "test_training_dropout_default_mask",
    "test_training_dropout_mask",
    "test_training_dropout_zero_ratio",
    "test_training_dropout_zero_ratio_mask",
    "test_tril_zero",
    "test_triu_zero",
    "test_unique_sorted_with_axis",
    "test_unique_sorted_with_axis_3d",
    "test_unique_sorted_with_negative_axis",
    "test_upsample_nearest",
]
# Conformance cases skipped only on particular target kinds; keys match
# tvm.target.Target(...).kind.name as computed in test_onnx_nodes.
target_skips = {
    "cuda": [
        "test_range_float_type_positive_delta_expanded",
        "test_range_int32_type_positive_delta_expanded",
        "test_mod_mixed_sign_float16",
        "test_qlinearconv",
        "test_qlinearmatmul",
        "test_resize_upsample_sizes_nearest",
    ]
}
def _load_proto(proto_filename, target_list, model_type_proto):
    """Deserialize one ONNX test-data protobuf file and append its value to target_list.

    The concrete proto class (sequence / tensor / optional) is chosen from
    whichever field is set on model_type_proto; unsupported kinds raise.
    """
    with open(proto_filename, "rb") as fin:
        raw_bytes = fin.read()
    if model_type_proto.HasField("sequence_type"):
        seq_proto = onnx.SequenceProto()
        seq_proto.ParseFromString(raw_bytes)
        target_list.append(numpy_helper.to_list(seq_proto))
    elif model_type_proto.HasField("tensor_type"):
        tensor_proto = onnx.TensorProto()
        tensor_proto.ParseFromString(raw_bytes)
        target_list.append(numpy_helper.to_array(tensor_proto))
    elif model_type_proto.HasField("optional_type"):
        opt_proto = onnx.OptionalProto()
        opt_proto.ParseFromString(raw_bytes)
        target_list.append(numpy_helper.to_optional(opt_proto))
    else:
        raise ValueError(
            "Loading proto of that specific type (Map/Sparse Tensor) is currently not supported"
        )
@pytest.mark.parametrize("onnx_test", onnx_test_folders)
@tvm.testing.parametrize_targets
def test_onnx_nodes(target, dev, onnx_test):
    """Run one ONNX-shipped node conformance case through the TVM importer and
    compare against the expected outputs stored next to the model."""
    if platform.machine() == "aarch64" and onnx_test == "test_resize_upsample_sizes_nearest":
        pytest.skip("Currently failing on AArch64")
    target_kind = tvm.target.Target(target).kind.name
    if onnx_test in unsupported_onnx_tests:
        pytest.skip(f"Onnx test '{onnx_test}' not yet supported by TVM")
    target_specific_skips = target_skips.get(target_kind, [])
    if onnx_test in target_specific_skips:
        pytest.skip(f"Onnx test '{onnx_test}' not yet supported by TVM on {target_kind} targets")
    test_dir = os.path.join(onnx_test_node_dir, onnx_test)
    # Default tolerances; selectively loosened below for tests whose reference
    # data or numerics are known to be less precise. Note the checks are not
    # mutually exclusive: if several substrings match, the last one wins.
    atol = 1e-5
    rtol = 1e-5
    if "roialign" in test_dir:
        # for some reason the ONNX test crops the
        # roialign results to 4 decimal places
        atol = 1e-4
    if "to_BFLOAT16" in test_dir:
        # the tolerance here is for the comparison in uint16 space, but is not as significant
        # of a delta in bfloat16 space because it's representing the mantissa being off by 1
        atol = 1
    if "_sce_" in test_dir:
        # complicated loss functions like SoftmaxCrossEntropy can have minor variations
        # in accuracy depending on implementation
        atol = 1e-4
    if "bicubic" in test_dir:
        # satisfies onnx precision for bicubic interpolation
        atol = 1e-4
    if "dft" in test_dir:
        atol = 1e-3
    model = onnx.load(os.path.join(test_dir, "model.onnx"))
    # Each test_data_set_* directory holds one set of serialized input/output
    # tensors (input_<i>.pb / output_<i>.pb) for the same model.
    for test_data_dir in glob.glob(os.path.join(test_dir, "test_data_set*")):
        inputs = []
        n_inputs = len(glob.glob(os.path.join(test_data_dir, "input_*.pb")))
        for i in range(n_inputs):
            input_file = os.path.join(test_data_dir, f"input_{i}.pb")
            _load_proto(input_file, inputs, model.graph.input[i].type)
        outputs = []
        n_outputs = len(glob.glob(os.path.join(test_data_dir, "output_*.pb")))
        for i in range(n_outputs):
            output_file = os.path.join(test_data_dir, f"output_{i}.pb")
            _load_proto(output_file, outputs, model.graph.output[i].type)
        tvm_val = get_tvm_output_with_vm(model, inputs, target, dev)
        # Single-output models return the value directly; multi-output models
        # return a sequence that is compared element-wise.
        if len(outputs) == 1:
            tvm.testing.assert_allclose(outputs[0], tvm_val, rtol=rtol, atol=atol)
        else:
            for output, val in zip(outputs, tvm_val):
                tvm.testing.assert_allclose(output, val, rtol=rtol, atol=atol)
def test_wrong_input():
    """Importing with a shape dict that names an unknown input must raise."""
    softplus = helper.make_node(
        "Softplus",
        inputs=["X"],
        outputs=["Y"],
    )
    graph = helper.make_graph(
        [softplus],
        "softplus_test",
        inputs=[helper.make_tensor_value_info("X", TensorProto.FLOAT, [5])],
        outputs=[helper.make_tensor_value_info("Y", TensorProto.FLOAT, [5])],
    )
    model = helper.make_model(graph, producer_name="softplus_test")
    # A shape entry for the real graph input imports cleanly.
    relay.frontend.from_onnx(model, shape={"X": [5]})
    # A shape entry for a name the graph does not have triggers an assertion.
    with pytest.raises(AssertionError):
        relay.frontend.from_onnx(model, shape={"Z": [5]})
@pytest.mark.skip(reason="unsupported op numel")
@tvm.testing.parametrize_targets
def test_aten(target, dev):
    """Round-trip torch.nn.EmbeddingBag through an ONNX_ATEN export into TVM."""
    torch.set_grad_enabled(False)

    def _convert_to_onnx(model, inputs):
        # Export with ATen fallback ops preserved, then reload the file.
        file_name = "aten_model.onnx"
        torch.onnx.export(
            model,
            inputs,
            file_name,
            export_params=True,
            verbose=False,
            opset_version=10,
            operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN,
        )
        return onnx.load(file_name)

    def verify_embedding_bag(num_embedding, embedding_dim, data_shape, num_bags=None):
        indices = torch.randint(0, num_embedding - 1, data_shape)
        bag = torch.nn.EmbeddingBag(num_embedding, embedding_dim)
        onnx_model = _convert_to_onnx(bag, indices)
        torch_out = bag(indices)
        tvm_out = get_tvm_output_with_vm(
            onnx_model,
            [indices.numpy()],
            freeze_params=True,
            target=target,
            dev=dev,
        )
        tvm.testing.assert_allclose(torch_out.numpy(), tvm_out, atol=5e-7)

    verify_embedding_bag(10, 3, [2, 10])
    verify_embedding_bag(32, 2, [3, 3])
@tvm.testing.parametrize_targets
def test_index_put(target, dev):
    """Round-trip torch Tensor.index_put through an ATen-fallback ONNX export into TVM."""
    class IndexPutModel(torch.nn.Module):
        # Minimal module wrapping index_put so it can be ONNX-exported.
        def __init__(self, indices, values, accumulate):
            super().__init__()
            self.indices = indices
            self.values = values
            self.accumulate = accumulate
        def forward(self, x):
            return x.index_put(self.indices, self.values, self.accumulate)
    def _convert_to_onnx(model, dummy_data):
        # Export with ATEN_FALLBACK so index_put survives as an ATen op.
        file_name = "aten_model.onnx"
        torch.onnx.export(
            model,
            dummy_data,
            file_name,
            export_params=True,
            verbose=False,
            opset_version=11,
            operator_export_type=torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK,
        )
        onnx_model = onnx.load(file_name)
        return onnx_model
    def verify_index_put(data_shape, indices, accumulate):
        # Explicit per-dimension index tensors; compares torch vs TVM output.
        dummy_data = torch.ones(data_shape)
        tvm_inputs = [dummy_data.numpy()]
        values = torch.rand(indices[0].size())
        model = IndexPutModel(indices, values, accumulate)
        onnx_model = _convert_to_onnx(model, dummy_data)
        torch_out = model(dummy_data)
        tvm_out = get_tvm_output_with_vm(onnx_model, tvm_inputs, target, dev, freeze_params=True)
        tvm.testing.assert_allclose(torch_out.numpy(), tvm_out)
    shape = (3, 5)
    xidx = torch.tensor([0, 1, 2, 2])
    yidx = torch.tensor([0, 1, 3, 4])
    verify_index_put(shape, [xidx, yidx], True)
    shape = (3, 5, 3)
    xidx = torch.tensor([0, 1, 2, 2, 0])
    yidx = torch.tensor([0, 1, 3, 4, 0])
    zidx = torch.tensor([0, 1, 1, 2, 0])
    verify_index_put(shape, [xidx, yidx, zidx], False)
    def verify_index_put_slice(data_shape, value_shape, accumulate):
        # Builds broadcastable arange indices: index_shape starts as
        # [-1, 1, ..., 1] and loses one trailing 1 per dimension so each
        # index tensor broadcasts against the others like a slice assignment.
        dummy_data = torch.ones(data_shape)
        tvm_inputs = [dummy_data.numpy()]
        indices = []
        index_shape = [1] * len(value_shape)
        index_shape[0] = -1
        for _, v_shape in enumerate(value_shape):
            indices.append(torch.arange(0, v_shape).reshape(tuple(index_shape)))
            index_shape.pop()
        values = torch.rand(value_shape)
        model = IndexPutModel(indices, values, accumulate)
        onnx_model = _convert_to_onnx(model, dummy_data)
        torch_out = model(dummy_data)
        tvm_out = get_tvm_output_with_vm(onnx_model, tvm_inputs, target, dev, freeze_params=True)
        tvm.testing.assert_allclose(torch_out.numpy(), tvm_out)
    verify_index_put_slice((3, 3), (2, 2), False)
    verify_index_put_slice((2, 3, 4), (1, 2, 3), True)
    verify_index_put_slice((2, 3, 4, 5), (1, 2, 3, 1), False)
@tvm.testing.parametrize_targets
def test_reverse_sequence(target, dev):
    """Check the ReverseSequence operator for both batch/time axis layouts."""

    def verify_reverse_sequence(x, sequence_lens, batch_axis, time_axis):
        rev_node = onnx.helper.make_node(
            "ReverseSequence",
            inputs=["x", "sequence_lens"],
            outputs=["y"],
            time_axis=time_axis,
            batch_axis=batch_axis,
        )
        graph_inputs = [
            helper.make_tensor_value_info("x", TensorProto.FLOAT, list(x.shape)),
            helper.make_tensor_value_info(
                "sequence_lens", TensorProto.INT64, list(sequence_lens.shape)
            ),
        ]
        graph = helper.make_graph(
            [rev_node],
            "reverse_sequence_test",
            inputs=graph_inputs,
            outputs=[helper.make_tensor_value_info("y", TensorProto.FLOAT, list(x.shape))],
        )
        model = helper.make_model(graph, producer_name="reverse_sequence_test")
        verify_with_ort_with_inputs(model, [x, sequence_lens], [x.shape], target=target, dev=dev)

    # 4x4 matrix holding 0..15.
    x = np.arange(16, dtype=np.float32).reshape((4, 4))
    verify_reverse_sequence(x, np.array([1, 2, 3, 4], dtype=np.int64), 0, 1)
    verify_reverse_sequence(x, np.array([4, 3, 2, 1], dtype=np.int64), 1, 0)
@pytest.mark.parametrize("op_name", ["Gelu", "FastGelu"], scope="session")
@pytest.mark.parametrize("data_type", ["float16", "float32"], scope="session")
@tvm.testing.parametrize_targets
def test_gelu(target, dev, data_type, op_name):
    """Check the com.microsoft Gelu / FastGelu contrib ops in both precisions."""
    dtype = np.dtype(data_type)
    tensor_type = mapping.NP_TYPE_TO_TENSOR_TYPE[dtype]
    # float16 needs a much looser tolerance than float32.
    absolute_tolerance = 1e-3 if data_type == "float16" else 1e-5

    def verify_gelu(x):
        gelu_node = onnx.helper.make_node(
            op_name,
            inputs=["x"],
            outputs=["y"],
            domain="com.microsoft",
        )
        graph = helper.make_graph(
            [gelu_node],
            f"{op_name}_test",
            inputs=[helper.make_tensor_value_info("x", tensor_type, list(x.shape))],
            outputs=[helper.make_tensor_value_info("y", tensor_type, list(x.shape))],
        )
        model = helper.make_model(graph, producer_name=f"{op_name}_test")
        verify_with_ort_with_inputs(
            model, [x], [x.shape], atol=absolute_tolerance, dtype=data_type, target=target, dev=dev
        )

    # 1-D input with extreme values, then a small 2-D input.
    verify_gelu(np.array([-1.0, 0, 1.0, 100.0, -100.0, 1000.0, -1000.0], dtype=dtype))
    verify_gelu(np.array([[1, 2], [3, 4]], dtype=dtype))
@pytest.mark.parametrize("op_name", ["BiasGelu", "FastGelu"], scope="session")
@pytest.mark.parametrize("data_type", ["float16", "float32"], scope="session")
@tvm.testing.parametrize_targets
def test_biasgelu(target, dev, data_type, op_name):
    """Check the com.microsoft BiasGelu / FastGelu contrib ops with a bias input."""
    dtype = np.dtype(data_type)
    tensor_type = mapping.NP_TYPE_TO_TENSOR_TYPE[dtype]
    # float16 needs a much looser tolerance than float32.
    absolute_tolerance = 1e-2 if data_type == "float16" else 1e-5

    def verify_biasgelu(x, bias):
        node = onnx.helper.make_node(
            op_name,
            inputs=["x", "bias"],
            outputs=["y"],
            domain="com.microsoft",
        )
        value_infos = [
            helper.make_tensor_value_info("x", tensor_type, list(x.shape)),
            helper.make_tensor_value_info("bias", tensor_type, list(bias.shape)),
        ]
        graph = helper.make_graph(
            [node],
            f"{op_name}_test",
            inputs=value_infos,
            outputs=[helper.make_tensor_value_info("y", tensor_type, list(x.shape))],
        )
        model = helper.make_model(graph, producer_name=f"{op_name}_test")
        verify_with_ort_with_inputs(
            model,
            [x, bias],
            [x.shape],
            atol=absolute_tolerance,
            dtype=data_type,
            target=target,
            dev=dev,
        )

    # 1-D input with extreme values and a constant bias.
    verify_biasgelu(
        np.array([-1.0, 0, 1.0, 100.0, -100.0, 1000.0, -1000.0], dtype=dtype),
        np.repeat(2.0, 7).astype(dtype),
    )
    # Small 2-D input, bias broadcast along the last axis.
    verify_biasgelu(
        np.array([[1, 2], [3, 4]], dtype=dtype),
        np.array([0.3, 4.0], dtype=dtype),
    )
@tvm.testing.parametrize_targets
def test_embedlayernormalization(target, dev):
    """Check the com.microsoft EmbedLayerNormalization contrib op, with and
    without the optional segment inputs."""
    def verify_embedlayernormalization(
        input_ids,
        segment_ids,
        word_embedding,
        position_embedding,
        segment_embedding,
        gamma,
        beta,
    ):
        # Optional inputs are represented in ONNX by an empty-string name.
        # The inner function closes over batch_size / sequence_length /
        # hidden_size, which are assigned below before it is first called.
        node = onnx.helper.make_node(
            "EmbedLayerNormalization",
            inputs=[
                "input_ids",
                "" if segment_ids is None else "segment_ids",
                "word_embedding",
                "position_embedding",
                "" if segment_embedding is None else "segment_embedding",
                "gamma",
                "beta",
            ],
            outputs=["output", "mask_index"],
            domain="com.microsoft",
        )
        node.attribute.append(onnx.helper.make_attribute("epsilon", 1e-4))
        # Absent optional tensors still need value_info entries; give them
        # an empty shape.
        segment_ids_shape = [] if segment_ids is None else segment_ids.shape
        segment_embedding_shape = [] if segment_embedding is None else segment_embedding.shape
        graph = helper.make_graph(
            [node],
            "embedlayernormalization_test",
            inputs=[
                helper.make_tensor_value_info(
                    "input_ids", TensorProto.INT32, list(input_ids.shape)
                ),
                helper.make_tensor_value_info("segment_ids", TensorProto.INT32, segment_ids_shape),
                helper.make_tensor_value_info(
                    "word_embedding", TensorProto.FLOAT, list(word_embedding.shape)
                ),
                helper.make_tensor_value_info(
                    "position_embedding", TensorProto.FLOAT, list(position_embedding.shape)
                ),
                helper.make_tensor_value_info(
                    "segment_embedding", TensorProto.FLOAT, segment_embedding_shape
                ),
                helper.make_tensor_value_info("gamma", TensorProto.FLOAT, list(gamma.shape)),
                helper.make_tensor_value_info("beta", TensorProto.FLOAT, list(beta.shape)),
            ],
            outputs=[
                helper.make_tensor_value_info(
                    "output", TensorProto.FLOAT, list((batch_size, sequence_length, hidden_size))
                ),
                helper.make_tensor_value_info("mask_index", TensorProto.INT32, [batch_size]),
            ],
        )
        model = helper.make_model(graph, producer_name="embedlayernormalization_test")
        # TODO(@anwang2009): onnxruntime v1.9.0 requires empty list for optional argument,
        # but v1.10.0+ requires None instead.
        verify_with_ort_with_inputs(
            model,
            [
                input_ids,
                np.empty(0, dtype="int32") if segment_ids is None else segment_ids,
                word_embedding,
                position_embedding,
                np.empty(0, dtype="float32") if segment_embedding is None else segment_embedding,
                gamma,
                beta,
            ],
            [
                (batch_size, sequence_length, hidden_size),
                batch_size,
            ],
            target=target,
            dev=dev,
            rtol=1e-4,
            atol=1e-4,
        )
    hidden_size = 384
    batch_size = 4
    sequence_length = 3
    vocab_size = 5
    input_ids = np.full((batch_size, sequence_length), 3).astype("int32")
    segment_ids = np.zeros((batch_size, sequence_length)).astype("int32")
    word_embedding = np.full((vocab_size, hidden_size), 1).astype("float32")
    position_embedding = np.full((sequence_length, hidden_size), 2).astype("float32")
    segment_embedding = np.full((vocab_size, hidden_size), 3).astype("float32")
    gamma = np.random.uniform(0.5, 0.7, hidden_size).astype("float32")
    beta = np.random.randn(hidden_size).astype("float32") * 0.1
    verify_embedlayernormalization(
        input_ids, segment_ids, word_embedding, position_embedding, segment_embedding, gamma, beta
    )
    # Test with undefined segment embedding
    verify_embedlayernormalization(
        input_ids, None, word_embedding, position_embedding, None, gamma, beta
    )
@tvm.testing.parametrize_targets
def test_attention(target, dev):
    """Check the com.microsoft Attention contrib op across unidirectional /
    bidirectional modes, with and without the optional past state."""
    def verify_attention(_unidirectional, _input, _weight, _bias, _mask_index=None, _past=None):
        # Optional inputs are appended to the name list only when supplied.
        # Closes over num_heads / batch_size / head_size / sequence_length /
        # past_sequence_length, assigned below before the first call.
        input_names = ["input", "weight", "bias"]
        if _mask_index is not None:
            input_names.append("mask_index")
        if _past is not None:
            input_names.append("past")
        node = onnx.helper.make_node(
            "Attention",
            inputs=input_names,
            outputs=["output", "present"],
            domain="com.microsoft",
            num_heads=num_heads,
            unidirectional=_unidirectional,
        )
        # past/present pack K and V together, hence the leading dimension of 2.
        past_shape = (2, batch_size, num_heads, past_sequence_length, head_size)
        present_output_shape = (2, batch_size, num_heads, sequence_length, head_size)
        inputs_info = [
            helper.make_tensor_value_info("input", TensorProto.FLOAT, list(_input.shape)),
            helper.make_tensor_value_info("weight", TensorProto.FLOAT, list(_weight.shape)),
            helper.make_tensor_value_info("bias", TensorProto.FLOAT, list(_bias.shape)),
        ]
        if _mask_index is not None:
            inputs_info.append(
                helper.make_tensor_value_info(
                    "mask_index", TensorProto.INT32, list(_mask_index.shape)
                ),
            )
        if _past is not None:
            inputs_info.append(
                helper.make_tensor_value_info("past", TensorProto.FLOAT, list(past_shape))
            )
        graph = helper.make_graph(
            [node],
            "attention_test",
            inputs=inputs_info,
            outputs=[
                helper.make_tensor_value_info("output", TensorProto.FLOAT, list(_input.shape)),
                helper.make_tensor_value_info(
                    "present", TensorProto.FLOAT, list(present_output_shape)
                ),
            ],
        )
        model = helper.make_model(graph, producer_name="attention_test")
        inputs = [_input, _weight, _bias]
        if _mask_index is not None:
            inputs.append(_mask_index)
        if _past is not None:
            inputs.append(_past)
        # "present" output should be nullptr when the "past" input isn't included,
        # but ort requires an output shape to be specified?
        verify_with_ort_with_inputs(
            model,
            inputs,
            [_input.shape, present_output_shape],
            target=target,
            dev=dev,
            rtol=1e-4,
            atol=1e-4,
        )
    # Deliberately non-round sizes to exercise shape handling.
    batch_size = 11
    num_heads = 13
    head_size = 37
    sequence_length = 7
    input_hidden_size = 147
    weight_hidden_size = num_heads * head_size
    past_sequence_length = 17
    total_sequence_length = past_sequence_length + sequence_length
    # Required inputs
    input_array = np.random.normal(size=(batch_size, sequence_length, input_hidden_size)).astype(
        "float32"
    )
    # Weight packs Q, K and V projections, hence the factor of 3.
    weight = (
        np.random.normal(size=(input_hidden_size, 3 * weight_hidden_size)).astype("float32") * 0.1
    )
    bias = np.random.randn(3 * weight_hidden_size).astype("float32")
    # Optional inputs
    past = np.random.random((2, batch_size, num_heads, past_sequence_length, head_size)).astype(
        "float32"
    )
    for unidirectional in [0, 1]:
        for have_past in [False, True]:
            if not have_past:
                # Mask covers only the current sequence when there is no past.
                mask_index = np.random.randint(0, 2, (batch_size, sequence_length)).astype("int32")
                verify_attention(unidirectional, input_array, weight, bias, mask_index)
            else:
                mask_index = np.random.randint(0, 2, (batch_size, total_sequence_length)).astype(
                    "int32"
                )
                verify_attention(unidirectional, input_array, weight, bias, mask_index, past)
@tvm.testing.parametrize_targets
def test_qattention(target, dev):
    """Check the com.microsoft QAttention (quantized attention) contrib op,
    progressively enabling its optional inputs."""
    def verify_attention(
        _unidirectional,
        _input,
        _weight,
        _bias,
        _input_scale,
        _weight_scale,
        _mask_index=None,
        _input_zero_point=None,
        _weight_zero_point=None,
        _past=None,
    ):
        # Optional inputs are appended in QAttention's declared order.
        # Closes over num_heads / batch_size / head_size / sequence_length /
        # past_sequence_length, assigned below before the first call.
        input_names = ["input", "weight", "bias", "input_scale", "weight_scale"]
        if _mask_index is not None:
            input_names.append("mask_index")
        if _input_zero_point is not None:
            input_names.append("input_zero_point")
        if _weight_zero_point is not None:
            input_names.append("weight_zero_point")
        if _past is not None:
            input_names.append("past")
        node = onnx.helper.make_node(
            "QAttention",
            inputs=input_names,
            outputs=["output", "present"],
            domain="com.microsoft",
            num_heads=num_heads,
            unidirectional=_unidirectional,
        )
        # past/present pack K and V together, hence the leading dimension of 2;
        # present always covers past + current sequence.
        past_shape = (2, batch_size, num_heads, past_sequence_length, head_size)
        present_output_shape = (
            2,
            batch_size,
            num_heads,
            past_sequence_length + sequence_length,
            head_size,
        )
        inputs_info = [
            helper.make_tensor_value_info("input", TensorProto.UINT8, list(_input.shape)),
            helper.make_tensor_value_info("weight", TensorProto.UINT8, list(_weight.shape)),
            helper.make_tensor_value_info("bias", TensorProto.FLOAT, list(_bias.shape)),
            helper.make_tensor_value_info("input_scale", TensorProto.FLOAT, ()),
            helper.make_tensor_value_info("weight_scale", TensorProto.FLOAT, ()),
        ]
        if _mask_index is not None:
            inputs_info.append(
                helper.make_tensor_value_info(
                    "mask_index", TensorProto.INT32, list(_mask_index.shape)
                )
            )
        if _input_zero_point is not None:
            inputs_info.append(
                helper.make_tensor_value_info("input_zero_point", TensorProto.UINT8, ())
            )
        if _weight_zero_point is not None:
            inputs_info.append(
                helper.make_tensor_value_info("weight_zero_point", TensorProto.UINT8, ())
            )
        if _past is not None:
            inputs_info.append(
                helper.make_tensor_value_info("past", TensorProto.FLOAT, list(past_shape))
            )
        graph = helper.make_graph(
            [node],
            "qattention_test",
            inputs=inputs_info,
            outputs=[
                helper.make_tensor_value_info("output", TensorProto.FLOAT, list(_input.shape)),
                helper.make_tensor_value_info(
                    "present", TensorProto.FLOAT, list(present_output_shape)
                ),
            ],
        )
        model = helper.make_model(graph, producer_name="qattention_test")
        inputs = [_input, _weight, _bias, _input_scale, _weight_scale]
        if _mask_index is not None:
            inputs.append(_mask_index)
        if _input_zero_point is not None:
            inputs.append(_input_zero_point)
        if _weight_zero_point is not None:
            inputs.append(_weight_zero_point)
        if _past is not None:
            inputs.append(_past)
        verify_with_ort_with_inputs(
            model,
            inputs,
            [_input.shape, present_output_shape],
            target=target,
            dev=dev,
            rtol=1e-3,
            atol=1e-3,
        )
    # Deliberately non-round sizes to exercise shape handling.
    batch_size = 11
    num_heads = 13
    head_size = 37
    sequence_length = 7
    input_hidden_size = 147
    weight_hidden_size = num_heads * head_size
    past_sequence_length = 17
    total_sequence_length = past_sequence_length + sequence_length
    # Required inputs
    input_array = np.random.randint(
        0, 255, (batch_size, sequence_length, input_hidden_size)
    ).astype("uint8")
    # Weight packs Q, K and V projections, hence the factor of 3.
    weight = np.random.randint(0, 255, (input_hidden_size, 3 * weight_hidden_size)).astype("uint8")
    bias = np.random.randn(3 * weight_hidden_size).astype("float32")
    input_scale = np.random.random(1).astype("float32")
    weight_scale = np.random.random(1).astype("float32")
    # Optional inputs
    input_zero_point = np.random.randint(0, 255, 1).astype("uint8")
    weight_zero_point = np.random.randint(0, 255, 1).astype("uint8")
    past = np.random.random((2, batch_size, num_heads, past_sequence_length, head_size)).astype(
        "float32"
    )
    # Exercise each optional input incrementally (mask only, + input zp,
    # + weight zp) without past state, then everything at once with past.
    for unidirectional in [0, 1]:
        for have_past in [False, True]:
            if not have_past:
                mask_index = np.random.randint(0, 2, (batch_size, sequence_length)).astype("int32")
                verify_attention(
                    unidirectional,
                    input_array,
                    weight,
                    bias,
                    input_scale,
                    weight_scale,
                    mask_index,
                )
                verify_attention(
                    unidirectional,
                    input_array,
                    weight,
                    bias,
                    input_scale,
                    weight_scale,
                    mask_index,
                    input_zero_point,
                )
                verify_attention(
                    unidirectional,
                    input_array,
                    weight,
                    bias,
                    input_scale,
                    weight_scale,
                    mask_index,
                    input_zero_point,
                    weight_zero_point,
                )
            else:
                mask_index = np.random.randint(0, 2, (batch_size, total_sequence_length)).astype(
                    "int32"
                )
                verify_attention(
                    unidirectional,
                    input_array,
                    weight,
                    bias,
                    input_scale,
                    weight_scale,
                    mask_index,
                    input_zero_point,
                    weight_zero_point,
                    past,
                )
@tvm.testing.parametrize_targets
def test_skiplayernormalization(target, dev):
    """Check the com.microsoft SkipLayerNormalization contrib op."""

    def verify_skiplayernormalization(input_, skip, gamma, beta, bias):
        sln_node = onnx.helper.make_node(
            "SkipLayerNormalization",
            inputs=["input", "skip", "gamma", "beta", "bias"],
            outputs=["output"],
            domain="com.microsoft",
        )
        # Attach epsilon explicitly rather than via make_node kwargs.
        sln_node.attribute.append(onnx.helper.make_attribute("epsilon", 1e-4))
        # Preserve declaration order: input, skip, gamma, beta, bias.
        named_arrays = {
            "input": input_,
            "skip": skip,
            "gamma": gamma,
            "beta": beta,
            "bias": bias,
        }
        graph = helper.make_graph(
            [sln_node],
            "skiplayernormalization_test",
            inputs=[
                helper.make_tensor_value_info(name, TensorProto.FLOAT, list(arr.shape))
                for name, arr in named_arrays.items()
            ],
            outputs=[
                helper.make_tensor_value_info("output", TensorProto.FLOAT, list(input_.shape)),
            ],
        )
        model = helper.make_model(graph, producer_name="skiplayernormalization_test")
        verify_with_ort_with_inputs(
            model, [input_, skip, gamma, beta, bias], [input_.shape], target=target, dev=dev
        )

    hidden_size = 384
    batch_size = 4
    sequence_length = 4
    dtype = "float32"
    input_array = np.random.random((batch_size, sequence_length, hidden_size)).astype(dtype)
    skip = np.random.random((batch_size, sequence_length, hidden_size)).astype(dtype)
    gamma = np.random.uniform(0.5, 0.7, hidden_size).astype(dtype)
    beta = np.random.randn(hidden_size).astype(dtype) * 0.1
    bias = np.random.randn(hidden_size).astype(dtype)
    verify_skiplayernormalization(input_array, skip, gamma, beta, bias)
@tvm.testing.known_failing_targets("cuda")
@tvm.testing.parametrize_targets
def test_qgemm(target, dev):
    """Check the com.microsoft QGemm contrib op: per-tensor and per-column
    B quantization, alpha scaling, transposed A, and quantized output."""
    def verify_qgemm(
        a_shape,
        b_shape,
        y_shape,
        C=False,
        y_zp=False,
        b_per_tensor_quantization=False,
        alpha=1.0,
        transA=0,
        transB=1,
    ):
        a_array = np.random.randint(low=0, high=255, size=a_shape).astype("uint8")
        # NOTE(review): b_array uses uniform floats cast to uint8 while a_array
        # uses randint — presumably intentional noise for the quantized weight,
        # but confirm randint wasn't meant here as well.
        b_array = np.random.uniform(low=0, high=255, size=b_shape).astype("uint8")
        input_nodes = [
            helper.make_tensor_value_info("a", TensorProto.UINT8, list(a_shape)),
            helper.make_tensor_value_info("b", TensorProto.UINT8, list(b_shape)),
        ]
        # Scales and zero points are baked in as initializers, not graph inputs.
        initializer = [
            helper.make_tensor("a_scale", TensorProto.FLOAT, (), [np.random.rand()]),
            helper.make_tensor("a_zero_point", TensorProto.UINT8, (), [np.random.randint(0, 255)]),
        ]
        input_names = [
            "a",
            "a_scale",
            "a_zero_point",
            "b",
            "b_scale",
            "b_zero_point",
        ]
        input_values = [a_array, b_array]
        if b_per_tensor_quantization:
            # Single scale/zero-point pair for the whole B matrix.
            initializer.append(
                helper.make_tensor("b_scale", TensorProto.FLOAT, (), [np.random.rand()])
            )
            initializer.append(
                helper.make_tensor(
                    "b_zero_point", TensorProto.UINT8, (), [np.random.randint(0, 255)]
                )
            )
        else:  # per_colume_quantization
            # One scale/zero-point per output column; which B dimension that is
            # depends on transB.
            shape_value = b_shape[0] if transB else b_shape[1]
            b_scale_array = np.random.random(shape_value).astype("float32")
            w_zero_point_array = np.random.randint(0, 255, size=shape_value).astype("uint8")
            initializer.append(
                helper.make_tensor(
                    "b_scale", TensorProto.FLOAT, list(b_scale_array.shape), b_scale_array
                )
            )
            initializer.append(
                helper.make_tensor(
                    "b_zero_point",
                    TensorProto.UINT8,
                    list(w_zero_point_array.shape),
                    w_zero_point_array,
                )
            )
        output_tensor = helper.make_tensor_value_info("output", TensorProto.FLOAT, list(y_shape))
        if C is True:
            # Optional int32 bias C, one entry per output column.
            C_shape = (b_shape[0] if transB else b_shape[1],)
            C_array = np.random.randint(low=0, high=65536, size=C_shape).astype("int32")
            input_nodes.append(helper.make_tensor_value_info("C", TensorProto.INT32, list(C_shape)))
            input_names.append("C")
            input_values.append(C_array)
        if y_zp is True:
            # Supplying y_scale / y_zero_point switches the output to uint8.
            input_names.append("y_scale")
            initializer.append(
                helper.make_tensor("y_scale", TensorProto.FLOAT, (), [np.random.rand()])
            )
            input_names.append("y_zero_point")
            initializer.append(
                helper.make_tensor(
                    "y_zero_point", TensorProto.UINT8, (), [np.random.randint(0, 255)]
                )
            )
            output_tensor = helper.make_tensor_value_info(
                "output", TensorProto.UINT8, list(y_shape)
            )
        kwargs = {}
        kwargs["alpha"] = alpha
        kwargs["transA"] = transA
        kwargs["transB"] = transB
        node = helper.make_node(
            "QGemm",
            inputs=input_names,
            outputs=["output"],
            domain="com.microsoft",
            # Default values for other attributes:
            **kwargs,
        )
        graph = helper.make_graph(
            [node],
            "QGemm",
            inputs=input_nodes,
            outputs=[output_tensor],
            initializer=initializer,
        )
        model = helper.make_model(
            graph,
            producer_name="QGemm",
            opset_imports=[
                onnx.helper.make_opsetid("com.microsoft", 1),
            ],
        )
        verify_with_ort_with_inputs(model, input_values, target=target, dev=dev)
    # B per tensor quantization
    verify_qgemm(
        (20, 30),
        (50, 30),
        (20, 50),
        True,
        True,
        True,
    )
    # B per column quantization
    verify_qgemm(
        (20, 30),
        (50, 30),
        (20, 50),
        True,
        True,
        False,
    )
    # test alpha
    verify_qgemm(
        (20, 30),
        (50, 30),
        (20, 50),
        True,
        True,
        True,
        0.5,
    )
    # test transpose A
    verify_qgemm(
        (20, 50),
        (20, 80),
        (50, 80),
        True,
        True,
        True,
        0.5,
        1,
        0,
    )
@tvm.testing.known_failing_targets("cuda")
@tvm.testing.parametrize_targets
def test_qlinearconv(target, dev):
    """QLinearConv: build a single quantized-conv ONNX model and compare TVM vs ONNX Runtime."""
    def verify_qlinearconv(
        x_shape,
        w_shape,
        y_shape,
        padding,
        kernel_shape,
        strides,
        dilations,
        auto_pad="NOTSET",
        bias=False,
        per_channel_quantization=False,
    ):
        # Activations/weights and all quantization parameters are random;
        # only TVM-vs-ORT agreement is checked, not specific values.
        x_array = np.random.randint(low=0, high=255, size=x_shape).astype("uint8")
        w_array = np.random.uniform(low=0, high=255, size=w_shape).astype("uint8")
        initializer = [
            helper.make_tensor("x_scale", TensorProto.FLOAT, (), [np.random.rand()]),
            helper.make_tensor("x_zero_point", TensorProto.UINT8, (), [np.random.randint(0, 255)]),
            helper.make_tensor("y_scale", TensorProto.FLOAT, (), [np.random.rand()]),
            helper.make_tensor("y_zero_point", TensorProto.UINT8, (), [np.random.randint(0, 255)]),
        ]
        input_nodes = [
            helper.make_tensor_value_info("x", TensorProto.UINT8, list(x_shape)),
            helper.make_tensor_value_info("w", TensorProto.UINT8, list(w_shape)),
        ]
        input_names = [
            "x",
            "x_scale",
            "x_zero_point",
            "w",
            "w_scale",
            "w_zero_point",
            "y_scale",
            "y_zero_point",
        ]
        input_values = [x_array, w_array]
        if per_channel_quantization:
            # One scale/zero-point per output channel (w_shape[0]).
            w_scale_array = np.random.random(w_shape[0]).astype("float32")
            w_zero_point_array = np.random.randint(0, 255, size=w_shape[0]).astype("uint8")
            initializer.append(
                helper.make_tensor("w_scale", TensorProto.FLOAT, [w_shape[0]], w_scale_array)
            )
            initializer.append(
                helper.make_tensor(
                    "w_zero_point", TensorProto.UINT8, [w_shape[0]], w_zero_point_array
                )
            )
        else:
            # Scalar (per-tensor) weight quantization parameters.
            initializer.append(
                helper.make_tensor("w_scale", TensorProto.FLOAT, (), [np.random.rand()])
            )
            initializer.append(
                helper.make_tensor(
                    "w_zero_point", TensorProto.UINT8, (), [np.random.randint(0, 255)]
                )
            )
        if bias is True:
            # Optional int32 bias, one value per output channel.
            b_shape = w_shape[0:1]
            b_array = np.random.randint(low=0, high=65536, size=b_shape).astype("int32")
            input_nodes.append(helper.make_tensor_value_info("B", TensorProto.INT32, list(b_shape)))
            input_names.append("B")
            input_values.append(b_array)
        if padding is None:
            ## autopadding with unset default attributes
            # Only emit strides/dilations when they differ from the defaults,
            # so auto_pad inference is exercised with minimal attributes.
            kwargs = {}
            if not all(list(s == 1 for s in strides)):
                kwargs["strides"] = strides
            if not all(list(d == 1 for d in dilations)):
                kwargs["dilations"] = dilations
            node = helper.make_node(
                "QLinearConv",
                inputs=input_names,
                outputs=["y"],
                # Default values for other attributes:
                auto_pad=auto_pad,
                **kwargs,
            )
        else:
            node = helper.make_node(
                "QLinearConv",
                inputs=input_names,
                outputs=["y"],
                kernel_shape=kernel_shape,
                # Default values for other attributes:
                strides=strides,
                dilations=dilations,
                # groups=1
                pads=padding,
            )
        graph = helper.make_graph(
            [node],
            "conv_test",
            inputs=input_nodes,
            outputs=[helper.make_tensor_value_info("y", TensorProto.UINT8, list(y_shape))],
            initializer=initializer,
        )
        model = helper.make_model(graph, producer_name="qlinearconv_test")
        # opt_level=1 will cause error
        verify_with_ort_with_inputs(model, input_values, opt_level=2, target=target, dev=dev)
    def repeat(num, dims):
        # Shorthand for a `dims`-long tuple of `num`, e.g. repeat(3, 2) -> (3, 3).
        return tuple(num for _ in range(dims))
    # only support QLinearConv2d because only support qnn.conv2d
    dims = 2
    # Convolution with padding
    verify_qlinearconv(
        (1, 1) + repeat(5, dims),
        (1, 1) + repeat(3, dims),
        (1, 1) + repeat(5, dims),
        2 * repeat(1, dims),
        repeat(3, dims),
        repeat(1, dims),
        repeat(1, dims),
    )
    # Convolution with bias
    verify_qlinearconv(
        (1, 1) + repeat(5, dims),
        (1, 1) + repeat(3, dims),
        (1, 1) + repeat(5, dims),
        2 * repeat(1, dims),
        repeat(3, dims),
        repeat(1, dims),
        repeat(1, dims),
        bias=True,
    )
    # Convolution with asymmetric padding
    verify_qlinearconv(
        (1, 1) + repeat(5, dims),
        (1, 1) + repeat(3, dims),
        (1, 1) + repeat(4, dims),
        repeat(0, dims) + repeat(1, dims),
        repeat(3, dims),
        repeat(1, dims),
        repeat(1, dims),
    )
    # Convolution without padding
    verify_qlinearconv(
        (1, 1) + repeat(5, dims),
        (1, 1) + repeat(3, dims),
        (1, 1) + repeat(3, dims),
        2 * repeat(0, dims),
        repeat(3, dims),
        repeat(1, dims),
        repeat(1, dims),
    )
    # Convolution with autopadding
    verify_qlinearconv(
        (1, 1) + repeat(5, dims),
        (1, 1) + repeat(3, dims),
        (1, 1) + repeat(5, dims),
        None,
        repeat(3, dims),
        repeat(1, dims),
        repeat(1, dims),
        auto_pad="SAME_UPPER",
    )
    # Convolution with valid autopadding
    verify_qlinearconv(
        (1, 1) + repeat(5, dims),
        (1, 1) + repeat(3, dims),
        (1, 1) + repeat(3, dims),
        None,
        repeat(3, dims),
        repeat(1, dims),
        repeat(1, dims),
        auto_pad="VALID",
    )
    # Convolution with non uniform stride
    verify_qlinearconv(
        (1, 1) + repeat(5, dims),
        (1, 1) + repeat(3, dims),
        (1, 1) + repeat(3, dims),
        None,
        repeat(3, dims),
        repeat(2, dims),
        repeat(1, dims),
        auto_pad="SAME_UPPER",
    )
    # Convolution with dilation
    verify_qlinearconv(
        (1, 1) + repeat(5, dims),
        (1, 1) + repeat(3, dims),
        (1, 1) + repeat(5, dims),
        2 * repeat(2, dims),
        repeat(3, dims),
        repeat(1, dims),
        repeat(2, dims),
    )
    # Convolution with per channel quantization
    verify_qlinearconv(
        (1, 1) + repeat(5, dims),
        (1, 1) + repeat(3, dims),
        (1, 1) + repeat(3, dims),
        None,
        repeat(3, dims),
        repeat(1, dims),
        repeat(1, dims),
        per_channel_quantization=True,
    )
# TODO(vvchernov): fix problem with quantization on cuda
@tvm.testing.known_failing_targets("cuda")
@tvm.testing.parametrize_targets
def test_qlinearmatmul(target, dev):
    """QLinearMatMul: quantized matmul vs ONNX Runtime over several rank/dtype combinations."""
    def verify_qlinearmatmul(
        x_shape,
        w_shape,
        y_shape,
        x_dtype="uint8",
        w_dtype="uint8",
    ):
        def get_randint_numpy_scalar(dtype="uint8"):
            # Draw a zero-point in the valid range of the requested dtype.
            if dtype == "uint8":
                return np.random.randint(0, 255)
            else:  # "int8"
                return np.random.randint(-128, 127)
        if x_dtype == "uint8":
            x_array = np.random.randint(low=0, high=255, size=x_shape).astype("uint8")
        else:  # "int8"
            x_array = np.random.randint(low=-128, high=127, size=x_shape).astype("int8")
        if w_dtype == "uint8":
            w_array = np.random.uniform(low=0, high=255, size=w_shape).astype("uint8")
        else:  # "int8"
            w_array = np.random.uniform(low=-128, high=127, size=w_shape).astype("int8")
        x_proto_type = mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(x_dtype)]
        w_proto_type = mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(w_dtype)]
        # Output is uint8 only when both inputs are uint8; otherwise int8.
        y_dtype = "int8"
        if x_dtype == "uint8" and w_dtype == "uint8":
            y_dtype = "uint8"
        y_proto_type = mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(y_dtype)]
        initializer = [
            helper.make_tensor("x_scale", TensorProto.FLOAT, (), [np.random.rand()]),
            # TODO: 0 value for int8?
            helper.make_tensor(
                "x_zero_point", x_proto_type, (), [get_randint_numpy_scalar(x_dtype)]
            ),
            helper.make_tensor("w_scale", TensorProto.FLOAT, (), [np.random.rand()]),
            # TODO: 0 value for int8?
            helper.make_tensor(
                "w_zero_point", w_proto_type, (), [get_randint_numpy_scalar(w_dtype)]
            ),
            helper.make_tensor("y_scale", TensorProto.FLOAT, (), [np.random.rand()]),
            helper.make_tensor(
                "y_zero_point", y_proto_type, (), [get_randint_numpy_scalar(y_dtype)]
            ),
        ]
        input_nodes = [
            helper.make_tensor_value_info("x", x_proto_type, list(x_shape)),
            helper.make_tensor_value_info("w", w_proto_type, list(w_shape)),
        ]
        input_names = [
            "x",
            "x_scale",
            "x_zero_point",
            "w",
            "w_scale",
            "w_zero_point",
            "y_scale",
            "y_zero_point",
        ]
        input_values = [x_array, w_array]
        node = helper.make_node(
            "QLinearMatMul",
            inputs=input_names,
            outputs=["y"],
        )
        # NOTE(review): y_proto_type is recomputed here with the same rule as above.
        y_proto_type = mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype("int8")]
        if x_dtype == "uint8" and w_dtype == "uint8":
            y_proto_type = mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype("uint8")]
        graph = helper.make_graph(
            [node],
            "qmatmul_test",
            inputs=input_nodes,
            outputs=[helper.make_tensor_value_info("y", y_proto_type, list(y_shape))],
            initializer=initializer,
        )
        model = helper.make_model(graph, producer_name="qlinearmatmul_test")
        # opt_level=1 will cause error
        verify_with_ort_with_inputs(model, input_values, opt_level=2, target=target, dev=dev)
    # Default matmul both ranks = 2 (x_dtype = "uint8", w_dtype = "uint8")
    verify_qlinearmatmul((2, 3), (3, 2), (2, 2))
    # Default matmul both ranks = 2 (x_dtype = "int8", w_dtype = "int8")
    verify_qlinearmatmul((2, 3), (3, 2), (2, 2), "int8", "int8")
    # TODO(vvchernov): problems on ONNX Runtime side and type check (onnx.py:L4763) on TVM side
    # Default matmul both ranks = 2 (x_dtype = "uint8", w_dtype = "int8")
    # verify_qlinearmatmul((2, 3), (3, 2), (2, 2), "uint8", "int8")
    # TODO(vvchernov): problems on ONNX Runtime side and type check (onnx.py:L4763) on TVM side
    # Default matmul both ranks = 2 (x_dtype = "int8", w_dtype = "uint8")
    # verify_qlinearmatmul((2, 3), (3, 2), (2, 2), "int8", "uint8")
    # Reduced matmul: x_ranks = 1, w_rank = 2 (x_dtype = "uint8", w_dtype = "uint8")
    verify_qlinearmatmul((3,), (3, 2), (2,))
    # Special case matmul: x_ranks = 3, w_rank = 2 (x_dtype = "uint8", w_dtype = "uint8")
    verify_qlinearmatmul((2, 3, 4), (4, 3), (2, 3, 3))
    # GPT2-style matmul both ranks = 4 (x_dtype = "uint8", w_dtype = "uint8")
    verify_qlinearmatmul((2, 4, 3, 3), (2, 4, 3, 3), (2, 4, 3, 3))
    # Asymetric matmul: x_ranks = 4, w_rank = 3 (x_dtype = "uint8", w_dtype = "uint8")
    verify_qlinearmatmul((2, 4, 3, 3), (4, 3, 3), (2, 4, 3, 3))
    # Asymetric matmul: x_ranks = 2, w_rank = 3 (x_dtype = "uint8", w_dtype = "uint8")
    # verify_qlinearmatmul((3, 3), (4, 3, 3), (4, 3, 3))
@tvm.testing.parametrize_targets
def test_qlinearconcat(target, dev):
    """Quantized Concat: build a float Concat graph, quantize it, and compare with ORT."""
    def verify_qlinearconcat(shapes, out_shape, axis=None):
        # Inputs are named "a", "b", "c", ... in order.
        names = [chr(ord("a") + idx) for idx in range(len(shapes))]
        value_infos = [
            helper.make_tensor_value_info(name, TensorProto.FLOAT, list(shp))
            for name, shp in zip(names, shapes)
        ]
        # Sample data is drawn for parity with the other qlinear tests but unused here.
        _ = [np.random.random(shp).astype("float32") for shp in shapes]
        concat_node = helper.make_node("Concat", names, ["C"])
        if axis is not None:
            concat_node.attribute.append(helper.make_attribute("axis", axis))
        graph = helper.make_graph(
            [concat_node],
            "qlinearconcat_test",
            inputs=value_infos,
            outputs=[helper.make_tensor_value_info("C", TensorProto.FLOAT, list(out_shape))],
        )
        model = helper.make_model(graph, producer_name="qlinearconcat_test")
        quantize_and_verify_with_ort(model, names, shapes, target, dev)
    verify_qlinearconcat([[2, 1], [2, 1]], [4, 1], 0)
    verify_qlinearconcat([[2, 1], [2, 1]], [2, 2], 1)
    verify_qlinearconcat([[1, 2], [2, 2], [3, 2]], [6, 2], 0)
@tvm.testing.parametrize_targets
def test_qlinearadd(target, dev):
    """Quantized Add: quantize a float Add graph and compare TVM with ONNX Runtime."""
    def verify_qlinearadd(a_shape, b_shape, c_shape):
        # Sample data is drawn for parity with the other qlinear tests but unused here.
        _ = np.random.random(a_shape).astype("float32")
        _ = np.random.random(b_shape).astype("float32")
        in_names = [
            "a",
            "b",
        ]
        in_infos = [
            helper.make_tensor_value_info(name, TensorProto.FLOAT, list(shp))
            for name, shp in zip(in_names, (a_shape, b_shape))
        ]
        add_node = helper.make_node("Add", ["a", "b"], ["C"])
        graph = helper.make_graph(
            [add_node],
            "qlinearadd_test",
            inputs=in_infos,
            outputs=[helper.make_tensor_value_info("C", TensorProto.FLOAT, list(c_shape))],
        )
        model = helper.make_model(graph, producer_name="qlinearadd_test")
        quantize_and_verify_with_ort(model, in_names, [a_shape, b_shape], target, dev)
    # Elementwise, broadcast-over-vector, and broadcast-over-matrix cases.
    verify_qlinearadd([4, 2], [4, 2], [4, 2])
    verify_qlinearadd([4, 2], [2], [4, 2])
    verify_qlinearadd([5, 1, 7], [2, 7], [5, 2, 7])
@tvm.testing.parametrize_targets
def test_qlinearmul(target, dev):
    """Quantized Mul: quantize a float Mul graph and compare TVM with ONNX Runtime."""
    def verify_qlinearmul(a_shape, b_shape, c_shape):
        # Sample data is drawn for parity with the other qlinear tests but unused here.
        _ = np.random.random(a_shape).astype("float32")
        _ = np.random.random(b_shape).astype("float32")
        in_names = [
            "a",
            "b",
        ]
        in_infos = [
            helper.make_tensor_value_info(name, TensorProto.FLOAT, list(shp))
            for name, shp in zip(in_names, (a_shape, b_shape))
        ]
        mul_node = helper.make_node("Mul", in_names, ["C"])
        graph = helper.make_graph(
            [mul_node],
            "qlinearmul_test",
            inputs=in_infos,
            outputs=[helper.make_tensor_value_info("C", TensorProto.FLOAT, list(c_shape))],
        )
        model = helper.make_model(graph, producer_name="qlinearmul_test")
        quantize_and_verify_with_ort(model, in_names, [a_shape, b_shape], target, dev)
    # Vector, elementwise, and two broadcasting cases.
    verify_qlinearmul([7], [7], [7])
    verify_qlinearmul([4, 2], [4, 2], [4, 2])
    verify_qlinearmul([4, 2], [2], [4, 2])
    verify_qlinearmul([5, 1, 7], [2, 7], [5, 2, 7])
@pytest.mark.skip(reason="See https://github.com/apache/tvm/issues/11375")
@tvm.testing.parametrize_targets
def test_qlinearleakyrelu(target, dev):
    """Quantized LeakyRelu: quantize a float LeakyRelu graph and compare with ORT."""
    def verify_qlinearleakyrelu(inshape, kwargs):
        data = np.random.random(inshape).astype("float32")
        relu_node = helper.make_node("LeakyRelu", ["X"], ["Y"], **kwargs)
        graph = helper.make_graph(
            [relu_node],
            "qlinearRelu_test",
            inputs=[helper.make_tensor_value_info("X", TensorProto.FLOAT, list(data.shape))],
            outputs=[helper.make_tensor_value_info("Y", TensorProto.FLOAT, list(data.shape))],
        )
        model = helper.make_model(graph, producer_name="qlinearRelu_test")
        common_args = (model, ["X"], [data.shape], target, dev)
        # CUDA needs looser tolerances for the quantized comparison.
        if dev == "cuda":
            quantize_and_verify_with_ort(*common_args, rtol=1e-2, atol=1e-2)
        else:
            quantize_and_verify_with_ort(*common_args)
    verify_qlinearleakyrelu([2, 4, 5, 6], {"alpha": 0.25})
    verify_qlinearleakyrelu([6, 5, 6, 7], {"alpha": 0.35})
    verify_qlinearleakyrelu([5, 1, 4, 6], {"alpha": 0.65})
@pytest.mark.skip(reason="See https://github.com/apache/tvm/issues/11375")
@tvm.testing.parametrize_targets
def test_qlinearsigmoid(target, dev):
    """Quantized Sigmoid: quantize a float Sigmoid graph and compare with ORT."""
    def verify_qlinearsigmoid(a_shape):
        # Sample data is drawn for parity with the other qlinear tests but unused here.
        _ = np.random.random(a_shape).astype("float32")
        value_info = helper.make_tensor_value_info("a", TensorProto.FLOAT, list(a_shape))
        sigmoid_node = helper.make_node("Sigmoid", ["a"], ["B"])
        graph = helper.make_graph(
            [sigmoid_node],
            "qlinearsigmoid_test",
            inputs=[value_info],
            outputs=[helper.make_tensor_value_info("B", TensorProto.FLOAT, list(a_shape))],
        )
        model = helper.make_model(graph, producer_name="qlinearsigmoid_test")
        quantize_and_verify_with_ort(model, ["a"], [a_shape], target, dev)
    # Matrix, vector, 3-D, and scalar (rank-0) inputs.
    verify_qlinearsigmoid([4, 2])
    verify_qlinearsigmoid([5])
    verify_qlinearsigmoid([3, 4, 5])
    verify_qlinearsigmoid([])
@tvm.testing.parametrize_targets
def test_qlinearsoftmax(target, dev):
    """Quantized Softmax: quantize a float Softmax graph and compare with ORT."""
    def verify_qlinearsoftmax(a_shape):
        # Sample data is drawn for parity with the other qlinear tests but unused here.
        _ = np.random.random(a_shape).astype("float32")
        value_info = helper.make_tensor_value_info("a", TensorProto.FLOAT, list(a_shape))
        softmax_node = helper.make_node("Softmax", ["a"], ["B"])
        graph = helper.make_graph(
            [softmax_node],
            "qlinearsoftmax_test",
            inputs=[value_info],
            outputs=[helper.make_tensor_value_info("B", TensorProto.FLOAT, list(a_shape))],
        )
        model = helper.make_model(graph, producer_name="qlinearsoftmax_test")
        quantize_and_verify_with_ort(model, ["a"], [a_shape], target, dev)
    # Matrix, vector, and 3-D inputs.
    verify_qlinearsoftmax([4, 2])
    verify_qlinearsoftmax([5])
    verify_qlinearsoftmax([3, 4, 5])
@tvm.testing.parametrize_targets("llvm")
def test_random_bernoulli(target, dev):
    """Bernoulli op: validate 0/1 outputs, sample statistics, and seeded determinism."""
    def _get_tvm_output(
        inputs,
        out_dtype="int32",
        seed=None,
        target=target,
        dev=dev,
        use_vm=False,
        freeze_params=False,
    ):
        # Build a one-node Bernoulli model around `inputs` and run it through TVM.
        def get_bernoulli_model(shape, in_dtype="float32", out_dtype="int32", seed=None):
            onnx_itype = mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(in_dtype)]
            onnx_otype = mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(out_dtype)]
            node = helper.make_node(
                "Bernoulli",
                ["input"],
                ["output"],
            )
            dtype_attr = helper.make_attribute("dtype", onnx_otype)
            node.attribute.append(dtype_attr)
            if seed is not None:
                # ONNX expects the seed attribute as a float.
                seed_attr = helper.make_attribute("seed", float(seed))
                node.attribute.append(seed_attr)
            graph = helper.make_graph(
                [node],
                "random_bernoulli_test",
                inputs=[helper.make_tensor_value_info("input", onnx_itype, list(shape))],
                outputs=[helper.make_tensor_value_info("output", onnx_otype, list(shape))],
            )
            return helper.make_model(graph, producer_name="random_bernoulli_test")
        shape = inputs.shape
        in_dtype = inputs.dtype
        model = get_bernoulli_model(shape, in_dtype, out_dtype, seed)
        if use_vm:
            return get_tvm_output_with_vm(
                model,
                inputs,
                target,
                dev,
                freeze_params=freeze_params,
            )
        else:
            return get_tvm_output(
                model,
                inputs,
                target,
                dev,
            )
    def binom_test(input, ideal_mean, threshold=0.05):
        # This test is strictly appropriate when input probabilities are all identical.
        # In that case, it should lead to flaky failures in only one run in a million (p>=1e-6).
        # The test should be over-conservative when input probabilities are not identical.
        # (i.e., It should have a rate of flaky failures lower than one run in a million.)
        # If this test starts repeatedly throwing flaky failures, consult a statistician
        # in addition to your regular debugging.
        bnm_test_res = scipy.stats.binomtest(
            k=np.sum(input, dtype="int32"), n=len(input), p=ideal_mean
        )
        return bnm_test_res.pvalue > threshold
    def verify_bernoulli(
        inputs=None,
        shape=[],
        in_dtype="float32",
        out_dtype="int32",
        seed=None,
        target=target,
        dev=dev,
        use_vm=False,
        freeze_params=False,
        in_out_equal=False,
    ):
        # If no inputs are given, draw uniform probabilities of the requested shape.
        if inputs is None:
            assert len(shape) != 0
            inputs = np.random.uniform(size=shape).astype(in_dtype)
        tvm_out = _get_tvm_output(
            inputs,
            out_dtype,
            seed,
            target,
            dev,
            use_vm,
            freeze_params,
        )
        if isinstance(tvm_out, list):
            tvm_out = tvm_out[0]
        # check that values are 0 or 1
        tvm_flat = tvm_out.flatten()
        assert np.array_equal(tvm_flat, tvm_flat.astype("bool"))
        if in_out_equal:
            # With 0/1 probabilities the output must equal the input exactly.
            tvm.testing.assert_allclose(inputs, tvm_out)
        else:
            # check that mean value is close to the theoretical one by binomial test
            ideal_mean = np.mean(inputs)
            repeats = 3
            check = False
            # Retry with fresh seeds to keep the overall flake rate negligible.
            for i in range(repeats):
                if binom_test(tvm_flat, ideal_mean):
                    check = True
                    break
                else:
                    # repeat with new seed
                    seed = np.random.randint(1e6)
                    tvm_flat = _get_tvm_output(
                        inputs,
                        out_dtype,
                        seed,
                        target,
                        dev,
                        use_vm,
                        freeze_params,
                    ).flatten()
            assert check, "Binomial test failed"
    # Test input sequence of 0 and 1
    inputs = np.random.randint(2, size=[10000]).astype("float32")
    verify_bernoulli(inputs, in_out_equal=True)
    # Binomial test input with 0.5 values
    val_num = 10000
    inputs = np.ones([val_num], dtype="float32") * 0.5
    verify_bernoulli(inputs)
    # Binomial test input with 0.1 values
    inputs = np.ones([val_num], dtype="float32") * 0.1
    verify_bernoulli(inputs)
    # Simple test
    verify_bernoulli(shape=[val_num])
    # Floating output type
    verify_bernoulli(shape=[val_num], out_dtype="float32")
    # Double input type
    verify_bernoulli(shape=[val_num], in_dtype="float64")
    # Test N-D tensor generation
    verify_bernoulli(shape=[2, 4, 100, 100])
    # Test with seed
    verify_bernoulli(shape=[val_num], seed=np.random.randint(1e6))
    # Test result determinism with the same seeds
    inputs = np.random.uniform(size=[val_num])
    fixed_seed = np.random.randint(1e6)
    tvm_out_1 = _get_tvm_output(inputs, seed=fixed_seed)
    tvm_out_2 = _get_tvm_output(inputs, seed=fixed_seed)
    tvm.testing.assert_allclose(tvm_out_1, tvm_out_2)
@tvm.testing.parametrize_targets("llvm")
def test_random_uniform(target, dev):
    """RandomUniform: check output shape, dtype, bounds, and seeded determinism."""
    def get_random_uniform(shape, dtype="float32", high=1.0, low=0.0, seed=None):
        onnx_dtype = mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(dtype)]
        node = helper.make_node(
            "RandomUniform", [], ["out"], shape=shape, dtype=onnx_dtype, high=high, low=low
        )
        if seed is not None:
            node.attribute.append(helper.make_attribute("seed", seed))
        graph = helper.make_graph(
            [node],
            "random_uniform_test",
            inputs=[],
            outputs=[helper.make_tensor_value_info("out", onnx_dtype, shape)],
        )
        model = helper.make_model(graph, producer_name="random_uniform_test")
        # Structural equality between runs only holds when the seed is fixed.
        return get_tvm_output_with_vm(
            model,
            [],
            target=target,
            dev=dev,
            validate_structural_equal=(seed is not None),
        )
    # Function runs and produces the requested shape and dtype.
    vals = get_random_uniform([10], dtype="float32")
    assert list(vals.shape) == [10]
    assert vals.dtype == "float32"
    # N-D tensor generation.
    vals = get_random_uniform([1, 3, 100, 100], dtype="float32")
    assert list(vals.shape) == [1, 3, 100, 100]
    # Bounds are respected.
    vals = get_random_uniform(shape=[100], high=100.0, low=-100.0)
    assert list(vals.shape) == [100]
    assert all(vals >= -100) and all(vals <= 100)
    # A fixed seed reproduces the same values on a second run.
    first = get_random_uniform(shape=[10], seed=1)
    second = get_random_uniform(shape=[10], seed=1)
    assert all(first == second)
    # Expected output for a known seed.
    real = get_random_uniform(shape=[10], seed=5.0)
    expected = np.asarray(
        [
            0.043976,
            0.96656,
            0.292199,
            0.904297,
            0.25167,
            0.521778,
            0.778985,
            0.085463,
            0.939846,
            0.194201,
        ]
    )
    tvm.testing.assert_allclose(real, expected, rtol=1e-5)
@tvm.testing.parametrize_targets("llvm")
def test_random_uniform_like(target, dev):
    """RandomUniformLike: shape/dtype follow the input tensor; also checks bounds and a fixed seed."""
    def get_random_uniform_like(input_, shape, dtype=None, high=1.0, low=0.0, seed=None):
        node = helper.make_node("RandomUniformLike", ["in"], ["out"], high=high, low=low)
        if seed is not None:
            seed_attr = helper.make_attribute("seed", seed)
            node.attribute.append(seed_attr)
        ONNX_DTYPE = None
        if dtype is not None:
            # Explicit dtype attribute overrides the input tensor's dtype.
            ONNX_DTYPE = mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(dtype)]
            dtype_attr = helper.make_attribute("dtype", ONNX_DTYPE)
            node.attribute.append(dtype_attr)
        else:
            # Without the attribute, the output dtype is inherited from the input.
            dtype = input_.dtype
            ONNX_DTYPE = mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(dtype)]
        graph = helper.make_graph(
            [node],
            "random_uniform_test",
            inputs=[helper.make_tensor_value_info("in", ONNX_DTYPE, shape)],
            outputs=[helper.make_tensor_value_info("out", ONNX_DTYPE, shape)],
        )
        model = helper.make_model(graph, producer_name="random_uniform_like_test")
        # Structural equality between runs only holds when the seed is fixed.
        return get_tvm_output_with_vm(
            model,
            [input_],
            target=target,
            dev=dev,
            validate_structural_equal=(seed is not None),
        )
    # Check that function runs and produces proper shape and dtype.
    shape = [10]
    input_array = np.random.random(shape).astype("float32")
    vals = get_random_uniform_like(input_array, shape, dtype="float32")
    assert list(vals.shape) == [10]
    assert vals.dtype == "float32"
    # Test N-D tensor generation.
    shape = [1, 3, 100, 100]
    input_array = np.random.random(shape).astype("float32")
    vals = get_random_uniform_like(input_array, shape, dtype="float64")
    assert list(vals.shape) == shape
    assert vals.dtype == "float64"
    # Check that bounds aren't exceeded.
    shape = [100]
    input_array = np.random.random(shape).astype("float64")
    vals = get_random_uniform_like(input_array, shape, high=100.0, low=-100.0)
    assert list(vals.shape) == shape
    assert all(vals >= -100) and all(vals <= 100)
    # Test against an expected output with a fixed seed.
    shape = [10]
    input_array = np.random.random(shape).astype("float32")
    real = get_random_uniform_like(input_array, shape=[10], seed=5.0)
    expected = np.asarray(
        [
            0.043976,
            0.96656,
            0.292199,
            0.904297,
            0.25167,
            0.521778,
            0.778985,
            0.085463,
            0.939846,
            0.194201,
        ]
    )
    tvm.testing.assert_allclose(real, expected, rtol=1e-5)
@tvm.testing.parametrize_targets("llvm")
def test_random_normal(target, dev):
    """RandomNormal: check sample statistics (mean/std) and seeded determinism."""
    def get_random_normal(shape, dtype="float32", scale=1.0, mean=0.0, seed=None):
        onnx_dtype = mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(dtype)]
        node = helper.make_node(
            "RandomNormal", [], ["out"], shape=shape, dtype=onnx_dtype, scale=scale, mean=mean
        )
        if seed is not None:
            node.attribute.append(helper.make_attribute("seed", seed))
        graph = helper.make_graph(
            [node],
            "random_normal_test",
            inputs=[],
            outputs=[helper.make_tensor_value_info("out", onnx_dtype, shape)],
        )
        model = helper.make_model(graph, producer_name="random_normal_test")
        # Structural equality between runs only holds when the seed is fixed.
        return get_tvm_output_with_vm(
            model,
            [],
            target=target,
            dev=dev,
            validate_structural_equal=(seed is not None),
        )
    # Standard normal: mean ~0, std ~1 over a large N-D sample.
    vals = get_random_normal([1, 3, 100, 100], dtype="float32")
    assert list(vals.shape) == [1, 3, 100, 100]
    tvm.testing.assert_allclose(vals.mean(), 0.0, rtol=0.1, atol=0.1)
    tvm.testing.assert_allclose(np.std(vals), 1.0, rtol=0.1, atol=0.1)
    # Shifted and scaled normal: mean ~2, std ~10.
    vals = get_random_normal([1, 3, 100, 100], mean=2.0, scale=10.0, dtype="float32")
    assert list(vals.shape) == [1, 3, 100, 100]
    tvm.testing.assert_allclose(vals.mean(), 2.0, rtol=0.1, atol=0.1)
    tvm.testing.assert_allclose(np.std(vals), 10.0, rtol=0.1, atol=0.1)
    # A fixed seed reproduces the same values on a second run.
    first = get_random_normal(shape=[10], seed=1.0)
    second = get_random_normal(shape=[10], seed=1.0)
    assert all(first == second)
@tvm.testing.parametrize_targets("llvm")
def test_random_normal_like(target, dev):
    """RandomNormalLike: output shape follows the input tensor; checks sample mean/std."""
    def get_random_normal_like(input_, shape, dtype="float32", scale=1.0, mean=0.0, seed=None):
        ONNX_DTYPE = mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(dtype)]
        node = helper.make_node(
            "RandomNormalLike", ["in"], ["out"], dtype=ONNX_DTYPE, scale=scale, mean=mean
        )
        if seed is not None:
            seed_attr = helper.make_attribute("seed", seed)
            node.attribute.append(seed_attr)
        graph = helper.make_graph(
            [node],
            "random_normal_like_test",
            inputs=[helper.make_tensor_value_info("in", ONNX_DTYPE, shape)],
            outputs=[helper.make_tensor_value_info("out", ONNX_DTYPE, shape)],
        )
        model = helper.make_model(graph, producer_name="random_normal_like_test")
        # Structural equality between runs only holds when the seed is fixed.
        return get_tvm_output_with_vm(
            model,
            [input_],
            target=target,
            dev=dev,
            validate_structural_equal=(seed is not None),
        )
    # Test N-D tensor generation: standard normal, mean ~0 and std ~1.
    shape = [1, 3, 100, 100]
    input_array = np.random.random(shape).astype("float32")
    vals = get_random_normal_like(input_array, [1, 3, 100, 100], dtype="float32")
    assert list(vals.shape) == [1, 3, 100, 100]
    tvm.testing.assert_allclose(vals.mean(), 0.0, rtol=0.1, atol=0.1)
    tvm.testing.assert_allclose(np.std(vals), 1.0, rtol=0.1, atol=0.1)
    # Test mean=2.0 scale=10.0
    shape = [1, 3, 100, 100]
    input_array = np.random.random(shape).astype("float32")
    vals = get_random_normal_like(
        input_array, [1, 3, 100, 100], mean=2.0, scale=10.0, dtype="float32"
    )
    assert list(vals.shape) == [1, 3, 100, 100]
    tvm.testing.assert_allclose(vals.mean(), 2.0, rtol=0.1, atol=0.1)
    tvm.testing.assert_allclose(np.std(vals), 10.0, rtol=0.1, atol=0.1)
@tvm.testing.parametrize_targets("llvm")
def test_multinomial(target, dev):
    """Multinomial sampling: validate output shape and that indices are in range."""
    def get_multinomial(probs_array, shape, sample_size, seed=None):
        in_proto = mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype("float32")]
        out_proto = mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype("int32")]
        node = helper.make_node("Multinomial", ["in"], ["out"], sample_size=sample_size)
        if seed is not None:
            node.attribute.append(helper.make_attribute("seed", seed))
        graph = helper.make_graph(
            [node],
            "multinomial_test",
            inputs=[helper.make_tensor_value_info("in", in_proto, shape)],
            outputs=[helper.make_tensor_value_info("out", out_proto, shape)],
        )
        model = helper.make_model(graph, producer_name="multinomial_test")
        # Structural equality between runs only holds when the seed is fixed.
        return get_tvm_output_with_vm(
            model,
            [probs_array],
            target=target,
            dev=dev,
            validate_structural_equal=(seed is not None),
        )
    # 1-D distribution.
    shape = [3]
    sample_size = 2
    probs = np.random.random(shape).astype("float32")
    indices = get_multinomial(probs, shape, sample_size)
    # Sampled values are random, so only check the output shape and that
    # every chosen index is a valid class index.
    assert list(indices.shape) == [sample_size]
    assert np.max(indices) < shape[-1]
    # Batched (2-D) distribution.
    shape = [10, 5]
    sample_size = 4
    probs = np.random.random(shape).astype("float32")
    indices = get_multinomial(probs, shape, sample_size)
    assert list(indices.shape) == [10, sample_size]
    assert np.max(indices) < shape[-1]
@tvm.testing.parametrize_targets
def test_convinteger(target, dev):
    """ConvInteger: integer convolution with zero-points, compared against ONNX Runtime."""
    def verify_convinteger(
        x_shape,
        w_shape,
        y_shape,
        padding,
        kernel_shape,
        strides,
        dilations,
        auto_pad="NOTSET",
        dtype="uint8",
    ):
        # Random integer activations/weights plus scalar zero-points;
        # the output of ConvInteger is always int32.
        x_array = np.random.randint(low=0, high=255, size=x_shape).astype(dtype)
        w_array = np.random.uniform(low=0, high=255, size=w_shape).astype(dtype)
        x_zero_point_array = np.random.randint(0, 255, size=[1]).astype(dtype)
        w_zero_point_array = np.random.randint(0, 255, size=[1]).astype(dtype)
        ONNX_DTYPE = mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(dtype)]
        input_nodes = [
            helper.make_tensor_value_info("x", ONNX_DTYPE, list(x_shape)),
            helper.make_tensor_value_info("w", ONNX_DTYPE, list(w_shape)),
        ]
        initializer = [
            helper.make_tensor("x_zero_point", ONNX_DTYPE, [], x_zero_point_array),
            helper.make_tensor("w_zero_point", ONNX_DTYPE, [], w_zero_point_array),
        ]
        input_names = ["x", "w", "x_zero_point", "w_zero_point"]
        input_values = [x_array, w_array]
        if padding is None:
            ## autopadding with unset default attributes
            # Only emit strides/dilations when they differ from the defaults,
            # so auto_pad inference is exercised with minimal attributes.
            kwargs = {}
            if not all(list(s == 1 for s in strides)):
                kwargs["strides"] = strides
            if not all(list(d == 1 for d in dilations)):
                kwargs["dilations"] = dilations
            node = helper.make_node(
                "ConvInteger",
                inputs=input_names,
                outputs=["y"],
                # Default values for other attributes:
                auto_pad=auto_pad,
                **kwargs,
            )
        else:
            node = helper.make_node(
                "ConvInteger",
                inputs=input_names,
                outputs=["y"],
                kernel_shape=kernel_shape,
                # Default values for other attributes:
                strides=strides,
                dilations=dilations,
                # groups=1
                pads=padding,
            )
        graph = helper.make_graph(
            [node],
            "convinteger_test",
            inputs=input_nodes,
            initializer=initializer,
            outputs=[helper.make_tensor_value_info("y", TensorProto.INT32, list(y_shape))],
        )
        model = helper.make_model(graph, producer_name="convinteger_test")
        # opt_level=1 will cause error
        verify_with_ort_with_inputs(model, input_values, target=target, dev=dev, opt_level=2)
    def repeat(num, dims):
        # Shorthand for a `dims`-long tuple of `num`, e.g. repeat(3, 2) -> (3, 3).
        return tuple(num for _ in range(dims))
    # only support 2D ConvInteger because we only support qnn.conv2d for now.
    dims = 2
    # Convolution with padding
    verify_convinteger(
        (1, 1) + repeat(5, dims),
        (1, 1) + repeat(3, dims),
        (1, 1) + repeat(5, dims),
        2 * repeat(1, dims),
        repeat(3, dims),
        repeat(1, dims),
        repeat(1, dims),
    )
    # Convolution with asymmetric padding
    verify_convinteger(
        (1, 1) + repeat(5, dims),
        (1, 1) + repeat(3, dims),
        (1, 1) + repeat(4, dims),
        repeat(0, dims) + repeat(1, dims),
        repeat(3, dims),
        repeat(1, dims),
        repeat(1, dims),
    )
    # Convolution without padding
    verify_convinteger(
        (1, 1) + repeat(5, dims),
        (1, 1) + repeat(3, dims),
        (1, 1) + repeat(3, dims),
        2 * repeat(0, dims),
        repeat(3, dims),
        repeat(1, dims),
        repeat(1, dims),
    )
    # Convolution with autopadding
    verify_convinteger(
        (1, 1) + repeat(5, dims),
        (1, 1) + repeat(3, dims),
        (1, 1) + repeat(5, dims),
        None,
        repeat(3, dims),
        repeat(1, dims),
        repeat(1, dims),
        auto_pad="SAME_UPPER",
    )
    # Convolution with valid autopadding
    verify_convinteger(
        (1, 1) + repeat(5, dims),
        (1, 1) + repeat(3, dims),
        (1, 1) + repeat(3, dims),
        None,
        repeat(3, dims),
        repeat(1, dims),
        repeat(1, dims),
        auto_pad="VALID",
    )
    # Convolution with non uniform stride
    verify_convinteger(
        (1, 1) + repeat(5, dims),
        (1, 1) + repeat(3, dims),
        (1, 1) + repeat(3, dims),
        None,
        repeat(3, dims),
        repeat(2, dims),
        repeat(1, dims),
        auto_pad="SAME_UPPER",
    )
    # Convolution with dilation
    verify_convinteger(
        (1, 1) + repeat(5, dims),
        (1, 1) + repeat(3, dims),
        (1, 1) + repeat(5, dims),
        2 * repeat(2, dims),
        repeat(3, dims),
        repeat(1, dims),
        repeat(2, dims),
    )
@tvm.testing.parametrize_targets
def test_bitshift(target, dev):
    """BitShift: shift left then right through a two-node graph and compare with ORT."""
    def verify_bitshift(in_shape, shift_shape, high=1000000000, in_dtype="uint64"):
        in_shape = list(in_shape)
        shift_shape = list(shift_shape)
        proto_type = mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(in_dtype)]
        # One value tensor plus two independent shift-amount tensors.
        tensor_values = [
            np.random.randint(high, size=in_shape).astype(in_dtype),
            np.random.randint(16, size=shift_shape).astype(in_dtype),
            np.random.randint(16, size=shift_shape).astype(in_dtype),
        ]
        left_node = helper.make_node(
            "BitShift",
            inputs=["input", "shift_left"],
            outputs=["shifted"],
            direction="LEFT",
        )
        right_node = helper.make_node(
            "BitShift",
            inputs=["shifted", "shift_right"],
            outputs=["output"],
            direction="RIGHT",
        )
        graph = helper.make_graph(
            [left_node, right_node],
            "BitShift_test",
            inputs=[
                helper.make_tensor_value_info("input", proto_type, in_shape),
                helper.make_tensor_value_info("shift_left", proto_type, shift_shape),
                helper.make_tensor_value_info("shift_right", proto_type, shift_shape),
            ],
            outputs=[helper.make_tensor_value_info("output", proto_type, in_shape)],
        )
        model = helper.make_model(
            graph,
            producer_name="BitShift_test",
        )
        verify_with_ort_with_inputs(model, tensor_values, target=target, dev=dev)
    shape = (100, 4, 2)
    broadcast_shape = (100, 1, 1)
    # Elementwise shift amounts.
    verify_bitshift(shape, shape)
    # Broadcast shift amounts.
    verify_bitshift(shape, broadcast_shape)
# TODO(vvchernov): return test back than ONNX Runtime in CI will support domain version of 18
@pytest.mark.skip("Currently ONNX Runtime in CI does not support domain version of 18")
@tvm.testing.parametrize_targets
def test_bitwise(target, dev):
    """Bitwise ops: chain BitwiseOr -> BitwiseAnd -> BitwiseXor -> BitwiseNot and compare with ORT."""
    def verify_bitwise_ops(A_shape, B_shape, C_shape, D_shape, high=128, in_dtype="int32"):
        A_shape = list(A_shape)
        B_shape = list(B_shape)
        C_shape = list(C_shape)
        D_shape = list(D_shape)
        # Create an input for each tensor.
        tensor_values = [
            np.random.randint(high, size=A_shape).astype(in_dtype),
            np.random.randint(high, size=B_shape).astype(in_dtype),
            np.random.randint(high, size=C_shape).astype(in_dtype),
            np.random.randint(high, size=D_shape).astype(in_dtype),
        ]
        # Each node consumes the previous node's output, forming a chain:
        # output = NOT(XOR(AND(OR(A, B), C), D))
        or_node = helper.make_node(
            "BitwiseOr",
            inputs=["A", "B"],
            outputs=["OR"],
        )
        and_node = helper.make_node(
            "BitwiseAnd",
            inputs=["OR", "C"],
            outputs=["AND"],
        )
        xor_node = helper.make_node(
            "BitwiseXor",
            inputs=["AND", "D"],
            outputs=["XOR"],
        )
        not_node = helper.make_node(
            "BitwiseNot",
            inputs=["XOR"],
            outputs=["output"],
        )
        # Create input and output tensors.
        proto_type = mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(in_dtype)]
        graph_inputs = [
            helper.make_tensor_value_info("A", proto_type, A_shape),
            helper.make_tensor_value_info("B", proto_type, B_shape),
            helper.make_tensor_value_info("C", proto_type, C_shape),
            helper.make_tensor_value_info("D", proto_type, D_shape),
        ]
        graph_outputs = [
            helper.make_tensor_value_info("output", proto_type, A_shape),
        ]
        graph_nodes = [
            or_node,
            and_node,
            xor_node,
            not_node,
        ]
        graph = helper.make_graph(
            graph_nodes,
            "Bitwise_test",
            inputs=graph_inputs,
            outputs=graph_outputs,
        )
        model = helper.make_model(
            graph,
            producer_name="Bitwise_test",
        )
        verify_with_ort_with_inputs(model, tensor_values, target=target, dev=dev)
    shape = (100, 4, 2)
    broadcast_shape = (100, 1, 1)
    # The `high` bound matches each dtype's positive value range.
    dtypes = ["int8", "uint8", "int32", "uint32"]
    high_vals = [128, 128, 2147483648, 2147483648]
    for high, dtype in zip(high_vals, dtypes):
        # Common bitwise test
        verify_bitwise_ops(shape, shape, shape, shape, high, dtype)
        # Bitwise test with broadcasting
        verify_bitwise_ops(shape, broadcast_shape, broadcast_shape, broadcast_shape, high, dtype)
@tvm.testing.parametrize_targets
def test_scan(target, dev):
    """test_scan"""

    def verify_scan(
        input_shapes,
        output_shapes,
        num_scan_inputs,
        scan_input_axes,
        scan_input_directions,
        scan_output_axes,
        scan_output_directions,
        opset,
    ):
        # Shapes seen inside the loop body: the scan (and, for opset 8, batch)
        # axes are stripped from the graph-level input shapes.
        body_input_shapes = copy.deepcopy(input_shapes)
        num_state_inputs = len(input_shapes) - num_scan_inputs
        if opset == 8:
            # Opset 8 carries a leading batch axis on every input: drop it,
            # then additionally drop the (fixed, leading) sequence axis of the
            # scan inputs.
            for i in range(len(input_shapes)):
                body_input_shapes[i].pop(0)
            for i in range(num_state_inputs, len(input_shapes)):
                body_input_shapes[i].pop(0)
        else:
            # Opset >= 9: only scan inputs lose their configured scan axis.
            for i in range(num_state_inputs, len(input_shapes)):
                body_input_shapes[i].pop(scan_input_axes[i - num_state_inputs])
        # Body-graph value infos: two state variables, three scan inputs, and
        # the matching state / scan outputs.
        initial0 = onnx.helper.make_tensor_value_info(
            "initial0", onnx.TensorProto.FLOAT, body_input_shapes[0]
        )
        initial1 = onnx.helper.make_tensor_value_info(
            "initial1", onnx.TensorProto.FLOAT, body_input_shapes[1]
        )
        input0 = onnx.helper.make_tensor_value_info(
            "input0", onnx.TensorProto.FLOAT, body_input_shapes[2]
        )
        input1 = onnx.helper.make_tensor_value_info(
            "input1", onnx.TensorProto.FLOAT, body_input_shapes[3]
        )
        input2 = onnx.helper.make_tensor_value_info(
            "input2", onnx.TensorProto.FLOAT, body_input_shapes[4]
        )
        state0 = onnx.helper.make_tensor_value_info(
            "state0", onnx.TensorProto.FLOAT, body_input_shapes[0]
        )
        scan_out0 = onnx.helper.make_tensor_value_info(
            "scan_out0", onnx.TensorProto.FLOAT, body_input_shapes[0]
        )
        state1 = onnx.helper.make_tensor_value_info(
            "state1", onnx.TensorProto.FLOAT, body_input_shapes[1]
        )
        scan_out1 = onnx.helper.make_tensor_value_info(
            "scan_out1", onnx.TensorProto.FLOAT, body_input_shapes[1]
        )
        # Body computation: Add(initial0, input0) -> state0 (copied to
        # scan_out0); Sub(initial1, MatMul(input1, input2)) -> state1 (copied
        # to scan_out1).
        add_node = onnx.helper.make_node(
            "Add",
            inputs=["initial0", "input0"],
            outputs=["state0"],
        )
        id_node_0 = onnx.helper.make_node(
            "Identity",
            inputs=["state0"],
            outputs=["scan_out0"],
        )
        matmul_node = onnx.helper.make_node(
            "MatMul",
            inputs=["input1", "input2"],
            outputs=["matmul_out"],
        )
        sub_node = onnx.helper.make_node(
            "Sub",
            inputs=["initial1", "matmul_out"],
            outputs=["state1"],
        )
        id_node_1 = onnx.helper.make_node(
            "Identity",
            inputs=["state1"],
            outputs=["scan_out1"],
        )
        scan_body = onnx.helper.make_graph(
            [add_node, id_node_0, matmul_node, sub_node, id_node_1],
            "scan_body",
            [initial0, initial1, input0, input1, input2],
            [state0, state1, scan_out0, scan_out1],
        )
        # create scan op node
        scan_node = None
        if opset == 8:
            # Opset 8 has an optional leading sequence_lens input (passed as
            # "" here) and no axis/direction attributes.
            scan_node = onnx.helper.make_node(
                "Scan",
                inputs=["", "init0", "init1", "in0", "in1", "in2"],
                outputs=["s0", "s1", "scan0", "scan1"],
                num_scan_inputs=num_scan_inputs,
                body=scan_body,
            )
        else:
            scan_node = onnx.helper.make_node(
                "Scan",
                inputs=["init0", "init1", "in0", "in1", "in2"],
                outputs=["s0", "s1", "scan0", "scan1"],
                num_scan_inputs=num_scan_inputs,
                body=scan_body,
                scan_input_axes=scan_input_axes,
                scan_input_directions=scan_input_directions,
                scan_output_axes=scan_output_axes,
                scan_output_directions=scan_output_directions,
            )
        input_info = [
            helper.make_tensor_value_info("init0", TensorProto.FLOAT, input_shapes[0]),
            helper.make_tensor_value_info("init1", TensorProto.FLOAT, input_shapes[1]),
            helper.make_tensor_value_info("in0", TensorProto.FLOAT, input_shapes[2]),
            helper.make_tensor_value_info("in1", TensorProto.FLOAT, input_shapes[3]),
            helper.make_tensor_value_info("in2", TensorProto.FLOAT, input_shapes[4]),
        ]
        out_info = [
            helper.make_tensor_value_info("s0", TensorProto.FLOAT, output_shapes[0]),
            helper.make_tensor_value_info("s1", TensorProto.FLOAT, output_shapes[1]),
            helper.make_tensor_value_info("scan0", TensorProto.FLOAT, output_shapes[2]),
            helper.make_tensor_value_info("scan1", TensorProto.FLOAT, output_shapes[3]),
        ]
        graph = helper.make_graph(
            nodes=[scan_node],
            name="scan_test",
            inputs=input_info,
            outputs=out_info,
        )
        model = onnx.helper.make_model(graph, producer_name="scan-test")
        init0 = np.random.uniform(low=0, high=255, size=input_shapes[0]).astype(np.float32)
        init1 = np.random.uniform(low=0, high=255, size=input_shapes[1]).astype(np.float32)
        in0 = np.random.uniform(low=0, high=255, size=input_shapes[2]).astype(np.float32)
        in1 = np.random.uniform(low=0, high=255, size=input_shapes[3]).astype(np.float32)
        in2 = np.random.uniform(low=0, high=255, size=input_shapes[4]).astype(np.float32)
        input_values = [init0, init1, in0, in1, in2]
        verify_with_ort_with_inputs(
            model,
            input_values,
            target=target,
            dev=dev,
            opt_level=2,
            use_vm=True,
            opset=opset,
        )

    # opset 8
    input_shapes = [[2, 6, 7, 8], [2, 3, 3], [2, 5, 6, 7, 8], [2, 5, 3, 4], [2, 5, 4, 3]]
    output_shapes = [[2, 6, 7, 8], [2, 3, 3], [2, 5, 6, 7, 8], [2, 5, 3, 3]]
    # input_shapes, output_shapes, num_scan_inputs, scan_input_axes, scan_input_directions,
    # scan_output_axes, scan_output_directions, opset
    verify_scan(input_shapes, output_shapes, 3, [0] * 3, [0] * 3, [0] * 2, [0] * 2, 8)
    # opset 9
    input_shapes = [[6, 7, 8], [3, 3], [5, 6, 7, 8], [5, 3, 4], [5, 4, 3]]
    output_shapes = [[6, 7, 8], [3, 3], [5, 6, 7, 8], [5, 3, 3]]
    verify_scan(input_shapes, output_shapes, 3, [0] * 3, [0] * 3, [0] * 2, [0] * 2, 9)
    # Non-default scan axes with reversed scan directions.
    input_shapes = [[6, 7, 8], [3, 3], [5, 6, 7, 8], [3, 4, 5], [4, 5, 3]]
    output_shapes = [[6, 7, 8], [3, 3], [6, 5, 7, 8], [3, 5, 3]]
    verify_scan(input_shapes, output_shapes, 3, [0, 2, 1], [1] * 3, [1] * 2, [1] * 2, 9)
    # Negative axes
    input_shapes = [[6, 7, 8], [3, 3], [5, 6, 7, 8], [3, 4, 5], [4, 5, 3]]
    output_shapes = [[6, 7, 8], [3, 3], [6, 5, 7, 8], [3, 5, 3]]
    verify_scan(input_shapes, output_shapes, 3, [-4, -1, -2], [1] * 3, [-3, -2], [1] * 2, 9)
@tvm.testing.parametrize_targets
def test_linear_regressor(target, dev):
    """test_linear_regressor"""

    def verify_linear_regressor(a_shape, c_shape, i_shape, targets=1, batch=1):
        # Random input plus random learned parameters for the regressor.
        a_array = np.random.uniform(size=a_shape).astype("float32")
        out_shape = (batch, targets)
        coefficients = np.random.uniform(size=c_shape).astype("float32")
        intercepts = np.random.uniform(size=i_shape).astype("float32")
        regressor_node = helper.make_node(
            "LinearRegressor",
            ["a"],
            ["out"],
            coefficients=coefficients,
            intercepts=intercepts,
            targets=targets,
            domain="ai.onnx.ml",
        )
        graph_inputs = [
            helper.make_tensor_value_info("a", TensorProto.FLOAT, list(a_shape)),
        ]
        graph_outputs = [helper.make_tensor_value_info("out", TensorProto.FLOAT, out_shape)]
        graph = helper.make_graph(
            [regressor_node],
            "LinearRegressor_test",
            inputs=graph_inputs,
            outputs=graph_outputs,
        )
        # LinearRegressor lives in the ai.onnx.ml domain, so that opset must
        # be declared explicitly on the model.
        model = helper.make_model(
            graph,
            producer_name="LinearRegressor_test",
            opset_imports=[
                onnx.helper.make_opsetid("ai.onnx.ml", 1),
            ],
        )
        verify_with_ort_with_inputs(model, [a_array], target=target, dev=dev)

    verify_linear_regressor((1, 3), (3), (1))
    verify_linear_regressor((2, 10), (10), (1), batch=2)
    verify_linear_regressor((1, 3), (30), (10), targets=10)
    verify_linear_regressor((10, 3), (30), (10), targets=10, batch=10)
    verify_linear_regressor((1, 4), (3), (1))
@tvm.testing.parametrize_targets
def test_dft(target, dev):
    """test_dft"""

    def verify_dft(_axis, _inverse, _onesided, _dft_length, _input_shape, _output_shape):
        input_names = ["input"]
        nodes = []
        if _dft_length is not None:
            # The FFT length, when given, is wired in as a scalar constant input.
            input_names.append("dft_length")
            nodes.append(
                make_constant_node("dft_length", TensorProto.INT32, [], [_dft_length]),
            )
        nodes.append(
            onnx.helper.make_node(
                "DFT",
                inputs=input_names,
                outputs=["output"],
                axis=_axis,
                inverse=_inverse,
                onesided=_onesided,
            )
        )
        graph = helper.make_graph(
            nodes,
            "dft_test",
            inputs=[
                helper.make_tensor_value_info("input", TensorProto.FLOAT, _input_shape),
            ],
            outputs=[
                helper.make_tensor_value_info("output", TensorProto.FLOAT, _output_shape),
            ],
        )
        model = helper.make_model(graph, producer_name="dft_test")
        _input = np.random.normal(size=_input_shape).astype("float32")
        verify_with_ort_with_inputs(
            model,
            [_input],
            [_input_shape],
            target=target,
            dev=dev,
            rtol=1e-4,
            atol=1e-4,
            use_vm=False,
        )

    batch_size = 5
    n = 2
    D = 7
    # Sweep axis, forward/inverse, onesided, FFT lengths around D, and channels.
    for axis in list(range(1, n)) + [-2]:
        for inverse, onesided in [(0, 0), (0, 1), (1, 0)]:
            for n_fft in [D, D - 1, D + 1]:
                for c in [1, 2]:
                    input_shape = [batch_size] + n * [D] + [c]
                    output_shape = [batch_size] + n * [D] + [2]
                    if onesided == 1:
                        # Onesided output keeps floor(N/2)+1 bins on the axis.
                        output_shape[axis] = output_shape[axis] // 2 + 1
                    verify_dft(axis, inverse, onesided, n_fft, input_shape, output_shape)
@tvm.testing.parametrize_targets
def test_sequence(target, dev):
    """test_sequence"""

    def verify_sequence_ops(tensor_shape, num_tensors, axis=0, position=0, new_axis=None):
        # Builds one graph exercising SequenceConstruct / Insert / Erase /
        # ConcatFromSequence / SplitToSequence / SequenceAt / SequenceLength.
        tensor_shape = list(tensor_shape)
        tensor_values = []
        for i in range(num_tensors):
            tensor_values.append(np.random.uniform(size=tensor_shape).astype("float32"))
        # Create an input for each tensor.
        input_tensor_names = []
        for i in range(num_tensors):
            name = f"input_tensor_{i}"
            input_tensor_names.append(name)
        # Test creating a tensor sequence.
        construct_node = helper.make_node(
            "SequenceConstruct",
            inputs=input_tensor_names,
            outputs=["sequence"],
        )
        # Scalar index shared by the insert / erase / at nodes below.
        position_node = make_constant_node("position", TensorProto.INT32, (), [position])
        # Test sequence insertion.
        insert_node = helper.make_node(
            "SequenceInsert",
            inputs=["sequence", input_tensor_names[0], "position"],
            outputs=["inserted_sequence"],
        )
        # Test sequence erase.
        erase_node = helper.make_node(
            "SequenceErase",
            inputs=["inserted_sequence", "position"],
            outputs=["erased_sequence"],
        )
        # Test sequence concatenation.
        concat_node = helper.make_node(
            "ConcatFromSequence",
            inputs=["erased_sequence"],
            outputs=["concat_sequence"],
            axis=axis,
        )
        # Test splitting a tensor into a sequence.
        split_node = helper.make_node(
            "SplitToSequence", inputs=["concat_sequence"], outputs=["split_sequence"], axis=axis
        )
        # Test tensor extraction from sequence
        at_node = helper.make_node(
            "SequenceAt", inputs=["split_sequence", "position"], outputs=["output"]
        )
        # Test sequence length
        length_node = helper.make_node(
            "SequenceLength", inputs=["split_sequence"], outputs=["output_2"]
        )
        if new_axis is not None:
            # new_axis is optional, so only attach the attribute when requested.
            new_axis_attr = helper.make_attribute("new_axis", new_axis)
            concat_node.attribute.append(new_axis_attr)
        # Create input and output tensors.
        graph_inputs = []
        for name in input_tensor_names:
            input_tensor = helper.make_tensor_value_info(name, TensorProto.FLOAT, tensor_shape)
            graph_inputs.append(input_tensor)
        # Construct output tensor.
        # NOTE(review): output_shape aliases (and mutates) tensor_shape, which
        # is harmless because tensor_shape is not used again after this point.
        output_shape = tensor_shape
        if new_axis is not None:
            output_shape.insert(axis, 1)
            output_shape[axis] = num_tensors + 1
        else:
            output_shape[axis] = (num_tensors + 1) * output_shape[axis]
        graph_outputs = [
            helper.make_tensor_value_info("output", TensorProto.FLOAT, output_shape),
            helper.make_tensor_value_info("output_2", TensorProto.INT64, []),
        ]
        graph_nodes = [
            position_node,
            construct_node,
            insert_node,
            erase_node,
            concat_node,
            split_node,
            at_node,
            length_node,
        ]
        graph = helper.make_graph(
            graph_nodes,
            "Sequence_test",
            inputs=graph_inputs,
            outputs=graph_outputs,
        )
        model = helper.make_model(
            graph,
            producer_name="Sequence_test",
        )
        verify_with_ort_with_inputs(model, tensor_values, target=target, dev=dev)

    verify_sequence_ops((10, 3), 2)
    verify_sequence_ops((3, 3, 3, 3), 4, position=3)
    verify_sequence_ops((3, 3, 3, 3), 4, axis=2)
    verify_sequence_ops((3, 3, 3, 3), 4, axis=2, new_axis=1)
@tvm.testing.parametrize_targets
def test_empty_sequence(target, dev):
    """test_empty_sequence"""
    # Build a graph that creates an empty sequence and queries its length.
    empty_node = helper.make_node(
        "SequenceEmpty",
        inputs=[],
        outputs=["empty_sequence"],
    )
    length_node = helper.make_node("SequenceLength", inputs=["empty_sequence"], outputs=["output"])
    graph = helper.make_graph(
        [empty_node, length_node],
        "Sequence_empty_test",
        inputs=[],
        outputs=[helper.make_tensor_value_info("output", TensorProto.INT64, [])],
    )
    model = helper.make_model(
        graph,
        producer_name="Sequence_empty_test",
    )
    # No graph inputs, so no input values are fed.
    verify_with_ort_with_inputs(model, [], target=target, dev=dev)
def test_exporting_node_renamed_model():
    """Test exporting a model when export_node_renamed_model_path is set."""
    a_name, a_shape = "a", (4, 3)
    b_name, b_shape = "b", (3, 4)
    out_name, out_shape = "out", [a_shape[0], b_shape[1]]
    temp_dir = utils.tempdir().path
    # Build a minimal single-MatMul model whose node has no name.
    matmul_node = helper.make_node("MatMul", [a_name, b_name], [out_name])
    graph = helper.make_graph(
        [matmul_node],
        "matmul_test",
        inputs=[
            helper.make_tensor_value_info(a_name, TensorProto.FLOAT, a_shape),
            helper.make_tensor_value_info(b_name, TensorProto.FLOAT, b_shape),
        ],
        outputs=[helper.make_tensor_value_info(out_name, TensorProto.FLOAT, out_shape)],
    )
    model = helper.make_model(graph, producer_name="matmul_test")
    # Importing with export_node_renamed_model_path should dump a renamed copy.
    shape_dict = {a_name: a_shape, b_name: b_shape}
    _, _ = relay.frontend.from_onnx(model, shape_dict, export_node_renamed_model_path=temp_dir)
    exported_model_name = os.listdir(temp_dir)[0]
    assert "tvm_exported_model_" in exported_model_name
    exported_model = onnx.load(os.path.join(temp_dir, exported_model_name))
    # Unnamed nodes are renamed to "<OpType>_<index>".
    assert exported_model.graph.node[0].name == "MatMul_0"
class TestSetSpan:
    """test structural equal between translated / hand-crafted relay IR with span tagged."""

    def _verify(self, res_fptr, golden_fptr):
        # The translated IR must be structurally equal with and without span
        # filling, and the span-filled version must match the golden IR
        # (including spans).
        with tvm.testing.enable_span_filling():
            with_span = res_fptr()
        with tvm.testing.disable_span_filling():
            without_span = res_fptr()
        assert tvm.ir.structural_equal(with_span, without_span)
        _verify_structural_equal_with_span(with_span, golden_fptr())

    def test_conv2d_bias_add_span(self):
        """Conv + bias: spans should be derived from the ONNX node name."""
        padding = [0, 0, 0, 0]
        k_shape = [7, 7]
        y_shape, y_name = [1, 6, 10, 10], "y"
        x_shape, x_name = [1, 3, 10, 10], "x"
        b_shape, b_name = [6], "b"
        b_val = np.random.random(b_shape).astype(np.float32)
        w_shape, w_name = [6, 3, 7, 7], "w"
        w_val = np.random.random(w_shape).astype(np.float32)
        group, strides, dilations = 1, [1, 1], [1, 1]
        conv_name = "conv2d"

        def _res():
            # model definition
            node = helper.make_node(
                "Conv",
                inputs=[x_name, w_name, b_name],
                outputs=[y_name],
                kernel_shape=k_shape,
                strides=strides,
                dilations=dilations,
                group=group,
                pads=padding,
                name=conv_name,
            )
            graph = helper.make_graph(
                [node],
                "conv_test",
                inputs=[helper.make_tensor_value_info(x_name, TensorProto.FLOAT, x_shape)],
                outputs=[helper.make_tensor_value_info(y_name, TensorProto.FLOAT, y_shape)],
                initializer=[
                    helper.make_tensor(
                        w_name,
                        TensorProto.FLOAT,
                        dims=w_shape,
                        vals=w_val.flatten(),
                    ),
                    helper.make_tensor(
                        b_name,
                        TensorProto.FLOAT,
                        dims=b_shape,
                        vals=b_val.flatten(),
                    ),
                ],
            )
            model = helper.make_model(graph, producer_name="conv_test")
            # get frontend model
            shape_dict = {x_name: x_shape}
            mod, _ = relay.frontend.from_onnx(model, shape_dict)
            return mod["main"]

        def _golden():
            # Hand-crafted relay IR with the spans the frontend is expected
            # to emit ("<node_name>.<tensor_name>" on vars/consts).
            conv_si = conv_name
            x = relay.var(
                x_name,
                shape=tuple(x_shape),
                span=_create_span(f"{conv_si}.{x_name}"),
            )
            conv_weight = relay.const(
                w_val,
                span=_create_span(f"{conv_si}.{w_name}"),
            )
            conv_bias = relay.const(
                b_val,
                span=_create_span(f"{conv_si}.{b_name}"),
            )
            conv_out = _set_span(
                relay.nn.conv2d(
                    x,
                    conv_weight,
                    padding=[0] * 4,
                    channels=y_shape[1],
                    kernel_size=k_shape,
                ),
                conv_si,
            )
            bias_out = _set_span(relay.nn.bias_add(conv_out, conv_bias), conv_si)
            return infer_type(relay.Function([x], bias_out))

        self._verify(_res, _golden)

    def test_batchnorm_span(self):
        """BatchNormalization: spans on all five inputs and the result."""
        input_name, in_shape = "x", [1, 16, 10, 10]
        bn_name = "bn"
        output_name = "y"
        scale_name = "scale"
        bias_name = "b"
        mean_name = "mean"
        var_name = "var"

        def _res():
            # model definition
            batchnorm = onnx.helper.make_node(
                "BatchNormalization",
                inputs=[input_name, scale_name, bias_name, mean_name, var_name],
                outputs=[output_name],
                name=bn_name,
            )
            graph = helper.make_graph(
                [batchnorm],
                "batchnorm_test",
                inputs=[
                    helper.make_tensor_value_info(input_name, TensorProto.FLOAT, in_shape),
                    helper.make_tensor_value_info(scale_name, TensorProto.FLOAT, [in_shape[1]]),
                    helper.make_tensor_value_info(bias_name, TensorProto.FLOAT, [in_shape[1]]),
                    helper.make_tensor_value_info(mean_name, TensorProto.FLOAT, [in_shape[1]]),
                    helper.make_tensor_value_info(var_name, TensorProto.FLOAT, [in_shape[1]]),
                ],
                outputs=[helper.make_tensor_value_info(output_name, TensorProto.FLOAT, in_shape)],
            )
            model = helper.make_model(graph, producer_name="batchnorm_test")
            # get frontend model
            shape_dict = {input_name: in_shape}
            mod, _ = relay.frontend.from_onnx(model, shape_dict)
            return mod["main"]

        def _golden():
            bn_si = bn_name
            x = relay.var(
                input_name,
                shape=tuple(in_shape),
                span=_create_span(f"{bn_si}.{input_name}"),
            )
            bn_scale = relay.var(
                scale_name,
                shape=(in_shape[1],),
                span=_create_span(f"{bn_si}.{scale_name}"),
            )
            bn_bias = relay.var(
                bias_name,
                shape=(in_shape[1],),
                span=_create_span(f"{bn_si}.{bias_name}"),
            )
            bn_rm = relay.var(
                mean_name,
                shape=(in_shape[1],),
                span=_create_span(f"{bn_si}.{mean_name}"),
            )
            bn_rv = relay.var(
                var_name,
                shape=(in_shape[1],),
                span=_create_span(f"{bn_si}.{var_name}"),
            )
            bn_out = _set_span(
                relay.nn.batch_norm(x, bn_scale, bn_bias, bn_rm, bn_rv),
                bn_si,
            )
            # relay.nn.batch_norm returns a tuple; only element 0 is the data.
            bn_tuple_get_item = _set_span(relay.TupleGetItem(bn_out.tuple_value, 0), bn_si)
            return infer_type(
                relay.Function([x, bn_scale, bn_bias, bn_rm, bn_rv], bn_tuple_get_item)
            )

        self._verify(_res, _golden)

    def test_reshape_span(self):
        """Reshape whose target shape comes from a Constant node."""
        input_shape = [2, 1, 10, 1, 10]
        new_shape = [2, 1, 10, 10]
        input_name = "in"
        output_name = "out"
        ref_name = "ref_in"
        const_name = "const"
        reshape_name = "reshape"

        def _res():
            # model definition
            ref_array = np.array(new_shape)
            ref_node = helper.make_node(
                "Constant",
                inputs=[],
                outputs=[ref_name],
                value=helper.make_tensor(
                    name="const_tensor",
                    data_type=TensorProto.INT32,
                    dims=ref_array.shape,
                    vals=ref_array.flatten().astype(int),
                ),
                name=const_name,
            )
            reshape_node = helper.make_node(
                "Reshape",
                [input_name, ref_name],
                [output_name],
                name=reshape_name,
            )
            graph = helper.make_graph(
                [ref_node, reshape_node],
                "reshape_test",
                inputs=[helper.make_tensor_value_info(input_name, TensorProto.FLOAT, input_shape)],
                outputs=[helper.make_tensor_value_info(output_name, TensorProto.FLOAT, new_shape)],
            )
            model = helper.make_model(graph, producer_name="reshape_test")
            # get frontend model
            shape_dict = {input_name: input_shape}
            mod, _ = relay.frontend.from_onnx(model, shape_dict)
            return mod["main"]

        def _golden():
            reshape_si = reshape_name
            x = relay.var(
                input_name,
                shape=tuple(input_shape),
                span=_create_span(f"{reshape_si}.{input_name}"),
            )
            reshape_out = _set_span(
                relay.reshape(x, newshape=new_shape),
                reshape_si,
            )
            return infer_type(relay.Function([x], reshape_out))

        self._verify(_res, _golden)

    def test_matmul_span(self):
        """MatMul: golden IR uses the frontend's transpose + dense lowering."""
        a_name, a_shape = "a", (4, 3)
        b_name, b_shape = "b", (3, 4)
        out_name, out_shape = "out", [a_shape[0], b_shape[1]]
        matmul_name = "matmul"

        def _res():
            # model definition
            mul_node = helper.make_node("MatMul", [a_name, b_name], [out_name], name=matmul_name)
            graph = helper.make_graph(
                [mul_node],
                "matmul_test",
                inputs=[
                    helper.make_tensor_value_info(a_name, TensorProto.FLOAT, a_shape),
                    helper.make_tensor_value_info(b_name, TensorProto.FLOAT, b_shape),
                ],
                outputs=[helper.make_tensor_value_info(out_name, TensorProto.FLOAT, out_shape)],
            )
            model = helper.make_model(graph, producer_name="matmul_test")
            # get frontend model
            shape_dict = {a_name: a_shape, b_name: b_shape}
            mod, _ = relay.frontend.from_onnx(model, shape_dict)
            return mod["main"]

        def _golden():
            matmul_si = matmul_name
            a = relay.var(
                a_name,
                shape=tuple(a_shape),
                span=_create_span(f"{matmul_si}.{a_name}"),
            )
            b = relay.var(
                b_name,
                shape=tuple(b_shape),
                span=_create_span(f"{matmul_si}.{b_name}"),
            )
            b_t = _set_span(relay.transpose(b, axes=[1, 0]), matmul_si)
            matmul_out = _set_span(
                relay.nn.dense(a, b_t, out_dtype="float32"),
                matmul_si,
            )
            return infer_type(relay.Function([a, b], matmul_out))

        self._verify(_res, _golden)
@tvm.testing.parametrize_targets
def test_pad_constant_value(target, dev):
    """test_pad_constant_value"""

    def verify_pad_constant_value(constant_value):
        tensor_shape = [1, 2, 257, 126]
        tensor_values = [np.random.uniform(size=tensor_shape).astype("float32")]
        graph_inputs = [helper.make_tensor_value_info("input", TensorProto.FLOAT, tensor_shape)]
        graph_outputs = [helper.make_tensor_value_info("output", TensorProto.FLOAT, None)]
        # Pad amounts are supplied via an initializer tensor.
        pads = helper.make_tensor("pads", TensorProto.INT64, [8], [0, 0, 0, 2, 0, 0, 0, 0])
        pad_node = helper.make_node(
            "Pad", ["input", "pads", constant_value], ["output"], mode="constant"
        )
        graph = helper.make_graph(
            [pad_node],
            "test_pad_constant_value",
            inputs=graph_inputs,
            outputs=graph_outputs,
            initializer=[pads],
        )
        model = helper.make_model(
            graph,
            producer_name="test_pad_constant_value",
        )
        verify_with_ort_with_inputs(model, tensor_values, target=target, dev=dev)

    # An empty input name means the optional constant_value input is omitted.
    verify_pad_constant_value("")
# Run every test in this file when executed directly as a script.
if __name__ == "__main__":
    tvm.testing.main()
| 296,438 | 33.732162 | 100 | py |
tvm | tvm-main/tests/python/frontend/keras/test_forward.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Unit tests for various models and operators"""
import numpy as np
import pytest
from packaging import version as package_version

try:
    import tensorflow.compat.v1 as tf
except ImportError:
    import tensorflow as tf

from tensorflow import keras as tf_keras

# prevent Keras from using up all gpu memory
import keras

import tvm
from tvm import relay
from tvm.contrib import graph_executor
import tvm.testing
# Limit TensorFlow's GPU memory usage so these tests can share the device.
if tf.executing_eagerly():
    # TF2: enable memory growth on every visible GPU.
    GPUS = tf.config.experimental.list_physical_devices("GPU")
    for gpu in GPUS:
        tf.config.experimental.set_memory_growth(gpu, True)
else:
    # TF1: cap the per-process GPU memory fraction through the session config.
    from keras.backend.tensorflow_backend import set_session

    CONFIG = tf.ConfigProto()
    CONFIG.gpu_options.per_process_gpu_memory_fraction = 0.5
    set_session(tf.Session(config=CONFIG))
def pytest_generate_tests(metafunc):
"""
This function generates the list of tests for pytest, based
on scenarios that will change the parameters in which the
tests use to run.
https://docs.pytest.org/en/latest/example/parametrize.html
"""
idlist = []
argvalues = []
for scenario in metafunc.cls.scenarios:
idlist.append(scenario[0])
items = scenario[1].items()
argnames = [x[0] for x in items]
argvalues.append([x[1] for x in items])
metafunc.parametrize(argnames, argvalues, ids=idlist, scope="class")
# Scenarios:
# - classic keras, using keras from "import keras"
# - tensorflow keras, using keras from "from tensorflow import keras as tf_keras"
# NOTE(review): "CALSSIC" is a typo for "CLASSIC"; kept as-is because the name
# is referenced by TestKeras.scenarios.
USING_CALSSIC_KERAS = ("keras", {"keras_mod": keras})
USING_TENSORFLOW_KERAS = ("tf_keras", {"keras_mod": tf_keras})
def verify_keras_frontend(keras_model, need_transpose=True, layout="NCHW"):
    """Generic function to generate and compare Keras and TVM output"""
    # Keras frontend currently supports tensorflow backend only.
    assert keras.backend.backend() == "tensorflow"
    # NHWC/NCHW transposition only applies when TVM is asked to run NCHW.
    if layout != "NCHW":
        need_transpose = False
    # Collect one concrete shape per model input; dynamic (None) dims become 1.
    in_shapes = []
    for layer in keras_model._input_layers:
        if tf.executing_eagerly():
            # TF2 eager mode: shape entries are plain ints (or None).
            in_shapes.append(tuple(dim if dim is not None else 1 for dim in layer.input.shape))
        else:
            # TF1 graph mode: shape entries are Dimension objects with .value.
            in_shapes.append(
                tuple(dim.value if dim.value is not None else 1 for dim in layer.input.shape)
            )

    def get_keras_output(in_data):
        # Reference output straight from Keras.
        return keras_model.predict(in_data)

    def get_tvm_output(in_data, target, dev, dtype="float32"):
        # Import via the relay Keras frontend, build, and execute on `target`.
        shape_dict = {name: x.shape for (name, x) in zip(keras_model.input_names, in_data)}
        mod, params = relay.frontend.from_keras(keras_model, shape_dict, layout=layout)
        with tvm.transform.PassContext(opt_level=2):
            lib = relay.build(mod, target, params=params)
        m = graph_executor.GraphModule(lib["default"](dev))
        for name, x in zip(keras_model.input_names, in_data):
            m.set_input(name, tvm.nd.array(x.astype(dtype)))
        m.run()
        return [m.get_output(i).numpy() for i in range(m.get_num_outputs())]

    def to_channels_first(arr):
        # NHWC... -> NCHW... (move last axis after batch).
        return arr.transpose([0, -1] + list(range(1, arr.ndim - 1)))

    def to_channels_last(arr):
        # NCHW... -> NHWC... (move channel axis to the end).
        return arr.transpose([0] + list(range(2, arr.ndim)) + [1])

    in_data = [np.random.uniform(size=shape, low=-1.0, high=1.0) for shape in in_shapes]
    keras_out = get_keras_output(in_data)
    keras_out = keras_out if isinstance(keras_out, list) else [keras_out]
    for target, dev in tvm.testing.enabled_targets():
        # Keras is channels-last; transpose inputs/outputs when TVM runs NCHW.
        inputs = [to_channels_first(x) for x in in_data] if need_transpose else in_data
        tvm_out = get_tvm_output(inputs, target, dev)
        for kout, tout in zip(keras_out, tvm_out):
            if need_transpose:
                tout = to_channels_last(tout)
            tvm.testing.assert_allclose(kout, tout, rtol=1e-5, atol=1e-5)
def get_mobilenet(keras_mod):
    """Return the MobileNet application class for the given keras module."""
    applications = keras_mod.applications
    if hasattr(applications, "MobileNet"):
        # Keras 2.4.x and older
        return applications.MobileNet
    # Keras 2.6.x and newer
    return applications.mobilenet.MobileNet
@tvm.testing.uses_gpu
class TestKeras:
"""Keras test"""
scenarios = [USING_CALSSIC_KERAS, USING_TENSORFLOW_KERAS]
def test_forward_merge(self, keras_mod):
"""test_forward_merge"""
data = keras_mod.layers.Input(shape=(32, 32, 3))
conv2d_x = keras_mod.layers.Conv2D(8, (3, 3), padding="same")(data)
conv2d_y = keras_mod.layers.Conv2D(8, (3, 3), padding="same")(conv2d_x)
conv2d_z = keras_mod.layers.Conv2D(8, (3, 3), padding="same")(conv2d_y)
merge_funcs = [
keras_mod.layers.Add(),
keras_mod.layers.Subtract(),
keras_mod.layers.Multiply(),
keras_mod.layers.Maximum(),
keras_mod.layers.Minimum(),
keras_mod.layers.Average(),
keras_mod.layers.Concatenate(),
]
for merge_func in merge_funcs:
class_name = type(merge_func).__name__
if class_name in ("Subtract", "Dot"):
out = merge_func([conv2d_x, conv2d_y])
else:
out = merge_func([conv2d_x, conv2d_y, conv2d_z])
keras_model = keras_mod.models.Model(data, out)
verify_keras_frontend(keras_model)
def test_forward_concatenate(self, keras_mod):
"""test_forward_concatenate"""
data1 = keras_mod.layers.Input(shape=(1, 2, 2))
data2 = keras_mod.layers.Input(shape=(1, 1, 2))
merge_func = keras_mod.layers.Concatenate(axis=2)
out = merge_func([data1, data2])
keras_model = keras_mod.models.Model([data1, data2], out)
verify_keras_frontend(keras_model, layout="NHWC")
verify_keras_frontend(keras_model, layout="NCHW")
# test default axis (e.g., -1)
data1 = keras_mod.layers.Input(shape=(1, 2, 2))
data2 = keras_mod.layers.Input(shape=(1, 2, 3))
merge_func = keras_mod.layers.Concatenate()
out = merge_func([data1, data2])
keras_model = keras_mod.models.Model([data1, data2], out)
verify_keras_frontend(keras_model, layout="NHWC")
verify_keras_frontend(keras_model, layout="NCHW")
def test_forward_merge_dot(self, keras_mod):
"""test_forward_merge_dot"""
data1 = keras_mod.layers.Input(shape=(2, 2))
data2 = keras_mod.layers.Input(shape=(2, 2))
merge_funcs = [
keras_mod.layers.Dot(axes=[1, 2]),
keras_mod.layers.Dot(axes=[2, 1]),
keras_mod.layers.Dot(axes=[1, 1]),
keras_mod.layers.Dot(axes=[2, 2]),
keras_mod.layers.Dot(axes=1),
keras_mod.layers.Dot(axes=2),
]
for merge_func in merge_funcs:
out = merge_func([data1, data2])
keras_model = keras_mod.models.Model([data1, data2], out)
verify_keras_frontend(keras_model)
    def test_forward_activations(self, keras_mod):
        """test_forward_activations"""
        data = keras_mod.layers.Input(shape=(32, 32, 3))
        # One layer per supported activation and parameterization.
        act_funcs = [
            keras_mod.layers.Activation("softmax"),
            keras_mod.layers.Softmax(),
            keras_mod.layers.Softmax(axis=-1),
            keras_mod.layers.Softmax(axis=1),
            keras_mod.layers.Softmax(axis=2),
            keras_mod.layers.Softmax(axis=3),
            keras_mod.layers.Activation("softplus"),
            keras_mod.layers.Activation("relu"),
            keras_mod.layers.Activation("softsign"),
            keras_mod.layers.Activation("hard_sigmoid"),
            keras_mod.layers.Activation("sigmoid"),
            keras_mod.layers.Activation("tanh"),
            keras_mod.layers.Activation("linear"),
            keras_mod.layers.Activation("selu"),
            keras_mod.layers.ReLU(),
            keras_mod.layers.ReLU(max_value=6.0),
            keras_mod.layers.ReLU(max_value=6.0, threshold=0.0),
            keras_mod.layers.ReLU(max_value=6.0, threshold=1.0),
            keras_mod.layers.ReLU(max_value=6.0, threshold=1.0, negative_slope=0.0),
            keras_mod.layers.ReLU(max_value=6.0, threshold=1.0, negative_slope=0.5),
            keras_mod.layers.ReLU(max_value=6.0, threshold=1.0, negative_slope=1.0),
            keras_mod.layers.LeakyReLU(alpha=0.3),
            keras_mod.layers.PReLU(weights=np.random.rand(1, 32, 32, 3)),
            keras_mod.layers.ELU(alpha=0.5),
            keras_mod.layers.ThresholdedReLU(theta=0.5),
        ]
        for act_func in act_funcs:
            x = act_func(data)
            keras_model = keras_mod.models.Model(data, x)
            # Verify in both NCHW (with transpose) and NHWC layouts.
            verify_keras_frontend(keras_model)
            verify_keras_frontend(keras_model, need_transpose=False, layout="NHWC")
        # Test the input dimension = 1
        data = keras_mod.layers.Input(shape=(11,))
        act_func = keras_mod.layers.Softmax()
        x = act_func(data)
        keras_model = keras_mod.models.Model(data, x)
        verify_keras_frontend(keras_model)
        verify_keras_frontend(keras_model, need_transpose=False, layout="NHWC")
def test_forward_activations_except(self, keras_mod):
"""
test invalid attribute alpha=None for LeakyReLU and ELU.
after version 2.3.1 in keras, checking was added to reject the invalid api call:
LeakyReLU(alpha=None) and ELU(alpha=None),
(see issue: https://github.com/tensorflow/tensorflow/pull/47017)
Thus, it's necessary to check the keras version to avoid crash at LeakyReLU(alpha=None)
and ELU(alpha=None)
"""
if package_version.parse(keras_mod.__version__.split("-tf")[0]) <= package_version.parse(
"2.3.1"
):
act_funcs = [
keras_mod.layers.LeakyReLU(alpha=None),
keras_mod.layers.LEU(2, 3, 4),
keras_mod.layers.ReLU(threshold=None),
]
data = keras_mod.layers.Input(shape=(2, 3, 4))
for act_func in act_funcs:
layer = act_func(data)
keras_model = keras_mod.models.Model(data, layer)
with pytest.raises(tvm.error.OpAttributeInvalid):
verify_keras_frontend(keras_model)
def test_forward_dense(self, keras_mod):
"""test_forward_dense"""
data = keras_mod.layers.Input(shape=(32, 32, 1))
x = keras_mod.layers.Flatten()(data)
x = keras_mod.layers.Dropout(0.5)(x)
x = keras_mod.layers.Dense(10, activation="relu", kernel_initializer="uniform")(x)
keras_model = keras_mod.models.Model(data, x)
verify_keras_frontend(keras_model)
# RNN dense
data = keras_mod.layers.Input(shape=(1, 32))
x = keras_mod.layers.Dense(32, activation="relu", kernel_initializer="uniform")(data)
keras_model = keras_mod.models.Model(data, x)
verify_keras_frontend(keras_model, need_transpose=False)
def test_forward_permute(self, keras_mod):
    """Permute rotates the three trailing axes of a 4-D tensor."""
    inputs = keras_mod.layers.Input(shape=(2, 3, 4))
    permuted = keras_mod.layers.Permute([2, 3, 1])(inputs)
    verify_keras_frontend(keras_mod.models.Model(inputs, permuted), need_transpose=False)
def test_forward_sequential(self, keras_mod):
    """A small Sequential MLP with dropout between the Dense layers."""
    mlp_layers = [
        keras_mod.layers.Dense(16, input_dim=32, activation="relu"),
        keras_mod.layers.Dropout(0.5),
        keras_mod.layers.Dense(8, activation="relu"),
        keras_mod.layers.Dropout(0.5),
        keras_mod.layers.Dense(1, activation="sigmoid"),
    ]
    verify_keras_frontend(keras_mod.models.Sequential(mlp_layers))
def test_forward_pool(self, keras_mod):
    """2-D max and average pooling with 'same' padding (checked in that order)."""
    inputs = keras_mod.layers.Input(shape=(32, 32, 1))
    for pool_cls in (keras_mod.layers.MaxPooling2D, keras_mod.layers.AveragePooling2D):
        pooled = pool_cls((3, 3), strides=(1, 1), padding="same")(inputs)
        verify_keras_frontend(keras_mod.models.Model(inputs, pooled))
def test_forward_conv1d(self, keras_mod):
    """Conv1D variants (strided, dilated, bias-free, even kernel) in NWC layout."""
    inputs = keras_mod.layers.Input(shape=(32, 3))
    conv_layers = [
        keras_mod.layers.Conv1D(filters=10, kernel_size=(3,), strides=(2,), padding="same"),
        keras_mod.layers.Conv1D(
            filters=10, kernel_size=(3,), dilation_rate=(2,), padding="same"
        ),
        keras_mod.layers.Conv1D(filters=1, kernel_size=(3,), padding="valid", use_bias=False),
        keras_mod.layers.Conv1D(filters=10, kernel_size=(2,), padding="valid"),
        # Enable when relay conv1d_transpose handles NWC:
        # keras.layers.Conv1DTranspose(filters=10, kernel_size=(3), padding="valid"),
    ]
    for conv in conv_layers:
        verify_keras_frontend(keras_mod.models.Model(inputs, conv(inputs)), layout="NWC")
def test_forward_conv(self, keras_mod):
    """2-D convolution variants: strided, dilated, depthwise, transpose, separable."""
    inputs = keras_mod.layers.Input(shape=(32, 32, 3))
    conv_layers = [
        keras_mod.layers.Conv2D(filters=10, kernel_size=(3, 3), strides=(2, 2), padding="same"),
        keras_mod.layers.Conv2D(
            filters=10, kernel_size=(3, 3), dilation_rate=(2, 2), padding="same"
        ),
        keras_mod.layers.Conv2D(filters=1, kernel_size=(3, 3), padding="same"),
        keras_mod.layers.DepthwiseConv2D(kernel_size=(3, 3), padding="same"),
        keras_mod.layers.Conv2DTranspose(filters=10, kernel_size=(3, 3), padding="valid"),
        keras_mod.layers.SeparableConv2D(filters=10, kernel_size=(3, 3), padding="same"),
        keras_mod.layers.SeparableConv2D(filters=10, kernel_size=(3, 3), dilation_rate=(2, 2)),
        keras_mod.layers.SeparableConv2D(filters=2, kernel_size=(3, 3), dilation_rate=2),
    ]
    for conv in conv_layers:
        verify_keras_frontend(keras_mod.models.Model(inputs, conv(inputs)))
def test_forward_conv_transpose(self, keras_mod):
    """Conv2DTranspose, plain and with strides plus output_padding, in NHWC."""
    inputs = keras_mod.layers.Input(shape=(32, 32, 128))
    conv_layers = [
        keras_mod.layers.Conv2DTranspose(filters=64, kernel_size=(2, 2), padding="valid"),
        keras_mod.layers.Conv2DTranspose(
            filters=2, kernel_size=(3, 3), strides=(2, 2), output_padding=(1, 1)
        ),
    ]
    for conv in conv_layers:
        verify_keras_frontend(keras_mod.models.Model(inputs, conv(inputs)), layout="NHWC")
def test_forward_batch_norm(self, keras_mod):
    """BatchNormalization with every combination of center/scale flags.

    The four variants are checked in the original order:
    (center, scale) = (T, F), (T, T), (F, T), (F, F).
    """
    inputs = keras_mod.layers.Input(shape=(32, 32, 3))
    for center, scale in ((True, False), (True, True), (False, True), (False, False)):
        normed = keras_mod.layers.BatchNormalization(
            axis=-1,
            momentum=0.99,
            epsilon=0.001,
            center=center,
            scale=scale,
            beta_initializer="zeros",
            gamma_initializer="ones",
            moving_mean_initializer="zeros",
            moving_variance_initializer="ones",
        )(inputs)
        verify_keras_frontend(keras_mod.models.Model(inputs, normed))
def test_forward_upsample(self, keras_mod, interpolation="nearest"):
    """UpSampling2D with a square factor, then unequal height/width factors."""
    for in_shape, size in (((32, 32, 3), (3, 3)), ((2, 1, 3), (1, 2))):
        inputs = keras_mod.layers.Input(shape=in_shape)
        upsampled = keras_mod.layers.UpSampling2D(size=size, interpolation=interpolation)(inputs)
        verify_keras_frontend(keras_mod.models.Model(inputs, upsampled))
def test_forward_reshape(self, keras_mod):
    """Reshape across input/target rank combinations, table-driven.

    Cases are (input shape, target shape, needs NCHW transpose), kept in
    the original order.
    """
    cases = [
        ((32, 32, 3), (16, 64, 3), True),  # 3-D -> 3-D
        ((32, 8, 3), (256, 3), True),      # 3-D -> 2-D
        ((256, 3), (8, 32, 3), True),      # 2-D -> 3-D
        ((2, 8), (16,), False),            # 2-D -> 1-D
        ((16,), (4, 4), False),            # 1-D -> 2-D
        ((2, 8), (4, 4), False),           # 2-D -> 2-D
        ((15,), (5, 3), False),            # "non-square" target
        ((3, 2, 4), (3, 8), True),         # modifies the channel dim
    ]
    for in_shape, target_shape, transpose in cases:
        inputs = keras_mod.layers.Input(shape=in_shape)
        reshaped = keras_mod.layers.Reshape(target_shape=target_shape)(inputs)
        model = keras_mod.models.Model(inputs, reshaped)
        if transpose:
            verify_keras_frontend(model)
        else:
            verify_keras_frontend(model, need_transpose=False)
def test_forward_crop(self, keras_mod):
    """Cropping2D with symmetric, asymmetric, scalar and zero crops.

    Each model is verified in both NHWC and NCHW layouts; the original
    code checked the first model twice in NHWC and never in NCHW
    (clearly a copy-paste slip, given the second model checks both).
    """
    data = keras_mod.layers.Input(shape=(32, 32, 3))
    x = keras_mod.layers.Cropping2D(cropping=((1, 1), (1, 1)))(data)
    x = keras_mod.layers.Cropping2D(cropping=(1, 1))(x)
    x = keras_mod.layers.Cropping2D(cropping=1)(x)
    x = keras_mod.layers.Cropping2D(cropping=((0, 1), (1, 0)))(x)
    x = keras_mod.layers.Cropping2D(cropping=(1, 0))(x)
    x = keras_mod.layers.Cropping2D(cropping=0)(x)
    x = keras_mod.layers.Add()([x, x])
    keras_model = keras_mod.models.Model(data, x)
    verify_keras_frontend(keras_model, layout="NHWC")
    verify_keras_frontend(keras_model, layout="NCHW")
    data = keras_mod.layers.Input(shape=(32, 32, 3))
    x = keras_mod.layers.Cropping2D(cropping=(2, 1))(data)
    x = keras_mod.layers.Cropping2D(cropping=(1, 2))(x)
    keras_model = keras_mod.models.Model(data, x)
    verify_keras_frontend(keras_model, layout="NHWC")
    verify_keras_frontend(keras_model, layout="NCHW")
def test_forward_multi_inputs(self, keras_mod):
    """A two-input graph merged with Average and globally pooled."""
    left = keras_mod.layers.Input(shape=(32, 32, 3))
    right = keras_mod.layers.Input(shape=(32, 32, 3))
    conv_left = keras_mod.layers.Conv2D(8, (3, 3), padding="same")(left)
    conv_right = keras_mod.layers.Conv2D(8, (3, 3), padding="same")(right)
    merged = keras_mod.layers.Average()([conv_left, conv_right])
    pooled = keras_mod.layers.GlobalAveragePooling2D()(merged)
    verify_keras_frontend(keras_mod.models.Model([left, right], pooled))
def test_forward_multi_outputs(self, keras_mod):
    """One input fanned out into two independently pooled conv branches."""
    inputs = keras_mod.layers.Input(shape=(32, 32, 3))
    branches = []
    for _ in range(2):
        conv = keras_mod.layers.Conv2D(8, (3, 3), padding="same")(inputs)
        branches.append(keras_mod.layers.GlobalAveragePooling2D()(conv))
    verify_keras_frontend(keras_mod.models.Model(inputs, branches))
def test_forward_reuse_layers(self, keras_mod):
    """Layer instances applied more than once must convert correctly."""
    # Reuse a single Conv2D instance on the same input twice.
    inputs = keras_mod.layers.Input(shape=(32, 32, 3))
    shared_conv = keras_mod.layers.Conv2D(8, (3, 3), padding="same")
    summed = keras_mod.layers.Add()([shared_conv(inputs), shared_conv(inputs)])
    pooled = keras_mod.layers.GlobalAveragePooling2D()(summed)
    verify_keras_frontend(keras_mod.models.Model(inputs, pooled))
    # Reuse a single Add instance along a chain.
    inputs = keras_mod.layers.Input(shape=(32, 32, 3))
    conv = keras_mod.layers.Conv2D(8, (3, 3), padding="same")(inputs)
    shared_add = keras_mod.layers.Add()
    doubled = shared_add([conv, conv])
    quadrupled = shared_add([doubled, doubled])
    pooled = keras_mod.layers.GlobalAveragePooling2D()(quadrupled)
    verify_keras_frontend(keras_mod.models.Model(inputs, pooled))
def test_forward_lstm(self, keras_mod):
    """LSTM variants: sequences, reversed time, and bias-free."""
    inputs = keras_mod.layers.Input(shape=(10, 32))
    variants = [
        keras_mod.layers.LSTM(16),
        keras_mod.layers.LSTM(16, return_sequences=True),
        keras_mod.layers.LSTM(16, go_backwards=True),
        keras_mod.layers.LSTM(16, return_sequences=True, go_backwards=True),
        keras_mod.layers.LSTM(16, return_sequences=True, use_bias=False),
    ]
    for rnn in variants:
        verify_keras_frontend(keras_mod.models.Model(inputs, rnn(inputs)), need_transpose=False)
def test_forward_rnn(self, keras_mod):
    """LSTM, SimpleRNN and GRU cells, each with and without bias."""
    inputs = keras_mod.layers.Input(shape=(1, 32))
    variants = [
        keras_mod.layers.LSTM(
            units=16, return_state=False, recurrent_activation="sigmoid", activation="tanh"
        ),
        keras_mod.layers.LSTM(
            units=16,
            return_state=False,
            recurrent_activation="sigmoid",
            activation="tanh",
            use_bias=False,
        ),
        keras_mod.layers.SimpleRNN(units=16, return_state=False, activation="tanh"),
        keras_mod.layers.SimpleRNN(
            units=16, return_state=False, activation="tanh", use_bias=False
        ),
        keras_mod.layers.GRU(
            units=16,
            return_state=False,
            recurrent_activation="sigmoid",
            activation="tanh",
            reset_after=False,
        ),
        keras_mod.layers.GRU(
            units=16,
            return_state=False,
            recurrent_activation="sigmoid",
            activation="tanh",
            reset_after=False,
            use_bias=False,
        ),
    ]
    for rnn in variants:
        verify_keras_frontend(keras_mod.models.Model(inputs, rnn(inputs)), need_transpose=False)
def test_forward_vgg16(self, keras_mod, layout="NCHW"):
    """VGG16 with ImageNet weights, resolving the class across Keras versions."""
    applications = keras_mod.applications
    # Keras <= 2.4 exposes the model at the top level; >= 2.6 nests it
    # under the applications.vgg16 submodule.
    vgg16_mod = getattr(applications, "VGG16", None) or applications.vgg16.VGG16
    keras_model = vgg16_mod(
        include_top=True, weights="imagenet", input_shape=(224, 224, 3), classes=1000
    )
    verify_keras_frontend(keras_model, layout=layout)
def test_forward_xception(self, keras_mod, layout="NCHW"):
    """test_forward_xception"""
    # (The docstring previously said "test_forward_vgg16" — copy-paste slip.)
    if hasattr(keras_mod.applications, "Xception"):
        # Keras 2.4.x and older
        xception_mod = keras_mod.applications.Xception
    else:
        # Keras 2.6.x and newer
        xception_mod = keras_mod.applications.xception.Xception
    keras_model = xception_mod(
        include_top=True, weights="imagenet", input_shape=(299, 299, 3), classes=1000
    )
    verify_keras_frontend(keras_model, layout=layout)
def test_forward_resnet50(self, keras_mod, layout="NCHW"):
    """ResNet50 with ImageNet weights, resolving the class across Keras versions."""
    applications = keras_mod.applications
    # Keras <= 2.4 exposes the model at the top level; >= 2.6 nests it
    # under the applications.resnet submodule.
    resnet50_mod = getattr(applications, "ResNet50", None) or applications.resnet.ResNet50
    keras_model = resnet50_mod(
        include_top=True, weights="imagenet", input_shape=(224, 224, 3), classes=1000
    )
    verify_keras_frontend(keras_model, layout=layout)
def test_forward_mobilenet(self, keras_mod, layout="NCHW"):
    """MobileNet with ImageNet weights, verified in the requested layout."""
    keras_model = get_mobilenet(keras_mod)(
        include_top=True, weights="imagenet", input_shape=(224, 224, 3), classes=1000
    )
    verify_keras_frontend(keras_model, layout=layout)
def test_forward_conv3d(self, keras_mod):
    """Conv3D variants: strided, dilated, bias-free, even kernel (NDHWC)."""
    inputs = keras_mod.layers.Input(shape=(32, 32, 32, 3))
    conv_layers = [
        keras_mod.layers.Conv3D(
            filters=10, kernel_size=(3, 3, 3), strides=(2, 2, 2), padding="same"
        ),
        keras_mod.layers.Conv3D(
            filters=10, kernel_size=(3, 3, 3), dilation_rate=(2, 2, 2), padding="same"
        ),
        keras_mod.layers.Conv3D(
            filters=1, kernel_size=(3, 3, 3), padding="valid", use_bias=False
        ),
        keras_mod.layers.Conv3D(filters=10, kernel_size=(2, 2, 2), padding="valid"),
    ]
    for conv in conv_layers:
        verify_keras_frontend(keras_mod.models.Model(inputs, conv(inputs)), layout="NDHWC")
def test_forward_conv3d_transpose(self, keras_mod):
    """Conv3DTranspose variants, including strides with output_padding (NDHWC)."""
    inputs = keras_mod.layers.Input(shape=(32, 32, 32, 3))
    conv_layers = [
        keras_mod.layers.Conv3DTranspose(
            filters=10, kernel_size=(3, 3, 3), strides=(2, 2, 2), padding="same"
        ),
        keras_mod.layers.Conv3DTranspose(
            filters=10, kernel_size=(1, 1, 1), dilation_rate=(1, 1, 1), padding="same"
        ),
        keras_mod.layers.Conv3DTranspose(
            filters=1, kernel_size=(3, 3, 3), padding="valid", use_bias=False
        ),
        keras_mod.layers.Conv3DTranspose(filters=10, kernel_size=(2, 2, 2), padding="valid"),
        keras_mod.layers.Conv3DTranspose(
            filters=2, kernel_size=(3, 3, 3), strides=(2, 2, 2), output_padding=(1, 1, 1)
        ),
    ]
    for conv in conv_layers:
        verify_keras_frontend(keras_mod.models.Model(inputs, conv(inputs)), layout="NDHWC")
def test_forward_pool3d(self, keras_mod):
    """3-D max and average pooling with both padding modes (NDHWC)."""
    inputs = keras_mod.layers.Input(shape=(32, 32, 32, 1))
    pool_layers = [
        # maxpool
        keras_mod.layers.MaxPooling3D(pool_size=(2, 2, 2), strides=(1, 1, 1), padding="same"),
        keras_mod.layers.MaxPooling3D(pool_size=(3, 3, 3), strides=(2, 2, 2), padding="valid"),
        # avgpool
        keras_mod.layers.AveragePooling3D(
            pool_size=(3, 3, 3), strides=(2, 2, 2), padding="same"
        ),
        keras_mod.layers.AveragePooling3D(
            pool_size=(2, 2, 2), strides=(1, 1, 1), padding="valid"
        ),
    ]
    for pool in pool_layers:
        verify_keras_frontend(keras_mod.models.Model(inputs, pool(inputs)), layout="NDHWC")
def test_forward_upsample3d(self, keras_mod):
    """UpSampling3D with a different factor on every spatial axis."""
    inputs = keras_mod.layers.Input(shape=(32, 32, 32, 3))
    upsampled = keras_mod.layers.UpSampling3D(size=(2, 3, 4))(inputs)
    verify_keras_frontend(keras_mod.models.Model(inputs, upsampled), layout="NDHWC")
def test_forward_zero_padding3d(self, keras_mod):
    """ZeroPadding3D with scalar, per-axis and per-side padding specs."""
    inputs = keras_mod.layers.Input(shape=(32, 32, 32, 3))
    paddings = [
        2,                          # single int for all axes
        (1, 2, 3),                  # one int per spatial axis
        ((1, 1), (2, 2), (2, 2)),   # symmetric per-side pairs
        ((1, 2), (2, 3), (3, 2)),   # asymmetric per-side pairs
    ]
    for padding in paddings:
        padded = keras_mod.layers.ZeroPadding3D(padding=padding)(inputs)
        verify_keras_frontend(keras_mod.models.Model(inputs, padded), layout="NDHWC")
def test_forward_embedding(self, keras_mod):
    """Embedding lookups over 2-D, 3-D and 4-D integer inputs."""
    cases = (((2, 4), 10, 3), ((2, 3, 4), 4, 5), ((6, 2, 3, 4), 4, 5))
    for in_shape, vocab_size, embed_dim in cases:
        inputs = keras_mod.layers.Input(shape=in_shape, dtype="int32")
        embedded = keras_mod.layers.Embedding(vocab_size, embed_dim)(inputs)
        verify_keras_frontend(keras_mod.models.Model(inputs, embedded), need_transpose=False)
def test_forward_repeat_vector(self, keras_mod):
    """RepeatVector, after a Dense layer and with repeat factors 3 and 1."""
    inputs = keras_mod.layers.Input(shape=(5,), dtype="float32")
    dense = keras_mod.layers.Dense(6)(inputs)
    repeated = keras_mod.layers.RepeatVector(2)(dense)
    verify_keras_frontend(keras_mod.models.Model(inputs, repeated), need_transpose=False)
    for in_shape, repeats in (((10,), 3), ((4,), 1)):
        inputs = keras_mod.layers.Input(shape=in_shape, dtype="float32")
        repeated = keras_mod.layers.RepeatVector(repeats)(inputs)
        verify_keras_frontend(keras_mod.models.Model(inputs, repeated), need_transpose=False)
def test_forward_global_pool3d(self, keras_mod):
    """test_forward_global_pool3d"""
    # (The docstring previously said "test_forward_zero_padding3d" —
    # copy-paste slip.)
    data = keras_mod.layers.Input(shape=(32, 32, 32, 1))
    pool_funcs = [  # global maxpool
        keras_mod.layers.GlobalMaxPooling3D(),
        # global avgpool
        keras_mod.layers.GlobalAveragePooling3D(),
    ]
    for pool_func in pool_funcs:
        x = pool_func(data)
        keras_model = keras_mod.models.Model(data, x)
        verify_keras_frontend(keras_model, layout="NDHWC")
def test_forward_nested_layers(self, keras_mod):
    """A Sequential model embedding a whole MobileNet as a single layer."""
    backbone = get_mobilenet(keras_mod)(
        include_top=False, weights="imagenet", input_shape=(224, 224, 3)
    )
    head = [
        keras_mod.layers.GlobalAveragePooling2D(),
        keras_mod.layers.Dense(1024, activation="relu"),
        keras_mod.layers.Dense(2, activation="sigmoid"),
    ]
    verify_keras_frontend(keras_mod.Sequential([backbone] + head))
def test_forward_l2_normalize(self, keras_mod):
    """K.l2_normalize with positional/keyword axes, scalars, tuples and lists."""
    inputs = keras_mod.layers.Input(shape=(16, 12, 8))
    k_backend = keras_mod.backend
    lambda_layers = [
        keras_mod.layers.Lambda(lambda v: k_backend.l2_normalize(v, axis=-2)),
        keras_mod.layers.Lambda(lambda v: k_backend.l2_normalize(x=v, axis=-1)),
        keras_mod.layers.Lambda(lambda v: k_backend.l2_normalize(axis=1, x=v)),
        keras_mod.layers.Lambda(lambda v: k_backend.l2_normalize(v, 2)),
        keras_mod.layers.Lambda(lambda v: k_backend.l2_normalize(v, axis=3)),
        keras_mod.layers.Lambda(lambda v: k_backend.l2_normalize(v, axis=(2, 3))),
        keras_mod.layers.Lambda(lambda v: k_backend.l2_normalize(v, (1, 2))),
        keras_mod.layers.Lambda(lambda v: k_backend.l2_normalize(v, axis=[-2, -1])),
        keras_mod.layers.Lambda(lambda v: k_backend.l2_normalize(v, [-3, -2])),
    ]
    for lambda_layer in lambda_layers:
        model = keras_mod.models.Model(inputs, lambda_layer(inputs))
        # Each variant is checked in both layouts.
        verify_keras_frontend(model, layout="NCHW")
        verify_keras_frontend(model, layout="NHWC")
def test_forward_time_distributed(self, keras_mod):
    """TimeDistributed wrappers around Conv2D and Dense."""
    frames = keras_mod.Input(shape=(10, 128, 128, 3))
    conv_over_time = keras_mod.layers.TimeDistributed(keras_mod.layers.Conv2D(64, (3, 3)))(
        frames
    )
    verify_keras_frontend(keras_mod.models.Model(frames, conv_over_time), layout="NDHWC")
    steps = keras_mod.Input(shape=(5, 1))
    dense_over_time = keras_mod.layers.TimeDistributed(keras_mod.layers.Dense(1))(steps)
    verify_keras_frontend(keras_mod.models.Model(steps, dense_over_time), need_transpose=False)
if __name__ == "__main__":
    # Script entry point: exercise every frontend test against both
    # standalone Keras and tf.keras so the converter is validated for
    # both import paths. Order mirrors the test definitions above.
    for k in [keras, tf_keras]:
        sut = TestKeras()
        sut.test_forward_concatenate(keras_mod=k)
        sut.test_forward_merge_dot(keras_mod=k)
        sut.test_forward_merge(keras_mod=k)
        sut.test_forward_activations(keras_mod=k)
        sut.test_forward_activations_except(keras_mod=k)
        sut.test_forward_dense(keras_mod=k)
        sut.test_forward_permute(keras_mod=k)
        sut.test_forward_sequential(keras_mod=k)
        sut.test_forward_pool(keras_mod=k)
        sut.test_forward_conv(keras_mod=k)
        sut.test_forward_conv1d(keras_mod=k)
        sut.test_forward_batch_norm(keras_mod=k)
        # Upsampling is run for both supported interpolation modes.
        sut.test_forward_upsample(keras_mod=k, interpolation="nearest")
        sut.test_forward_upsample(keras_mod=k, interpolation="bilinear")
        sut.test_forward_reshape(keras_mod=k)
        sut.test_forward_crop(keras_mod=k)
        sut.test_forward_multi_inputs(keras_mod=k)
        sut.test_forward_multi_outputs(keras_mod=k)
        sut.test_forward_reuse_layers(keras_mod=k)
        sut.test_forward_lstm(keras_mod=k)
        sut.test_forward_rnn(keras_mod=k)
        # Whole-network models are checked in both layouts where supported.
        sut.test_forward_vgg16(keras_mod=k)
        sut.test_forward_vgg16(keras_mod=k, layout="NHWC")
        sut.test_forward_xception(keras_mod=k)
        sut.test_forward_resnet50(keras_mod=k)
        sut.test_forward_resnet50(keras_mod=k, layout="NHWC")
        sut.test_forward_mobilenet(keras_mod=k)
        sut.test_forward_mobilenet(keras_mod=k, layout="NHWC")
        sut.test_forward_conv3d(keras_mod=k)
        sut.test_forward_conv3d_transpose(keras_mod=k)
        sut.test_forward_pool3d(keras_mod=k)
        sut.test_forward_global_pool3d(keras_mod=k)
        sut.test_forward_upsample3d(keras_mod=k)
        sut.test_forward_zero_padding3d(keras_mod=k)
        sut.test_forward_embedding(keras_mod=k)
        sut.test_forward_repeat_vector(keras_mod=k)
        sut.test_forward_l2_normalize(keras_mod=k)
        sut.test_forward_time_distributed(keras_mod=k)
| 38,227 | 43.399535 | 100 | py |
tvm | tvm-main/tests/python/frontend/caffe/test_forward.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=import-self, invalid-name, unused-argument, unspecified-encoding
"""
Caffe testcases
====================
This article is a test script to test Caffe operator with Relay.
"""
import os
import logging
import numpy as np
import pytest
from google.protobuf import text_format
import caffe
from caffe import layers as L, params as P
from caffe.proto import caffe_pb2 as pb
import tvm
import tvm.testing
from tvm import relay
from tvm.contrib import graph_executor
from tvm.contrib.download import download_testdata
os.environ["GLOG_minloglevel"] = "2"
logging.basicConfig(level=logging.ERROR)
CURRENT_DIR = os.path.join(os.path.expanduser("~"), ".tvm_test_data", "caffe_test")
#######################################################################
# Generic functions for TVM & Caffe
# ------------------------------------------
def _create_dir(d_path):
"""If the directory is not existed, create it"""
if not (os.path.exists(d_path) and os.path.isdir(d_path)):
os.makedirs(d_path)
def _list_to_str(ll):
"""Convert list or tuple to str, separated by underline."""
if isinstance(ll, (tuple, list)):
tmp = [str(i) for i in ll]
res = "_".join(tmp)
return res
def _gen_filename_str(op_name, data_shape, *args, **kwargs):
    """Build the (.prototxt, .caffemodel, solver .prototxt) path triple.

    The filename stem is the op name, the flattened shape, and every
    scalar/sequence arg joined by underscores, with '.' and '-' replaced
    so the result is filesystem-friendly.
    """
    file_dir = os.path.join(CURRENT_DIR, op_name)
    _create_dir(file_dir)
    parts = [op_name, _list_to_str(list(data_shape))]
    # kwargs order matters here: it is preserved from the call site and
    # becomes part of the generated file names.
    for value in list(args) + list(kwargs.values()):
        if isinstance(value, (tuple, list)):
            parts.append(_list_to_str(value))
        elif isinstance(value, (int, float, str)):
            parts.append(str(value))
    stem = "_".join(parts).replace(".", "_").replace("-", "_")
    return (
        os.path.join(file_dir, stem + ".prototxt"),
        os.path.join(file_dir, stem + ".caffemodel"),
        os.path.join(file_dir, stem + "_solver.prototxt"),
    )
def _save_prototxt(n_netspec, f_path):
"""Generate .prototxt file according to caffe.NetSpec"""
s = n_netspec.to_proto()
with open(f_path, "w") as f:
f.write(str(s))
def _save_solver(solver_file, proto_file, blob_file):
    """Write a fixed SGD solver prototxt that trains *proto_file*.

    The hyper-parameters are arbitrary — the solver only exists so a
    .caffemodel with initialized weights can be snapshotted.
    """
    solver = pb.SolverParameter()
    solver.train_net = proto_file
    solver.base_lr = 0.01
    solver.momentum = 0.9
    solver.weight_decay = 0.0005
    solver.lr_policy = "inv"
    solver.gamma = 0.0001
    solver.power = 0.75
    solver.display = 1
    solver.max_iter = 100000
    solver.snapshot = 100000
    # Snapshots share the caffemodel's basename (".caffemodel" stripped).
    solver.snapshot_prefix = blob_file.split(".caffemodel")[0]
    with open(solver_file, "w") as f:
        f.write(str(solver))
def _save_caffemodel(solver_file, blob_file):
    """Materialize a .caffemodel by instantiating an SGD solver's net."""
    caffe.SGDSolver(solver_file).net.save(blob_file)
def _gen_model_files(n_netspec, proto_file, blob_file, solver_file):
    """Write the .prototxt, solver .prototxt and .caffemodel for *n_netspec*.

    Order matters: the solver references the prototxt, and saving the
    caffemodel instantiates the solver.
    """
    _save_prototxt(n_netspec, proto_file)
    _save_solver(solver_file, proto_file, blob_file)
    _save_caffemodel(solver_file, blob_file)
def _siso_op(data, func, *args, **kwargs):
    """Build a single-input / single-output Caffe NetSpec around *func*."""
    spec = caffe.NetSpec()
    spec.data = L.Input(input_param={"shape": {"dim": list(data.shape)}})
    spec.output = func(spec.data, *args, **kwargs)
    return spec
def _miso_op(data_list, func, *args, **kwargs):
    """Build a multi-input / single-output Caffe NetSpec around *func*.

    Inputs are named "data0", "data1", ... in the order of *data_list*.
    """
    if not isinstance(data_list, (tuple, list)):
        raise TypeError(f"Need tuple or list but get {type(data_list)}")
    spec = caffe.NetSpec()
    input_blobs = []
    for idx, data in enumerate(data_list):
        blob_name = "data" + str(idx)
        spec[blob_name] = L.Input(input_param={"shape": {"dim": list(data.shape)}})
        input_blobs.append(spec[blob_name])
    spec.output = func(*input_blobs, *args, **kwargs)
    return spec
def _simo_op(data, func, *args, **kwargs):
    """Build a single-input / multi-output Caffe NetSpec around *func*.

    Outputs are named "output0", "output1", ... in *func*'s return order.
    """
    spec = caffe.NetSpec()
    spec.data = L.Input(input_param={"shape": {"dim": list(data.shape)}})
    for idx, out_blob in enumerate(func(spec.data, *args, **kwargs)):
        spec["output" + str(idx)] = out_blob
    return spec
def _run_caffe(data, proto_file, blob_file):
    """Run the Caffe reference model and return its outputs as a list.

    Outputs are returned in "output0", "output1", ... order when those
    keys exist; otherwise whatever order Caffe produced is used.
    """
    net = caffe.Net(proto_file, blob_file, caffe.TEST)
    if isinstance(data, (list, tuple)):
        for idx, d in enumerate(data):
            net.blobs["data" + str(idx)].data[...] = d
    else:
        net.blobs["data"].data[...] = data
    out = net.forward()
    caffe_output = []
    for i in range(len(out)):
        key = "output" + str(i)
        if key not in out:
            # Naming scheme not followed — fall back to insertion order.
            return list(out.values())
        caffe_output.append(out[key])
    return caffe_output
def _run_tvm(data, proto_file, blob_file):
    """Run caffe model by TVM according to .caffemodel and .prototxt

    Imports the Caffe net/weights into Relay, builds for llvm/CPU, feeds
    *data* (one array, or a list for multi-input nets) and returns all
    outputs as a list of numpy arrays.
    """
    init_net = pb.NetParameter()
    predict_net = pb.NetParameter()
    # load model
    with open(proto_file, "r") as f:
        text_format.Merge(f.read(), predict_net)
    # load blob
    with open(blob_file, "rb") as f:
        init_net.ParseFromString(f.read())
    shape_dict = {}
    dtype_dict = {}
    # Input blobs are named "data0", "data1", ... for multi-input nets,
    # plain "data" otherwise — mirroring _miso_op/_siso_op above.
    if isinstance(data, (tuple, list)):
        for idx, d in enumerate(data):
            shape_dict["data" + str(idx)] = d.shape
            dtype_dict["data" + str(idx)] = "float32"
    else:
        shape_dict = {"data": data.shape}
        dtype_dict = {"data": "float32"}
    mod, params = relay.frontend.from_caffe(init_net, predict_net, shape_dict, dtype_dict)
    target = "llvm"
    dev = tvm.cpu(0)
    with tvm.transform.PassContext(opt_level=3):
        lib = relay.build(mod, target=target, params=params)
    dtype = "float32"
    m = graph_executor.GraphModule(lib["default"](dev))
    if isinstance(data, (tuple, list)):
        for idx, d in enumerate(data):
            m.set_input("data" + str(idx), tvm.nd.array(d.astype(dtype)))
    else:
        m.set_input("data", tvm.nd.array(data.astype(dtype)))
    # execute
    m.run()
    tvm_output = []
    # get outputs
    for i in range(m.get_num_outputs()):
        tvm_output.append(m.get_output(i).numpy())
    return tvm_output
def _compare_caffe_tvm(caffe_out, tvm_out, is_network=False):
    """Assert each Caffe output matches the TVM output within 1e-5.

    For whole-network runs only the first batch element is compared.
    """
    for i in range(len(caffe_out)):
        if is_network:
            caffe_out[i] = caffe_out[i][:1]
        tvm.testing.assert_allclose(caffe_out[i], tvm_out[i], rtol=1e-5, atol=1e-5)
def _test_op(data, func_op, op_name, **kwargs):
    """Single-op test pipeline: build net, run Caffe and TVM, compare.

    A list/tuple *data* builds a multi-input net; otherwise "ntop" in
    *kwargs* selects single- vs multi-output construction.
    """
    shape_list = []
    if isinstance(data, (list, tuple)):
        net_spec = _miso_op(data, func_op, **kwargs)
        for d in data:
            shape_list.extend(list(d.shape))
    else:
        num_outputs = kwargs.get("ntop", 1)
        if num_outputs == 1:
            net_spec = _siso_op(data, func_op, **kwargs)
        else:
            net_spec = _simo_op(data, func_op, **kwargs)
        shape_list = list(data.shape)
    # Obtain the .caffemodel file and .prototxt file.
    (proto_file, blob_file, solver_file) = _gen_filename_str(op_name, shape_list, **kwargs)
    _gen_model_files(net_spec, proto_file, blob_file, solver_file)
    # Run the model in Caffe, then in TVM, and compare outputs.
    caffe_out = _run_caffe(data, proto_file, blob_file)
    tvm_out = _run_tvm(data, proto_file, blob_file)
    _compare_caffe_tvm(caffe_out, tvm_out)
def _test_network(data, proto_file, blob_file):
    """Whole-network pipeline: run Caffe then TVM, compare first batch element."""
    caffe_result = _run_caffe(data, proto_file, blob_file)
    tvm_result = _run_tvm(data, proto_file, blob_file)
    _compare_caffe_tvm(caffe_result, tvm_result, is_network=True)
#######################################################################
# BatchNorm
# -----------
def _test_batchnorm(data, moving_average_fraction=0.999, eps=1e-5):
    """One iteration of BatchNorm

    Forwards the parameters to caffe's BatchNorm layer; note the kwarg
    order also feeds the generated artifact filenames.
    """
    _test_op(
        data, L.BatchNorm, "BatchNorm", moving_average_fraction=moving_average_fraction, eps=eps
    )
def test_forward_BatchNorm():
    """BatchNorm: default parameters and a custom fraction/epsilon pair."""
    data = np.random.rand(1, 3, 10, 10).astype(np.float32)
    _test_batchnorm(data)
    _test_batchnorm(data, moving_average_fraction=0.88, eps=1e-4)
#######################################################################
# Concat
# -----------
def _test_concat(data_list, axis=1):
    """One iteration of Concat over *data_list* along *axis*."""
    _test_op(data_list, L.Concat, "Concat", axis=axis)
def test_forward_Concat():
    """Concat: channel-axis for 4-D, leading-axis for 3-D and 2-D inputs."""
    _test_concat([np.random.rand(1, 3, 10, 10), np.random.rand(1, 2, 10, 10)], axis=1)
    _test_concat([np.random.rand(3, 10, 10), np.random.rand(2, 10, 10)], axis=0)
    _test_concat([np.random.rand(3, 10), np.random.rand(2, 10)], axis=0)
#######################################################################
# Convolution
# -----------
def _test_convolution(data, **kwargs):
    """One iteration of Convolution; *kwargs* are the caffe layer params."""
    _test_op(data, L.Convolution, "Convolution", **kwargs)
def test_forward_Convolution():
    """Convolution

    Covers scalar and per-axis pad/kernel/stride/dilation specs, grouped
    convolution, and explicit _h/_w parameters.
    """
    data = np.random.rand(1, 3, 10, 10).astype(np.float32)
    # Scalar pad/kernel/stride with bias.
    _test_convolution(
        data,
        num_output=20,
        bias_term=True,
        pad=0,
        kernel_size=3,
        stride=2,
        dilation=1,
        weight_filler=dict(type="xavier"),
        bias_filler=dict(type="xavier"),
    )
    # Asymmetric padding, no bias.
    _test_convolution(
        data,
        num_output=20,
        bias_term=False,
        pad=[1, 2],
        kernel_size=3,
        stride=2,
        dilation=1,
        weight_filler=dict(type="xavier"),
        bias_filler=dict(type="xavier"),
    )
    # Per-axis kernel/stride/dilation lists.
    _test_convolution(
        data,
        num_output=20,
        bias_term=True,
        pad=[1, 2],
        kernel_size=[3, 5],
        stride=[2, 1],
        dilation=[1, 2],
        weight_filler=dict(type="xavier"),
        bias_filler=dict(type="xavier"),
    )
    # Grouped convolution (2 groups over 2 input channels).
    _test_convolution(
        np.random.rand(1, 2, 10, 10).astype(np.float32),
        num_output=20,
        bias_term=True,
        pad=[1, 2],
        kernel_size=[3, 5],
        stride=[2, 1],
        dilation=[1, 2],
        weight_filler=dict(type="xavier"),
        bias_filler=dict(type="xavier"),
        group=2,
    )
    # Explicit per-dimension _h/_w parameter spelling.
    _test_convolution(
        data,
        num_output=20,
        bias_term=True,
        pad_h=1,
        pad_w=2,
        kernel_h=3,
        kernel_w=5,
        stride_h=2,
        stride_w=1,
        dilation=[1, 2],
        weight_filler=dict(type="xavier"),
        bias_filler=dict(type="xavier"),
    )
#######################################################################
# Crop
# -----------
def _test_crop(data, **kwargs):
    """One iteration of Crop; expects [data, reference] shapes in *data*."""
    _test_op(data, L.Crop, "Crop", **kwargs)
def test_forward_Crop():
    """Crop

    Covers default axis, explicit axis, scalar offset, per-axis offsets,
    and 3-D / 2-D inputs.
    """
    _test_crop([np.random.rand(10, 10, 120, 120), np.random.rand(10, 5, 50, 60)])
    _test_crop([np.random.rand(10, 10, 120, 120), np.random.rand(10, 5, 50, 60)], axis=1)
    _test_crop([np.random.rand(10, 10, 120, 120), np.random.rand(10, 5, 50, 60)], axis=1, offset=2)
    _test_crop(
        [np.random.rand(10, 10, 120, 120), np.random.rand(10, 5, 50, 60)], axis=1, offset=[1, 2, 4]
    )
    _test_crop(
        [np.random.rand(10, 10, 120, 120), np.random.rand(10, 5, 50, 60)], axis=2, offset=[2, 4]
    )
    _test_crop([np.random.rand(10, 120, 120), np.random.rand(5, 50, 60)], axis=1, offset=[2, 4])
    _test_crop([np.random.rand(120, 120), np.random.rand(50, 60)], axis=0, offset=[2, 4])
#######################################################################
# Deconvolution
# -----------
def _test_deconvolution(data, **kwargs):
    """One iteration of Deconvolution"""
    _test_op(data, L.Deconvolution, "Deconvolution", **kwargs)


def test_forward_Deconvolution():
    """Deconvolution"""

    def fillers():
        # Fresh filler dicts for every layer so no call shares mutable state.
        return dict(weight_filler=dict(type="xavier"), bias_filler=dict(type="xavier"))

    data = np.random.rand(1, 16, 32, 32).astype(np.float32)
    # Scalar pad, asymmetric pad, and fully explicit h/w geometry, plus grouping.
    for geom in (
        dict(num_output=20, bias_term=True, pad=0, kernel_size=3, stride=2, dilation=1),
        dict(num_output=20, bias_term=False, pad=[1, 2], kernel_size=3, stride=2, dilation=1),
        dict(
            num_output=20,
            bias_term=True,
            pad_h=1,
            pad_w=2,
            kernel_h=3,
            kernel_w=5,
            stride_h=2,
            stride_w=1,
            dilation=1,
        ),
        dict(num_output=16, bias_term=False, pad=0, kernel_size=2, stride=2, dilation=1, group=16),
    ):
        _test_deconvolution(data, convolution_param=dict(**geom, **fillers()))

    # Depthwise-style case: group == num_output == input channel count.
    data = np.random.rand(1, 100, 32, 32).astype(np.float32)
    _test_deconvolution(
        data,
        convolution_param=dict(
            num_output=100,
            bias_term=False,
            pad=0,
            kernel_size=2,
            stride=2,
            dilation=1,
            group=100,
            **fillers(),
        ),
    )
#######################################################################
# Dropout
# -----------
def _test_dropout(data, **kwargs):
    """One iteration of Dropout"""
    _test_op(data, L.Dropout, "Dropout", **kwargs)


def test_forward_Dropout():
    """Dropout"""
    blob = np.random.rand(1, 3, 10, 10).astype(np.float32)
    # Default dropout ratio, then an explicit one.
    _test_dropout(blob)
    _test_dropout(blob, dropout_ratio=0.7)
#######################################################################
# Eltwise
# -----------
def _test_eltwise(data_list, **kwargs):
    """One iteration of Eltwise"""
    _test_op(data_list, L.Eltwise, "Eltwise", **kwargs)


def test_forward_Eltwise():
    """Eltwise"""

    def blobs(count):
        # *count* independent random inputs of identical shape.
        return [np.random.rand(1, 3, 10, 11).astype(np.float32) for _ in range(count)]

    # operation: 0 = PROD, 1 = SUM, 2 = MAX (Caffe EltwiseOp enum order).
    _test_eltwise(blobs(2), operation=0)
    _test_eltwise(blobs(2), operation=1)
    _test_eltwise(blobs(2), operation=2)
    _test_eltwise(blobs(2), operation=1, coeff=[0.5, 1])
    _test_eltwise(blobs(3), operation=0)
    _test_eltwise(blobs(4), operation=1)
    _test_eltwise(blobs(5), operation=2)
    _test_eltwise(blobs(6), operation=1, coeff=[0.5, 1, 0.2, 1.8, 3.1, 0.1])
#######################################################################
# Flatten
# -----------
def _test_flatten(data, axis=1):
    """One iteration of Flatten"""
    _test_op(data, L.Flatten, "Flatten", axis=axis)


def test_forward_Flatten():
    """Flatten"""
    blob = np.random.rand(1, 3, 10, 10).astype(np.float32)
    # Default axis and an explicit equivalent.
    _test_flatten(blob)
    _test_flatten(blob, axis=1)
#######################################################################
# InnerProduct
# -----------
def _test_inner_product(data, **kwargs):
    """One iteration of InnerProduct"""
    _test_op(data, L.InnerProduct, "InnerProduct", **kwargs)


def test_forward_InnerProduct():
    """InnerProduct"""
    # NOTE(review): this input is float64 (no .astype(np.float32)), unlike the
    # rest of the file — presumably deliberate to cover non-float32 input, but
    # worth confirming against the Caffe frontend's dtype handling.
    data = np.random.rand(1, 3, 10, 10)
    _test_inner_product(data, num_output=20, bias_term=False, weight_filler=dict(type="xavier"))
    _test_inner_product(
        data,
        num_output=20,
        bias_term=True,
        weight_filler=dict(type="xavier"),
        bias_filler=dict(type="xavier"),
    )
    # 2-D input exercises the fully-connected path without spatial dims.
    _test_inner_product(
        np.random.rand(20, 10).astype(np.float32),
        num_output=30,
        bias_term=True,
        weight_filler=dict(type="xavier"),
        bias_filler=dict(type="xavier"),
    )
#######################################################################
# LRN
# -----------
def _test_lrn(data, local_size=5, alpha=1.0, beta=0.75, k=1.0):
    """One iteration of LRN"""
    _test_op(data, L.LRN, "LRN", local_size=local_size, alpha=alpha, beta=beta, k=k)


def test_forward_LRN():
    """LRN"""
    blob = np.random.rand(1, 3, 10, 10).astype(np.float32)
    # Start from defaults, then override one more parameter at a time.
    _test_lrn(blob)
    _test_lrn(blob, local_size=3)
    _test_lrn(blob, local_size=3, alpha=2.0)
    _test_lrn(blob, local_size=3, alpha=2.0, beta=0.5)
    _test_lrn(blob, local_size=3, alpha=2.0, beta=0.5, k=2.0)
#######################################################################
# Permute
# -------
def _test_permute(data, **kwargs):
    """One iteration of Permute."""
    _test_op(data, L.Permute, "Permute", **kwargs)


def test_forward_Permute():
    """Permute"""
    blob = np.random.rand(2, 3, 4).astype(np.float32)
    # All six permutations of three axes.
    for order in ([0, 1, 2], [0, 2, 1], [1, 0, 2], [1, 2, 0], [2, 0, 1], [2, 1, 0]):
        _test_permute(blob, permute_param={"order": order})
#######################################################################
# Pooling
# -----------
def _test_pooling(data, **kwargs):
    """One iteration of Pooling."""
    _test_op(data, L.Pooling, "Pooling", **kwargs)


def test_forward_Pooling():
    """Pooling"""
    data = np.random.rand(1, 3, 10, 10).astype(np.float32)
    # Same three configurations for both MAX and AVE pooling:
    # square kernel, asymmetric h/w geometry, and global pooling.
    for pool_method in (P.Pooling.MAX, P.Pooling.AVE):
        _test_pooling(data, kernel_size=2, stride=2, pad=0, pool=pool_method)
        _test_pooling(
            data,
            kernel_h=2,
            kernel_w=3,
            stride_h=2,
            stride_w=1,
            pad_h=1,
            pad_w=2,
            pool=pool_method,
        )
        _test_pooling(data, pool=pool_method, global_pooling=True)
#######################################################################
# Power
# -----
def _test_power(data, **kwargs):
    """One iteration of Power."""
    _test_op(data, L.Power, "Power", **kwargs)


def test_forward_Power():
    """Power"""
    blob = np.random.rand(1, 3, 10, 10).astype(np.float32)
    # (power, scale, shift) triples, including identity (1, 1, 0).
    for power, scale, shift in (
        (0.37, 0.83, -2.4),
        (0.37, 0.83, 0.0),
        (0.0, 0.83, -2.4),
        (1.0, 0.83, -2.4),
        (2.0, 0.34, -2.4),
        (1.0, 1.0, 0.0),
    ):
        _test_power(blob, power_param={"power": power, "scale": scale, "shift": shift})
#######################################################################
# PReLU
# -----------
def _test_prelu(data, **kwargs):
    """One iteration of PReLU."""
    _test_op(data, L.PReLU, "PReLU", **kwargs)


def test_forward_PReLU():
    """PReLU"""
    four_d = np.random.rand(1, 3, 10, 10).astype(np.float32)
    _test_prelu(four_d, filler=dict(type="constant", value=0.5))
    _test_prelu(four_d)
    # 2-D input path.
    _test_prelu(np.random.rand(10, 20).astype(np.float32))
#######################################################################
# ReLU
# -----------
def _test_relu(data, **kwargs):
    """One iteration of ReLU."""
    _test_op(data, L.ReLU, "ReLU", **kwargs)


def test_forward_ReLU():
    """ReLU"""
    # 4-D and 2-D inputs.
    for shape in ((1, 3, 10, 10), (10, 20)):
        _test_relu(np.random.rand(*shape).astype(np.float32))
#######################################################################
# Reshape
# -----------
def _test_reshape(data, **kwargs):
    """One iteration of Reshape."""
    _test_op(data, L.Reshape, "Reshape", **kwargs)


def test_forward_Reshape():
    """Reshape"""
    blob = np.random.rand(1, 8, 6).astype(np.float32)
    # Covers explicit dims, 0 (copy-dim), -1 (infer), axis, and num_axes.
    for param in (
        {"shape": {"dim": [4, 3, 4]}},
        {"shape": {"dim": [2, 0, 3]}},
        {"shape": {"dim": [2, 0, -1]}},
        {"shape": {"dim": [0, -1]}},
        {"shape": {"dim": [2, 3]}, "axis": 2},
        {"shape": {"dim": [4, 3, 4]}, "axis": 1},
        {"shape": {"dim": [4, 3, 4]}, "axis": -3},
        {"shape": {"dim": [2, 4]}, "axis": 1, "num_axes": 1},
        {"shape": {"dim": [3, 16]}, "axis": 1, "num_axes": 2},
    ):
        _test_reshape(blob, reshape_param=param)
#######################################################################
# Scale
# -----------
def _test_scale(data, **kwargs):
    """One iteration of Scale."""
    _test_op(data, L.Scale, "Scale", **kwargs)


def test_forward_Scale():
    """Scale"""
    blob = np.random.rand(1, 3, 10, 10).astype(np.float32)

    def xavier():
        # Fresh filler dict per call.
        return dict(type="xavier")

    _test_scale(blob, filler=xavier())
    _test_scale(blob, filler=xavier(), bias_term=True, bias_filler=xavier())
#######################################################################
# Sigmoid
# -----------
def _test_sigmoid(data, **kwargs):
    """One iteration of Sigmoid."""
    _test_op(data, L.Sigmoid, "Sigmoid", **kwargs)


def test_forward_Sigmoid():
    """Sigmoid"""
    _test_sigmoid(np.random.rand(1, 3, 10, 10).astype(np.float32))
#######################################################################
# Slice
# -----------
def _test_slice(data, **kwargs):
    """One iteration of Slice"""
    _test_op(data, L.Slice, "Slice", **kwargs)


def test_forward_Slice():
    """Slice"""
    blob = np.random.rand(1, 3, 10, 10).astype(np.float32)
    # Explicit slice points (positive and negative axes), then an even split.
    cases = (
        dict(ntop=2, slice_param=dict(axis=1, slice_point=[1])),
        dict(ntop=2, slice_param=dict(axis=-1, slice_point=[1])),
        dict(ntop=3, slice_param=dict(axis=2, slice_point=[1, 6])),
        dict(ntop=3),
    )
    for kwargs in cases:
        _test_slice(blob, **kwargs)
#######################################################################
# Softmax
# -----------
def _test_softmax(data, **kwargs):
    """One iteration of Softmax"""
    _test_op(data, L.Softmax, "Softmax", **kwargs)


def test_forward_Softmax():
    """Softmax"""
    # (shape, extra kwargs) pairs covering default and explicit axes.
    for shape, kwargs in (
        ((1, 3, 10, 10), {}),
        ((1, 3, 10, 10), dict(axis=2)),
        ((10, 10), dict(axis=0)),
        ((2, 10, 10), dict(axis=1)),
    ):
        _test_softmax(np.random.rand(*shape).astype(np.float32), **kwargs)
#######################################################################
# TanH
# -----------
def _test_tanh(data, **kwargs):
    """One iteration of TanH"""
    _test_op(data, L.TanH, "TanH", **kwargs)


def test_forward_TanH():
    """TanH"""
    # Ranks 4 down to 1.
    for shape in ((1, 3, 10, 10), (3, 10, 10), (10, 10), (10,)):
        _test_tanh(np.random.rand(*shape).astype(np.float32))
#######################################################################
# Reduction
# -----------
def _test_reduction(data, **kwargs):
    """One iteration of Reduction"""
    _test_op(data, L.Reduction, "Reduction", **kwargs)


def test_forward_Reduction():
    """Reduction"""
    reduction_op = {"SUM": 1, "ASUM": 2, "SUMSQ": 3, "MEAN": 4}
    # (input shape, op name, extra kwargs) for every case exercised.
    cases = [
        ((10,), "SUM", dict(axis=0)),
        ((10, 20, 30, 40), "SUM", dict(axis=3)),
        ((10, 20, 30, 40), "SUM", dict(axis=1)),
        ((10,), "SUM", dict(axis=0, coeff=0.5)),
        ((10, 20, 30, 40), "SUM", dict(axis=3, coeff=5.0)),
        ((10,), "ASUM", {}),
        ((10, 20), "ASUM", dict(axis=1)),
        ((10, 20, 30, 40), "ASUM", dict(axis=3)),
        ((10,), "ASUM", dict(axis=0, coeff=0.0)),
        ((10, 20, 30), "ASUM", dict(axis=2, coeff=7.0)),
        ((10, 20, 30, 40, 10), "ASUM", dict(axis=3, coeff=1.0)),
        ((10,), "SUMSQ", dict(axis=0)),
        ((10, 20, 30, 40), "SUMSQ", dict(axis=3)),
        ((10,), "SUMSQ", dict(axis=0, coeff=0.0)),
        ((10, 20, 30, 40, 50), "SUMSQ", dict(axis=4, coeff=2.0)),
        ((10,), "MEAN", dict(axis=0)),
        ((10, 20, 30, 40), "MEAN", dict(axis=3)),
        ((10,), "MEAN", dict(axis=0, coeff=0.0)),
        ((10, 20, 30, 40), "MEAN", dict(axis=3, coeff=2.0)),
    ]
    for shape, op_name, extra in cases:
        _test_reduction(
            np.random.rand(*shape).astype(np.float32),
            operation=reduction_op[op_name],
            **extra,
        )
#######################################################################
# Embed
# -----------
def _test_embed(data, **kwargs):
    """One iteration of Embed"""
    _test_op(data, L.Embed, "Embed", **kwargs)


def test_forward_Embed():
    """Embed"""
    k = 20
    indices = list(range(k))
    np.random.shuffle(indices)
    data = np.asarray(indices)
    common = dict(
        num_output=30,
        input_dim=k,
        weight_filler=dict(type="xavier"),
        bias_filler=dict(type="xavier"),
    )
    # Exercise input ranks 1 through 4, each with and without a bias term.
    for shape in (None, [4, 5], [2, 2, 5], [2, 2, 5, 1]):
        if shape is not None:
            data = np.reshape(data, shape)
        _test_embed(data, bias_term=True, **common)
        _test_embed(data, bias_term=False, **common)
#######################################################################
# Mobilenetv2
# -----------
def _test_mobilenetv2(data):
    """One iteration of Mobilenetv2"""
    # Mean-subtract (per-channel BGR) and scale, per the model's deploy recipe.
    mean_val = np.array([103.939, 116.779, 123.68], dtype=np.float32)
    mean_val = np.tile(np.reshape(mean_val, (1, 3, 1, 1)), (1, 1, 224, 224))
    data_process = ((data - mean_val) / 58.8).astype(np.float32)
    proto_file_url = (
        "https://github.com/shicai/MobileNet-Caffe/raw/master/mobilenet_v2_deploy.prototxt"
    )
    blob_file_url = (
        "https://github.com/shicai/MobileNet-Caffe/blob/master/mobilenet_v2.caffemodel?raw=true"
    )
    proto_file = download_testdata(proto_file_url, "mobilenetv2.prototxt", module="model")
    blob_file = download_testdata(blob_file_url, "mobilenetv2.caffemodel", module="model")
    _test_network(data_process, proto_file, blob_file)


def test_forward_Mobilenetv2():
    """Mobilenetv2"""
    data = np.random.randint(0, 256, size=(1, 3, 224, 224)).astype(np.float32)
    _test_mobilenetv2(data)
#######################################################################
# Alexnet
# -----------
def _test_alexnet(data):
    """One iteration of Alexnet"""
    # Mean-subtract (per-channel BGR) per the model's deploy recipe.
    mean_val = np.array([103.939, 116.779, 123.68], dtype=np.float32)
    mean_val = np.tile(np.reshape(mean_val, (1, 3, 1, 1)), (1, 1, 227, 227))
    data_process = (data - mean_val).astype(np.float32)
    proto_file_url = (
        "https://github.com/BVLC/caffe/raw/master/models/" + "bvlc_alexnet/deploy.prototxt"
    )
    blob_file_url = "http://dl.caffe.berkeleyvision.org/bvlc_alexnet.caffemodel"
    proto_file = download_testdata(proto_file_url, "alexnet.prototxt", module="model")
    blob_file = download_testdata(blob_file_url, "alexnet.caffemodel", module="model")
    _test_network(data_process, proto_file, blob_file)


@pytest.mark.skip(reason="See https://github.com/apache/tvm/issues/13227")
def test_forward_Alexnet():
    """Alexnet"""
    data = np.random.randint(0, 256, size=(1, 3, 227, 227)).astype(np.float32)
    _test_alexnet(data)
#######################################################################
# Resnet50
# -----------
def _test_resnet50(data):
    """One iteration of Resnet50"""
    # Mean-subtract (per-channel BGR) per the model's deploy recipe.
    mean_val = np.array([103.939, 116.779, 123.68], dtype=np.float32)
    mean_val = np.tile(np.reshape(mean_val, (1, 3, 1, 1)), (1, 1, 224, 224))
    data_process = (data - mean_val).astype(np.float32)
    proto_file_url = (
        "https://github.com/fernchen/CaffeModels/raw/master/resnet/ResNet-50-deploy.prototxt"
    )
    blob_file_url = (
        "https://github.com/fernchen/CaffeModels/raw/master/resnet/ResNet-50-model.caffemodel"
    )
    proto_file = download_testdata(proto_file_url, "resnet50.prototxt", module="model")
    blob_file = download_testdata(blob_file_url, "resnet50.caffemodel", module="model")
    _test_network(data_process, proto_file, blob_file)


def test_forward_Resnet50():
    """Resnet50"""
    data = np.random.randint(0, 256, size=(1, 3, 224, 224)).astype(np.float32)
    _test_resnet50(data)
#######################################################################
# Inceptionv1
# -----------
# NOTE: the original comments said "Inceptionv4", but the deployed model
# is BVLC GoogLeNet, i.e. Inception-v1.
def _test_inceptionv1(data):
    """One iteration of Inceptionv1 (BVLC GoogLeNet)."""
    # Mean-subtract (per-channel BGR) and scale, per the deploy recipe.
    mean_val = np.array([103.939, 116.779, 123.68], dtype=np.float32)
    mean_val = np.reshape(mean_val, (1, 3, 1, 1))
    mean_val = np.tile(mean_val, (1, 1, 224, 224))
    data_process = data - mean_val
    data_process = data_process / 58.8
    data_process = data_process.astype(np.float32)
    proto_file_url = (
        "https://github.com/BVLC/caffe/raw/master/models" + "/bvlc_googlenet/deploy.prototxt"
    )
    blob_file_url = "http://dl.caffe.berkeleyvision.org/bvlc_googlenet.caffemodel"
    proto_file = download_testdata(proto_file_url, "inceptionv1.prototxt", module="model")
    blob_file = download_testdata(blob_file_url, "inceptionv1.caffemodel", module="model")
    _test_network(data_process, proto_file, blob_file)


@pytest.mark.skip(reason="See issue https://github.com/apache/tvm/issues/13227")
def test_forward_Inceptionv1():
    """Inceptionv1 (BVLC GoogLeNet)."""
    data = np.random.randint(0, 256, size=(1, 3, 224, 224)).astype(np.float32)
    _test_inceptionv1(data)
if __name__ == "__main__":
    # Allow running this test file directly as a script.
    tvm.testing.main()
| 35,361 | 29.301628 | 99 | py |
tvm | tvm-main/tests/python/frontend/pytorch/test_lstm.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Tests on torch lstm model conversion """
# originally from https://github.com/pytorch/pytorch/blob/master/benchmarks/fastrnns/custom_lstms.py
# described in https://pytorch.org/blog/optimizing-cuda-rnn-with-torchscript/
import numpy as np
import torch
import torch.nn as nn
from torch.nn import Parameter
import torch.jit as jit
from typing import List, Tuple
from torch import Tensor
import tvm
import tvm.testing
from tvm import relay
from tvm.relay.frontend.pytorch import from_pytorch
from tvm.relay.prelude import Prelude
from tvm.runtime.container import ADT, tuple_object
class LayerNormLSTMCell(jit.ScriptModule):
    """TorchScript LSTM cell with LayerNorm on the input, hidden, and
    cell-state projections (no separate bias terms; LayerNorm's affine
    parameters play that role)."""

    def __init__(self, input_size, hidden_size):
        super().__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        # Fused weights for the four gates (input, forget, cell, output).
        self.weight_ih = Parameter(torch.randn(4 * hidden_size, input_size))
        self.weight_hh = Parameter(torch.randn(4 * hidden_size, hidden_size))
        ln = nn.LayerNorm
        self.layernorm_i = ln(4 * hidden_size)
        self.layernorm_h = ln(4 * hidden_size)
        self.layernorm_c = ln(hidden_size)

    @jit.script_method
    def forward(self, input, state):
        # type: (Tensor, Tuple[Tensor, Tensor]) -> Tuple[Tensor, Tuple[Tensor, Tensor]]
        hx, cx = state
        # Normalized input and hidden projections form the gate pre-activations.
        igates = self.layernorm_i(torch.mm(input, self.weight_ih.t()))
        hgates = self.layernorm_h(torch.mm(hx, self.weight_hh.t()))
        gates = igates + hgates
        # Split the fused projection into the four gates along dim 1.
        ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1)
        ingate = torch.sigmoid(ingate)
        forgetgate = torch.sigmoid(forgetgate)
        cellgate = torch.tanh(cellgate)
        outgate = torch.sigmoid(outgate)
        # The new cell state is layer-normalized before producing the output.
        cy = self.layernorm_c((forgetgate * cx) + (ingate * cellgate))
        hy = outgate * torch.tanh(cy)
        return hy, (hy, cy)
class LSTMLayer(jit.ScriptModule):
    """Unidirectional LSTM layer: unrolls *cell* over the leading (time) axis."""

    def __init__(self, cell, *cell_args):
        super().__init__()
        self.cell = cell(*cell_args)

    @jit.script_method
    def forward(self, input, state):
        # type: (Tensor, Tuple[Tensor, Tensor]) -> Tuple[Tensor, Tuple[Tensor, Tensor]]
        outputs = []
        for i in range(input.size(0)):
            out, state = self.cell(input[i], state)
            outputs += [out]
        # Stack per-step outputs along a new leading (time) dimension.
        return torch.stack(outputs), state
class ReverseLSTMLayer(jit.ScriptModule):
    """LSTM layer that consumes the input sequence in reverse time order,
    returning outputs in the original (forward) order."""

    def __init__(self, cell, *cell_args):
        super(ReverseLSTMLayer, self).__init__()
        self.cell = cell(*cell_args)

    @jit.script_method
    def forward(self, inputs, state):
        # type: (Tensor, Tuple[Tensor, Tensor]) -> Tuple[Tensor, Tuple[Tensor, Tensor]]
        outputs = jit.annotate(List[Tensor], [])
        seq_len = inputs.size(0)
        for i in range(seq_len):
            out, state = self.cell(inputs[seq_len - i - 1], state)
            # workaround for the lack of list rev support
            outputs = [out] + outputs
        return torch.stack(outputs), state
class BidirLSTMLayer(jit.ScriptModule):
    """Bidirectional LSTM layer: a forward and a reverse layer over the same
    input, with their outputs concatenated."""

    __constants__ = ["directions"]

    def __init__(self, cell, *cell_args):
        super(BidirLSTMLayer, self).__init__()
        self.directions = nn.ModuleList(
            [
                LSTMLayer(cell, *cell_args),
                ReverseLSTMLayer(cell, *cell_args),
            ]
        )

    @jit.script_method
    def forward(self, input, states):
        # type: (Tensor, List[Tuple[Tensor, Tensor]]) -> Tuple[Tensor, List[Tuple[Tensor, Tensor]]]
        # List[LSTMState]: [forward LSTMState, backward LSTMState]
        outputs = jit.annotate(List[Tensor], [])
        output_states = jit.annotate(List[Tuple[Tensor, Tensor]], [])
        for (i, direction) in enumerate(self.directions):
            state = states[i]
            out, out_state = direction(input, state)
            outputs += [out]
            output_states += [out_state]
        # tensor array concat assumes axis == 0 for now
        # return torch.cat(outputs, -1), output_states
        return torch.cat(outputs, 0), output_states
def init_stacked_lstm(num_layers, layer, first_layer_args, other_layer_args):
    """Build *num_layers* instances of *layer*: the first from
    *first_layer_args*, every subsequent one from *other_layer_args*."""
    stack = [layer(*first_layer_args)]
    while len(stack) < num_layers:
        stack.append(layer(*other_layer_args))
    return nn.ModuleList(stack)
class StackedLSTM(jit.ScriptModule):
    """Stack of unidirectional LSTM layers; each layer feeds the next."""

    __constants__ = ["layers"]  # Necessary for iterating through self.layers

    def __init__(self, num_layers, layer, first_layer_args, other_layer_args):
        super().__init__()
        self.layers = init_stacked_lstm(num_layers, layer, first_layer_args, other_layer_args)

    @jit.script_method
    def forward(self, input, states):
        # type: (Tensor, List[Tuple[Tensor, Tensor]]) -> Tuple[Tensor, List[Tuple[Tensor, Tensor]]]
        # List[LSTMState]: One state per layer
        output_states = jit.annotate(List[Tuple[Tensor, Tensor]], [])
        output = input
        for (i, rnn_layer) in enumerate(self.layers):
            state = states[i]
            output, out_state = rnn_layer(output, state)
            output_states += [out_state]
        return output, output_states
class StackedBidirLSTM(jit.ScriptModule):
    """Stack of bidirectional LSTM layers; each layer feeds the next."""

    __constants__ = ["layers"]  # Necessary for iterating through self.layers

    def __init__(self, num_layers, layer, first_layer_args, other_layer_args):
        super(StackedBidirLSTM, self).__init__()
        self.layers = init_stacked_lstm(num_layers, layer, first_layer_args, other_layer_args)

    @jit.script_method
    def forward(self, input, states):
        # type: (Tensor, List[List[Tuple[Tensor, Tensor]]]) -> Tuple[Tensor, List[List[Tuple[Tensor, Tensor]]]]
        # List[List[LSTMState]]: The outer list is for layers,
        # inner list is for directions.
        output_states = jit.annotate(List[List[Tuple[Tensor, Tensor]]], [])
        output = input
        for (i, rnn_layer) in enumerate(self.layers):
            state = states[i]
            output, out_state = rnn_layer(output, state)
            output_states += [out_state]
        return output, output_states
def lstm(input_size, hidden_size):
    """Single-direction, single-layer layer-norm LSTM."""
    layer = LSTMLayer(LayerNormLSTMCell, input_size, hidden_size)
    return layer
def stacked_lstm(input_size, hidden_size, num_layers):
    """Stack of *num_layers* unidirectional layer-norm LSTM layers."""
    first_args = [LayerNormLSTMCell, input_size, hidden_size]
    # Layers after the first consume the previous layer's hidden output.
    other_args = [LayerNormLSTMCell, hidden_size, hidden_size]
    return StackedLSTM(
        num_layers, LSTMLayer, first_layer_args=first_args, other_layer_args=other_args
    )
def bidir_lstm(input_size, hidden_size):
    """Bidirectional single-layer layer-norm LSTM."""
    layer = BidirLSTMLayer(LayerNormLSTMCell, input_size, hidden_size)
    return layer
def stacked_bidir_lstm(input_size, hidden_size, num_layers):
    """Stack of *num_layers* bidirectional layer-norm LSTM layers."""
    first_args = [LayerNormLSTMCell, input_size, hidden_size]
    # Layers after the first consume the previous layer's hidden output.
    other_args = [LayerNormLSTMCell, hidden_size, hidden_size]
    return StackedBidirLSTM(
        num_layers, BidirLSTMLayer, first_layer_args=first_args, other_layer_args=other_args
    )
def vmobj_to_list(o, dtype="float32"):
    """Flatten a TVM runtime object (NDArray or arbitrarily nested ADT)
    into a flat list of NDArrays."""
    if isinstance(o, tvm.nd.NDArray):
        return [o]
    if isinstance(o, tvm.runtime.container.ADT):
        flat = []
        for field in o:
            flat += vmobj_to_list(field, dtype)
        return flat
    raise RuntimeError("Unknown object type: %s" % type(o))
def assert_equal(tvm_result, torch_result):
    """Recursively compare a (possibly nested) TVM result against the
    corresponding PyTorch result with a small tolerance."""
    if isinstance(torch_result, (tuple, list)):
        assert isinstance(tvm_result, list)
        for got, expected in zip(tvm_result, torch_result):
            assert_equal(got, expected)
    elif isinstance(torch_result, torch.Tensor):
        tvm.testing.assert_allclose(tvm_result.numpy(), torch_result.numpy(), rtol=1e-4, atol=1e-4)
def run_and_compare(mod, params, pt_result, target, device):
    """Run *mod* on the Relay VM and assert the result matches *pt_result*."""
    exec_res = relay.create_executor("vm", mod=mod, device=device, target=target).evaluate()(
        **params
    )

    def flatten(nested):
        # Depth-first flatten of nested tuples/lists of tensors.
        flat = []
        for item in nested:
            if isinstance(item, torch.Tensor):
                flat.append(item)
            else:
                flat.extend(flatten(item))
        return flat

    if isinstance(exec_res, tvm.runtime.container.ADT):
        # Structured output: flatten both sides before comparing.
        assert not isinstance(pt_result, torch.Tensor)
        assert_equal(vmobj_to_list(exec_res), flatten(pt_result))
    else:
        assert_equal(exec_res, pt_result)
def convert_list_to_vmobj(py_lst):
    """Convert a (possibly nested) Python list of numpy arrays / tuples of
    arrays into a TVM VM List ADT."""

    def wrap_nd_array(arr):
        return tvm.nd.array(arr, device=tvm.cpu(0))

    mod = tvm.IRModule()
    # Instantiating the Prelude registers the List ADT into the module.
    Prelude(mod)
    # NOTE: the first unpacked value (the type itself) is unused; the original
    # bound it to a local named `list`, shadowing the builtin.
    _, cons, nil = mod.get_type("List")
    adt_lst = ADT(nil.tag, [])
    # Build the linked list back-to-front so element order is preserved.
    for elem in reversed(py_lst):
        if isinstance(elem, np.ndarray):
            vmobj = wrap_nd_array(elem)
        elif isinstance(elem, tuple):
            vmobj = tuple_object([wrap_nd_array(e) for e in elem])
        elif isinstance(elem, list):
            vmobj = convert_list_to_vmobj(elem)
        else:
            # Previously an unsupported element silently reused the prior
            # iteration's vmobj (or raised NameError on the first element).
            raise TypeError("Unsupported element type: %s" % type(elem))
        adt_lst = ADT(cons.tag, [vmobj, adt_lst])
    return adt_lst
@tvm.testing.uses_gpu
def test_custom_lstm():
    """Convert scripted LSTM variants (plain, stacked, bidirectional) to
    Relay and compare VM results against PyTorch on every enabled target."""
    input_name = "input"
    states_name = "states"
    seq_len = 5
    batch = 2
    input_size = 3
    hidden_size = 4
    num_layers = 3
    state_tensor_shape = (batch, hidden_size)

    # Fixed seed so PyTorch and TVM see identical weights/inputs.
    torch.manual_seed(1)

    inp = torch.randn(seq_len, batch, input_size)

    # Shape specs for from_pytorch: nesting mirrors the state structure
    # each model's forward() expects.
    input_shapes = [
        (input_name, (seq_len, batch, input_size)),
        (states_name, (state_tensor_shape, state_tensor_shape)),
    ]

    input_shapes_stacked = [
        (input_name, (seq_len, batch, input_size)),
        (
            states_name,
            [(state_tensor_shape, state_tensor_shape), (state_tensor_shape, state_tensor_shape)],
        ),
    ]

    input_shapes_stacked_bidir = [
        (input_name, (seq_len, batch, input_size)),
        (
            states_name,
            [
                [(state_tensor_shape, state_tensor_shape) for _ in range(2)]
                for _ in range(num_layers)
            ],
        ),
    ]

    states = [
        (torch.randn(state_tensor_shape), torch.randn(state_tensor_shape))
        for _ in range(num_layers)
    ]

    bidir_states = [
        (torch.randn(state_tensor_shape), torch.randn(state_tensor_shape)) for _ in range(2)
    ]

    stacked_bidir_states = [
        [(torch.randn(state_tensor_shape), torch.randn(state_tensor_shape)) for _ in range(2)]
        for _ in range(num_layers)
    ]

    models = [
        ("lstm", lstm(input_size, hidden_size).eval(), states[0], input_shapes),
        (
            "stacked",
            stacked_lstm(input_size, hidden_size, num_layers).eval(),
            states,
            input_shapes_stacked,
        ),
        ("bidir", bidir_lstm(input_size, hidden_size).eval(), bidir_states, input_shapes_stacked),
        # TODO(masahi): stacked bidir seems to have a rare accuracy issue
        # (
        #     "stacked_bidir",
        #     stacked_bidir_lstm(input_size, hidden_size, num_layers).eval(),
        #     stacked_bidir_states,
        #     input_shapes_stacked_bidir,
        # ),
    ]

    for (name, raw_model, states, input_shapes) in models:
        script_module = torch.jit.script(raw_model)
        # Conversion must produce structurally identical IR with and
        # without span filling.
        with tvm.testing.disable_span_filling():
            mod, params = from_pytorch(script_module, input_shapes)
        with tvm.testing.enable_span_filling():
            mod_with_span, _ = from_pytorch(script_module, input_shapes)
        assert tvm.ir.structural_equal(mod, mod_with_span, map_free_vars=True)

        with torch.no_grad():
            pt_result = raw_model(inp.clone(), states)

        params[input_name] = inp.numpy()

        # Convert the torch state structure to numpy, preserving nesting.
        if isinstance(states, tuple):
            states_np = tuple(st.numpy() for st in states)
        elif isinstance(states, list) and isinstance(states[0], torch.Tensor):
            states_np = [st.numpy() for st in states]
        elif isinstance(states, list) and isinstance(states[0], tuple):
            states_np = [tuple(st.numpy() for st in states[i]) for i in range(len(states))]
        elif isinstance(states, list) and isinstance(states[0], list):
            states_np = [
                [tuple(st.numpy() for st in states) for states in states[layer]]
                for layer in range(num_layers)
            ]
        else:
            assert False

        # List-structured states must be passed to the VM as an ADT.
        if isinstance(states_np, list):
            params[states_name] = convert_list_to_vmobj(states_np)
        else:
            params[states_name] = states_np

        for tgt, dev in tvm.testing.enabled_targets():
            print("Running %s on target %s" % (name, tgt))
            run_and_compare(mod, params, pt_result, target=tgt, device=dev)
| 13,252 | 34.530831 | 111 | py |
tvm | tvm-main/tests/python/frontend/pytorch/test_fx_quant.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Tests on fx-quantized torch model conversion """
import torch
import torchvision
import pytest
import numpy as np
from torch.quantization import get_default_qconfig
from torch.quantization.quantize_fx import prepare_fx, convert_fx
from torchvision.models.efficientnet import efficientnet_b4
from torchvision.models.resnet import resnet50
from tvm import relay
import tvm.testing
def quantize(model, example_inputs):
    """Return *model* quantized via FX graph mode with the default fbgemm qconfig.

    The model is first instrumented with observers (``prepare_fx``) and then
    converted to a quantized module (``convert_fx``).
    """
    qconfig_mapping = {"": get_default_qconfig("fbgemm")}
    prepared = prepare_fx(model, qconfig_mapping, example_inputs)
    return convert_fx(prepared)
def quantize_and_build(model, in_size):
    """FX-quantize *model*, convert the traced module to Relay, and verify
    that quantized (qnn) operators actually appear in the resulting IRModule.

    Building is intentionally skipped because it is slow on CI.
    """
    input_name = "inp"
    inp = torch.rand(1, 3, in_size, in_size)
    qmodel = quantize(model, inp)

    with torch.no_grad():
        script_module = torch.jit.trace(qmodel, inp)
        shape_list = [(input_name, inp.shape)]
        with tvm.testing.disable_span_filling():
            mod, _ = relay.frontend.from_pytorch(script_module, shape_list)
        with tvm.testing.enable_span_filling():
            mod_with_span, _ = relay.frontend.from_pytorch(script_module, shape_list)
        # Span filling must not change the structure of the converted module.
        assert tvm.ir.structural_equal(mod, mod_with_span, map_free_vars=True)
        mod = relay.transform.InferType()(mod)
        # Make sure that the model is quantized
        assert "qnn.conv2d" in mod.astext(show_meta_data=False)
        # Skip building since it is slow on CI
        # relay.build(mod, params=params, target="llvm")
@pytest.mark.skip(reason="unsupported op aten::linalg_vector_norm")
def test_ssd_vgg():
    """FX-quantize SSD300-VGG16 and convert it to Relay."""

    class TraceWrapper(torch.nn.Module):
        """Expose only traceable tensor outputs of the detection model."""

        def __init__(self, model):
            super().__init__()
            self.model = model

        def forward(self, inp):
            backbone_out = self.model.backbone(inp)
            head_out = self.model.head(list(backbone_out.values()))
            return head_out["bbox_regression"], head_out["cls_logits"]

    ssd = torchvision.models.detection.ssd300_vgg16(num_classes=50, pretrained_backbone=True)
    quantize_and_build(TraceWrapper(ssd).eval(), 300)
def test_deeplab_v3():
    """FX-quantize DeepLabV3 (MobileNetV3-Large backbone) and convert to Relay."""

    class TraceWrapper(torch.nn.Module):
        """Return only the main segmentation output so tracing succeeds."""

        def __init__(self, model):
            super().__init__()
            self.model = model

        def forward(self, inp):
            return self.model(inp)["out"]

    seg_model = torchvision.models.segmentation.deeplabv3_mobilenet_v3_large(pretrained=True)
    quantize_and_build(TraceWrapper(seg_model.eval()).eval(), 300)
def test_imagenet():
    """FX-quantize ImageNet classifiers and convert them to Relay."""
    for model_func in (resnet50, efficientnet_b4):
        quantize_and_build(model_func(pretrained=True).eval(), 224)
| 3,436 | 35.956989 | 100 | py |
tvm | tvm-main/tests/python/frontend/pytorch/test_forward.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=import-self, invalid-name, unused-argument, missing-function-docstring
"""Unit tests for various models and operators"""
import os
import platform
import sys
from packaging import version as package_version
import pytest
import numpy as np
import torch
from torch.nn import Module
from torch.nn import functional as F
import torchvision
import tvm
import tvm.testing
from tvm import relay
from tvm.contrib import graph_executor
from tvm.contrib.nvcc import have_fp16
from tvm.contrib import cudnn, utils
from relay.utils.tag_span import _create_span, _set_span, _verify_structural_equal_with_span
# Raise the recursion limit — presumably because tracing/visiting very deep
# model graphs (e.g. large torchvision networks) can exceed the default of
# 1000; TODO confirm against the models exercised below.
sys.setrecursionlimit(10000)
if torch.cuda.is_available():
    # Disable TF32 on Ampere+ GPUs so CUDA results stay numerically close to
    # the fp32 CPU baseline within the tolerances used by verify_model.
    torch.backends.cuda.matmul.allow_tf32 = False
    torch.backends.cudnn.allow_tf32 = False
def list_ops(expr):
    """Return the distinct relay ops referenced by *expr*, in first-visit order."""

    class OpLister(tvm.relay.ExprVisitor):
        """Collects each op exactly once while walking the expression."""

        def visit_op(self, op):
            # Record each op only on its first occurrence. The original code
            # checked ``op not in self.node_set`` but ``node_set`` started as
            # an empty dict and was never populated, so the guard was a no-op;
            # use a real set and add to it so duplicates are actually filtered.
            if op not in self.node_set:
                self.node_set.add(op)
                self.node_list.append(op)
            return super().visit_op(op)

        def list_nodes(self, expr):
            self.node_set = set()
            self.node_list = []
            self.visit(expr)
            return self.node_list

    return OpLister().list_nodes(expr)
def assert_shapes_match(tru, est):
    """Raise AssertionError when the two arrays do not have identical shapes."""
    if tru.shape == est.shape:
        return
    raise AssertionError(f"Output shapes {tru.shape} and {est.shape} don't match")
def load_torchvision(model_name):
    """Given a model name, returns a Torchvision model in eval mode as well
    as an example input (already normalized with the model's mean/std)."""
    with torch.no_grad():
        # Inception variants expect 299x299 inputs with a 0.5/0.5 normalization.
        if model_name.startswith("inception"):
            height = width = 299
            mean = [0.5, 0.5, 0.5]
            std = [0.5, 0.5, 0.5]
        else:
            height = width = 224
            mean = [0.485, 0.456, 0.406]
            std = [0.229, 0.224, 0.225]
        input_data = torch.randn([1, 3, height, width]).float()
        for channel, (chan_mean, chan_std) in enumerate(zip(mean, std)):
            input_data[:, channel] -= chan_mean
            input_data[:, channel] /= chan_std

        # GoogLeNet needs aux_logits enabled to match the pretrained weights.
        kwargs = {"pretrained": True}
        if model_name.startswith("googlenet"):
            kwargs["aux_logits"] = True
        model = getattr(torchvision.models, model_name)(**kwargs)
        model = model.float().eval()
        return model, [input_data]
def load_pretrainedmodels(model_name):
    """Given a model name, returns a pretrainedmodels.pytorch model in eval
    mode as well as an example input (normalized with the model's mean/std)."""
    # pylint: disable=import-outside-toplevel
    import pretrainedmodels  # https://github.com/Cadene/pretrained-models.pytorch

    model = getattr(pretrainedmodels, model_name)().float().eval()
    input_data = torch.rand([1, *model.input_size]).float() * 256
    for channel, (chan_mean, chan_std) in enumerate(zip(model.mean, model.std)):
        input_data[:, channel] -= chan_mean
        input_data[:, channel] /= chan_std
    return model, [input_data]
def load_model(model_name):
    """Given a model name, returns a model as well as an example input.

    Torchvision models are preferred; otherwise the optional
    ``pretrainedmodels`` package is consulted.
    """
    if hasattr(torchvision.models, model_name):
        return load_torchvision(model_name)
    # pylint: disable=import-outside-toplevel
    try:
        import pretrainedmodels
    except ModuleNotFoundError as e:
        raise ModuleNotFoundError("Please install pretrainedmodels.pytorch") from e
    if hasattr(pretrainedmodels, model_name):
        return load_pretrainedmodels(model_name)
    raise RuntimeError("Model not supported")
def verify_model(
    model_name,
    input_data=None,
    custom_convert_map=None,
    rtol=1e-5,
    atol=1e-5,
    expected_ops=None,
    kind="graph",
    check_correctness=True,
    cpu_only=False,
    validate_structural_equal=True,
):
    """Assert that the output of a compiled model matches with that of its
    baseline.

    Parameters
    ----------
    model_name : str or torch.nn.Module or callable
        Either the name of a torchvision/pretrainedmodels model, or the
        model/callable itself (in which case ``input_data`` supplies inputs).
    input_data : torch.Tensor or list of torch.Tensor, optional
        Example input(s) used for both the PyTorch baseline and TVM.
    custom_convert_map : dict, optional
        Extra op converters forwarded to ``relay.frontend.from_pytorch``.
    rtol, atol : float
        Tolerances for the numerical comparison.
    expected_ops : list of str, optional
        Relay op names that must appear in the converted module.
        NOTE(review): matched names are removed from this list in place, so a
        caller-supplied list is mutated — confirm callers don't reuse it.
    kind : str
        Relay executor kind ("graph", "vm", ...).
    check_correctness : bool
        If False, only output shapes are compared.
    cpu_only : bool
        If True, skip the CUDA target.
    validate_structural_equal : bool
        If True, also convert with span filling enabled and require structural
        equality with the span-free conversion.
    """
    input_data = [] if input_data is None else input_data
    custom_convert_map = custom_convert_map or {}
    expected_ops = expected_ops or []
    # Resolve the baseline model and its inputs from the flexible arguments.
    if isinstance(model_name, str):
        baseline_model, baseline_input = load_model(model_name)
    elif isinstance(input_data, list):
        baseline_model = model_name
        baseline_input = input_data
    elif isinstance(input_data, torch.Tensor) or not input_data.shape:
        baseline_model = model_name
        baseline_input = [input_data]
    else:
        assert False, "Unexpected input format"
    # Run the baseline on GPU when available so torch kernels match the setup
    # under which the module was built.
    if torch.cuda.is_available():
        if isinstance(baseline_model, torch.nn.Module):
            baseline_model = baseline_model.cuda()
        baseline_input = [inp.cuda() for inp in baseline_input]
    # Inputs are cloned so in-place ops in the model cannot corrupt them.
    with torch.no_grad():
        baseline_outputs = baseline_model(*[input.clone() for input in baseline_input])
    # Normalize baseline outputs to a tuple of numpy arrays.
    if isinstance(baseline_outputs, tuple):
        baseline_outputs = tuple(out.cpu().numpy() for out in baseline_outputs)
    else:
        baseline_outputs = (baseline_outputs.cpu().numpy(),)
    trace = torch.jit.trace(baseline_model, [input.clone() for input in baseline_input])
    if isinstance(baseline_model, torch.nn.Module):
        trace = trace.float().eval()
        if torch.cuda.is_available():
            trace = trace.cuda()
        else:
            trace = trace.cpu()
    input_names = [f"input{idx}" for idx, _ in enumerate(baseline_input)]
    input_shapes = list(zip(input_names, [inp.shape for inp in baseline_input]))
    with tvm.testing.disable_span_filling():
        mod, params = relay.frontend.from_pytorch(trace, input_shapes, custom_convert_map)
    if validate_structural_equal:
        # Converting with spans must not change the module structure.
        with tvm.testing.enable_span_filling():
            mod_with_span, _ = relay.frontend.from_pytorch(trace, input_shapes, custom_convert_map)
        assert tvm.ir.structural_equal(mod, mod_with_span, map_free_vars=True)
    # The converted function's leading parameters must correspond to inputs.
    for arg in mod["main"].params[: len(input_names)]:
        assert arg.name_hint in input_names
    compiled_input = dict(zip(input_names, [inp.clone().cpu().numpy() for inp in baseline_input]))
    targets = ["llvm"]
    if not cpu_only:
        targets.append("cuda")
    with tvm.transform.PassContext(opt_level=3):
        for target in targets:
            # Skip targets that are not enabled in this TVM build.
            if not tvm.runtime.enabled(target):
                continue
            dev = tvm.device(target, 0)
            exe = relay.create_executor(
                kind, mod=mod, params=params, device=dev, target=target
            ).evaluate()
            result = exe(**compiled_input)
            if not isinstance(result, list):
                result = [result]
            for i, baseline_output in enumerate(baseline_outputs):
                output = result[i].numpy()
                assert_shapes_match(baseline_output, output)
                if check_correctness:
                    tvm.testing.assert_allclose(baseline_output, output, rtol=rtol, atol=atol)
    if expected_ops:
        # Remove each expected op name when first seen; anything left over
        # was missing from the converted module.
        def visit(op):
            if isinstance(op, tvm.ir.op.Op):
                if op.name in expected_ops:
                    expected_ops.remove(op.name)

        tvm.relay.analysis.post_order_visit(mod["main"].body, visit)
        if expected_ops:
            msg = "TVM Relay do not contain expected ops {}"
            raise AssertionError(msg.format(expected_ops))
    # Drop references and free CUDA memory between test cases.
    del model_name
    del baseline_model
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
def verify_model_with_input(
    test_func,
    input_data,
    *,
    input_dict=None,
    custom_convert_map=None,
    rtol=1e-5,
    atol=1e-5,
    assert_shape_only=False,
    validate_structural_equal=True,
):
    """Generic function to generate and compare Pytorch and TVM output.

    Unlike ``verify_model`` this builds with ``relay.build`` and runs through
    the graph executor, comparing only output 0.

    Parameters
    ----------
    test_func : callable
        The traced function/module under test.
    input_data : list of torch.Tensor
        Positional inputs for both the baseline call and tracing.
    input_dict : dict, optional
        Mapping of graph input name -> value fed to the graph executor.
        NOTE(review): only names in this dict are set; inputs not listed here
        appear to rely on executor defaults — confirm against callers.
    custom_convert_map : dict, optional
        Extra op converters forwarded to ``relay.frontend.from_pytorch``.
    rtol, atol : float
        Tolerances for the numerical comparison.
    assert_shape_only : bool
        If True, skip the value comparison and only check shapes.
    validate_structural_equal : bool
        If True, also convert with span filling enabled and require structural
        equality with the span-free conversion.
    """
    input_dict = input_dict or {}
    custom_convert_map = custom_convert_map or {}
    baseline_outputs = test_func(*input_data)
    # Clone inputs so tracing cannot be affected by in-place ops.
    trace = torch.jit.trace(test_func, [input.clone() for input in input_data])
    input_names = [f"input{idx}" for idx, _ in enumerate(input_data)]
    input_shapes = list(zip(input_names, [inp.shape for inp in input_data]))
    with tvm.testing.disable_span_filling():
        mod, params = relay.frontend.from_pytorch(trace, input_shapes, custom_convert_map)
    if validate_structural_equal:
        # Converting with spans must not change the module structure.
        with tvm.testing.enable_span_filling():
            mod_with_span, _ = relay.frontend.from_pytorch(trace, input_shapes, custom_convert_map)
        assert tvm.ir.structural_equal(mod, mod_with_span, map_free_vars=True)
    with tvm.transform.PassContext(opt_level=3):
        for target in ["llvm", "cuda"]:
            # Skip targets that are not enabled in this TVM build.
            if not tvm.runtime.enabled(target):
                continue
            dev = tvm.device(target, 0)
            lib = relay.build(mod, target=target, params=params)
            relay_model = graph_executor.GraphModule(lib["default"](dev))
            for name, value in input_dict.items():
                relay_model.set_input(name, value)
            relay_model.run()
            # Only the first output is compared against the torch baseline.
            compiled_output = relay_model.get_output(0).numpy()
            assert_shapes_match(baseline_outputs, compiled_output)
            if assert_shape_only is False:
                tvm.testing.assert_allclose(baseline_outputs, compiled_output, rtol=rtol, atol=atol)
def gen_ir_module(model, inputs, use_parser_friendly_name=False):
    """Helper function to generate IRModule with meaningful source information"""
    traced = torch.jit.trace(model, inputs)
    names = ["input{}".format(idx) for idx, _ in enumerate(inputs)]
    shapes = list(zip(names, (inp.shape for inp in inputs)))
    mod, _ = relay.frontend.from_pytorch(
        traced, shapes, use_parser_friendly_name=use_parser_friendly_name
    )
    return mod
# Single operator tests
@tvm.testing.uses_gpu
def test_forward_pixel_shuffle():
    """Check PixelShuffle conversion for several upscale factors."""
    torch.set_grad_enabled(False)
    data = torch.rand([1, 144, 16, 16]).float()
    for upscale in (2, 3, 4):
        verify_model(torch.nn.PixelShuffle(upscale).float().eval(), input_data=data)
@tvm.testing.uses_gpu
def test_forward_add():
    """Check elementwise add against another tensor, a scalar, a same-shape
    constant, and a 0-d constant."""
    torch.set_grad_enabled(False)
    input_shape = [10]

    class Add1(Module):
        def forward(self, *args):
            return args[0] + args[0]

    class Add2(Module):
        def forward(self, *args):
            return args[0] + 1

    class Add3(Module):
        def forward(self, *args):
            rhs = torch.ones(input_shape, dtype=torch.float)
            rhs = rhs.cuda() if torch.cuda.is_available() else rhs
            return args[0] + rhs

    class Add4(Module):
        def forward(self, *args):
            rhs = torch.ones([], dtype=torch.float)
            rhs = rhs.cuda() if torch.cuda.is_available() else rhs
            return args[0] + rhs

    data = torch.rand(input_shape).float()
    for model in (Add1(), Add2(), Add3(), Add4()):
        verify_model(model.float().eval(), input_data=data)
@tvm.testing.uses_gpu
def test_forward_subtract():
    """Check elementwise subtract against another tensor, a scalar, a
    same-shape constant, and a 0-d constant."""
    torch.set_grad_enabled(False)
    input_shape = [10]

    class Subtract1(Module):
        def forward(self, *args):
            return args[0] - args[0]

    class Subtract2(Module):
        def forward(self, *args):
            return args[0] - 1

    class Subtract3(Module):
        def forward(self, *args):
            rhs = torch.ones(input_shape)
            rhs = rhs.cuda() if torch.cuda.is_available() else rhs
            return args[0] - rhs

    class Subtract4(Module):
        def forward(self, *args):
            rhs = torch.ones([])
            rhs = rhs.cuda() if torch.cuda.is_available() else rhs
            return args[0] - rhs

    data = torch.rand(input_shape).float()
    for model in (Subtract1(), Subtract2(), Subtract3(), Subtract4()):
        verify_model(model.float().eval(), input_data=data)
@tvm.testing.uses_gpu
def test_forward_multiply():
    """Check elementwise multiply against another tensor, a scalar, a
    same-shape constant, and a 0-d constant."""
    torch.set_grad_enabled(False)
    input_shape = [10]

    class Multiply1(Module):
        def forward(self, *args):
            return args[0] * args[0]

    class Multiply2(Module):
        def forward(self, *args):
            return args[0] * 1.0

    class Multiply3(Module):
        def forward(self, *args):
            rhs = torch.ones(input_shape)
            rhs = rhs.cuda() if torch.cuda.is_available() else rhs
            return args[0] * rhs

    class Multiply4(Module):
        def forward(self, *args):
            rhs = torch.ones([])
            rhs = rhs.cuda() if torch.cuda.is_available() else rhs
            return args[0] * rhs

    data = torch.rand(input_shape).float()
    for model in (Multiply1(), Multiply2(), Multiply3(), Multiply4()):
        verify_model(model.float().eval(), input_data=data)
@tvm.testing.uses_gpu
def test_min_max():
    """Check the full-reduce, dim-reduce, elementwise and amin/amax variants
    of torch.min / torch.max."""

    class Max(Module):
        def forward(self, inp):
            return torch.max(inp)

    class Min(Module):
        def forward(self, inp):
            return torch.min(inp)

    class Max2(Module):
        def forward(self, inp):
            out, _ = torch.max(inp, 1, keepdim=True)
            return out

    class Min2(Module):
        def forward(self, inp):
            out, _ = torch.min(inp, 0, keepdim=False)
            return out

    class Max3(Module):
        def forward(self, lhs, rhs):
            return torch.max(lhs, rhs)

    class Min3(Module):
        def forward(self, lhs, rhs):
            return torch.min(lhs, rhs)

    class Max4(Module):
        def forward(self, inp):
            return torch.amax(inp, (1, 2), keepdim=True)

    class Min4(Module):
        def forward(self, inp):
            return torch.amin(inp, (0, 3), keepdim=False)

    lhs = torch.rand((10, 10, 10, 10))
    rhs = torch.rand((10, 10, 10, 10))
    for unary in (Max(), Min(), Max2(), Min2(), Max4(), Min4()):
        verify_model(unary, input_data=lhs)
    for binary in (Max3(), Min3()):
        verify_model(binary, input_data=[lhs, rhs])
@tvm.testing.uses_gpu
def test_minimum_maximum():
    """Check elementwise torch.minimum / torch.maximum with two inputs."""

    class Maximum(Module):
        def forward(self, lhs, rhs):
            return torch.maximum(lhs, rhs)

    class Minimum(Module):
        def forward(self, lhs, rhs):
            return torch.minimum(lhs, rhs)

    pair = [torch.rand((10, 10, 10, 10)), torch.rand((10, 10, 10, 10))]
    verify_model(Maximum(), input_data=pair)
    verify_model(Minimum(), input_data=pair)
@tvm.testing.uses_gpu
def test_forward_reciprocal():
    """Check Tensor.reciprocal conversion."""
    torch.set_grad_enabled(False)

    class Reciprocal1(Module):
        def forward(self, *args):
            return args[0].reciprocal()

    data = torch.rand([2, 1, 10, 1, 10]).float()
    verify_model(Reciprocal1().float().eval(), input_data=data)
@tvm.testing.uses_gpu
def test_forward_repeat():
    """Check Tensor.repeat with identity, larger, and extra-dim repeats."""
    torch.set_grad_enabled(False)

    class Repeat1(Module):
        def forward(self, *args):
            return args[0].repeat(1, 1)

    class Repeat2(Module):
        def forward(self, *args):
            return args[0].repeat(4, 2)

    class Repeat3(Module):
        def forward(self, *args):
            return args[0].repeat(4, 2, 1)

    data = torch.rand([1, 3]).float()
    for model in (Repeat1(), Repeat2(), Repeat3()):
        verify_model(model.float().eval(), input_data=data)
@tvm.testing.uses_gpu
def test_forward_repeat_interleave():
    """Check Tensor.repeat_interleave flattened and along each dim."""
    torch.set_grad_enabled(False)

    class RepeatInterleave1(Module):
        def forward(self, *args):
            return args[0].repeat_interleave(2)

    class RepeatInterleave2(Module):
        def forward(self, *args):
            return args[0].repeat_interleave(3, dim=0)

    class RepeatInterleave3(Module):
        def forward(self, *args):
            return args[0].repeat_interleave(2, dim=1)

    class RepeatInterleave4(Module):
        def forward(self, *args):
            return args[0].repeat_interleave(4, dim=2)

    data = torch.rand([2, 2, 3]).float()
    for model in (
        RepeatInterleave1(),
        RepeatInterleave2(),
        RepeatInterleave3(),
        RepeatInterleave4(),
    ):
        verify_model(model.float().eval(), input_data=data)
@tvm.testing.uses_gpu
def test_forward_unsqueeze():
    """Check unsqueeze and its in-place variant."""
    torch.set_grad_enabled(False)

    class Unsqueeze1(Module):
        def forward(self, *args):
            return args[0].unsqueeze(2)

    class Unsqueeze2(Module):
        def forward(self, *args):
            _ = args[0].unsqueeze_(2)
            # Check whether operations after inplace unsqueeze works as expected
            y = args[0].squeeze(2)
            return torch.add(y, y)

    data = torch.rand([10, 10]).float()
    verify_model(Unsqueeze1().float().eval(), input_data=data)
    verify_model(Unsqueeze2().float().eval(), input_data=data)
@tvm.testing.uses_gpu
def test_forward_squeeze():
    """Check squeeze of all dims, a single dim, and a tuple of dims."""
    torch.set_grad_enabled(False)

    class Squeeze1(Module):
        def forward(self, *args):
            return args[0].squeeze()

    class Squeeze2(Module):
        def forward(self, *args):
            return args[0].squeeze(1)

    class Squeeze3(Module):
        def forward(self, *args):
            return args[0].squeeze((1, 3))

    data = torch.rand([2, 1, 10, 1, 10]).float()
    verify_model(Squeeze1().float().eval(), input_data=data)
    verify_model(Squeeze2().float().eval(), input_data=data)
    # Squeezing a tuple of dims is only supported from torch 2.0 onwards.
    if package_version.parse(torch.__version__) >= package_version.parse("2.0.0"):
        verify_model(Squeeze3().float().eval(), input_data=data)
@tvm.testing.uses_gpu
def test_forward_arange():
    """Check torch.arange with int/float bounds, explicit dtypes, and
    start/end/step values computed at runtime."""
    torch.set_grad_enabled(False)

    class Arange1(Module):
        def forward(self, *args):
            return torch.arange(5)

    class Arange2(Module):
        def forward(self, *args):
            return torch.arange(2.5)

    class Arange3(Module):
        def forward(self, *args):
            return torch.arange(1, 4)

    class Arange4(Module):
        def forward(self, *args):
            return torch.arange(1, 2.5, 0.5)

    class Arange5(Module):
        def forward(self, *args):
            return torch.arange(1, 2, 1, dtype=torch.int32)

    class Arange6(Module):
        def forward(self, *args):
            return torch.arange(start=1, end=6, step=2)

    class Arange7(Module):
        def forward(self, *args):
            return torch.arange(1, 4, dtype=torch.float32)

    class Arange8(Module):
        def forward(self, *args):
            return torch.arange(1, 2, 1, dtype=torch.int16)

    class Arange9(Module):
        def forward(self, *args):
            end = torch.add(torch.tensor(4), 1)
            return torch.arange(end) + torch.ones((5,), dtype=torch.int64)

    class Arange10(Module):
        def forward(self, *args):
            end = torch.add(torch.tensor(4.0), torch.tensor(1.0))
            return torch.arange(end) + torch.ones((5,), dtype=torch.float)

    class Arange11(Module):
        def forward(self, *args):
            start = torch.add(torch.tensor(1), 1)
            end = torch.add(torch.tensor(4), 1)
            step = torch.add(torch.tensor(2), 1)
            out = torch.arange(start, end, step)
            return out + torch.ones((3,), dtype=torch.int64)

    class Arange12(Module):
        def forward(self, *args):
            start = torch.add(torch.tensor(1), 1)
            end = torch.add(torch.tensor(4), 1)
            step = torch.add(torch.tensor(2.5), torch.tensor(4.1))
            out = torch.arange(start, end, step)
            return out + torch.ones((3,), dtype=torch.float)

    for arange_cls in (
        Arange1,
        Arange2,
        Arange3,
        Arange4,
        Arange5,
        Arange6,
        Arange7,
        Arange8,
        Arange9,
        Arange10,
        Arange11,
        Arange12,
    ):
        verify_model(arange_cls().float().eval())
@tvm.testing.uses_gpu
def test_forward_mesh_grid():
    """Check torch.meshgrid with constant and runtime-computed coordinates."""
    torch.set_grad_enabled(False)

    class MeshGrid1(Module):
        def forward(self, *args):
            x = torch.tensor([1, 2, 3])
            y = torch.tensor([4, 5, 6])
            grid_x, grid_y = torch.meshgrid([x, y])
            return grid_x, grid_y

    class MeshGrid2(Module):
        def forward(self, *args):
            x = torch.tensor([1, 2, 3], dtype=torch.float32)
            y = torch.add(torch.tensor(5, dtype=torch.float32), 1)
            grid_x, grid_y = torch.meshgrid([x, y])
            return grid_x, grid_y

    verify_model(MeshGrid1().float().eval())
    verify_model(MeshGrid2().float().eval())
@tvm.testing.uses_gpu
def test_forward_abs():
    """Check Tensor.abs conversion."""
    torch.set_grad_enabled(False)

    class Abs1(Module):
        def forward(self, *args):
            return args[0].abs()

    data = torch.rand([2, 1, 10, 1, 10]).float()
    verify_model(Abs1().float().eval(), input_data=data)
@tvm.testing.uses_gpu
def test_forward_concatenate():
    """Check torch.cat of sliced tensors along different axes."""
    torch.set_grad_enabled(False)

    class Concatenate1(Module):
        def forward(self, *args):
            return torch.cat([args[0][:, 0].unsqueeze(1), args[0][:, 1].unsqueeze(1)], 1)

    class Concatenate2(Module):
        def forward(self, *args):
            a = (args[0][:, :, 0] + 2) * 7
            b = (args[0][:, :, 1] + 3) * 11
            c = (args[0][:, :, 2] + 5) * 13
            return torch.cat([t.unsqueeze(2) for t in [a, b, c]], 2)

    data = torch.rand([1, 3, 10, 10]).float()
    verify_model(Concatenate1().float().eval(), input_data=data)
    verify_model(Concatenate2().float().eval(), input_data=data)
@tvm.testing.uses_gpu
def test_forward_relu():
    """Check torch.nn.ReLU conversion."""
    torch.set_grad_enabled(False)
    data = torch.rand([10, 10]).float()
    verify_model(torch.nn.ReLU().eval(), input_data=data)
@tvm.testing.uses_gpu
def test_forward_relu6():
    """Check torch.nn.ReLU6 conversion."""
    torch.set_grad_enabled(False)
    data = torch.rand([10, 10]).float()
    verify_model(torch.nn.ReLU6().eval(), input_data=data)
@tvm.testing.uses_gpu
def test_forward_prelu():
    """Check PReLU with per-channel and shared slope parameters."""
    torch.set_grad_enabled(False)
    data = torch.rand([1, 3, 10, 10]).float()
    verify_model(torch.nn.PReLU(num_parameters=3).eval(), input_data=data)
    # Test when input channel > 1 and num parameters = 1
    verify_model(torch.nn.PReLU(num_parameters=1).eval(), input_data=data)
    # Test when input dims < 2
    verify_model(torch.nn.PReLU(num_parameters=1).eval(), input_data=torch.randn(2))
@tvm.testing.uses_gpu
def test_forward_leakyrelu():
    """Check LeakyReLU over several slopes, with and without inplace."""
    torch.set_grad_enabled(False)
    data = torch.rand([1, 3, 10, 10]).float()
    # (negative_slope, inplace); 0.01/False are the module defaults.
    for slope, inplace in ((0.01, False), (0.05, False), (1.0, True), (1.25, True)):
        verify_model(
            torch.nn.LeakyReLU(negative_slope=slope, inplace=inplace).eval(), input_data=data
        )
@tvm.testing.uses_gpu
def test_forward_elu():
    """Check ELU with the default and several explicit alphas."""
    torch.set_grad_enabled(False)
    data = torch.randn([1, 3, 10, 10]).float()
    verify_model(torch.nn.ELU().eval(), input_data=data)
    for alpha in (0.3, 1.0, 1.3):
        verify_model(torch.nn.ELU(alpha=alpha).eval(), input_data=data)
@tvm.testing.uses_gpu
def test_forward_celu():
    """Check CELU over several alphas, a negative input, and that a
    zero-sized dimension is rejected."""
    torch.set_grad_enabled(False)
    data = torch.rand([1, 3, 10, 10]).float()
    verify_model(torch.nn.CELU().eval(), input_data=data)
    for alpha in (0.3, 1.0, 1.3):
        verify_model(torch.nn.CELU(alpha=alpha).eval(), input_data=data)
    verify_model(torch.nn.CELU().eval(), input_data=torch.tensor([-1.0, 2.0], dtype=torch.float32))
    empty = torch.rand([2, 0, 1]).float()
    with pytest.raises(RuntimeError):
        verify_model(torch.nn.CELU().eval(), input_data=empty)
@tvm.testing.uses_gpu
def test_forward_gelu():
    """Check torch.nn.GELU conversion."""
    torch.set_grad_enabled(False)
    data = torch.rand([1, 3, 10, 10]).float()
    verify_model(torch.nn.GELU().eval(), input_data=data)
@tvm.testing.uses_gpu
def test_forward_selu():
    """Check torch.nn.SELU conversion."""
    torch.set_grad_enabled(False)
    data = torch.rand([1, 3, 10, 10]).float()
    verify_model(torch.nn.SELU().eval(), input_data=data)
@tvm.testing.uses_gpu
def test_forward_silu():
    """Check torch.nn.SiLU conversion."""
    torch.set_grad_enabled(False)
    data = torch.rand([1, 3, 10, 10]).float()
    verify_model(torch.nn.SiLU().eval(), input_data=data)
@tvm.testing.uses_gpu
def test_forward_glu():
    """Check torch.nn.GLU conversion."""
    torch.set_grad_enabled(False)
    data = torch.rand([1, 3, 10, 10]).float()
    verify_model(torch.nn.GLU().eval(), input_data=data)
@tvm.testing.uses_gpu
def test_forward_softplus():
    """Check Softplus with the default and several beta/threshold pairs."""
    torch.set_grad_enabled(False)
    data = torch.rand([1, 3, 10, 10]).float()
    verify_model(torch.nn.Softplus().eval(), input_data=data)
    for beta, threshold in ((1.5, 20), (5, 10), (5, 1), (1, 2), (1, -1)):
        verify_model(torch.nn.Softplus(beta=beta, threshold=threshold).eval(), input_data=data)
@tvm.testing.uses_gpu
def test_forward_softsign():
    """Check torch.nn.Softsign conversion."""
    torch.set_grad_enabled(False)
    data = torch.rand([1, 3, 10, 10]).float()
    verify_model(torch.nn.Softsign().eval(), input_data=data)
@tvm.testing.uses_gpu
def test_forward_log_sigmoid():
    """Check LogSigmoid, including large-negative inputs that could overflow."""
    torch.set_grad_enabled(False)
    data = torch.rand([10, 10]).float()
    overflow_data = torch.tensor([-300.0, -100.0]).float()
    verify_model(torch.nn.LogSigmoid().eval(), input_data=data)
    verify_model(torch.nn.LogSigmoid().eval(), input_data=overflow_data)
@tvm.testing.uses_gpu
def test_forward_adaptive_avgpool():
    """Check adaptive average pooling in 1d/2d/3d, including None dims."""
    torch.set_grad_enabled(False)
    data = torch.rand([1, 3, 10, 10]).float()
    verify_model(torch.nn.AdaptiveAvgPool2d([1, 1]).eval(), input_data=data)
    verify_model(torch.nn.AdaptiveAvgPool2d([10, 10]).eval(), input_data=data)

    data = torch.rand([1, 3, 10]).float()
    verify_model(torch.nn.AdaptiveAvgPool1d([1]).eval(), input_data=data)
    verify_model(torch.nn.AdaptiveAvgPool1d([5]).eval(), input_data=data)

    # None keeps the corresponding input dimension unchanged.
    data = torch.rand([1, 3, 5, 6]).float()
    verify_model(torch.nn.AdaptiveAvgPool2d([3, None]).eval(), input_data=data)
    data = torch.rand([1, 1, 3, 5, 6]).float()
    verify_model(torch.nn.AdaptiveAvgPool3d([3, None, None]).eval(), input_data=data)
@tvm.testing.uses_gpu
def test_forward_adaptive_maxpool():
    """Check adaptive max pooling in 1d/2d/3d, including None dims."""
    torch.set_grad_enabled(False)
    data = torch.rand([1, 3, 10, 10]).float()
    verify_model(torch.nn.AdaptiveMaxPool2d([1, 1]).eval(), input_data=data)
    verify_model(torch.nn.AdaptiveMaxPool2d([10, 10]).eval(), input_data=data)

    data = torch.rand([1, 3, 10]).float()
    verify_model(torch.nn.AdaptiveMaxPool1d([1]).eval(), input_data=data)
    verify_model(torch.nn.AdaptiveMaxPool1d([5]).eval(), input_data=data)

    # None keeps the corresponding input dimension unchanged.
    data = torch.rand([1, 3, 5, 6]).float()
    verify_model(torch.nn.AdaptiveMaxPool2d([3, None]).eval(), input_data=data)
    data = torch.rand([1, 1, 3, 5, 6]).float()
    verify_model(torch.nn.AdaptiveMaxPool3d([3, None, None]).eval(), input_data=data)
@tvm.testing.uses_gpu
def test_forward_maxpool2d():
    """Check MaxPool2d over several kernel/stride/padding/dilation settings,
    plus the functional, return_indices, and tensor-stride variants."""
    torch.set_grad_enabled(False)
    data = torch.rand([1, 3, 10, 10]).float()
    for pool in (
        torch.nn.MaxPool2d(kernel_size=[1, 1]),
        torch.nn.MaxPool2d(kernel_size=[2, 2], dilation=[2, 3]),
        torch.nn.MaxPool2d(kernel_size=[10, 10]),
        torch.nn.MaxPool2d(kernel_size=[4, 4], padding=2, stride=2),
    ):
        verify_model(pool.eval(), data)

    # A functional variant (default strides = None case)
    class MaxPool2D(Module):
        def forward(self, *args):
            return torch.nn.functional.max_pool2d(args[0], kernel_size=[10, 10])

    verify_model(MaxPool2D(), input_data=data)

    class MaxPool2DWithIndices(Module):
        def __init__(self):
            super().__init__()
            self.pool = torch.nn.MaxPool2d(kernel_size=[1, 1], return_indices=True)

        def forward(self, *args):
            pooled, _ = self.pool(args[0])
            return pooled

    class MaxPool2DWithIntStrides(Module):
        def forward(self, *args):
            # Makes kernel_size and strides a Relay expr to test converting back to int
            x_shape = args[0].shape
            strides = [torch.tensor(x_shape[0]).int(), torch.tensor(x_shape[0]).int()]
            return torch.nn.functional.max_pool2d(args[0], kernel_size=[4, 4], stride=strides)

    verify_model(MaxPool2DWithIndices().float().eval(), input_data=data)
    verify_model(MaxPool2DWithIntStrides().float().eval(), input_data=data)
@tvm.testing.uses_gpu
def test_forward_maxpool1d():
    """Check MaxPool1d over several kernel/stride/padding/dilation settings."""
    torch.set_grad_enabled(False)
    data = torch.rand([1, 3, 10]).float()
    for pool in (
        torch.nn.MaxPool1d(kernel_size=1),
        torch.nn.MaxPool1d(kernel_size=2, dilation=[1]),
        torch.nn.MaxPool1d(kernel_size=10),
        torch.nn.MaxPool1d(kernel_size=4, padding=2, stride=2),
    ):
        verify_model(pool.eval(), data)

    # A functional variant (default strides = None case)
    class MaxPool1D(Module):
        def forward(self, *args):
            return torch.nn.functional.max_pool1d(args[0], kernel_size=10)

    verify_model(MaxPool1D(), input_data=data)
@tvm.testing.uses_gpu
def test_forward_maxpool3d():
    """Check MaxPool3d for batched and unbatched inputs."""
    torch.set_grad_enabled(False)
    for shape in [(1, 3, 10, 10, 10), (3, 10, 10, 10)]:
        data = torch.rand(shape).float()
        verify_model(torch.nn.MaxPool3d(kernel_size=[1, 1, 1]).eval(), data)
        verify_model(
            torch.nn.MaxPool3d(kernel_size=[2, 2, 2], dilation=[1, 2, 3]).eval(), data
        )
        verify_model(torch.nn.MaxPool3d(kernel_size=[10, 10, 10]).eval(), data)
        verify_model(
            torch.nn.MaxPool3d(kernel_size=[4, 4, 4], padding=2, stride=2).eval(), data
        )

    # A functional variant (default strides = None case)
    class MaxPool3D(Module):
        def forward(self, *args):
            return torch.nn.functional.max_pool3d(args[0], kernel_size=[10, 10, 10])

    # Only exercised with the last shape produced by the loop above.
    verify_model(MaxPool3D(), input_data=data)
@tvm.testing.uses_gpu
def test_forward_split():
    """Check torch.split with int chunk sizes and a list of section sizes."""
    torch.set_grad_enabled(False)

    class Split(Module):
        def __init__(self, split_size_or_sections, dim):
            super().__init__()
            self.split_size_or_sections = split_size_or_sections
            self.dim = dim

        def forward(self, *args):
            return torch.split(args[0], self.split_size_or_sections, self.dim)

    data = torch.rand([4, 10]).float()
    for sections, dim in ((2, 0), (3, 1), (4, 1), ([2, 3, 5], 1)):
        verify_model(Split(sections, dim).float().eval(), input_data=data)
@tvm.testing.uses_gpu
def test_forward_tensor_split():
    """Verify conversion of torch.tensor_split (int, 0-d tensor, list, tuple)."""
    torch.set_grad_enabled(False)
    input_shape = [4, 10]
    class Tensor_Split(Module):
        def __init__(self, split_size_or_sections, dim):
            super().__init__()
            self.split_size_or_sections = split_size_or_sections
            self.dim = dim
        def forward(self, *args):
            return torch.tensor_split(args[0], self.split_size_or_sections, self.dim)
    # tensor_split was introduced when torch > 1.7.1
    if package_version.parse(torch.__version__) > package_version.parse("1.7.1"):
        input_data = torch.rand(input_shape).float()
        verify_model(Tensor_Split(2, 0).float().eval(), input_data=input_data)
        # A 0-d tensor is also accepted for the number of sections.
        verify_model(Tensor_Split(torch.tensor(3), 1).float().eval(), input_data=input_data)
        verify_model(Tensor_Split([2, 3, 5], 1).float().eval(), input_data=input_data)
        verify_model(Tensor_Split((2, 3, 5), 1).float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_avgpool1d():
    """Verify conversion of torch.nn.AvgPool1d and functional avg_pool1d."""
    torch.set_grad_enabled(False)
    input_shape = [1, 3, 10]
    # Functional form with default strides (None).
    class AvgPool1D2(Module):
        def forward(self, *args):
            return torch.nn.functional.avg_pool1d(args[0], kernel_size=[10])
    input_data = torch.rand(input_shape).float()
    verify_model(torch.nn.AvgPool1d(kernel_size=[10]).eval(), input_data=input_data)
    verify_model(AvgPool1D2().float().eval(), input_data=input_data)
    verify_model(
        torch.nn.AvgPool1d(kernel_size=[5], stride=2, padding=2).eval(), input_data=input_data
    )
@tvm.testing.uses_gpu
def test_forward_avgpool2d():
    """Verify conversion of torch.nn.AvgPool2d and functional avg_pool2d.

    Includes a ceil_mode/count_include_pad case on an odd-width input.
    """
    torch.set_grad_enabled(False)
    input_shape = [1, 3, 10, 10]
    # Functional form with default strides (None).
    class AvgPool2D2(Module):
        def forward(self, *args):
            return torch.nn.functional.avg_pool2d(args[0], kernel_size=[10, 10])
    input_data = torch.rand(input_shape).float()
    verify_model(torch.nn.AvgPool2d(kernel_size=[10, 10]).eval(), input_data=input_data)
    verify_model(AvgPool2D2().float().eval(), input_data=input_data)
    verify_model(
        torch.nn.AvgPool2d(kernel_size=5, stride=2, padding=2).eval(), input_data=input_data
    )
    # Odd width so ceil_mode produces a partial final window.
    input_shape = [1, 1, 1, 9]
    input_data = torch.rand(input_shape).float()
    verify_model(
        torch.nn.AvgPool2d(
            kernel_size=[1, 2], stride=[1, 2], ceil_mode=True, count_include_pad=True
        ).eval(),
        input_data=input_data,
    )
@tvm.testing.uses_gpu
def test_forward_avgpool3d():
    """Verify conversion of torch.nn.AvgPool3d and functional avg_pool3d."""
    torch.set_grad_enabled(False)
    input_shape = [1, 3, 10, 10, 10]
    # Functional form with default strides (None).
    class AvgPool3D1(Module):
        def forward(self, *args):
            return torch.nn.functional.avg_pool3d(args[0], kernel_size=[10, 10, 10])
    input_data = torch.rand(input_shape).float()
    verify_model(torch.nn.AvgPool3d(kernel_size=[10, 10, 10]).eval(), input_data=input_data)
    verify_model(AvgPool3D1().float().eval(), input_data=input_data)
    verify_model(
        torch.nn.AvgPool3d(kernel_size=5, stride=2, padding=2).eval(), input_data=input_data
    )
@tvm.testing.uses_gpu
def test_forward_hardtanh():
    """Verify conversion of torch.nn.Hardtanh with its default clamp range."""
    torch.set_grad_enabled(False)
    data = torch.rand([10]).float()
    verify_model(torch.nn.Hardtanh().eval(), input_data=data)
@tvm.testing.uses_gpu
def test_forward_conv():
    """Verify conversion of Conv1d/Conv2d: with/without bias, depthwise, grouped.

    Each module composes a conv with Softmax so the output is a single
    well-defined tensor for comparison.
    """
    torch.set_grad_enabled(False)
    conv1d_input_shape = [1, 3, 10]
    conv2d_input_shape = [1, 3, 10, 10]
    # Conv2d with bias.
    class Conv2D1(Module):
        def __init__(self):
            super().__init__()
            self.conv = torch.nn.Conv2d(3, 6, 7, bias=True)
            self.softmax = torch.nn.Softmax()
        def forward(self, *args):
            return self.softmax(self.conv(args[0]))
    # Conv2d without bias.
    class Conv2D2(Module):
        def __init__(self):
            super().__init__()
            self.conv = torch.nn.Conv2d(3, 6, 7, bias=False)
            self.softmax = torch.nn.Softmax()
        def forward(self, *args):
            return self.softmax(self.conv(args[0]))
    # Depthwise Conv2d (groups == in_channels) with channel multiplier 2.
    class Conv2D3(Module):
        def __init__(self):
            super().__init__()
            self.conv = torch.nn.Conv2d(3, 6, 7, groups=3, bias=False)
            self.softmax = torch.nn.Softmax()
        def forward(self, *args):
            return self.softmax(self.conv(args[0]))
    # Conv1d with bias (default).
    class Conv1D1(Module):
        def __init__(self):
            super().__init__()
            self.conv = torch.nn.Conv1d(3, 6, 7)
            self.softmax = torch.nn.Softmax()
        def forward(self, *args):
            return self.softmax(self.conv(args[0]))
    # Conv1d without bias.
    class Conv1D2(Module):
        def __init__(self):
            super().__init__()
            self.conv = torch.nn.Conv1d(3, 6, 7, bias=False)
            self.softmax = torch.nn.Softmax()
        def forward(self, *args):
            return self.softmax(self.conv(args[0]))
    # Depthwise Conv1d.
    class Conv1D3(Module):
        def __init__(self):
            super().__init__()
            self.conv = torch.nn.Conv1d(3, 6, 7, groups=3, bias=False)
            self.softmax = torch.nn.Softmax()
        def forward(self, *args):
            return self.softmax(self.conv(args[0]))
    conv2d_input_data = torch.rand(conv2d_input_shape).float()
    verify_model(Conv2D1().float().eval(), input_data=conv2d_input_data)
    verify_model(Conv2D2().float().eval(), input_data=conv2d_input_data)
    # depth wise conv with channel mult 2
    verify_model(Conv2D3().float().eval(), input_data=conv2d_input_data)
    # group conv
    verify_model(
        torch.nn.Conv2d(8, 8, kernel_size=(3, 3), stride=(1, 1), groups=2).eval(),
        input_data=torch.randn((1, 8, 16, 16)),
    )
    conv1d_input_data = torch.rand(conv1d_input_shape).float()
    verify_model(Conv1D1().float().eval(), input_data=conv1d_input_data)
    verify_model(Conv1D2().float().eval(), input_data=conv1d_input_data)
    verify_model(Conv1D3().float().eval(), input_data=conv1d_input_data)
@tvm.testing.uses_gpu
@pytest.mark.parametrize("in_channels", [3], ids=lambda x: "in_channels=" + str(x))
@pytest.mark.parametrize("out_channels", [5], ids=lambda x: "out_channels=" + str(x))
@pytest.mark.parametrize("kernel_size", [3], ids=lambda x: "kernel_size=" + str(x))
@pytest.mark.parametrize("output_padding", [0, 1, 2], ids=lambda x: "output_padding=" + str(x))
@pytest.mark.parametrize("groups", [1], ids=lambda x: "groups=" + str(x))
@pytest.mark.parametrize("bias", [True, False], ids=lambda x: "bias=" + str(x))
def test_forward_conv_transpose(
    in_channels, out_channels, kernel_size, output_padding, bias, groups
):
    """Verify conversion of ConvTranspose1d/2d/3d across output_padding and bias."""
    # Note we do not test with groups > 1 because that is not supported
    # in tvm for conv transpose operations
    # Output padding must be smaller than either stride or dilation so we
    # opt to make the stride 1 + output padding
    stride = output_padding + 1
    # Conv 3D Transpose Tests
    conv3d_input_shape = [1, in_channels, 16, 16, 16]
    conv3d_input_data = torch.rand(conv3d_input_shape).float()
    conv3d_transpose = torch.nn.ConvTranspose3d(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=kernel_size,
        stride=stride,
        output_padding=output_padding,
        groups=groups,
        bias=bias,
    ).eval()
    verify_model(conv3d_transpose, conv3d_input_data)
    # Conv 2D Transpose Tests
    conv2d_input_shape = [1, in_channels, 128, 256]
    conv2d_input_data = torch.rand(conv2d_input_shape).float()
    conv2d_transpose = torch.nn.ConvTranspose2d(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=kernel_size,
        stride=stride,
        output_padding=output_padding,
        groups=groups,
        bias=bias,
    ).eval()
    verify_model(conv2d_transpose, conv2d_input_data)
    # # Conv 1D Transpose Tests
    conv1d_input_shape = [1, in_channels, 10]
    conv1d_input_data = torch.rand(conv1d_input_shape).float()
    conv1d_transpose = torch.nn.ConvTranspose1d(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=kernel_size,
        stride=stride,
        output_padding=output_padding,
        groups=groups,
        bias=bias,
    ).eval()
    verify_model(conv1d_transpose, conv1d_input_data)
@tvm.testing.uses_gpu
def test_forward_conv2d_transpose_group():
    """Verify grouped functional conv_transpose2d (regression for tvm#10223)."""
    # https://github.com/apache/tvm/issues/10223
    class ModulatedConvTranspose2D(torch.nn.Module):
        """Grouped transpose conv with a per-sample modulated weight."""
        def forward(self, x, w, s):
            """forward"""
            B, C, H, W = x.shape
            I, O, KH, KW = w.shape
            # weight is different for each input in batch (this is why we want grouped conv
            # transpose)
            w = w.unsqueeze(0) * s.reshape(B, 1, 1, 1, 1)
            w = w.reshape(B * I, O, KH, KW)
            # Fold the batch into channels so groups=B applies per-sample weights.
            x = x.reshape(1, B * C, H, W)
            x = torch.nn.functional.conv_transpose2d(
                x, w, stride=(2, 2), padding=(1, 1), output_padding=(1, 1), groups=B
            )
            return x.reshape(B, O, H * 2, W * 2)
    b, c, h, w, k = 4, 512, 8, 16, 3
    inputs = torch.rand(b, c, h, w)
    weights = torch.rand(c, c // 2, k, k)
    styles = torch.rand(b)
    # cuda not supported for group > 1 conv2d_transpose
    targets = ["llvm"]
    if cudnn.exists():
        targets.append("cuda -libs=cudnn")
    verify_trace_model(ModulatedConvTranspose2D().eval(), [inputs, weights, styles], targets)
def test_forward_deform_conv():
    """Verify conversion of torchvision.ops.deform_conv2d.

    Runs two configurations: offset_groups=2 and offset_groups=1, with a
    looser tolerance since deformable conv is interpolation-heavy.
    """
    torch.set_grad_enabled(False)
    def test_run(
        batch_size,
        in_channels,
        out_channels,
        in_height,
        in_width,
        out_height,
        out_width,
        offset_groups,
        kh,
        kw,
        groups,
    ):
        # Offset tensor carries an (x, y) pair per kernel tap per offset group.
        input_shape = [batch_size, in_channels, in_height, in_width]
        offset_shape = [batch_size, 2 * offset_groups * kh * kw, out_height, out_width]
        weight_shape = [out_channels, in_channels // groups, kh, kw]
        input_data = torch.rand(input_shape)
        offset_data = torch.rand(offset_shape)
        weight_data = torch.rand(weight_shape)
        class DeformConv2D(Module):
            def forward(self, *args):
                return torchvision.ops.deform_conv2d(args[0], args[1], args[2])
        verify_model(
            DeformConv2D().float().eval(),
            input_data=[input_data, offset_data, weight_data],
            rtol=1e-4,
            atol=1e-4,
        )
    batch_size = 4
    in_channels, out_channels = 4, 6
    in_height, in_width = 10, 10
    out_height, out_width = 8, 8
    offset_groups = 2
    kh, kw = 3, 3
    groups = 1
    test_run(
        batch_size,
        in_channels,
        out_channels,
        in_height,
        in_width,
        out_height,
        out_width,
        offset_groups,
        kh,
        kw,
        groups,
    )
    batch_size = 5
    in_channels, out_channels = 4, 6
    in_height, in_width = 10, 10
    out_height, out_width = 8, 8
    offset_groups = 1
    kh, kw = 3, 3
    groups = 1
    test_run(
        batch_size,
        in_channels,
        out_channels,
        in_height,
        in_width,
        out_height,
        out_width,
        offset_groups,
        kh,
        kw,
        groups,
    )
@tvm.testing.uses_gpu
def test_forward_threshold():
    """Verify conversion of torch.nn.Threshold."""
    torch.set_grad_enabled(False)
    verify_model(torch.nn.Threshold(0, 0).float().eval(), input_data=torch.rand([1, 3]).float())
    # Mix of values below/above the threshold to hit both branches.
    mixed = torch.tensor([[-1.0, 2.0]], dtype=torch.float32)
    verify_model(torch.nn.Threshold(1, 1).float().eval(), input_data=mixed)
@tvm.testing.uses_gpu
def test_forward_contiguous():
    """Verify conversion of Tensor.contiguous (identity on the graph)."""
    torch.set_grad_enabled(False)

    class ContiguousOp(Module):
        def forward(self, *args):
            return args[0].contiguous()

    data = torch.rand([10]).float()
    verify_model(ContiguousOp().float().eval(), input_data=data)
@tvm.testing.uses_gpu
def test_forward_batchnorm():
    """Verify conversion of BatchNorm2d/3d modules and functional batch_norm.

    Weights are randomized so the affine transform is non-trivial; the
    functional variant is checked with and without a bias tensor.
    """
    def init_weight(m):
        torch.nn.init.normal_(m.weight, 0, 0.01)
        torch.nn.init.normal_(m.bias)
    inp_2d = torch.rand((1, 16, 10, 10))
    inp_3d = torch.rand((1, 16, 10, 10, 10))
    class BatchNorm(Module):
        def __init__(self, weight, bias):
            super().__init__()
            self.weight = weight
            self.bias = bias
        def forward(self, *args):
            # Fixed zero-mean/unit-variance running stats (eval-mode behavior).
            return torch.nn.functional.batch_norm(
                args[0],
                running_mean=torch.zeros(args[0].shape[1]),
                running_var=torch.ones(args[0].shape[1]),
                weight=self.weight,
                bias=self.bias,
            )
    for bn, inp in [(torch.nn.BatchNorm2d(16), inp_2d), (torch.nn.BatchNorm3d(16), inp_3d)]:
        init_weight(bn.eval())
        verify_model(bn.eval(), input_data=inp)
        verify_model(BatchNorm(bn.weight, None).eval(), input_data=inp)
        verify_model(BatchNorm(bn.weight, bn.bias).eval(), input_data=inp)
@tvm.testing.uses_gpu
def test_forward_instancenorm():
    """Verify conversion of InstanceNorm2d/3d, with and without running stats."""
    inp_2d = torch.rand((1, 16, 10, 10))
    inp_3d = torch.rand((1, 16, 10, 10, 10))
    for ins_norm, inp in [
        (torch.nn.InstanceNorm2d(16), inp_2d),
        (torch.nn.InstanceNorm3d(16), inp_3d),
        (torch.nn.InstanceNorm2d(16, track_running_stats=True), inp_2d),
        (torch.nn.InstanceNorm3d(16, track_running_stats=True), inp_3d),
    ]:
        verify_model(ins_norm.eval(), input_data=inp)
@tvm.testing.uses_gpu
def test_forward_layernorm():
    """Verify conversion of torch.nn.LayerNorm on 4D and 5D inputs."""
    def init_weight(m):
        # Randomize affine parameters so normalization is non-trivial.
        torch.nn.init.normal_(m.weight, 0, 0.01)
        torch.nn.init.normal_(m.bias, 0.02)
    inp_2d = torch.rand((1, 16, 10, 10))
    inp_3d = torch.rand((1, 16, 10, 10, 10))
    for ln, inp in [(torch.nn.LayerNorm(10), inp_2d), (torch.nn.LayerNorm(10), inp_3d)]:
        init_weight(ln.eval())
        verify_model(ln.eval(), input_data=inp)
@tvm.testing.uses_gpu
def test_forward_groupnorm():
    """Verify conversion of torch.nn.GroupNorm for several group counts."""
    input_shape = [10, 6, 5, 5]
    input_data = torch.rand(input_shape).float()
    # Separate 6 channels into 3 groups
    verify_model(torch.nn.GroupNorm(3, 6).eval(), input_data=input_data)
    # Put all 6 channels into a single group (equivalent with LayerNorm)
    verify_model(torch.nn.GroupNorm(1, 6).eval(), input_data=input_data)
    # Separate 6 channels into 6 groups (equivalent with InstanceNorm)
    verify_model(torch.nn.GroupNorm(6, 6).eval(), input_data=input_data)
    # Second shape: 10 channels, non-square spatial dims.
    input_shape = [1, 10, 4, 7]
    input_data = torch.rand(input_shape).float()
    verify_model(torch.nn.GroupNorm(1, 10).eval(), input_data=input_data)
    verify_model(torch.nn.GroupNorm(2, 10).eval(), input_data=input_data)
    verify_model(torch.nn.GroupNorm(5, 10).eval(), input_data=input_data)
    verify_model(torch.nn.GroupNorm(10, 10).eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_reshape():
    """Verify conversion of Tensor.reshape: static shape, flatten, and
    shapes computed from the input's own dimensions."""
    torch.set_grad_enabled(False)
    input_shape = [2, 1, 10, 1, 10]
    new_shape = [2, 1, 10, 10]
    class Reshape1(Module):
        def forward(self, *args):
            return args[0].reshape(new_shape)
    # Flatten to 1D with -1.
    class Reshape2(Module):
        def forward(self, *args):
            return args[0].reshape([-1])
    # Target shape derived from the input's runtime shape.
    class Reshape3(torch.nn.Module):
        def forward(self, x):
            x_shape = x.shape
            return x.reshape((x_shape[0] * x_shape[1], x_shape[2]))
    input_data = torch.rand(input_shape).float()
    verify_model(Reshape1(), input_data=input_data)
    verify_model(Reshape2(), input_data=input_data)
    verify_model(Reshape3(), input_data=torch.randn(2, 3, 4))
@tvm.testing.uses_gpu
def test_forward_reshape_as():
    """Verify conversion of Tensor.reshape_as (shape taken from a second tensor)."""
    def test_func(input_tensor, other_tensor):
        return input_tensor.reshape_as(other_tensor)
    input_data = [torch.rand([2, 1, 10, 1, 10]), torch.rand([2, 1, 10, 10])]
    # Only input0 is a graph input; the second tensor supplies the target shape.
    verify_model_with_input(test_func, input_data, input_dict={"input0": input_data[0]})
@tvm.testing.uses_gpu
def test_flatten():
    """Verify conversion of torch.flatten for positive/negative dim pairs
    covering full flatten, no-op, and partial flatten cases."""
    def _test_flatten(start_dim, end_dim):
        return lambda inp: torch.flatten(inp, start_dim, end_dim)
    inp = torch.rand((3, 5, 2, 2))
    # [3, 5, 2, 2] -> [60]
    verify_model(_test_flatten(0, -1), inp)
    verify_model(_test_flatten(0, 3), inp)
    verify_model(_test_flatten(-4, 3), inp)
    verify_model(_test_flatten(-4, -1), inp)
    # [3, 5, 2, 2] -> [3, 5, 2, 2]
    verify_model(_test_flatten(3, -1), inp)
    verify_model(_test_flatten(-1, -1), inp)
    verify_model(_test_flatten(0, -4), inp)
    verify_model(_test_flatten(-4, -4), inp)
    # [3, 5, 2, 2] -> [3, 10, 2]
    verify_model(_test_flatten(1, 2), inp)
    verify_model(_test_flatten(1, -2), inp)
    verify_model(_test_flatten(-3, 2), inp)
    verify_model(_test_flatten(-3, -2), inp)
@tvm.testing.uses_gpu
def test_forward_transpose():
    """Verify conversion of Tensor.transpose (positive and negative dims)
    and Tensor.permute."""
    torch.set_grad_enabled(False)
    input_shape = [1, 3, 10, 10]
    class Transpose1(Module):
        def forward(self, *args):
            return args[0].transpose(2, 3)
    class Transpose2(Module):
        def forward(self, *args):
            return args[0].transpose(-2, -1)
    class Transpose3(Module):
        def forward(self, *args):
            return args[0].permute(0, 2, 3, 1)
    input_data = torch.rand(input_shape).float()
    verify_model(Transpose1().float().eval(), input_data=input_data)
    verify_model(Transpose2().float().eval(), input_data=input_data)
    verify_model(Transpose3().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_numpy_T():
    """Verify conversion of the NumPy-style Tensor.T attribute."""
    torch.set_grad_enabled(False)
    data = torch.rand([1, 3, 10, 10]).float()
    verify_model(lambda x: x.T, input_data=data)
@tvm.testing.uses_gpu
def test_forward_size():
    """Verify conversion of Tensor.size() consumed as a Python scalar."""
    torch.set_grad_enabled(False)

    class SizeAsScalar(Module):
        def forward(self, *args):
            # size(0) becomes a float multiplier, exercising scalar lowering.
            return float(args[0].size(0)) * args[0]

    data = torch.rand([1, 3]).float()
    verify_model(SizeAsScalar().float().eval(), input_data=data)
@tvm.testing.uses_gpu
def test_type_as():
    """Verify conversion of Tensor.type_as for float/int targets.

    The fp16 variant is gated on GPU capability but currently disabled.
    """
    torch.set_grad_enabled(False)
    input_shape = [1, 3]
    def _create_module(dtype):
        class TypeAs(Module):
            def forward(self, *args):
                expected_type_tensor = torch.zeros(1, 3, dtype=dtype)
                return args[0].type_as(expected_type_tensor)
        return TypeAs()
    input_data = torch.randn(input_shape).float()
    verify_model(_create_module(torch.float64), input_data=input_data)
    verify_model(_create_module(torch.float32), input_data=input_data)
    verify_model(_create_module(torch.int64), input_data=input_data)
    verify_model(_create_module(torch.int32), input_data=input_data)
    verify_model(_create_module(torch.int16), input_data=input_data)
    verify_model(_create_module(torch.int8), input_data=input_data)
    if torch.cuda.is_available():
        check_fp16 = False
        try:
            # Only check half precision on supported hardwares.
            if have_fp16(tvm.cuda(0).compute_version):
                check_fp16 = True
        # pylint: disable=broad-except
        except Exception:
            # If GPU is not enabled in TVM, skip the fp16 test.
            pass
        # Temporary disable fp16 test
        check_fp16 = False
        if check_fp16:
            verify_model(_create_module(torch.float16), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_view():
    """Verify conversion of Tensor.view: static shape, -1 inference, and
    a dimension computed from 0-d tensors."""
    torch.set_grad_enabled(False)
    input_shape = [1, 3, 10, 10]
    class View1(Module):
        def forward(self, *args):
            return args[0].view((1, 3 * 10 * 10))
    class View2(Module):
        def forward(self, *args):
            return args[0].view(args[0].shape[0], -1)
    # Dimension produced by tensor arithmetic rather than a literal.
    class View3(Module):
        def forward(self, *args):
            d1 = torch.tensor(3) * torch.tensor(10) * torch.tensor(10)
            return args[0].view(args[0].shape[0], d1)
    input_data = torch.rand(input_shape).float()
    verify_model(View1().float().eval(), input_data=input_data)
    verify_model(View2().float().eval(), input_data=input_data)
    verify_model(View3().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_select():
    """Verify conversion of Tensor.select, negative indexing, and
    torch.index_select with a baked-in source tensor."""
    torch.set_grad_enabled(False)
    input_shape = [5, 3, 10, 10]
    class Select1(Module):
        def forward(self, *args):
            return args[0].select(1, 1)
    # index_select where the source tensor is a module constant; only the
    # index tensor is a graph input.
    class IndexedSelect(Module):
        def __init__(self, inp, dim):
            super().__init__()
            self.inp = inp
            self.dim = dim
            if torch.cuda.is_available():
                self.inp = self.inp.cuda()
        def forward(self, index):
            return torch.index_select(self.inp, self.dim, index)
    input_data = torch.rand(input_shape).float()
    verify_model(Select1().float().eval(), input_data=input_data)
    # test negative indexing
    verify_model(lambda x: x[-1], input_data=input_data)
    x = torch.randn(3, 4)
    indices = torch.tensor([0, 2])
    verify_model(IndexedSelect(x, 0).eval(), input_data=indices)
    verify_model(IndexedSelect(x, 1).eval(), input_data=indices)
@tvm.testing.uses_gpu
def test_forward_clone():
    """Verify conversion of Tensor.clone (tensor copy)."""
    torch.set_grad_enabled(False)

    class CloneOp(Module):
        def forward(self, *args):
            return args[0].clone()

    data = torch.rand([10]).float()
    verify_model(CloneOp().float().eval(), input_data=data)
@tvm.testing.uses_gpu
def test_forward_gather():
    """Verify conversion of torch.gather along dims 0, 1 and 2, on 1D/2D/3D
    inputs with both float and int sources."""
    torch.set_grad_enabled(False)
    class Gather1(Module):
        def forward(self, *args):
            return torch.gather(args[0], 0, args[1])
    class Gather2(Module):
        def forward(self, *args):
            return torch.gather(args[0], 1, args[1])
    class Gather3(Module):
        def forward(self, *args):
            return torch.gather(args[0], 2, args[1])
    input_data = torch.rand((4,)).float()
    index = torch.tensor([1])
    verify_model(Gather1().float().eval(), input_data=[input_data, index])
    input_data = torch.rand((2, 2)).float()
    index = torch.tensor([[1, 0], [0, 1]])
    verify_model(Gather1().float().eval(), input_data=[input_data, index])
    # Integer source tensor.
    input_data = torch.tensor([[1, 2], [3, 4]])
    index = torch.tensor([[0, 0], [1, 0]])
    verify_model(Gather2().float().eval(), input_data=[input_data, index])
    input_data = torch.rand((2, 2)).float()
    index = torch.tensor([[1, 0], [0, 1]])
    verify_model(Gather2().float().eval(), input_data=[input_data, index])
    input_data = torch.rand((3, 3, 3)).float()
    index = torch.tensor(
        [
            [[1, 0, 0], [1, 0, 1], [0, 1, 1]],
            [[1, 1, 1], [1, 2, 1], [1, 0, 1]],
            [[1, 2, 1], [1, 2, 1], [1, 2, 1]],
        ]
    )
    verify_model(Gather3().float().eval(), input_data=[input_data, index])
@tvm.testing.uses_gpu
def test_forward_logsoftmax():
    """Verify conversion of torch.nn.LogSoftmax applied to a sliced 2D view."""
    torch.set_grad_enabled(False)

    class LogSoftmaxOnSlice(Module):
        def forward(self, *args):
            # Index down to a 2D slice before applying LogSoftmax on dim 1.
            return torch.nn.LogSoftmax(dim=1)(args[0][0, 0])

    data = torch.rand([1, 3, 10, 10]).float()
    verify_model(LogSoftmaxOnSlice().float().eval(), input_data=data)
@pytest.mark.skip(reason="unsupported op aten::linalg_vector_norm")
@tvm.testing.uses_gpu
def test_forward_norm():
    """Verify conversion of torch.norm across p values (inf, -inf, fractional,
    negative) and dim/keepdim combinations. Currently skipped: the converter
    lacks aten::linalg_vector_norm support."""
    torch.set_grad_enabled(False)
    input_shape = [1, 3, 10, 10]
    class Norm1(Module):
        def forward(self, *args):
            return torch.norm(args[0], p=float("inf"), dim=None, keepdim=False)
    class Norm2(Module):
        def forward(self, *args):
            return torch.norm(args[0], p=float("-inf"), dim=None, keepdim=False)
    class Norm3(Module):
        def forward(self, *args):
            return torch.norm(args[0], p=float("-inf"), dim=None, keepdim=True)
    class Norm4(Module):
        def forward(self, *args):
            return torch.norm(args[0], p=float("inf"), dim=(1, 2), keepdim=False)
    class Norm5(Module):
        def forward(self, *args):
            return torch.norm(args[0], p=float("inf"), dim=(1), keepdim=True)
    class Norm6(Module):
        def forward(self, *args):
            return torch.norm(args[0], p=float(0.5), dim=(1), keepdim=True)
    class Norm7(Module):
        def forward(self, *args):
            return torch.norm(args[0], p=float(1), dim=None, keepdim=False)
    class Norm8(Module):
        def forward(self, *args):
            return torch.norm(args[0], p=float(2.0), dim=(1), keepdim=True)
    class Norm9(Module):
        def forward(self, *args):
            return torch.norm(args[0], p=float(-0.5), dim=(1, 2), keepdim=True)
    class Norm10(Module):
        def forward(self, *args):
            return torch.norm(args[0], p=float(-2), dim=(1), keepdim=False)
    input_data = torch.rand(input_shape).float()
    verify_model(Norm1().float().eval(), input_data=input_data)
    verify_model(Norm2().float().eval(), input_data=input_data)
    verify_model(Norm3().float().eval(), input_data=input_data)
    verify_model(Norm4().float().eval(), input_data=input_data)
    verify_model(Norm5().float().eval(), input_data=input_data)
    verify_model(Norm6().float().eval(), input_data=input_data)
    verify_model(Norm7().float().eval(), input_data=input_data)
    verify_model(Norm8().float().eval(), input_data=input_data)
    verify_model(Norm9().float().eval(), input_data=input_data)
    verify_model(Norm10().float().eval(), input_data=input_data)
@pytest.mark.skip(reason="unsupported op aten::linalg_vector_norm")
@tvm.testing.uses_gpu
def test_forward_frobenius_norm():
    """Verify conversion of torch.norm with the Frobenius norm (explicit and
    default p). Currently skipped: aten::linalg_vector_norm is unsupported."""
    torch.set_grad_enabled(False)
    input_shape = [1, 3, 10, 10]
    # Default p (Frobenius).
    class FroNorm1(Module):
        def forward(self, *args):
            return torch.norm(args[0])
    class FroNorm2(Module):
        def forward(self, *args):
            return torch.norm(args[0], p="fro", dim=None, keepdim=True)
    class FroNorm3(Module):
        def forward(self, *args):
            return torch.norm(args[0], p="fro", dim=(1), keepdim=True)
    class FroNorm4(Module):
        def forward(self, *args):
            return torch.norm(args[0], dim=None, keepdim=False)
    input_data = torch.rand(input_shape).float()
    verify_model(FroNorm1().float().eval(), input_data=input_data)
    verify_model(FroNorm2().float().eval(), input_data=input_data)
    verify_model(FroNorm3().float().eval(), input_data=input_data)
    verify_model(FroNorm4().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_sigmoid():
    """Verify conversion of torch.nn.Sigmoid."""
    torch.set_grad_enabled(False)
    data = torch.rand([1, 3, 10, 10]).float()
    verify_model(torch.nn.Sigmoid().eval(), input_data=data)
@tvm.testing.uses_gpu
def test_forward_dense():
    """Verify conversion of torch.nn.Linear (with/without bias) and check the
    resulting Relay graph contains no spurious multiply ops."""
    torch.set_grad_enabled(False)
    input_shape = [1, 3, 10, 10]
    class Dense1(Module):
        def __init__(self):
            super().__init__()
            self.linear = torch.nn.Linear(10, 7, bias=True)
        def forward(self, *args):
            return self.linear(args[0][0, 0])
    class Dense2(Module):
        def __init__(self):
            super().__init__()
            self.linear = torch.nn.Linear(10, 7, bias=False)
        def forward(self, *args):
            return self.linear(args[0][0, 0])
    input_data = torch.rand(input_shape).float()
    verify_model(Dense1().float().eval(), input_data=input_data)
    verify_model(Dense2().float().eval(), input_data=input_data)
    # Convert directly and inspect the IR: a linear layer must lower to
    # dense (+ bias add), never an elementwise multiply.
    trace = torch.jit.trace(Dense1(), [input_data])
    mod, _ = relay.frontend.from_pytorch(
        trace,
        [("input", input_shape)],
    )
    assert not any(list(op.name == "multiply" for op in list_ops(mod["main"])))
@tvm.testing.uses_gpu
def test_forward_linear():
    """Verify conversion of torch.nn.functional.linear across 1D/2D/3D inputs,
    weight shapes, bias ranks, and a nested-linear composition."""
    torch.set_grad_enabled(False)
    class Linear(Module):
        def forward(self, inputs, weight, bias):
            return F.linear(inputs, weight, bias)
    class LinearNoBias(Module):
        def forward(self, inputs, weight):
            return F.linear(inputs, weight)
    # linear whose weight is itself the output of a linear call.
    class LinearNested(Module):
        def forward(self, x, y, z):
            return F.linear(x, F.linear(y, z))
    input1d = torch.rand([2]).float()
    input2d = torch.rand([2, 2]).float()
    input3d = torch.rand([4, 3, 2]).float()
    weight1d = torch.rand([2]).float()
    weight2d = torch.rand([2, 2]).float()
    weight3x2 = torch.rand([3, 2]).float()
    bias0d = torch.rand([]).float()
    bias1d = torch.rand([2]).float()
    bias2d = torch.rand([2, 2]).float()
    # 2D input, 2D weight, 1D bias
    verify_model(Linear(), input_data=[input2d, weight2d, bias1d])
    # 2D input, 2D weight, 2D bias
    verify_model(Linear(), input_data=[input2d, weight2d, bias2d])
    # 2D input, 2D weight, no bias
    verify_model(LinearNoBias(), input_data=[input2d, weight2d])
    verify_model(LinearNoBias(), input_data=[input2d, weight3x2])
    # 2D input, 1D weight, 1D bias is not supported by torch.linear()
    # 2D input, 1D weight, no bias
    verify_model(LinearNoBias(), input_data=[input2d, weight1d])
    # 3D input, 2D weight, no bias
    verify_model(LinearNoBias(), input_data=[input3d, weight3x2])
    # 3D input, 2D weight, 1D bias
    verify_model(Linear(), input_data=[input3d, weight2d, bias1d])
    verify_model(LinearNested(), input_data=[torch.randn(10, 10) for _ in range(3)])
    # 1D input, 2D weight, 1D bias
    verify_model(Linear(), input_data=[input1d, weight2d, bias1d])
    # 1D input, 2D weight, no bias
    verify_model(LinearNoBias(), input_data=[input1d, weight2d])
    # 1D input, 1D weight, scalar bias
    verify_model(Linear(), input_data=[input1d, weight1d, bias0d])
    # 1D input, 1D weight, no bias
    verify_model(LinearNoBias(), input_data=[input1d, weight1d])
@tvm.testing.uses_gpu
def test_forward_dropout():
    """Verify dropout variants convert correctly (identity in eval mode)."""
    torch.set_grad_enabled(False)
    data = torch.rand([1, 3, 10, 10]).float()
    # Slice the input so each variant gets an appropriately ranked tensor.
    verify_model(torch.nn.Dropout(p=0.5).eval(), input_data=data[0, 0])
    verify_model(torch.nn.Dropout2d(p=0.5).eval(), input_data=data[0])
    verify_model(torch.nn.Dropout3d(p=0.5).eval(), input_data=data)
    verify_model(torch.nn.AlphaDropout(p=0.5).eval(), input_data=data[0, 0])
@tvm.testing.uses_gpu
def test_forward_slice():
    """Verify conversion of tensor slicing: static bounds, negative bounds,
    tensor-valued bounds, strides, and a dynamic slice length."""
    torch.set_grad_enabled(False)
    input_shape = [1, 3, 10, 10]
    class Slice1(Module):
        def forward(self, *args):
            return args[0][:, :, :, :3]
    class Slice2(Module):
        def forward(self, *args):
            return args[0][0, :, :-3, :]
    # Slice bounds computed from 0-d tensor arithmetic.
    class Slice3(Module):
        def forward(self, *args):
            x0 = torch.tensor(2) - torch.tensor(1)
            x1 = torch.tensor(3) + torch.tensor(1)
            return args[0][:, x0:, 1:x1, :]
    class SliceWithStride(torch.nn.Module):
        def forward(self, x):
            return x[..., 0::2] + x[..., 1::2]
    class SliceWithStride2(torch.nn.Module):
        def forward(self, x):
            return x[0::2, 0::2] + x[1::2, 1::2]
    # Slice whose end point is a runtime input (dynamic shape path).
    class DynamicLengthSlice(torch.nn.Module):
        def forward(self, values, length):
            return values[0:length]
    input_data = torch.rand(input_shape).float()
    verify_model(Slice1(), input_data=input_data)
    verify_model(Slice2(), input_data=input_data)
    verify_model(Slice3(), input_data=input_data)
    verify_model(SliceWithStride(), input_data=torch.randn(1, 4))
    verify_model(SliceWithStride2(), input_data=torch.randn(4, 4))
    inp = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    slice_len = torch.tensor(2)
    targets = ["llvm", "cuda"]
    verify_trace_model(DynamicLengthSlice(), [inp, slice_len], targets)
@tvm.testing.uses_gpu
def test_forward_narrow():
    """Verify conversion of torch.narrow with literal and tensor-valued
    start/length arguments."""
    torch.set_grad_enabled(False)
    input_shape = [3, 3]
    class Narrow1(Module):
        def forward(self, *args):
            return torch.narrow(args[0], 0, 0, 2)
    class Narrow2(Module):
        def forward(self, *args):
            return torch.narrow(args[0], 1, 1, 2)
    # Start/length produced by 0-d tensor arithmetic.
    class Narrow3(Module):
        def forward(self, *args):
            begin = torch.tensor(2) - torch.tensor(1)
            length = torch.tensor(1) * torch.tensor(2)
            return torch.narrow(args[0], 1, begin, length)
    input_data = torch.rand(input_shape).float()
    verify_model(Narrow1(), input_data=input_data)
    verify_model(Narrow2(), input_data=input_data)
    verify_model(Narrow3(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_mean():
    """Verify conversion of Tensor.mean over a single axis."""
    torch.set_grad_enabled(False)

    class MeanAxis2(Module):
        def forward(self, *args):
            return args[0].mean(2)

    data = torch.rand([1, 3, 10, 10]).float()
    verify_model(MeanAxis2().float().eval(), input_data=data)
@tvm.testing.uses_gpu
def test_forward_expand():
    """Verify conversion of Tensor.expand, with -1 (keep dim) and broadcast
    over a size-1 trailing dimension."""
    torch.set_grad_enabled(False)
    class Expand1(Module):
        def forward(self, *args):
            return args[0].expand((3, -1, -1, -1))
    input_shape = [1, 3, 10, 10]
    input_data = torch.rand(input_shape).float()
    verify_model(Expand1().float().eval(), input_data=input_data)
    class Expand2(Module):
        def forward(self, *args):
            return args[0].expand((3, 3, 3, 1))
    input_shape = [3, 1]
    input_data = torch.rand(input_shape).float()
    verify_model(Expand2().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_broadcast_tensors():
    """Verify conversion of torch.broadcast_tensors for two and three inputs."""
    torch.set_grad_enabled(False)
    class BroadCastTensors1(Module):
        def forward(self, x, y):
            return torch.broadcast_tensors(x, y)
    x = torch.arange(3).view(1, 1, 3)
    y = torch.arange(2).view(1, 2, 1)
    verify_model(BroadCastTensors1().float().eval(), input_data=[x, y])
    class BroadCastTensors2(Module):
        def forward(self, x, y, z):
            return torch.broadcast_tensors(x, y, z)
    x = torch.arange(3).view(1, 1, 3)
    y = torch.arange(2).view(1, 2, 1)
    z = torch.arange(4).view(4, 1, 1)
    verify_model(BroadCastTensors2().float().eval(), input_data=[x, y, z])
@tvm.testing.uses_gpu
def test_forward_pow():
    """Verify conversion of the ** operator with a scalar exponent."""
    torch.set_grad_enabled(False)

    class Square(Module):
        def forward(self, *args):
            return args[0] ** 2

    data = torch.rand([1, 3, 10, 10]).float()
    verify_model(Square().float().eval(), input_data=data)
@tvm.testing.uses_gpu
def test_forward_chunk():
    """Verify conversion of Tensor.chunk by splitting dim 2 into 7 chunks
    and re-concatenating them."""
    torch.set_grad_enabled(False)
    input_shape = [1, 3, 14, 14]
    class Chunk1(Module):
        def forward(self, *args):
            # 14 / 7 = 2 elements per chunk; cat restores the original shape.
            chunks = args[0].chunk(7, 2)
            return torch.cat(chunks, 2)
    input_data = torch.rand(input_shape).float()
    verify_model(Chunk1().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_upsample():
    """Verify conversion of functional interpolate for nearest, bilinear and
    bicubic modes, with fixed sizes (up and down) and scale factors."""
    class Upsample(Module):
        def __init__(self, size=None, scale=None, mode="nearest", align_corners=None):
            super().__init__()
            # Exactly one of size / scale should be given, per interpolate's API.
            self.size = size
            self.scale = scale
            self.mode = mode
            self.align_corners = align_corners
        def forward(self, x):
            return torch.nn.functional.interpolate(
                x,
                size=self.size,
                scale_factor=self.scale,
                mode=self.mode,
                align_corners=self.align_corners,
            )
    inp = torch.rand((1, 3, 32, 32))
    verify_model(Upsample(size=(64, 64), mode="nearest"), inp)
    verify_model(Upsample(scale=2, mode="nearest"), inp)
    verify_model(Upsample(size=(50, 50), mode="nearest"), inp)
    verify_model(Upsample(size=(64, 64), mode="bilinear", align_corners=True), inp)
    verify_model(Upsample(scale=2, mode="bilinear", align_corners=True), inp)
    verify_model(Upsample(size=(50, 50), mode="bilinear", align_corners=True), inp)
    verify_model(Upsample(size=(64, 64), mode="bicubic", align_corners=True), inp)
    verify_model(Upsample(scale=2, mode="bicubic", align_corners=True), inp)
    verify_model(Upsample(size=(50, 50), mode="bicubic", align_corners=True), inp)
@tvm.testing.uses_gpu
def test_to():
    """Check conversion of aten::to for device and dtype casts."""

    class ToCPU(Module):
        def forward(self, x):
            return x.to("cpu")

    class ToFloat(Module):
        def forward(self, x):
            return x.float()

    class ToInt(Module):
        def forward(self, x):
            return x.int()

    class ToLong(Module):
        def forward(self, x):
            return x.long()

    class ToDouble(Module):
        def forward(self, x):
            return x.double()

    class ToFloat16(Module):
        def forward(self, x):
            return x.to(torch.float16)

    # (module, input) pairs; scalar tensors exercise the 0-dim path.
    cases = [
        (ToCPU(), torch.rand((1, 3, 32, 32))),
        (ToFloat(), torch.zeros((1, 3, 32, 32), dtype=torch.int)),
        (ToFloat(), torch.tensor(2, dtype=torch.int)),
        (ToInt(), torch.zeros((1, 3, 32, 32))),
        (ToInt(), torch.tensor(0.8)),
        (ToLong(), torch.tensor(0.8)),
        (ToDouble(), torch.tensor(0.8)),
        (ToFloat16(), torch.tensor(2, dtype=torch.float32)),
        (ToFloat16(), torch.zeros((1, 3, 32, 32), dtype=torch.int)),
    ]
    for mod, data in cases:
        verify_model(mod.eval(), data)
@tvm.testing.uses_gpu
def test_adaptive_pool3d():
    """Check conversion of adaptive max/avg 3D pooling over several shapes."""
    for shape in [(1, 32, 16, 16, 16), (1, 32, 9, 15, 15), (1, 32, 13, 7, 7)]:
        data = torch.rand(shape)
        for pool in (
            torch.nn.AdaptiveMaxPool3d((1, 1, 1)),
            torch.nn.AdaptiveMaxPool3d((2, 2, 2)),
            torch.nn.AdaptiveAvgPool3d((1, 1, 1)),
            torch.nn.AdaptiveAvgPool3d((2, 2, 2)),
            torch.nn.AdaptiveAvgPool3d((4, 8, 8)),
            torch.nn.AdaptiveMaxPool3d((7, 8, 9)),
        ):
            verify_model(pool.eval(), data)
@tvm.testing.uses_gpu
def test_forward_functional_pad():
    """Check conversion of torch.nn.functional.pad in constant mode."""
    torch.set_grad_enabled(False)
    pad = (0, 0)
    # 2-, 4- and 6-element pad tuples pad the last 1, 2, 3 dims respectively.
    pad_specs = [(1, 1), (1, 1, 2, 2), (0, 1, 2, 1, 3, 3)]

    # NOTE: each forward() reads `pad` from the enclosing scope, so the value
    # in effect at trace time is whatever the loops below have assigned.
    class Pad1(Module):
        def forward(self, *args):
            return torch.nn.functional.pad(args[0], pad, "constant", 0)

    data = torch.rand((3, 3, 4, 2))
    for pad in pad_specs:
        verify_model(Pad1().float().eval(), input_data=data)

    class Pad2(Module):
        def forward(self, *args):
            return torch.nn.functional.pad(args[0], pad, "constant", 1)

    data = torch.rand((3, 3, 4, 2))
    for pad in pad_specs:
        verify_model(Pad2().float().eval(), input_data=data)

    class Pad3(Module):
        def forward(self, *args):
            return torch.nn.functional.pad(args[0], pad, "constant", 1.0)

    data = torch.rand((3, 3, 4, 2))
    for pad in pad_specs:
        verify_model(Pad3().float().eval(), input_data=data)
@tvm.testing.uses_gpu
def test_forward_zero_pad2d():
    """Check conversion of torch.nn.ZeroPad2d."""
    data = torch.rand((1, 1, 3, 3))
    for padding in (2, (1, 1, 2, 0)):
        verify_model(torch.nn.ZeroPad2d(padding).eval(), data)
@tvm.testing.uses_gpu
def test_forward_constant_pad1d():
    """Check conversion of torch.nn.ConstantPad1d."""
    verify_model(torch.nn.ConstantPad1d(2, 3.5).eval(), torch.rand((1, 2, 4)))
    # asymmetric padding on a different input size
    verify_model(torch.nn.ConstantPad1d((3, 1), 3.5).eval(), torch.rand((1, 2, 3)))
@tvm.testing.uses_gpu
def test_forward_constant_pad2d():
    """Check conversion of torch.nn.ConstantPad2d."""
    data = torch.rand((1, 2, 2, 2))
    for padding in (2, (3, 0, 2, 1)):
        verify_model(torch.nn.ConstantPad2d(padding, 3.5).eval(), data)
@tvm.testing.uses_gpu
def test_forward_constant_pad3d():
    """Check conversion of torch.nn.ConstantPad3d."""
    data = torch.rand((1, 3, 2, 2, 2))
    for padding in (3, (3, 4, 5, 6, 0, 1)):
        verify_model(torch.nn.ConstantPad3d(padding, 3.5).eval(), data)
@tvm.testing.uses_gpu
def test_forward_reflection_pad1d():
    """Check conversion of torch.nn.ReflectionPad1d."""
    data = torch.rand((1, 2, 4))
    verify_model(torch.nn.ReflectionPad1d(2).eval(), data)
    verify_model(torch.nn.ReflectionPad1d((3, 1)).eval(), data)
    verify_model(torch.nn.ReflectionPad1d((2, 3)).eval(), torch.rand((2, 4, 5)))
@tvm.testing.uses_gpu
def test_forward_reflection_pad2d():
    """Check conversion of torch.nn.ReflectionPad2d."""
    data = torch.rand((1, 1, 3, 3))
    verify_model(torch.nn.ReflectionPad2d(2).eval(), data)
    verify_model(torch.nn.ReflectionPad2d((1, 1, 2, 0)).eval(), data)
    verify_model(torch.nn.ReflectionPad2d((1, 3, 2, 4)).eval(), torch.rand((2, 4, 5, 6)))
@tvm.testing.uses_gpu
def test_forward_replication_pad1d():
    """Check conversion of torch.nn.ReplicationPad1d."""
    data = torch.rand((1, 2, 4))
    verify_model(torch.nn.ReplicationPad1d(2).eval(), data)
    verify_model(torch.nn.ReplicationPad1d((3, 1)).eval(), data)
    verify_model(torch.nn.ReplicationPad1d((2, 3)).eval(), torch.rand((2, 4, 5)))
@tvm.testing.uses_gpu
def test_forward_replication_pad2d():
    """Check conversion of torch.nn.ReplicationPad2d."""
    data = torch.rand((1, 1, 3, 3))
    verify_model(torch.nn.ReplicationPad2d(2).eval(), data)
    verify_model(torch.nn.ReplicationPad2d((1, 1, 2, 0)).eval(), data)
    verify_model(torch.nn.ReplicationPad2d((1, 3, 2, 4)).eval(), torch.rand((2, 4, 5, 6)))
@tvm.testing.uses_gpu
def test_forward_replication_pad3d():
    """Check conversion of torch.nn.ReplicationPad3d."""
    data = torch.rand((1, 1, 3, 3, 3))
    verify_model(torch.nn.ReplicationPad3d(3).eval(), data)
    verify_model(torch.nn.ReplicationPad3d((1, 1, 2, 2, 1, 1)).eval(), data)
    verify_model(torch.nn.ReplicationPad3d((2, 3, 2, 5, 1, 4)).eval(), torch.rand((7, 5, 4, 5, 6)))
@tvm.testing.uses_gpu
def test_forward_upsample3d():
    """Check conversion of torch.nn.Upsample on a 5D input."""
    data = torch.arange(1, 9, dtype=torch.float32).view(1, 1, 2, 2, 2)
    verify_model(torch.nn.Upsample(scale_factor=2, mode="nearest").eval(), data)
    verify_model(torch.nn.Upsample(scale_factor=2, mode="trilinear").eval(), data)
    aligned = torch.nn.Upsample(scale_factor=2, mode="trilinear", align_corners=True)
    verify_model(aligned.eval(), data)
def test_forward_nms():
    """Check conversion of torchvision.ops.nms with dynamic box counts."""
    torch.set_grad_enabled(False)

    class NonMaxSupression(Module):
        def __init__(self, iou_thres):
            super().__init__()
            self.iou_threshold = iou_thres

        def forward(self, *args):
            return torchvision.ops.nms(args[0], args[1], self.iou_threshold)

    def _gen_rand_inputs(num_boxes):
        # Random (x1, y1, x2, y2) boxes with x2 >= x1 and y2 >= y1.
        box_len = 4
        boxes = torch.rand(num_boxes, box_len, dtype=torch.float) * 0.5
        boxes[:, 2] += boxes[:, 0]
        boxes[:, 3] += boxes[:, 1]
        # Unique, shuffled scores avoid ties in the NMS ordering.
        scores = np.linspace(0, 1, num=num_boxes).astype("float32")
        np.random.shuffle(scores)
        return boxes, torch.from_numpy(scores)

    targets = ["llvm", "cuda"]
    for num_boxes, iou_thres in [(10, 0.3), (100, 0.5), (500, 0.9)]:
        boxes, scores = _gen_rand_inputs(num_boxes)
        verify_trace_model(NonMaxSupression(iou_thres), [boxes, scores], targets)
def test_forward_roi_align():
    """Check conversion of torchvision.ops.roi_align."""
    torch.set_grad_enabled(False)

    class ROIAlign(Module):
        def __init__(self, output_sizes, spatial_scale=1.0, sampling_ratio=-1):
            super().__init__()
            self.spatial_scale = spatial_scale
            self.sampling_ratio = sampling_ratio
            self.output_sizes = output_sizes

        def forward(self, *args):
            return torchvision.ops.roi_align(
                args[0],
                args[1],
                self.output_sizes,
                self.spatial_scale,
                self.sampling_ratio,
            )

    feat = torch.Tensor(np.random.uniform(size=(1, 8, 100, 100)))
    rois = torch.Tensor(np.random.uniform(0.0, 100.0, size=(35, 4)))
    # roi_align expects each row as (batch_index, x1, y1, x2, y2).
    batch_idx = torch.zeros((35, 1), dtype=torch.float)
    rois = torch.cat([batch_idx, rois], dim=1)
    verify_model(ROIAlign(7), [feat, rois])
    verify_model(ROIAlign((10, 10), 0.7, 5), [feat, rois])
    verify_model(ROIAlign(15, 0.9, 3), [feat, rois])
@tvm.testing.uses_gpu
def test_conv3d():
    """Check conversion of torch.nn.Conv3d over several input shapes."""
    for shape in [(1, 32, 16, 16, 16), (1, 32, 9, 15, 15), (1, 32, 13, 7, 7)]:
        data = torch.rand(shape)
        verify_model(torch.nn.Conv3d(32, 16, (3, 3, 3), padding=(1, 1, 1)).eval(), data)
        verify_model(torch.nn.Conv3d(32, 16, (5, 5, 5), padding=(2, 2, 2)).eval(), data)
        verify_model(torch.nn.Conv3d(32, 16, kernel_size=1).eval(), data)
        # strided (downsampling) variant
        verify_model(torch.nn.Conv3d(32, 16, kernel_size=1, stride=2).eval(), data)
@tvm.testing.uses_gpu
def test_conv3d_transpose():
    """Check conversion of torch.nn.ConvTranspose3d over several shapes."""
    configs = [
        dict(in_channels=8, out_channels=33, kernel_size=3, stride=2),
        dict(
            in_channels=8,
            out_channels=20,
            kernel_size=(3, 5, 2),
            stride=(2, 1, 1),
            padding=(0, 4, 2),
        ),
        dict(in_channels=8, out_channels=20, kernel_size=1),
        dict(in_channels=8, out_channels=5, kernel_size=1, stride=2),
    ]
    for shape in [(1, 8, 10, 5, 10), (1, 8, 5, 8, 8), (1, 8, 13, 7, 7)]:
        data = torch.rand(shape)
        for cfg in configs:
            verify_model(torch.nn.ConvTranspose3d(**cfg).eval(), data)
# Model tests
@tvm.testing.uses_gpu
def test_resnet18():
    """Compare TVM and PyTorch outputs for torchvision resnet18."""
    torch.set_grad_enabled(False)
    verify_model("resnet18", rtol=1e-4, atol=1e-4)
@tvm.testing.uses_gpu
def test_squeezenet1_0():
    """Compare TVM and PyTorch outputs for torchvision squeezenet1_0."""
    torch.set_grad_enabled(False)
    verify_model("squeezenet1_0", rtol=1e-4, atol=1e-4)
@tvm.testing.uses_gpu
def test_squeezenet1_1():
    """Compare TVM and PyTorch outputs for torchvision squeezenet1_1."""
    torch.set_grad_enabled(False)
    verify_model("squeezenet1_1", rtol=1e-4, atol=1e-4)
@tvm.testing.uses_gpu
def test_densenet121():
    """Compare TVM and PyTorch outputs for torchvision densenet121."""
    torch.set_grad_enabled(False)
    verify_model("densenet121", rtol=1e-4, atol=1e-4)
@tvm.testing.uses_gpu
def test_inception_v3():
    """Compare TVM and PyTorch outputs for torchvision inception_v3."""
    torch.set_grad_enabled(False)
    verify_model("inception_v3", rtol=1e-4, atol=1e-4)
@tvm.testing.uses_gpu
def test_googlenet():
    """Compare TVM and PyTorch outputs for torchvision googlenet."""
    torch.set_grad_enabled(False)
    verify_model("googlenet", rtol=1e-4, atol=1e-4)
@tvm.testing.uses_gpu
def test_mnasnet0_5():
    """Compare TVM and PyTorch outputs for torchvision mnasnet0_5."""
    torch.set_grad_enabled(False)
    verify_model("mnasnet0_5", rtol=1e-4, atol=1e-4)
@tvm.testing.uses_gpu
def test_mobilenet_v2():
    """Compare TVM and PyTorch outputs for torchvision mobilenet_v2."""
    torch.set_grad_enabled(False)
    verify_model("mobilenet_v2", rtol=1e-4, atol=1e-4)
# pylint: disable=pointless-string-statement
# NOTE: the block below is deliberately a string (not code): these models
# currently fail conversion, so the tests stay disabled until the TODO is fixed.
"""
#TODO: Fix VGG and AlexNet issues (probably due to pooling)
@tvm.testing.uses_gpu
def test_alexnet():
    torch.set_grad_enabled(False)
    verify_model("alexnet")
@tvm.testing.uses_gpu
def test_vgg11():
    torch.set_grad_enabled(False)
    verify_model("vgg11")
@tvm.testing.uses_gpu
def test_vgg11_bn():
    torch.set_grad_enabled(False)
    verify_model("vgg11_bn")
"""
@tvm.testing.uses_gpu
def test_custom_conversion_map():
    """Check that a user-supplied op conversion map overrides the default."""

    def get_roi_align():
        # Build a RoIAlign module plus matching inputs.
        pool_size = 5
        num_channels = 2 * (pool_size**2)
        feat = torch.rand(2, num_channels, 10, 10)
        rois = torch.tensor(
            [
                [0, 0, 0, 9, 9],  # format is (xyxy)
                [0, 0, 5, 4, 9],
                [0, 5, 5, 9, 9],
                [1, 0, 0, 9, 9],
            ],
            dtype=torch.float,
        )
        roi_align = torchvision.ops.RoIAlign(pool_size, spatial_scale=1, sampling_ratio=-1)
        return roi_align.eval(), [feat, rois]

    def convert_roi_align():
        # Custom converter mapping torchvision::roi_align to Relay's op.
        def _impl(inputs, input_types):
            spatial_scale = inputs[2]
            pooled_size = (inputs[3], inputs[4])
            sampling_ratio = inputs[5]
            return relay.op.vision.roi_align(
                inputs[0], inputs[1], pooled_size, spatial_scale, sampling_ratio
            )

        return _impl

    custom_map = {"torchvision::roi_align": convert_roi_align()}
    model, inputs = get_roi_align()
    verify_model(model, inputs, custom_map)
@tvm.testing.uses_gpu
def test_segmentation_models():
    """Check conversion of torchvision segmentation models (fcn, deeplabv3)."""

    class SegmentationModelWrapper(Module):
        # torchvision segmentation models return a dict; unwrap "out" so
        # verify_model can compare plain tensors.
        def __init__(self, model):
            super().__init__()
            self.model = model

        def forward(self, inp):
            return self.model(inp)["out"]

    data = [torch.rand((1, 3, 300, 300), dtype=torch.float)]
    for net in (
        torchvision.models.segmentation.fcn_resnet101(pretrained=True),
        torchvision.models.segmentation.deeplabv3_resnet101(pretrained=True),
    ):
        verify_model(SegmentationModelWrapper(net.eval()), data, atol=1e-4, rtol=1e-4)
@tvm.testing.uses_gpu
def test_3d_models():
    """Check conversion of the torchvision r3d_18 video model."""
    model = torchvision.models.video.r3d_18(pretrained=True).eval()
    verify_model(model, [torch.rand((1, 3, 4, 56, 56))], atol=1e-4, rtol=1e-4)
def _get_default_vm_targets():
"""Get default vm targets"""
return ["llvm", "cuda"]
def verify_script_model(pt_model, ishapes, targets, idtype=None):
    """Script `pt_model` with torch.jit and compare it against TVM's VM."""
    scripted = torch.jit.script(pt_model)
    verify_model_vm(scripted, ishapes, idtype=idtype, targets=targets)
def verify_trace_model(pt_model, idata, targets):
    """Trace `pt_model` on `idata` and compare it against TVM's VM."""
    traced = torch.jit.trace(pt_model, idata)
    shapes = [tensor.shape for tensor in idata]
    verify_model_vm(traced, shapes, idata=idata, targets=targets)
def convert_pt_to_tvm_type(idtype):
    """Map a PyTorch dtype to the equivalent TVM dtype string.

    Parameters
    ----------
    idtype : torch.dtype
        The PyTorch dtype to translate.

    Returns
    -------
    str
        The TVM dtype string, e.g. "float32".

    Raises
    ------
    NotImplementedError
        If the dtype has no TVM equivalent.
    """
    # TVM does not support PyTorch complex dtypes, so they are simply
    # absent from the table below.
    dtype_map = {
        torch.float64: "float64",
        torch.float32: "float32",
        torch.float16: "float16",
        torch.bfloat16: "bfloat16",
        torch.int64: "int64",
        torch.int32: "int32",
        torch.int16: "int16",
        torch.int8: "int8",
        torch.uint8: "uint8",
        torch.bool: "bool",
    }
    try:
        return dtype_map[idtype]
    except KeyError:
        raise NotImplementedError(f"Unsupported dtype: {idtype}") from None
def verify_model_vm(input_model, ishapes, idtype=None, idata=None, targets=None):
    """Compile a scripted/traced model with the Relay VM and compare outputs.

    Parameters
    ----------
    input_model : torch.jit.ScriptModule
        The scripted or traced PyTorch model.
    ishapes : list of tuple
        Shapes of the model inputs.
    idtype : torch.dtype, optional
        Dtype of randomly generated inputs; defaults to torch.float.
    idata : list of torch.Tensor, optional
        Explicit input data; random data is generated when omitted.
    targets : list of str, optional
        Targets to run on (defaults to ["llvm"]); targets not enabled in
        the current TVM build are skipped.
    """
    targets = targets or ["llvm"]
    if not idtype:
        idtype = torch.float

    input_names = [f"i{idx}" for idx, _ in enumerate(ishapes)]
    tvm_dtype = convert_pt_to_tvm_type(idtype)
    input_dtypes = [tvm_dtype] * len(input_names)
    input_shapes = list(zip(input_names, list(zip(ishapes, input_dtypes))))

    if idata:
        input_data = idata
    # If no input_data provided, generate random data of specified dtype
    else:
        if idtype == torch.bool:
            input_data = [
                torch.Tensor.bool(torch.randint(low=0, high=2, size=shape)) for shape in ishapes
            ]
        # Torch dtype can be float, complex, int, or Bool. Complex not supported,
        # so if not float or Bool, dtype must be int!
        elif not idtype.is_floating_point:
            input_data = [
                torch.randint(low=0, high=10, size=shape, dtype=idtype) for shape in ishapes
            ]
        else:
            input_data = [torch.randn(shape, dtype=idtype) for shape in ishapes]

    # Compile via VM; import twice to check that span filling does not
    # change the produced IR.
    with tvm.testing.disable_span_filling():
        mod, params = relay.frontend.from_pytorch(input_model, input_shapes)
    with tvm.testing.enable_span_filling():
        mod_with_span, _ = relay.frontend.from_pytorch(input_model, input_shapes)
    assert tvm.ir.structural_equal(mod, mod_with_span, map_free_vars=True)

    for tgt in targets:
        if not tvm.testing.device_enabled(tgt):
            continue
        print("Running on target", tgt)

        dev = tvm.device(tgt, 0)
        evaluator = relay.create_executor("vm", mod=mod, device=dev, target=tgt).evaluate()

        # Inference: feed the inputs alongside the model parameters.
        for name, inp in zip(input_names, input_data):
            params[name] = inp.numpy()
        vm_res = evaluator(**params)

        # Baseline result
        with torch.no_grad():
            pt_result = input_model(*input_data)

        # Verify the accuracy
        if isinstance(pt_result, tuple):
            # Multiple outputs. NOTE: the loop variable is named pt_out to
            # avoid shadowing the tuple being iterated (the original code
            # rebound pt_result inside its own loop).
            for i, pt_out in enumerate(pt_result):
                tvm_res = vm_res[i].numpy()
                tvm.testing.assert_allclose(tvm_res, pt_out.numpy(), rtol=1e-5, atol=1e-5)
        elif not isinstance(pt_result, torch.Tensor):
            # Non-tensor (scalar) output, e.g. a Python int from a scripted model.
            tvm_res = vm_res.numpy().item()
            assert pt_result == tvm_res
        else:
            tvm.testing.assert_allclose(vm_res.numpy(), pt_result.numpy(), rtol=1e-5, atol=1e-5)
@tvm.testing.uses_gpu
def test_control_flow():
    """Verify scripted models with data-dependent control flow on the VM."""

    class SimpleIf(torch.nn.Module):
        """SimpleIf module"""

        def __init__(self, N, M):
            super().__init__()
            self.weight = torch.nn.Parameter(torch.rand(N, M))

        def forward(self, inp):
            # Branch taken depends on runtime input values, so this must be
            # scripted (tracing would bake in one branch).
            if inp.sum() > 0.0:
                output = self.weight + inp
            else:
                output = self.weight - inp
            return output

    class NestedIf(torch.nn.Module):
        """NestedIf module"""

        def __init__(self, N, M):
            super().__init__()
            self.weight = torch.nn.Parameter(torch.rand(N, M))

        def forward(self, inp):
            """forward"""
            if inp.sum() > 0.0:
                if inp.mean() > 0.0:
                    output = self.weight + inp
                else:
                    output = self.weight - inp
            else:
                if inp.mean() >= 0.0:
                    output = self.weight * inp
                else:
                    output = self.weight / inp
            return output

    class ScalarLoop(torch.nn.Module):
        """ScalarLoop module"""

        def forward(self, inp):
            """forward"""
            a = 0
            # Trip count depends on the runtime input size.
            for i in range(inp.size(0)):
                b = i * i
                b = b + 1
                a += b
            if a != 0:
                a += 1
            else:
                a += 2
            return a

    class SimpleLoop(torch.nn.Module):
        """SimpleLoop module: accumulate the input over a dynamic loop."""

        def forward(self, inp):
            a = inp
            for _ in range(inp.size(0)):
                b = a * 2.0
                c = a + b
                a += c
            return a

    class LoopWithIf(torch.nn.Module):
        """LoopWithIf module"""

        def forward(self, inp):
            a = inp
            for _ in range(inp.size(0)):
                b = a * 2.0
                b = a + b
                if b.sum() > 0.0:
                    a += b
                else:
                    a -= b
            return a

    class NestedLoop(torch.nn.Module):
        """NestedLoop module: two nested loops with dynamic trip counts."""

        def forward(self, inp):
            a = inp
            for i in range(inp.size(0)):
                b = a * float(i)
                for j in range(inp.size(1)):
                    a += b * float(j)
            return a

    class SimpleScalarWhileLoop(torch.nn.Module):
        """SimpleScalarWhileLoop module"""

        def forward(self, inp):
            """forward"""
            a = 1
            i = 0
            while i <= inp.size(0):
                a += i
                i += 2
            i = 0
            # also test constant init cond
            while i < 10:
                a += i
                i += 3
            return a

    class SimpleWhileLoop(torch.nn.Module):
        """SimpleWhileLoop module: while loop bounded by the input size."""

        def forward(self, inp):
            a = inp
            i = 0
            while i < inp.size(0):
                a += a * float(i) * 2.0
                i += 1
            return a

    models = [
        SimpleIf(10, 20),
        NestedIf(10, 20),
        ScalarLoop(),
        SimpleLoop(),
        LoopWithIf(),
        SimpleScalarWhileLoop(),
        SimpleWhileLoop(),
        NestedLoop(),
    ]
    for pt_model in models:
        verify_script_model(pt_model.eval(), [(10, 20)], _get_default_vm_targets())
@tvm.testing.uses_gpu
def test_simple_rnn():
    """Verify a model mixing torch.jit tracing and scripting (RNN loop)."""
    # The mixed tracing and scripting example from
    # https://pytorch.org/tutorials/beginner/Intro_to_TorchScript_tutorial.html#mixing-scripting-and-tracing
    class DecisionGate(torch.nn.Module):
        # Data-dependent branch: handled by scripting of the enclosing loop.
        def forward(self, x):
            if x.sum() > 0:
                return x
            else:
                return -x

    class Cell(torch.nn.Module):
        def __init__(self, dg):
            super().__init__()
            self.dg = dg
            self.linear = torch.nn.Linear(4, 4)

        def forward(self, x, h):
            new_h = torch.tanh(self.dg(self.linear(x)) + h)
            return new_h, new_h

    class RNNLoop(torch.nn.Module):
        """Pytorch RNNLoop module"""

        def __init__(self):
            super().__init__()
            x = torch.rand(10, 4, dtype=torch.float)
            h = torch.rand(10, 4, dtype=torch.float)
            # The cell is traced once here; the timestep loop in forward()
            # is scripted by verify_script_model.
            self.cell = torch.jit.trace(Cell(DecisionGate()), (x, h))

        def forward(self, xs):
            h = torch.zeros(10, 4, dtype=torch.float)
            y = torch.zeros(10, 4, dtype=torch.float)
            for i in range(xs.size(0)):
                y, h = self.cell(xs[i], h)
            return y

    verify_script_model(RNNLoop().eval(), [(10, 10, 4)], _get_default_vm_targets())
@tvm.testing.uses_gpu
def test_forward_reduce_sum():
    """Check conversion of aten::sum with various dim/keepdim combinations."""
    torch.set_grad_enabled(False)

    class ReduceSum(Module):
        def __init__(self, **sum_kwargs):
            super().__init__()
            self.sum_kwargs = sum_kwargs

        def forward(self, *args):
            return args[0].sum(**self.sum_kwargs)

    data = torch.rand([1, 3, 10, 10]).float()
    for kwargs in (
        dict(dim=1),
        dict(dim=1, keepdim=False),
        dict(dim=2, keepdim=True),
        dict(dim=(2, 3), keepdim=True),
        dict(dim=(2, 3), keepdim=False),
    ):
        verify_model(ReduceSum(**kwargs).float().eval(), input_data=data)
@tvm.testing.uses_gpu
def test_forward_reduce_prod():
    """Check conversion of aten::prod with various dim/keepdim combinations."""
    torch.set_grad_enabled(False)

    class ReduceProd(Module):
        def __init__(self, **prod_kwargs):
            super().__init__()
            self.prod_kwargs = prod_kwargs

        def forward(self, *args):
            return args[0].prod(**self.prod_kwargs)

    data = torch.rand([1, 3, 10, 10]).float()
    for kwargs in (
        dict(dim=1),
        dict(dim=1, keepdim=False),
        dict(dim=2, keepdim=True),
    ):
        verify_model(ReduceProd(**kwargs).float().eval(), input_data=data)
@tvm.testing.uses_gpu
def test_forward_argmin():
    """Check conversion of aten::argmin with various dim/keepdim combinations."""
    torch.set_grad_enabled(False)

    class ArgMin(Module):
        def __init__(self, **argmin_kwargs):
            super().__init__()
            self.argmin_kwargs = argmin_kwargs

        def forward(self, *args):
            return args[0].argmin(**self.argmin_kwargs)

    data = torch.rand([1, 3, 10, 10]).float()
    for kwargs in (
        dict(dim=1),
        dict(dim=1, keepdim=False),
        dict(dim=2, keepdim=True),
    ):
        verify_model(ArgMin(**kwargs).float().eval(), input_data=data)
@tvm.testing.uses_gpu
def test_forward_argmax():
    """Check conversion of aten::argmax with various dim/keepdim combinations."""
    torch.set_grad_enabled(False)

    class ArgMax(Module):
        def __init__(self, **argmax_kwargs):
            super().__init__()
            self.argmax_kwargs = argmax_kwargs

        def forward(self, *args):
            return args[0].argmax(**self.argmax_kwargs)

    data = torch.rand([1, 3, 10, 10]).float()
    for kwargs in (
        dict(dim=1),
        dict(dim=1, keepdim=False),
        dict(dim=2, keepdim=True),
    ):
        verify_model(ArgMax(**kwargs).float().eval(), input_data=data)
@tvm.testing.uses_gpu
def test_forward_std():
    """Check conversion of aten::std over dim/keepdim/unbiased combinations."""
    torch.set_grad_enabled(False)

    class Std(Module):
        def __init__(self, **std_kwargs):
            super().__init__()
            self.std_kwargs = std_kwargs

        def forward(self, *args):
            return args[0].std(**self.std_kwargs)

    data = torch.rand([1, 3, 10, 10]).float()
    for kwargs in (
        dict(dim=1, unbiased=False),
        dict(dim=1, keepdim=False, unbiased=False),
        dict(dim=2, keepdim=True, unbiased=False),
        dict(dim=(2, 3), keepdim=True, unbiased=False),
        dict(dim=(2, 3), keepdim=False, unbiased=False),
        dict(unbiased=False),
        dict(dim=1, keepdim=False, unbiased=True),
        dict(dim=(2, 3), keepdim=True, unbiased=True),
        dict(unbiased=True),
    ):
        verify_model(Std(**kwargs).float().eval(), input_data=data)
@tvm.testing.uses_gpu
def test_forward_var_mean():
    """Check conversion of torch.var_mean over dim/keepdim/unbiased combos."""
    torch.set_grad_enabled(False)

    class VarMean(Module):
        def __init__(self, **vm_kwargs):
            super().__init__()
            self.vm_kwargs = vm_kwargs

        def forward(self, *args):
            return torch.var_mean(args[0], **self.vm_kwargs)

    data = torch.rand([1, 3, 10, 10]).float()
    for kwargs in (
        dict(dim=1, unbiased=False),
        dict(dim=1, keepdim=False, unbiased=False),
        dict(dim=2, keepdim=True, unbiased=False),
        dict(dim=(2, 3), keepdim=True, unbiased=False),
        dict(dim=(2, 3), keepdim=False, unbiased=False),
        dict(unbiased=False),
        dict(dim=1, keepdim=False, unbiased=True),
        dict(dim=(2, 3), keepdim=True, unbiased=True),
        dict(unbiased=True),
    ):
        verify_model(VarMean(**kwargs).float().eval(), input_data=data)
@tvm.testing.uses_gpu
def test_forward_variance():
    """Check conversion of aten::var over dim/keepdim/unbiased combinations."""
    torch.set_grad_enabled(False)

    class Variance(Module):
        def __init__(self, **var_kwargs):
            super().__init__()
            self.var_kwargs = var_kwargs

        def forward(self, *args):
            return args[0].var(**self.var_kwargs)

    data = torch.rand([1, 3, 10, 10]).float()
    for kwargs in (
        dict(dim=1, unbiased=False),
        dict(dim=1, keepdim=False, unbiased=False),
        dict(dim=2, keepdim=True, unbiased=False),
        dict(dim=(2, 3), keepdim=True, unbiased=False),
        dict(dim=(2, 3), keepdim=False, unbiased=False),
        dict(unbiased=False),
        dict(dim=1, keepdim=False, unbiased=True),
        dict(dim=(2, 3), keepdim=True, unbiased=True),
        dict(unbiased=True),
    ):
        verify_model(Variance(**kwargs).float().eval(), input_data=data)
@tvm.testing.uses_gpu
def test_forward_rsub():
    """Check conversion of torch.rsub, with and without an alpha factor."""
    torch.set_grad_enabled(False)

    class Rsub1(Module):
        def forward(self, *args):
            return torch.rsub(args[0], args[1])

    class Rsub2(Module):
        def forward(self, *args):
            return torch.rsub(args[0], args[1], alpha=0.5)

    d1 = torch.rand([1, 3]).float()
    d2 = torch.rand([1, 3]).float()
    d3 = torch.rand([1, 3]).int()
    # float/float and float/int operand combinations
    for mod in (Rsub1(), Rsub2()):
        verify_model(mod.float().eval(), input_data=[d1, d2])
        verify_model(mod.float().eval(), input_data=[d1, d3])
    # half-precision variants (d3 stays int)
    d1 = torch.rand([1, 3]).half()
    d2 = torch.rand([1, 3]).half()
    for mod in (Rsub1(), Rsub2()):
        verify_model(mod.half().eval(), input_data=[d1, d2])
        verify_model(mod.half().eval(), input_data=[d1, d3])
@tvm.testing.uses_gpu
def test_forward_embedding():
    """Check conversion of torch.nn.Embedding (dense and sparse variants)."""
    torch.set_grad_enabled(False)
    verify_model(
        torch.nn.Embedding(10, 3).float().eval(),
        input_data=torch.randint(0, 10, [2, 4]).long(),
    )
    verify_model(
        torch.nn.Embedding(4, 5, sparse=False).float().eval(),
        input_data=torch.randint(0, 4, [2, 3, 4]).long(),
    )
    verify_model(
        torch.nn.Embedding(4, 5, sparse=True).float().eval(),
        input_data=torch.randint(0, 4, [2, 3, 4]).long(),
    )
@tvm.testing.uses_gpu
def test_forward_onehot():
    """Check conversion of torch.nn.functional.one_hot."""
    torch.set_grad_enabled(False)

    class OneHot1(Module):
        def forward(self, *args):
            return torch.nn.functional.one_hot(args[0], num_classes=3)

    class OneHot2(Module):
        def forward(self, *args):
            return torch.nn.functional.one_hot(args[0], num_classes=5)

    verify_model(OneHot1().float().eval(), input_data=torch.arange(0, 5) % 3)
    verify_model(OneHot2().float().eval(), input_data=torch.arange(0, 5) % 4)
@tvm.testing.uses_gpu
def test_forward_isfinite():
    """Check conversion of torch.isfinite on inf/nan inputs."""
    torch.set_grad_enabled(False)

    class IsFinite1(Module):
        def forward(self, *args):
            return torch.isfinite(args[0])

    data = torch.tensor([1, float("inf"), 2, float("-inf"), float("nan")]).float()
    verify_model(IsFinite1().float().eval(), input_data=data)
@tvm.testing.uses_gpu
def test_forward_isnan():
    """Check conversion of torch.isnan on inf/nan inputs."""
    torch.set_grad_enabled(False)

    class IsNan1(Module):
        def forward(self, *args):
            return torch.isnan(args[0])

    data = torch.tensor([1, float("inf"), 2, float("-inf"), float("nan")]).float()
    verify_model(IsNan1().float().eval(), input_data=data)
@tvm.testing.uses_gpu
def test_forward_isinf():
    """Check conversion of torch.isinf on inf/nan inputs."""
    torch.set_grad_enabled(False)

    class IsInf1(Module):
        def forward(self, *args):
            return torch.isinf(args[0])

    data = torch.tensor([1, float("inf"), 2, float("-inf"), float("nan")]).float()
    verify_model(IsInf1().float().eval(), input_data=data)
@tvm.testing.uses_gpu
def test_forward_clamp():
    """Check conversion of torch.clamp / clamp_min / clamp_max."""
    torch.set_grad_enabled(False)

    class Clamp1(Module):
        def forward(self, *args):
            return torch.clamp(args[0], min=-0.5, max=0.5)

    class Clamp2(Module):
        def forward(self, *args):
            return torch.clamp(args[0], min=-0.3)

    class Clamp3(Module):
        def forward(self, *args):
            return torch.clamp(args[0], max=1.0)

    class Clamp_MinExpr_MaxConstant(Module):
        def forward(self, *args):
            # min is computed from the input shape; max is a tensor dimension.
            h, w = args[0].shape[2:]
            amin = h / 100.0
            return torch.clamp(args[0], min=amin, max=w)

    data = torch.rand([1, 3, 10, 10]).float()
    for mod in (Clamp1(), Clamp2(), Clamp3(), Clamp_MinExpr_MaxConstant()):
        verify_model(mod.float().eval(), input_data=data)
    verify_model(lambda inp: torch.clamp_min(inp, 0.5), data)
    # clamp_max on an integer tensor exercises the non-float path.
    inp_uint8 = torch.randint(low=0, high=256, size=(100, 100), dtype=torch.uint8)
    verify_model(lambda inp: torch.clamp_max(inp, 125), inp_uint8)
@tvm.testing.uses_gpu
def test_forward_clamp_():
    """Check conversion of the in-place torch.clamp_."""
    torch.set_grad_enabled(False)

    class ClampInPlace(Module):
        def __init__(self, i_min, i_max):
            super().__init__()
            self.min = i_min
            self.max = i_max

        def forward(self, *args):
            return torch.clamp_(args[0], self.min, self.max)

    for shape, lo, hi in (([4, 8], 0.1, 0.9), ([7, 6], 0.2, 0.5)):
        data = torch.rand(shape).float()
        verify_model(ClampInPlace(lo, hi).float().eval(), input_data=data)
@tvm.testing.uses_gpu
def test_forward_ones():
    """Check conversion of torch.ones with a fixed shape (no inputs)."""
    torch.set_grad_enabled(False)

    class Ones1(Module):
        def forward(self, *args):
            return torch.ones(2, 3)

    verify_model(Ones1().float().eval(), input_data=[])
@tvm.testing.uses_gpu
def test_forward_ones_like():
    """Check conversion of torch.ones_like, including dtype overrides."""
    torch.set_grad_enabled(False)

    class OnesLike1(Module):
        def forward(self, *args):
            return torch.ones_like(args[0])

    class OnesLike2(Module):
        def forward(self, *args):
            return torch.ones_like(args[0], dtype=torch.int8)

    class OnesLike3(Module):
        def forward(self, *args):
            return torch.ones_like(args[0], dtype=torch.float)

    data = torch.rand([1, 3, 10, 10]).float()
    for mod in (OnesLike1(), OnesLike2(), OnesLike3()):
        verify_model(mod.float().eval(), input_data=data)
@tvm.testing.uses_gpu
def test_forward_new_ones():
    """Check conversion of Tensor.new_ones."""
    torch.set_grad_enabled(False)

    def test_func(input_tensor):
        return input_tensor.new_ones([3, 10, 10])

    verify_model_with_input(test_func, [torch.rand([1, 3, 10, 10]).float()])
@tvm.testing.uses_gpu
def test_forward_zeros():
    """Check conversion of torch.zeros with a fixed shape (no inputs)."""
    torch.set_grad_enabled(False)

    class Zeros1(Module):
        def forward(self, *args):
            return torch.zeros(2, 3)

    verify_model(Zeros1().float().eval(), input_data=[])
def test_forward_zero_():
    """Test conversion of the in-place Tensor.zero_ op."""

    def fill_zero(x):
        return x.zero_()

    verify_model_with_input(fill_zero, [torch.rand([1, 3, 10, 10]).float()])
@tvm.testing.uses_gpu
def test_forward_zeros_like():
    """Test conversion of torch.zeros_like, with and without an explicit dtype."""
    torch.set_grad_enabled(False)

    class ZerosLikeDefault(Module):
        def forward(self, *args):
            return torch.zeros_like(args[0])

    class ZerosLikeInt32(Module):
        def forward(self, *args):
            return torch.zeros_like(args[0], dtype=torch.int32)

    class ZerosLikeFloat(Module):
        def forward(self, *args):
            return torch.zeros_like(args[0], dtype=torch.float)

    data = torch.rand([1, 3, 10, 10]).float()
    for model in (ZerosLikeDefault(), ZerosLikeInt32(), ZerosLikeFloat()):
        verify_model(model.float().eval(), input_data=data)
def test_forward_new_zeros():
    """Test conversion of Tensor.new_zeros."""

    def make_new_zeros(x):
        # new_zeros inherits dtype/device from the source tensor
        return x.new_zeros((2, 3))

    verify_model_with_input(make_new_zeros, [torch.rand([1, 3, 10, 10]).float()])
@tvm.testing.uses_gpu
def test_forward_full():
    """Test conversion of torch.full with constant shape and fill value."""
    torch.set_grad_enabled(False)

    class FullFloat(Module):
        def forward(self, *args):
            return torch.full((2, 3), 3.14)

    class FullInt(Module):
        def forward(self, *args):
            # float fill value with an integer output dtype
            return torch.full((1, 2, 3), 1.0, dtype=torch.int32)

    for model in (FullFloat(), FullInt()):
        verify_model(model.float().eval(), input_data=[])
@tvm.testing.uses_gpu
def test_forward_full_like():
    """Test conversion of torch.full_like, with and without an explicit dtype."""
    torch.set_grad_enabled(False)

    class FullLikeDefault(Module):
        def forward(self, *args):
            return torch.full_like(args[0], 3.14)

    class FullLikeInt32(Module):
        def forward(self, *args):
            return torch.full_like(args[0], 22.22, dtype=torch.int32)

    class FullLikeFloat(Module):
        def forward(self, *args):
            return torch.full_like(args[0], 1.4, dtype=torch.float)

    data = torch.rand([1, 3, 10, 10]).float()
    for model in (FullLikeDefault(), FullLikeInt32(), FullLikeFloat()):
        verify_model(model.float().eval(), input_data=data)
@tvm.testing.uses_gpu
def test_forward_new_full():
    """Test conversion of Tensor.new_full."""
    torch.set_grad_enabled(False)

    def make_new_full(input_tensor):
        # new_full inherits dtype/device from the source tensor
        return input_tensor.new_full([2, 3], 1)

    verify_model_with_input(make_new_full, [torch.rand([1, 3, 10, 10]).float()])
def test_forward_fill_():
    """Test conversion of the in-place Tensor.fill_ op with a constant value."""

    def fill_three(x):
        return x.fill_(3)

    verify_model_with_input(fill_three, [torch.rand([1, 3, 10, 10]).float()])
def test_forward_fill_with_div():
    """Test Tensor.fill_ whose fill value comes from a computed scalar tensor."""

    def fill_with_quotient(x):
        quotient = torch.div(torch.tensor(6.0), torch.tensor(2.0))
        return x.fill_(quotient)

    verify_model_with_input(fill_with_quotient, [torch.rand([1, 3, 10, 10]).float()])
@tvm.testing.uses_gpu
def test_forward_linspace():
    """Test conversion of torch.linspace over argument styles, steps, and dtypes."""
    torch.set_grad_enabled(False)

    class LinspaceManySteps(Module):
        def forward(self, *args):
            return torch.linspace(5, 10, steps=100)

    class LinspaceNegRange(Module):
        def forward(self, *args):
            return torch.linspace(-10, 10, steps=5)

    class LinspaceKeywordArgs(Module):
        def forward(self, *args):
            return torch.linspace(start=-10, end=10, steps=5)

    class LinspaceSingleStep(Module):
        def forward(self, *args):
            return torch.linspace(start=-10, end=10, steps=1)

    class LinspaceInt32(Module):
        def forward(self, *args):
            return torch.linspace(1, 2, 1, dtype=torch.int32)

    class LinspaceTwoSteps(Module):
        def forward(self, *args):
            return torch.linspace(start=1, end=6, steps=2)

    class LinspaceFloat32(Module):
        def forward(self, *args):
            return torch.linspace(1, 4, steps=100, dtype=torch.float32)

    class LinspaceInt16(Module):
        def forward(self, *args):
            return torch.linspace(1, 2, 1, dtype=torch.int16)

    models = [
        LinspaceManySteps(),
        LinspaceNegRange(),
        LinspaceKeywordArgs(),
        LinspaceSingleStep(),
        LinspaceInt32(),
        LinspaceTwoSteps(),
        LinspaceFloat32(),
        LinspaceInt16(),
    ]
    for model in models:
        verify_model(model.float().eval())
@tvm.testing.uses_gpu
def test_forward_take():
    """Test conversion of torch.take with constant and runtime index tensors."""
    torch.set_grad_enabled(False)

    class TakeConstIndices(Module):
        def forward(self, *args):
            indices = torch.tensor([[0, 0], [1, 0]])
            if torch.cuda.is_available():
                indices = indices.cuda()
            return torch.take(args[0], indices)

    class TakeRuntimeIndices(Module):
        def forward(self, *args):
            return torch.take(args[0], args[1])

    data = torch.tensor([[1, 2], [3, 4]])
    verify_model(TakeConstIndices().float().eval(), input_data=data)
    verify_model(
        TakeRuntimeIndices().float().eval(), input_data=[data, torch.tensor([[0, 0], [1, 0]])]
    )
    # a negative index exercises wrap-around indexing
    verify_model(TakeRuntimeIndices().float().eval(), input_data=[data, torch.tensor([0, -1])])
@tvm.testing.uses_gpu
def test_forward_topk():
    """Test conversion of torch.topk with various dim/largest/sorted settings."""
    torch.set_grad_enabled(False)

    class TopkDefault(Module):
        def forward(self, *args):
            return torch.topk(args[0], k=3)

    class TopkNegDim(Module):
        def forward(self, *args):
            return torch.topk(args[0], k=3, dim=-2)

    class TopkLastDim(Module):
        def forward(self, *args):
            return torch.topk(args[0], k=3, dim=3)

    class TopkLargest(Module):
        def forward(self, *args):
            return torch.topk(args[0], k=3, largest=True)

    class TopkSmallest(Module):
        def forward(self, *args):
            return torch.topk(args[0], k=3, largest=False)

    class TopkSorted(Module):
        def forward(self, *args):
            return torch.topk(args[0], k=3, sorted=True)

    data = torch.rand([1, 3, 10, 10]).float()
    models = (
        TopkDefault(),
        TopkNegDim(),
        TopkLastDim(),
        TopkLargest(),
        TopkSmallest(),
        TopkSorted(),
    )
    for model in models:
        verify_model(model.float().eval(), input_data=data)
@tvm.testing.uses_gpu
def test_forward_logical_not():
    """Test conversion of torch.logical_not over bool, int, and float inputs."""
    torch.set_grad_enabled(False)

    class LogicalNot(Module):
        def forward(self, *args):
            return torch.logical_not(args[0])

    test_inputs = [
        torch.tensor([True, False]),
        torch.tensor([0, 1, -10], dtype=torch.int8),
        torch.tensor([0.0, 1.5, -10.0], dtype=torch.double),
        torch.tensor([0.0, 1.0, -10.0], dtype=torch.int32),
    ]
    for data in test_inputs:
        verify_model(LogicalNot().float().eval(), input_data=data)
@tvm.testing.uses_gpu
def test_forward_bitwise_not():
    """Test conversion of torch.bitwise_not over int and bool inputs."""
    torch.set_grad_enabled(False)

    class BitwiseNot(Module):
        def forward(self, *args):
            return torch.bitwise_not(args[0])

    test_inputs = [
        torch.tensor([0, 1, -10], dtype=torch.int8),
        torch.tensor([0.0, 1.0, -10.0], dtype=torch.int32),
        torch.tensor([True, False]),
    ]
    for data in test_inputs:
        verify_model(BitwiseNot().float().eval(), input_data=data)
@tvm.testing.uses_gpu
def test_forward_bitwise_xor():
    """Test conversion of torch.bitwise_xor with runtime and constant rhs."""
    torch.set_grad_enabled(False)

    class BitwiseXorTwoInputs(Module):
        def forward(self, *args):
            return torch.bitwise_xor(args[0], args[1])

    class BitwiseXorConstRhs(Module):
        def forward(self, *args):
            rhs = torch.tensor([1, 0, 3], dtype=torch.int8)
            if torch.cuda.is_available():
                rhs = rhs.cuda()
            return torch.bitwise_xor(args[0], rhs)

    lhs = torch.tensor([-1, -2, 3], dtype=torch.int8)
    rhs = torch.tensor([1, 0, 3], dtype=torch.int8)
    verify_model(BitwiseXorTwoInputs().float().eval(), input_data=[lhs, rhs])

    lhs = torch.tensor([True, True, False])
    rhs = torch.tensor([False, True, False])
    verify_model(BitwiseXorTwoInputs().float().eval(), input_data=[lhs, rhs])

    lhs = torch.tensor([-1, -2, 3], dtype=torch.int8)
    verify_model(BitwiseXorConstRhs().float().eval(), input_data=[lhs])
@tvm.testing.uses_gpu
def test_forward_logical_xor():
    """Test conversion of torch.logical_xor with runtime and constant rhs."""
    torch.set_grad_enabled(False)

    class LogicalXorTwoInputs(Module):
        def forward(self, *args):
            return torch.logical_xor(args[0], args[1])

    class LogicalXorConstRhs(Module):
        def forward(self, *args):
            rhs = torch.tensor([1, 0, 3], dtype=torch.int8)
            if torch.cuda.is_available():
                rhs = rhs.cuda()
            return torch.logical_xor(args[0], rhs)

    lhs = torch.tensor([-1, -2, 3], dtype=torch.int8)
    rhs = torch.tensor([1, 0, 3], dtype=torch.int8)
    verify_model(LogicalXorTwoInputs().float().eval(), input_data=[lhs, rhs])

    lhs = torch.tensor([True, True, False])
    rhs = torch.tensor([False, True, False])
    verify_model(LogicalXorTwoInputs().float().eval(), input_data=[lhs, rhs])

    lhs = torch.tensor([-1, -2, 3], dtype=torch.int8)
    verify_model(LogicalXorConstRhs().float().eval(), input_data=[lhs])
@tvm.testing.uses_gpu
def test_forward_unary():
    """Test conversion of elementwise unary math ops."""
    torch.set_grad_enabled(False)

    class UnaryOp(Module):
        """Wraps a single-tensor torch function so it can be traced as a module."""

        def __init__(self, op):
            super().__init__()
            self.op = op

        def forward(self, *args):
            # the stored callable is invoked at trace time, recording the
            # corresponding aten op into the graph
            return self.op(args[0])

    # same op coverage and verification order as the previous per-op modules
    unary_ops = [
        torch.square,
        torch.sqrt,
        torch.rsqrt,
        torch.ceil,
        torch.floor,
        torch.round,
        torch.cos,
        torch.cosh,
        torch.sin,
        torch.sinh,
        torch.tan,
        torch.tanh,
        torch.acos,
        torch.asin,
        torch.atan,
        torch.log,
        torch.log2,
        torch.log10,
        torch.log1p,
        torch.exp,
        torch.erf,
        torch.trunc,
        torch.sign,
        torch.neg,
    ]
    input_data = torch.rand([1, 3, 10, 10]).float()
    for op in unary_ops:
        verify_model(UnaryOp(op).float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_tril():
    """Test conversion of torch.tril for default, positive, and negative diagonals."""
    torch.set_grad_enabled(False)

    def tril_default(input_data):
        return torch.tril(input_data)

    def tril_diag_pos(input_data):
        return torch.tril(input_data, 1)

    def tril_diag_neg(input_data):
        return torch.tril(input_data, -1)

    # exercise both a square 2D matrix and a batched 4D tensor
    for fn in (tril_default, tril_diag_pos, tril_diag_neg):
        verify_model(fn, input_data=torch.rand([3, 3]).float())
        verify_model(fn, input_data=torch.rand([1, 3, 10, 10]).float())
@tvm.testing.uses_gpu
def test_forward_triu():
    """Test conversion of torch.triu for default, positive, and negative diagonals."""
    torch.set_grad_enabled(False)

    def triu_default(input_data):
        return torch.triu(input_data)

    def triu_diag_pos(input_data):
        return torch.triu(input_data, 1)

    def triu_diag_neg(input_data):
        return torch.triu(input_data, -1)

    # exercise both a square 2D matrix and a batched 4D tensor
    for fn in (triu_default, triu_diag_pos, triu_diag_neg):
        verify_model(fn, input_data=torch.rand([3, 3]).float())
        verify_model(fn, input_data=torch.rand([1, 3, 10, 10]).float())
@tvm.testing.uses_gpu
def test_forward_where():
    """Test conversion of torch.where (three-argument and single-argument forms)."""
    torch.set_grad_enabled(False)

    class WhereConstBranch(Module):
        def forward(self, *args):
            y = torch.ones([3, 2])
            if torch.cuda.is_available():
                y = y.cuda()
            return torch.where(args[0] > 0, args[0], y)

    class WhereTwoInputs(Module):
        def forward(self, *args):
            return torch.where(args[0] > 0, args[0], args[1])

    class WhereSingleArg(Module):
        def forward(self, *args):
            return torch.where(args[0])[0]

    x = torch.rand([3, 2]).float()
    verify_model(WhereConstBranch(), input_data=[x])
    verify_model(WhereTwoInputs(), input_data=[x, torch.rand([3, 2])])

    # the single-argument form is equivalent to torch.nonzero(..., as_tuple=True)
    inp = torch.rand([10])
    inp[3:8] = 0
    verify_trace_model(WhereSingleArg(), [inp], ["llvm"])
@tvm.testing.uses_gpu
def test_forward_addcdiv():
    """Test conversion of torch.addcdiv (legacy positional `value` signature)."""
    torch.set_grad_enabled(False)

    class AddcdivConstTensors(Module):
        def forward(self, *args):
            t1 = torch.ones([3, 1])
            t2 = torch.ones([1, 3])
            if torch.cuda.is_available():
                t1 = t1.cuda()
                t2 = t2.cuda()
            return torch.addcdiv(args[0], 0.1, t1, t2)

    class AddcdivRuntimeTensors(Module):
        def forward(self, *args):
            return torch.addcdiv(args[0], 0.5, args[1], args[2])

    input_data = torch.rand([1, 3]).float()
    verify_model(AddcdivConstTensors().float().eval(), input_data=input_data)

    t1 = torch.rand([3, 1]).float()
    t2 = torch.rand([1, 3]).float()
    verify_model(AddcdivRuntimeTensors().float().eval(), input_data=[input_data, t1, t2])
@tvm.testing.uses_gpu
def test_forward_addcmul():
    """Test conversion of torch.addcmul (legacy positional `value` signature)."""
    torch.set_grad_enabled(False)

    class AddcmulConstTensors(Module):
        def forward(self, *args):
            t1 = torch.ones([3, 1])
            t2 = torch.ones([1, 3])
            if torch.cuda.is_available():
                t1 = t1.cuda()
                t2 = t2.cuda()
            return torch.addcmul(args[0], 0.1, t1, t2)

    class AddcmulRuntimeTensors(Module):
        def forward(self, *args):
            return torch.addcmul(args[0], 0.5, args[1], args[2])

    input_data = torch.rand([1, 3]).float()
    verify_model(AddcmulConstTensors().float().eval(), input_data=input_data)

    t1 = torch.rand([3, 1]).float()
    t2 = torch.rand([1, 3]).float()
    verify_model(AddcmulRuntimeTensors().float().eval(), input_data=[input_data, t1, t2])
@tvm.testing.uses_gpu
def test_forward_true_divide():
    """Test conversion of torch.true_divide with tensor and scalar divisors."""
    if package_version.parse(torch.__version__) < package_version.parse("1.5.0"):
        # true_divide is unavailable before torch 1.5
        return
    torch.set_grad_enabled(False)

    class TrueDivide(Module):
        def forward(self, *args):
            return torch.true_divide(args[0], args[1])

    dividend = torch.rand([5, 3]).float()
    # the divisor may be a full tensor or a 0-d scalar tensor
    divisors = (torch.rand([5, 3]).float() + 0.5, torch.tensor(1.0, dtype=torch.float32))
    for divisor in divisors:
        verify_model(
            TrueDivide().float().eval(),
            input_data=[dividend, divisor],
            atol=1e-4,
            rtol=1e-4,
        )
@tvm.testing.uses_gpu
def test_forward_is_floating_point():
    """Test conversion of torch.is_floating_point over many input dtypes."""
    torch.set_grad_enabled(False)

    class IsFloatingPoint(Module):
        def forward(self, arg):
            # produces a Bool output, which `torch.jit.trace` cannot accept,
            # so `torch.jit.script` is used (via verify_script_model)
            return torch.is_floating_point(arg)

    targets = _get_default_vm_targets()
    dtypes = [
        torch.float64,
        torch.float32,
        torch.float16,
        # todo(dvisnty): add torch.bfloat16 when full bfloat16 support is implemented
        torch.int64,
        torch.int32,
        torch.int16,
        torch.int8,
        torch.uint8,
    ]
    for dtype in dtypes:
        verify_script_model(IsFloatingPoint(), [(1, 1)], targets, idtype=dtype)
@tvm.testing.uses_gpu
def test_forward_traced_function():
    """Test conversion of a plain Python function (traced, not a Module)."""

    def add(t1, t2):
        return t1 + t2

    verify_model(add, input_data=[torch.randn(3, 4), torch.randn(3, 4)])
@tvm.testing.uses_gpu
def test_forward_dtypes():
    """Test conversion across int/float input dtypes and integer parameters."""

    def scale_add(t1, t2):
        return 2.5 * t1 + t2

    for dtype in [torch.int32, torch.int64, torch.double]:
        lhs = torch.randn(3, 4).to(dtype=dtype)
        rhs = torch.randn(3, 4).to(dtype=dtype)
        verify_model(scale_add, input_data=[lhs, rhs])

    class ModuleWithIntParameters(Module):
        def __init__(self, arr):
            super().__init__()
            # non-trainable integer parameter
            self.param = torch.nn.Parameter(torch.LongTensor(arr), requires_grad=False)

        def forward(self, x):
            return x.long() + self.param

    shape = (10, 10)
    param = torch.ones(shape, dtype=torch.long)
    verify_model(ModuleWithIntParameters(param), input_data=torch.ones(shape, dtype=torch.int))
@tvm.testing.uses_gpu
def test_weight_names():
    """Check that parameter names survive the PyTorch-to-Relay conversion."""
    traced = torch.jit.trace(torch.nn.Linear(3, 4), [torch.randn(2, 3)])
    _, params = relay.frontend.from_pytorch(traced, [("input", (2, 3))])
    # compare only the final component of each (possibly dotted) key
    converted_names = {key.split(".")[-1] for key in params.keys()}
    assert converted_names == {name for name, _ in traced.named_parameters()}
@tvm.testing.uses_gpu
def test_duplicate_weight_use():
    """Test a weight tensor that is consumed by two different ops."""
    # The network is nonsensical as a model; the pattern originally surfaced
    # with shared input/output embeddings in BERT, but this reproduces it faster.
    class SharedWeight(Module):
        def __init__(self):
            super().__init__()
            self.lin = torch.nn.Linear(5, 3)

        def forward(self, x):
            out = self.lin(x)
            # reuse the linear layer's weight in a second matmul
            return out @ self.lin.weight

    verify_model(SharedWeight(), input_data=[torch.randn(5, 5)])
@tvm.testing.uses_gpu
def test_forward_matmul():
    """Test conversion of torch.matmul across broadcasting/batching cases."""
    torch.set_grad_enabled(False)

    class MatMul(Module):
        def forward(self, *args):
            return torch.matmul(args[0], args[1])

    # (lhs shape, rhs shape, relay op the conversion is expected to emit)
    cases = [
        # vector x vector - 1D x 1D
        ((4,), (4,), "nn.dense"),
        # vector x matrix - 1D x 2D
        ((4,), (4, 3), "nn.dense"),
        # vector x batched_matrix - 1D x ND
        ((5,), (2, 3, 5, 4), "nn.batch_matmul"),
        # matrix x vector - 2D - 1D
        ((3, 4), (4,), "nn.dense"),
        # matrix x matrix - 2D x 2D
        ((10, 4), (4, 10), "nn.dense"),
        # broadcasted matrix x batched matrix - 2D x ND
        ((10, 4), (2, 3, 4, 5), "nn.batch_matmul"),
        # batched matrix x vector - ND x 1D
        ((2, 3, 4, 5), (5,), "nn.batch_matmul"),
        # batched matrix x broadcasted matrix - ND x 2D
        ((10, 3, 4), (4, 5), "nn.batch_matmul"),
        # batched matrix x batched matrix - ND x ND
        ((2, 10, 3, 4), (2, 10, 4, 5), "nn.batch_matmul"),
        # batched matrix x broadcasted matrix - ND x ND
        ((2, 5, 3, 4), (2, 1, 4, 5), "nn.batch_matmul"),
        # broadcasted matrix x batched matrix - ND x ND
        ((2, 1, 5, 4), (2, 5, 4, 3), "nn.batch_matmul"),
        # broadcasted matrix x broadcasted matrix - ND x ND
        ((3, 2, 3, 1, 5, 4), (2, 1, 5, 4, 3), "nn.batch_matmul"),
    ]
    for lhs_shape, rhs_shape, expected_op in cases:
        verify_model(
            MatMul().float().eval(),
            input_data=[torch.randn(*lhs_shape), torch.randn(*rhs_shape)],
            expected_ops=[expected_op],
        )
@pytest.mark.skip(reason="unsupported op aten::lift_fresh")
def test_forward_index():
    """Test conversion of advanced (fancy) tensor indexing."""
    torch.set_grad_enabled(False)
    input_shape = [3, 4, 5, 6]

    class IndexMixed(Module):
        def forward(self, x):
            return x[[0, 1], [0, 2], :2, 4]

    class IndexFull(Module):
        def forward(self, x):
            return x[[0], [1, 2, 3, 0], [3, 1, 2, 2], [4, 2, 1, 0]]

    class IndexNewAxis(Module):
        def forward(self, x):
            return x[None, [2, 2]]

    class IndexNewAxisMixed(Module):
        def forward(self, x):
            return x[None, [0, 1, 2], 1, [2, 3, 4]]

    class IndexBroadcast(Module):
        def forward(self, x):
            return x[None, [0, 0], None, np.array([[0], [1], [2]]), None]

    class IndexBroadcastLeading(Module):
        def forward(self, x):
            return x[None, None, [0, 0], np.array([[0], [1], [2]]), None]

    class IndexScalarMixed(Module):
        def forward(self, x):
            return x[None, 1, None, [1, 2, 3]]

    models = (
        IndexMixed(),
        IndexFull(),
        IndexNewAxis(),
        IndexNewAxisMixed(),
        IndexBroadcast(),
        IndexBroadcastLeading(),
        IndexScalarMixed(),
    )
    for model in models:
        verify_model(model.eval(), input_data=torch.rand(input_shape).float())

    def make_bool_mask_fn():
        return lambda data, mask: data[0, mask]

    data = torch.tensor([[1, 2, 3], [4, 5, 6]])
    mask = torch.tensor([True, True, False])
    verify_trace_model(make_bool_mask_fn(), [data, mask], ["llvm", "cuda"])
def test_logsumexp():
    """Test conversion of torch.logsumexp, including double-precision inputs."""

    class Logsumexp(Module):
        def __init__(self, dim, keepdim=False):
            super().__init__()
            self.dim = dim
            self.keepdim = keepdim

        def forward(self, x):
            return torch.logsumexp(x, self.dim, self.keepdim)

    input_data = torch.rand((100, 100))
    verify_model(Logsumexp(0), input_data=input_data)
    verify_model(Logsumexp(0, keepdim=True), input_data=input_data)
    # also exercise float64 inputs
    verify_model(Logsumexp(1, keepdim=True), input_data=input_data.double())
def test_stack():
    """Test conversion of torch.stack over positive and negative axes."""

    class Stack(torch.nn.Module):
        def __init__(self, axis=0):
            super().__init__()
            self.axis = axis

        def forward(self, x):
            return torch.stack((x, x), dim=self.axis)

    inp = torch.randn(8, 8, 8)
    # axis 3 and -4 are the extreme valid positions for the new dimension
    for axis in (0, -1, 3, -4):
        verify_model(Stack(axis=axis), input_data=inp)
def test_stack_dynamic():
    """Test torch.stack over a list of tensors built in a scripted loop."""
    class Stack(torch.nn.Module):
        def forward(self, x):
            tensor_list = []
            for i in range(x.size(0)):
                # this is a workaround to avoid generating impure aten::append op
                tensor_list += [x[i]]
            # relay tensor array only supports stacking on the first axis
            return torch.stack(tensor_list, dim=0)
    verify_script_model(Stack(), [(8, 8, 8)], _get_default_vm_targets())
def test_forward_unbind():
    """Test conversion of torch.unbind along each axis of a 3D tensor."""

    class Unbind(torch.nn.Module):
        def __init__(self, axis=0):
            super().__init__()
            self.axis = axis

        def forward(self, x):
            return torch.unbind(x, self.axis)

    inp = torch.randn(8, 8, 8)
    for axis in (0, 1, 2):
        verify_model(Unbind(axis), input_data=inp)
def test_forward_nonzero():
    """Test conversion of torch.nonzero (as_tuple=False)."""

    class Nonzero(Module):
        def __init__(self, as_tuple=False):
            super().__init__()
            self.as_tuple = as_tuple

        def forward(self, data):
            return torch.nonzero(data, as_tuple=self.as_tuple)

    values = np.array([[0, 1, 0], [2, 0, 9], [-1, -1, 0]], dtype="float32")
    verify_trace_model(Nonzero(), [torch.Tensor(values)], ["llvm"])
def test_forward_scatter():
    """Test conversion of torch.scatter and torch.scatter_add."""
    # `dim` is captured in a closure because integer arguments cannot be traced
    def make_scatter(dim):
        return lambda data, index, src: torch.scatter(data, dim=dim, index=index, src=src)

    def make_scatter_add(dim):
        return lambda data, index, src: torch.scatter_add(data, dim=dim, index=index, src=src)

    targets = ["llvm", "cuda"]

    in_data = torch.zeros(3, 5)
    in_index = torch.tensor([[0, 1, 2, 0, 0], [2, 0, 0, 1, 2]])
    in_src = torch.rand(2, 5)
    verify_trace_model(make_scatter(0), [in_data, in_index, in_src], targets)
    verify_trace_model(make_scatter_add(0), [in_data, in_index, in_src], targets)

    in_data = torch.zeros(2, 4)
    in_index = torch.tensor([[2], [3]])
    in_src = torch.rand(2, 1)
    verify_trace_model(make_scatter(1), [in_data, in_index, in_src], targets)
    verify_trace_model(make_scatter_add(1), [in_data, in_index, in_src], targets)

    # empty index tensor
    in_data = torch.zeros(2, 4)
    in_index = torch.empty((0,))
    in_src = torch.rand(2, 1)
    verify_trace_model(make_scatter(0), [in_data, in_index, in_src], targets)
    verify_trace_model(make_scatter_add(0), [in_data, in_index, in_src], targets)
    # Scalar sources are supported on the TVM side, but torch fails to trace a
    # Tuple(Tensor, Tensor, float) input, so that case is not exercised here.
    # TODO(vvchernov): clarify what a scalar source means for torch here.
def test_forward_scatter_reduce():
    """Test conversion of torch.scatter_reduce for every reduce mode."""
    # `dim`/`reduce` are captured in a closure because they cannot be traced
    def make_scatter_reduce(dim, reduce):
        return lambda data, index, src: torch.scatter_reduce(
            data, dim=dim, index=index, src=src, reduce=reduce
        )

    targets = ["llvm", "cuda"]
    reduce_modes = ["sum", "prod", "amin", "amax", "mean"]

    # shift values into [-1, 0) so prod/amin/amax see signed data
    in_data = torch.rand(3, 5) - 1
    in_index = torch.tensor([[0, 1, 2, 0, 0], [2, 0, 0, 1, 2]])
    in_src = torch.rand(2, 5) - 1
    for mode in reduce_modes:
        verify_trace_model(make_scatter_reduce(0, mode), [in_data, in_index, in_src], targets)

    in_data = torch.rand(2, 4) - 1
    in_index = torch.tensor([[2], [3]])
    in_src = torch.rand(2, 1) - 1
    for mode in reduce_modes:
        verify_trace_model(make_scatter_reduce(1, mode), [in_data, in_index, in_src], targets)
def test_forward_index_put():
    """Test conversion of torch.index_put (2D default, 3D with accumulate)."""

    def make_index_put_2d():
        # default accumulate (False)
        return lambda data, xidx, yidx, values: torch.index_put(
            data, indices=[xidx, yidx], values=values
        )

    def make_index_put_3d_accumulate():
        return lambda data, xidx, yidx, zidx, values: torch.index_put(
            data, indices=[xidx, yidx, zidx], values=values, accumulate=True
        )

    targets = ["llvm", "cuda"]

    in_data = torch.zeros((3, 5))
    xidx = torch.tensor([0, 1, 2, 2])
    yidx = torch.tensor([0, 1, 3, 4])
    values = torch.tensor([2.0, 4.0, 7.0, 9.0])
    verify_trace_model(make_index_put_2d(), [in_data, xidx, yidx, values], targets)

    in_data = torch.zeros((3, 5, 3))
    xidx = torch.tensor([0, 1, 2, 2, 0])
    yidx = torch.tensor([0, 1, 3, 4, 0])
    zidx = torch.tensor([0, 1, 1, 2, 0])
    values = torch.tensor([2.0, 4.0, 7.0, 9.0, 1.0])
    verify_trace_model(make_index_put_3d_accumulate(), [in_data, xidx, yidx, zidx, values], targets)
def test_numel():
    """Test conversion of torch.numel via scripting."""

    class Numel(Module):
        def forward(self, data):
            # the element count is wrapped into a tensor output
            return torch.tensor(torch.numel(data))

    targets = _get_default_vm_targets()
    for shape in [(1,), (3, 5), (3, 5, 8)]:
        verify_script_model(Numel(), [shape], targets)
def test_empty():
    """Test conversion of aten::empty (shape is checked, contents are not)."""

    def make_empty():
        return torch.empty([1, 3, 10, 10])

    # values are uninitialized, so only the output shape is compared
    verify_model_with_input(make_empty, [], assert_shape_only=True)
def test_empty_like():
    """Test conversion of aten::empty_like (shape is checked, contents are not)."""

    def make_empty_like(data):
        return torch.empty_like(data)

    # values are uninitialized, so only the output shape is compared
    verify_model_with_input(make_empty_like, [torch.rand([1, 3, 10, 10]).float()], assert_shape_only=True)
@tvm.testing.uses_gpu
def test_new_empty():
    """Test conversion of Tensor.new_empty, with and without an explicit dtype."""
    torch.set_grad_enabled(False)
    input_shape = [1, 3, 10, 10]

    def new_empty_default(input_tensor):
        return input_tensor.new_empty([3, 10, 10])

    def new_empty_int32(input_tensor):
        return input_tensor.new_empty([3, 10, 10], dtype=torch.int32)

    # contents are uninitialized, so only output shapes are compared
    for fn in (new_empty_default, new_empty_int32):
        verify_model_with_input(fn, [torch.rand(input_shape).float()], assert_shape_only=True)
def test_randn():
    """Test conversion of aten::randn (list-shape and varargs-shape forms)."""

    def randn_list_shape():
        return torch.randn([1, 3, 10, 10])

    def randn_varargs_shape():
        return torch.randn(1, 3, 10, 10)

    # outputs are random, so only shapes are compared and structural
    # equality between runs is not enforced
    for fn in (randn_list_shape, randn_varargs_shape):
        verify_model_with_input(fn, [], assert_shape_only=True, validate_structural_equal=False)
def test_forward_pretrained_bert_base_uncased():
    ######################################################################
    # This is an example how to run BERT models using TVM
    # ---------------------------------------------------
    """
    Refer the bert example given in https://pypi.org/project/pytorch-pretrained-bert

    # To get started, pretrained bert package needs to be installed as prerequisite.

    .. code-block:: bash

        # install bert package
        pip install pytorch_pretrained_bert==0.6.2 --user
    """
    # pylint: disable=import-outside-toplevel
    try:
        from pytorch_pretrained_bert import BertForMaskedLM, BertTokenizer
    except ImportError:
        # Optional dependency: skip the test instead of failing the suite.
        print("Torch pretrained bert package must be installed to run this script.")
        return

    ######################################################################
    # Load the tokenizer and tokenize the input
    # -----------------------------------------

    # Load pre-trained model tokenizer (vocabulary)
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

    # Tokenized input
    text = "[CLS] Who was Jim Henson ? [SEP] Jim Henson was a puppeteer [SEP]"
    tokenized_text = tokenizer.tokenize(text)

    # Mask a token that we will try to predict back with `BertForMaskedLM`
    masked_index = 8
    tokenized_text[masked_index] = "[MASK]"
    assert tokenized_text == [
        "[CLS]",
        "who",
        "was",
        "jim",
        "henson",
        "?",
        "[SEP]",
        "jim",
        "[MASK]",
        "was",
        "a",
        "puppet",
        "##eer",
        "[SEP]",
    ]

    # Convert token to vocabulary indices
    indexed_tokens = tokenizer.convert_tokens_to_ids(tokenized_text)
    # Define sentence A and B indices associated to 1st and 2nd sentences (see paper)
    segments_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1]

    # Convert inputs to PyTorch tensors
    tokens_tensor = torch.tensor([indexed_tokens])
    segments_tensors = torch.tensor([segments_ids])

    ######################################################################
    # Load a pretrained PyTorch model bert-base-uncased
    # -------------------------------------------------

    # Bert Model with a language modeling
    model = BertForMaskedLM.from_pretrained("bert-base-uncased")
    model.eval()

    ######################################################################
    # Predict all tokens with pytorch
    # -------------------------------
    with torch.no_grad():
        torch_preds = model(tokens_tensor, segments_tensors)

    ######################################################################
    # Make TorchScripted model via jit trace
    # --------------------------------------
    scripted_model = torch.jit.trace(model, (tokens_tensor, segments_tensors)).eval()

    ######################################################################
    # Import the graph to Relay
    # -------------------------
    # Convert PyTorch graph to Relay graph. The input name can be arbitrary.
    input_1 = "input_ids"
    input_2 = "input.2"
    shape_list = [(input_1, list(tokens_tensor.shape)), (input_2, list(segments_tensors.shape))]

    mod, params = relay.frontend.from_pytorch(scripted_model, shape_list)

    ######################################################################
    # Compile the model with relay
    # ----------------------------
    target = "llvm"
    with tvm.transform.PassContext(opt_level=3):
        relay_graph, relay_lib, relay_params = relay.build(mod, target=target, params=params)

    ######################################################################
    # Execute on TVM
    # --------------
    dev = tvm.device(target, 0)
    relay_model = graph_executor.create(relay_graph, relay_lib, dev)
    relay_model.set_input(**relay_params)
    relay_model.set_input(input_1, tokens_tensor)
    relay_model.set_input(input_2, segments_tensors)
    relay_model.run()
    compiled_output = relay_model.get_output(0).numpy()

    ######################################################################
    # Validate the outputs
    # --------------------
    # Compare the torch and tvm outputs
    tvm.testing.assert_allclose(torch_preds, compiled_output, rtol=1e-3, atol=1e-3)

    ######################################################################
    # Process the output
    # ------------------
    # Process the model output to token.

    # Torch output to token
    torch_pred_idx = torch.argmax(torch_preds[0, masked_index]).item()
    torch_pred_token = tokenizer.convert_ids_to_tokens([torch_pred_idx])[0]

    # TVM output to token
    tvm_pred_idx = compiled_output[0, masked_index].argmax()
    tvm_pred_token = tokenizer.convert_ids_to_tokens([tvm_pred_idx])[0]

    assert torch_pred_idx == tvm_pred_idx
    assert torch_pred_token == tvm_pred_token

    # Print the outputs
    # BUGFIX: the torch line previously printed torch_pred_idx twice instead of
    # the decoded token string.
    print(f"Torch top-1 id: {torch_pred_idx}, token: {torch_pred_token}")
    print(f"TVM   top-1 id: {tvm_pred_idx}, token: {tvm_pred_token}")
@pytest.mark.skipif(
    platform.machine() == "aarch64",
    reason="Currently failing on AArch64",
)
def test_convert_torch_script_with_input_types():
    """Check that per-input dtypes survive conversion when the model casts internally."""
    def model_fn(x, y):
        # x arrives as float32 and is cast to int32 before the add with int32 y.
        x = x.to(dtype=torch.int32)
        y = x + y
        return y
    ishape = (4, 5)
    input_x = torch.rand(ishape, dtype=torch.float32)
    input_y = torch.randint(low=0, high=100, size=ishape, dtype=torch.int32)
    inputs = [input_x, input_y]
    verify_model(model_fn, input_data=inputs)
def test_bincount():
    """Conversion test for aten::bincount, with and without weights (VM targets)."""

    def bincount_fn(x, weights=None):
        return torch.bincount(x, weights=weights)

    values = torch.randint(0, 100, (10000,), dtype=torch.int64)
    weight_vec = torch.linspace(0, 100, steps=10000)
    compile_targets = ["llvm", "cuda"]
    verify_trace_model(bincount_fn, [values], compile_targets)
    verify_trace_model(bincount_fn, [values, weight_vec], compile_targets)
def test_hard_swish():
    """Conversion test for nn.Hardswish, out-of-place and in-place, across ranks."""
    for shape in [(8,), (8, 10), (1, 1, 10)]:
        sample = torch.rand(*shape).float()
        for inplace in (False, True):
            verify_model(torch.nn.Hardswish(inplace=inplace).eval(), input_data=sample)
def test_hard_sigmoid():
    """Conversion test for nn.Hardsigmoid, out-of-place and in-place, across ranks."""
    for shape in [(8,), (8, 10), (1, 1, 10)]:
        sample = torch.rand(*shape).float()
        for inplace in (False, True):
            verify_model(torch.nn.Hardsigmoid(inplace=inplace).eval(), input_data=sample)
def test_cumsum():
    """Test for aten::cumsum over int32/int64, float32 and bool inputs."""
    def test_fn(dim, dtype=None):
        return lambda x: torch.cumsum(x, dim=dim, dtype=dtype)
    inp = torch.randint(0, 100, (10000,), dtype=torch.int32)
    verify_model(test_fn(0), [inp])
    verify_model(test_fn(0), [inp.to(torch.int64)])
    verify_model(test_fn(0, dtype=torch.int64), [inp.to(torch.int64)])
    inp = torch.randn((100, 100), dtype=torch.float32)
    verify_model(test_fn(dim=0, dtype=torch.float64), [inp])
    verify_model(test_fn(dim=1), [inp])
    # Boolean input with an explicit integer accumulation dtype.
    inp = torch.randn((100, 100), dtype=torch.float32) > 0.5
    verify_model(test_fn(dim=0, dtype=torch.int32), [inp])
def test_masked_fill():
    """Test for aten::masked_fill on float32 and float64 inputs."""
    def test_fn(x, mask):
        return torch.masked_fill(x, mask, 0.0)
    inp = torch.randn(100, 100)
    verify_model(test_fn, [inp, inp > 0.5])
    verify_model(test_fn, [inp.to(torch.float64), inp > 0.5])
@pytest.mark.skip(reason="unsupported op: 'aten::scaled_dot_product_attention', 'aten::unflatten'")
def test_transformer():
    """End-to-end conversion test for nn.Transformer (currently skipped, see reason above)."""
    model = torch.nn.Transformer(d_model=256, nhead=8, num_encoder_layers=6, num_decoder_layers=6)
    model = model.eval()
    # Shapes are (seq_len, batch, d_model) for src and tgt respectively.
    src = torch.rand((10, 32, 256))
    tgt = torch.rand((20, 32, 256))
    verify_model(model.eval(), input_data=[src, tgt])
def test_argsort():
    """Conversion test for aten::argsort over 1-D and 2-D inputs, both orders."""

    def make_argsort(axis, descending):
        return lambda x: torch.argsort(x, dim=axis, descending=descending)

    vec = torch.randn(100)
    for descending in (True, False):
        verify_model(make_argsort(0, descending), [vec])
    mat = torch.randn(100, 100)
    for axis in (0, 1):
        for descending in (True, False):
            verify_model(make_argsort(axis, descending), [mat])
def test_sort():
    """Conversion test for aten::sort with positive and negative dims."""

    def make_sort(axis, descending):
        return lambda x: torch.sort(x, dim=axis, descending=descending)

    vec = torch.randn(100)
    for axis, descending in [(0, True), (-1, False)]:
        verify_model(make_sort(axis, descending), [vec])
    mat = torch.randn(100, 100)
    for axis, descending in [(0, True), (-2, False), (1, True), (-1, False)]:
        verify_model(make_sort(axis, descending), [mat])
def test_logical_and():
    """Conversion test for aten::logical_and on int8 and bool inputs."""

    def logical_and_fn(lhs, rhs):
        return torch.logical_and(lhs, rhs)

    lhs_int = torch.tensor([0, 1, 10, 0], dtype=torch.int8)
    rhs_int = torch.tensor([4, 0, 1, 0], dtype=torch.int8)
    verify_model(logical_and_fn, [lhs_int, rhs_int])

    lhs_bool = torch.tensor([True, False, True])
    rhs_bool = torch.tensor([True, False, False])
    verify_model(logical_and_fn, [lhs_bool, rhs_bool])
def test_masked_select():
    """Conversion test for aten::masked_select (dynamic output shape → VM targets)."""

    def select_fn(x, mask):
        return torch.masked_select(x, mask)

    for shape in [(10,), (3, 4), (16, 32, 64)]:
        data = torch.randn(*shape)
        verify_trace_model(select_fn, [data, data.ge(0.5)], ["llvm", "cuda"])
def test_unique():
    """Test for aten::unique (sorted, with/without inverse indices and counts)."""
    def test_fn(is_sorted, return_inverse, return_counts):
        return lambda x: torch.unique(x, is_sorted, return_inverse, return_counts)
    in_data = torch.randint(0, 20, (10,), dtype=torch.int32)
    targets = ["llvm", "cuda"]
    verify_trace_model(test_fn(True, True, True), [in_data], targets)
    verify_trace_model(test_fn(True, False, True), [in_data], targets)
    verify_trace_model(test_fn(True, True, False), [in_data], targets)
    # NOTE(review): duplicates the (True, False, True) case two lines above —
    # possibly intended as (True, False, False); confirm the coverage intent.
    verify_trace_model(test_fn(True, False, True), [in_data], targets)
    in_data = torch.randint(0, 20, (20,), dtype=torch.int64)
    verify_trace_model(test_fn(True, True, True), [in_data], targets)
    verify_trace_model(test_fn(True, False, True), [in_data], targets)
    verify_trace_model(test_fn(True, True, False), [in_data], targets)
    # NOTE(review): same duplication for the int64 input.
    verify_trace_model(test_fn(True, False, True), [in_data], targets)
def test_forward_nll_loss():
    """Test for aten::nll_loss and aten::nll_loss2d with all reduction modes."""
    torch.set_grad_enabled(False)
    N, C = 10, 3
    predictions = torch.rand((N, C)).float()
    targets = torch.randint(0, 3, (N,))
    weights = torch.tensor([1, 2, 3]).float()
    verify_model(torch.nn.NLLLoss().eval(), input_data=[predictions, targets])
    verify_model(torch.nn.NLLLoss(weight=weights).eval(), input_data=[predictions, targets])
    verify_model(torch.nn.NLLLoss(ignore_index=1).eval(), input_data=[predictions, targets])
    verify_model(torch.nn.NLLLoss(reduction="sum").eval(), input_data=[predictions, targets])
    verify_model(torch.nn.NLLLoss(reduction="none").eval(), input_data=[predictions, targets])
    # multidimension nll loss (aten::nll_loss2d)
    d1, d2 = 2, 3
    predictions = torch.rand((N, C, d1, d2)).float()
    targets = torch.randint(0, 3, (N, d1, d2))
    verify_model(torch.nn.NLLLoss().eval(), input_data=[predictions, targets])
    verify_model(torch.nn.NLLLoss(weight=weights).eval(), input_data=[predictions, targets])
    verify_model(torch.nn.NLLLoss(ignore_index=1).eval(), input_data=[predictions, targets])
    verify_model(torch.nn.NLLLoss(reduction="sum").eval(), input_data=[predictions, targets])
    verify_model(torch.nn.NLLLoss(reduction="none").eval(), input_data=[predictions, targets])
def test_cross_entropy_loss():
    """Test for nn.CrossEntropyLoss with class-index and class-probability targets."""
    torch.set_grad_enabled(False)
    N, C = 10, 3
    # class indices
    predictions = torch.rand((N, C)).float()
    targets = torch.randint(0, 3, (N,))
    weights = torch.tensor([1, 2, 3]).float()
    verify_model(torch.nn.CrossEntropyLoss().eval(), input_data=[predictions, targets])
    verify_model(
        torch.nn.CrossEntropyLoss(weight=weights).eval(), input_data=[predictions, targets]
    )
    # class probabilities
    predictions = torch.randn(N, C).float()
    targets = torch.randn(N, C)
    verify_model(torch.nn.CrossEntropyLoss().eval(), input_data=[predictions, targets])
def test_forward_l1_loss():
    """Test for nn.L1Loss over 2-D and 4-D inputs with all reduction modes."""
    torch.set_grad_enabled(False)
    N, C = 10, 3
    predictions = torch.rand((N, C)).float()
    targets = torch.rand((N, C)).float()
    verify_model(torch.nn.L1Loss().eval(), input_data=[predictions, targets])
    verify_model(torch.nn.L1Loss(reduction="sum").eval(), input_data=[predictions, targets])
    verify_model(torch.nn.L1Loss(reduction="none").eval(), input_data=[predictions, targets])
    # multidimension l1 loss
    d1, d2 = 2, 3
    predictions = torch.rand((N, C, d1, d2)).float()
    targets = torch.rand((N, C, d1, d2)).float()
    verify_model(torch.nn.L1Loss().eval(), input_data=[predictions, targets])
    verify_model(torch.nn.L1Loss(reduction="sum").eval(), input_data=[predictions, targets])
    verify_model(torch.nn.L1Loss(reduction="none").eval(), input_data=[predictions, targets])
def test_forward_mse_loss():
    """Test for nn.MSELoss over 2-D and 4-D inputs with all reduction modes."""
    torch.set_grad_enabled(False)
    N, C = 10, 3
    predictions = torch.rand((N, C)).float()
    targets = torch.rand((N, C)).float()
    verify_model(torch.nn.MSELoss().eval(), input_data=[predictions, targets])
    verify_model(torch.nn.MSELoss(reduction="sum").eval(), input_data=[predictions, targets])
    verify_model(torch.nn.MSELoss(reduction="none").eval(), input_data=[predictions, targets])
    # multidimension mse loss
    d1, d2 = 2, 3
    predictions = torch.rand((N, C, d1, d2)).float()
    targets = torch.rand((N, C, d1, d2)).float()
    verify_model(torch.nn.MSELoss().eval(), input_data=[predictions, targets])
    verify_model(torch.nn.MSELoss(reduction="sum").eval(), input_data=[predictions, targets])
    verify_model(torch.nn.MSELoss(reduction="none").eval(), input_data=[predictions, targets])
@tvm.testing.uses_gpu
def test_forward_flip():
    """Test for aten::flip over every axis of a rank-3 input, including a negative axis."""
    torch.set_grad_enabled(False)
    class Flip(Module):
        # Wraps flip so the axis is baked into the traced module.
        def __init__(self, axis=0):
            super().__init__()
            self.axis = axis
        def forward(self, x):
            return x.flip([self.axis])
    input_t = torch.randn(2, 3, 4)
    verify_model(Flip(axis=0), input_data=input_t)
    verify_model(Flip(axis=1), input_data=input_t)
    verify_model(Flip(axis=2), input_data=input_t)
    verify_model(Flip(axis=-1), input_data=input_t)
def test_annotate_span():
    """Smoke-test the AnnotateSpans pass on a converted resnet18 module."""
    model = torchvision.models.resnet18().eval()
    inp = torch.randn([1, 3, 224, 224])
    trace = torch.jit.trace(model, inp).eval()
    # use_parser_friendly_name makes variable names legal for the Relay parser,
    # which AnnotateSpans relies on when round-tripping the module text.
    mod, _ = relay.frontend.from_pytorch(
        trace, [("input", inp.shape)], use_parser_friendly_name=True
    )
    relay.transform.AnnotateSpans()(mod)
@tvm.testing.uses_gpu
def test_all_any():
    """Test for aten::all / aten::any with and without dim/keepdim arguments."""
    def test_fn(f, dim=None, keepdim=False):
        return lambda x: f(x, dim=dim, keepdim=keepdim)
    def test_fn_no_arg(f):
        return lambda x: f(x)  # pylint: disable=unnecessary-lambda
    for f in [torch.all, torch.any]:
        verify_model(test_fn(f, 0), [torch.rand(1, 2).bool()])
        # uint8 inputs exercise the non-bool code path.
        verify_model(test_fn(f, 0), [torch.arange(0, 3).to(torch.uint8)])
        verify_model(test_fn(f, 1), [torch.rand(4, 2).bool()])
        verify_model(test_fn(f, 0, keepdim=True), [torch.rand(4, 2).bool()])
        verify_model(test_fn_no_arg(f), [torch.rand(1, 2).bool()])
        verify_model(test_fn_no_arg(f), [torch.arange(0, 3).to(torch.uint8)])
@tvm.testing.uses_gpu
def test_searchsorted():
    """Test for aten::searchsorted with 1-D/2-D sequences, int32 output and right=True."""
    def test_fn(out_int32=False, right=False):
        return lambda x, y: torch.searchsorted(x, y, out_int32=out_int32, right=right)
    sorted_sequence = torch.tensor([[1, 3, 5, 7, 9], [2, 4, 6, 8, 10]])
    values = torch.tensor([[3, 6, 9], [3, 6, 9]])
    verify_model(test_fn(), [sorted_sequence, values])
    verify_model(test_fn(out_int32=True), [sorted_sequence[0], values[0]])
    verify_model(test_fn(right=True), [sorted_sequence, values])
    # 1-D sequence broadcast against 2-D values, plus a 0-d value.
    sorted_sequence_1d = torch.tensor([1, 3, 5, 7, 9])
    values = torch.tensor([[3, 6, 9], [4, 2, 7]])
    verify_model(test_fn(), [sorted_sequence_1d, values])
    verify_model(test_fn(), [sorted_sequence_1d, torch.tensor(6)])
@tvm.testing.uses_gpu
def test_bucketize():
    """Conversion test for aten::bucketize, default flags and out_int32/right."""

    def make_bucketize(out_int32=False, right=False):
        return lambda x, y: torch.bucketize(x, y, out_int32=out_int32, right=right)

    edges = torch.tensor([1, 3, 5, 7, 9])
    queries = torch.tensor([3, 6, 9])
    verify_model(make_bucketize(), [queries, edges])
    verify_model(make_bucketize(out_int32=True, right=True), [queries, edges])
@tvm.testing.uses_gpu
def test_roll():
    """Conversion test for aten::roll with scalar and tuple shifts."""

    def make_roll(shifts, dims):
        return lambda x: torch.roll(x, shifts, dims)

    data = torch.tensor([1, 2, 3, 4, 5, 6, 7, 8]).view(4, 2)
    verify_model(make_roll(1, 0), [data])
    verify_model(make_roll(-1, 0), [data])
    verify_model(make_roll(shifts=(2, 1), dims=(0, 1)), [data])
@tvm.testing.uses_gpu
def test_einsum():
    """Conversion test for aten::einsum with two and three operands."""

    def make_einsum(equation):
        return lambda *operands: torch.einsum(equation, *operands)

    lhs = torch.ones([2, 3])
    mid = torch.ones([3, 4])
    rhs = torch.ones([4, 5])
    verify_model(make_einsum("ij,jk"), [lhs, mid])
    verify_model(make_einsum("ij,jk,km->im"), [lhs, mid, rhs])
def test_stft():
    """Test for aten::stft across center/pad_mode/normalized/onesided combinations."""
    def test_fn(n_fft, hop_length, win_length, center, pad_mode, normalized, onesided):
        return lambda input, window=None: torch.stft(
            input=input,
            n_fft=n_fft,
            hop_length=hop_length,
            win_length=win_length,
            window=window,
            center=center,
            pad_mode=pad_mode,
            normalized=normalized,
            onesided=onesided,
            return_complex=False,
        )
    input_t = torch.rand([1, 12]).float()
    # NOTE(review): an integer window is unusual for stft (typically float);
    # presumably chosen to exercise dtype handling — confirm.
    window = torch.tensor([2, 3, 4], dtype=torch.int32)
    targets = ["llvm", "cuda"]
    verify_trace_model(test_fn(3, 3, 3, False, "constant", False, True), [input_t, window], targets)
    verify_trace_model(test_fn(3, 3, 3, True, "constant", False, True), [input_t, window], targets)
    verify_trace_model(test_fn(3, 3, 3, False, "reflect", False, True), [input_t, window], targets)
    verify_trace_model(test_fn(3, 3, 3, True, "reflect", False, True), [input_t, window], targets)
    verify_trace_model(test_fn(3, 3, 3, True, "reflect", True, True), [input_t, window], targets)
    verify_trace_model(test_fn(3, 3, 3, True, "reflect", False, False), [input_t, window], targets)
    # Batched input (2 signals), plus the no-window overload at the end.
    input_t = torch.rand([2, 12]).float()
    window = torch.tensor([2, 3, 4], dtype=torch.int32)
    verify_trace_model(test_fn(3, 3, 3, False, "reflect", False, True), [input_t, window], targets)
    window = torch.tensor([1, 3], dtype=torch.int32)
    verify_trace_model(test_fn(2, 1, 2, False, "reflect", False, True), [input_t, window], targets)
    verify_trace_model(test_fn(2, 1, 2, False, "reflect", False, True), [input_t], targets)
@tvm.testing.uses_gpu
def test_dot():
    """Conversion test for aten::dot (1-D inner product with itself)."""

    def self_dot(x):
        return x.dot(x)

    verify_model(self_dot, [torch.randn([4])])
@tvm.testing.uses_gpu
def test_mv():
    """Conversion test for aten::mv (matrix-vector product), square and rectangular."""

    def mv_fn(m, v):
        return m.mv(v)

    for rows, cols in [(4, 4), (2, 2), (3, 8)]:
        verify_model(mv_fn, [torch.randn(rows, cols), torch.randn(cols)])
def test_grid_sample():
    """Test for aten::grid_sample over all mode/padding/align_corners combinations, 2-D and 3-D."""
    class Grid_sample(Module):
        # Bakes the sampling configuration into the traced module.
        def __init__(self, method, padding_mode, align_corners):
            super().__init__()
            self._method = method
            self._padding_mode = padding_mode
            self._align_corners = align_corners
        def forward(self, x, y):
            return torch.nn.functional.grid_sample(
                input=x,
                grid=y,
                mode=self._method,
                padding_mode=self._padding_mode,
                align_corners=self._align_corners,
            )
    methods = ["nearest", "bilinear", "bicubic"]
    padding_modes = ["zeros", "border", "reflection"]
    align_corners = [True, False]
    data_2D = torch.rand([4, 4, 8, 8]).float()
    grid_2D = torch.rand([4, 16, 16, 2]).float()
    # choosing smaller sizes to be testable on weaker GPUs
    data_3D = torch.rand([4, 4, 4, 4, 4]).float()
    grid_3D = torch.rand([4, 8, 8, 8, 3]).float()
    for _method in methods:
        # bicubic was introduced when pytorch > 1.7.1
        torch_version = package_version.parse(torch.__version__)
        if _method == "bicubic" and torch_version <= package_version.parse("1.7.1"):
            continue
        for _padding in padding_modes:
            for _align in align_corners:
                # ATTENTION:
                # "nearest" + "reflection" result may be different with pytorch on cpu device,
                # because pytorch's cpu result is different with gpu result,
                # and gpu result used here as baseline in tvm topi.image.grid_sample.
                model = Grid_sample(_method, _padding, _align)
                verify_model(model, input_data=[data_2D, grid_2D])
                # 3D "bicubic"(tricubic) is not supported in pytorch
                if _method != "bicubic":
                    verify_model(model, input_data=[data_3D, grid_3D])
def test_list_tuple():
    """test compilation error for a Python list followed by a prim::TupleConstruct."""
    class List_tuple(Module):
        """Builds a list of tuples, mutates it in place, then returns a mixed result."""
        def forward(self, x):
            """forward"""
            merged = []
            mask_list = []
            for i in range(3):
                w0 = torch.sigmoid(x)
                merged.append((w0, w0))
                mask_list.append(x)
            # In-place element replacement turns each tuple into a tensor sum.
            for i in range(3):
                merged[i] = merged[i][0] + merged[i][1]
            return mask_list[2], merged
    x = torch.rand([4, 4, 16, 32]).float()
    # strict=False allows tracing the list-valued output.
    script_module = torch.jit.trace(List_tuple(), x, strict=False).eval()
    relay.frontend.from_pytorch(script_module, [("x", x.shape)])
# pylint: disable=unnecessary-dunder-call
@tvm.testing.uses_gpu
def test_binary_bitwise():
    """Test for in-place bitwise or/and/xor (aten::__ior__, __iand__, __ixor__)."""
    def test_ior(x, y):
        return x.__ior__(y)
    def test_iand(x, y):
        return x.__iand__(y)
    def test_ixor(x, y):
        return x.__ixor__(y)
    x = torch.tensor([7, 49, 16, 1, 2, 3], dtype=torch.uint8)
    y = torch.tensor([39, 128, 99, 228, 63, 17], dtype=torch.uint8)
    for test_fn in [test_ior, test_iand, test_ixor]:
        verify_model(test_fn, [x, y])
@tvm.testing.uses_gpu
def test_shift():
    """Test for aten::__lshift__, aten::__rshift__"""

    def shift_left(x, y):
        return x << y

    def shift_right(x, y):
        return x >> y

    base = torch.tensor([39, 128, 99, 228, 63, 17], dtype=torch.int32)
    amount = torch.tensor([3, 2, 7, 4, 5, 9], dtype=torch.int32)
    for fn in (shift_left, shift_right):
        verify_model(fn, [base, amount])
@tvm.testing.uses_gpu
def test_mod():
    """Test for aten::fmod and aten::remainder (they differ on negative operands)."""
    def test_fmod(x, y):
        return torch.fmod(x, y)
    def test_remainder(x, y):
        return torch.remainder(x, y)
    for test_fn in [test_fmod, test_remainder]:
        # Negative dividends/divisors distinguish fmod (C-style) from remainder (Python-style).
        verify_model(test_fn, [torch.tensor([-3.0, -2, -1, 1, 2, 3]), torch.tensor(2)])
        verify_model(test_fn, [torch.tensor([1, 2, 3, 4, 5]), torch.tensor(-1.5)])
def test_softmax_fuse():
    """Regression test for a fusion bug involving softmax and NCHWc layout."""
    # https://github.com/apache/tvm/issues/12001
    class Model(torch.nn.Module):
        """Conv + floor + softmax graph that triggers (or avoids) the NCHWc post-op path."""
        def __init__(self, nchwc_post_op=False) -> None:
            super().__init__()
            self.conv = torch.nn.Conv2d(3, 3, (1, 1), 1)
            self.nchwc_post_op = nchwc_post_op
        @torch.no_grad()
        def forward(self, x):
            """forward"""
            t0a = self.conv(x)
            t0b = torch.floor(x)
            t2b = torch.softmax(t0a, dim=2)
            if self.nchwc_post_op:
                t3a = t0a - t0b
                t4a = t2b - t0b
                t6a = t3a + t4a
                return t6a
            return t2b + 1
    sh = [3, 3, 10, 1]
    inp = torch.ones(*sh, dtype=torch.float32)
    # Both branches are compiled at opt_level=4 and compared against torch.
    for model in [Model(nchwc_post_op=False).eval(), Model(nchwc_post_op=True).eval()]:
        output_torch = model(inp).numpy()
        mod, params = relay.frontend.from_pytorch(torch.jit.trace(model, inp), [("inp0", sh)])
        with tvm.transform.PassContext(opt_level=4):
            out = (
                relay.create_executor("graph", mod, params=params)
                .evaluate()(inp0=inp.numpy())
                .numpy()
            )
        tvm.testing.assert_allclose(out, output_torch, rtol=1e-5, atol=1e-5)
@tvm.testing.uses_gpu
def test_lerp():
    """Conversion test for aten::lerp with tensor and scalar (0-d) weights."""

    def lerp_fn(start, end, weight):
        return torch.lerp(start, end, weight)

    shape = [16]
    start = torch.rand(shape).float()
    end = torch.rand(shape).float()
    weight = torch.rand(shape).float()
    # weight can be tensor or scalar
    verify_model(lerp_fn, [start, end, weight])
    verify_model(lerp_fn, [start, end, weight[0]])
def test_trilu():
    """Test for aten::triu and aten::tril with zero, positive and negative diagonals."""
    def _test_trilu(op, diagonal):
        return lambda inp: op(inp, diagonal)
    for op in [torch.triu, torch.tril]:
        verify_model(_test_trilu(op, 0), [torch.rand(size=[3, 3])])
        verify_model(_test_trilu(op, 1), [torch.rand(size=[6, 6])])
        verify_model(_test_trilu(op, -2), [torch.rand(size=[6, 6])])
def test_multinomial():
    """Test for aten::multinomial (shape-only — sampling output is random)."""
    def _test_multinomial(num_samples):
        return lambda inp: torch.multinomial(inp, num_samples=num_samples, replacement=True)
    # Dont check output since it's random. Instead we'll just make sure shapes are right.
    verify_model(
        _test_multinomial(2),
        [torch.rand(size=[3]).float()],
        cpu_only=True,
        check_correctness=False,
        validate_structural_equal=False,
    )
    verify_model(
        _test_multinomial(1),
        [torch.rand(size=[4, 5]).float()],
        cpu_only=True,
        check_correctness=False,
        validate_structural_equal=False,
    )
def test_weight_norm():
    """Test for aten::_weight_norm applied to Conv2d (plain, grouped, dim=1) and Linear."""
    in_channels = 32
    out_channels = 64
    input_data_conv = torch.rand((1, in_channels, 32, 32)).float()
    conv_wn = torch.nn.utils.weight_norm(torch.nn.Conv2d(in_channels, out_channels, kernel_size=3))
    verify_model(conv_wn.eval().float(), input_data_conv)
    # Grouped convolution exercises the per-group weight reshaping.
    conv_wn_groups = torch.nn.utils.weight_norm(
        torch.nn.Conv2d(in_channels, out_channels, kernel_size=3, groups=2)
    )
    verify_model(conv_wn_groups.eval().float(), input_data_conv)
    # dim=1 normalizes over a non-default axis.
    conv_wn = torch.nn.utils.weight_norm(
        torch.nn.Conv2d(in_channels, out_channels, kernel_size=3), dim=1
    )
    verify_model(conv_wn.eval().float(), input_data_conv)
    linear_wn = torch.nn.utils.weight_norm(torch.nn.Linear(in_channels, out_channels))
    input_data_linear = torch.rand((128, in_channels)).float()
    verify_model(linear_wn.eval().float(), input_data_linear)
@tvm.testing.uses_gpu
def test_baddbmm():
    """Test for aten::baddbmm (batched matmul with scaled additive input)."""
    def test_fn(alpha, beta):
        return lambda inp, batch1, batch2: torch.baddbmm(
            inp, batch1, batch2, beta=beta, alpha=alpha
        )
    M = torch.randn(10, 3, 5)
    batch1 = torch.randn(10, 3, 4)
    batch2 = torch.randn(10, 4, 5)
    verify_model(test_fn(0.5, 1.0), [M, batch1, batch2])
def test_exporting_renamed_c_graph():
    """test exporting model when export_renamed_c_graph_path is set"""
    # model definition
    class Conv2D(Module):
        def __init__(self):
            super(Conv2D, self).__init__()
            self.conv = torch.nn.Conv2d(3, 6, 3, bias=True)
        def forward(self, *args):
            return self.conv(args[0])
    input_name, input_shape = "input", [1, 3, 10, 10]
    shape_list = [(input_name, input_shape)]
    temp_dir = utils.tempdir().path
    script_module = torch.jit.trace(Conv2D(), [torch.rand(input_shape)])
    # Exporting writes the renamed TorchScript graph to temp_dir as a side effect.
    _, _ = relay.frontend.from_pytorch(
        script_module, shape_list, export_renamed_c_graph_path=temp_dir
    )
    exported_c_graph_name = os.listdir(temp_dir)[0]
    assert "tvm_exported_c_graph_" in exported_c_graph_name
    # make sure the renamed output variable presents in the restored _C.Graph
    with open(f"{temp_dir}/{exported_c_graph_name}", "r") as f:
        graph = f.read()
        assert "%aten::_convolution_0" in graph
class TestSetSpan:
    """test structural equal between translated / hand-crafted relay IR with span tagged."""
    def _verify(self, res_fptr, golden_fptr):
        # Converting with span filling on and off must yield structurally equal IR,
        # and the span-filled IR must match the hand-crafted golden module.
        with tvm.testing.enable_span_filling():
            with_span = res_fptr()
        with tvm.testing.disable_span_filling():
            without_span = res_fptr()
        assert tvm.ir.structural_equal(with_span, without_span)
        _verify_structural_equal_with_span(with_span, golden_fptr())
    def test_conv2d_bias_add(self):
        """Spans of a converted Conv2d-with-bias must match the hand-built golden IR."""
        ker_sz, in_chs, out_chs = 7, 3, 6
        input_shape = [1, 3, 10, 10]
        def _res():
            # model definition
            class Conv2D(Module):
                def __init__(self):
                    super(Conv2D, self).__init__()
                    self.conv = torch.nn.Conv2d(in_chs, out_chs, ker_sz, bias=True)
                def forward(self, *args):
                    return self.conv(args[0])
            # get frontend model
            mod = gen_ir_module(Conv2D(), [torch.rand(input_shape)])
            return mod["main"]
        def _golden():
            # Hand-built IR: conv2d + bias_add, all vars/calls tagged with the op's span id.
            conv_si = "aten::_convolution_0"
            input_name = "input0"
            input_0 = relay.var(
                input_name,
                shape=tuple(input_shape),
                span=_create_span(f"{conv_si}.{input_name}"),
            )
            weight_name = f"{conv_si}.weight"
            conv_weight = relay.var(
                weight_name,
                shape=(out_chs, in_chs, ker_sz, ker_sz),
                span=_create_span(weight_name),
            )
            bias_name = f"{conv_si}.bias"
            conv_bias = relay.var(
                bias_name,
                shape=(out_chs,),
                span=_create_span(bias_name),
            )
            conv_out = _set_span(
                relay.nn.conv2d(
                    input_0,
                    conv_weight,
                    padding=[0] * 4,
                    channels=out_chs,
                    kernel_size=[ker_sz] * 2,
                ),
                conv_si,
            )
            bias_out = _set_span(relay.nn.bias_add(conv_out, conv_bias), conv_si)
            return relay.Function([input_0, conv_weight, conv_bias], bias_out)
        self._verify(_res, _golden)
    def test_batchnorm_span(self):
        """Spans of a converted BatchNorm2d must match the hand-built golden IR."""
        features = 16
        input_shape = [1, 16, 10, 10]
        def _res():
            # model definition
            bn_2d = torch.nn.BatchNorm2d(features)
            # get frontend model
            mod = gen_ir_module(bn_2d, [torch.rand(input_shape)])
            return mod["main"]
        def _golden():
            bn_si = "aten::batch_norm_0"
            input_name = "input0"
            input_0 = relay.var(
                input_name,
                shape=tuple(input_shape),
                span=_create_span(f"{bn_si}.{input_name}"),
            )
            weight_name = f"{bn_si}.weight"
            bn_weight = relay.var(
                weight_name,
                shape=(features,),
                span=_create_span(weight_name),
            )
            bias_name = f"{bn_si}.bias"
            bn_bias = relay.var(
                bias_name,
                shape=(features,),
                span=_create_span(bias_name),
            )
            rm_name = f"{bn_si}.running_mean"
            bn_rm = relay.var(
                rm_name,
                shape=(features,),
                span=_create_span(rm_name),
            )
            rv_name = f"{bn_si}.running_var"
            bn_rv = relay.var(
                rv_name,
                shape=(features,),
                span=_create_span(rv_name),
            )
            bn_out = _set_span(
                relay.nn.batch_norm(input_0, bn_weight, bn_bias, bn_rm, bn_rv),
                bn_si,
            )
            # batch_norm returns a tuple; only element 0 (the output) is used.
            bn_tuple_get_item = _set_span(relay.TupleGetItem(bn_out.tuple_value, 0), bn_si)
            return relay.Function([input_0, bn_weight, bn_bias, bn_rm, bn_rv], bn_tuple_get_item)
        self._verify(_res, _golden)
    def test_reshape_span(self):
        """Spans of a converted reshape must match the hand-built golden IR."""
        input_shape = [2, 1, 10, 1, 10]
        new_shape = [2, 1, 10, 10]
        def _res():
            # model definition
            class Reshape(Module):
                def forward(self, *args):
                    return args[0].reshape(new_shape)
            # get frontend model
            mod = gen_ir_module(Reshape(), [torch.rand(input_shape)])
            return mod["main"]
        def _golden():
            reshape_si = "aten::reshape_0"
            input_name = "input0"
            input_0 = relay.var(
                input_name,
                shape=tuple(input_shape),
                span=_create_span(f"{reshape_si}.{input_name}"),
            )
            reshape_out = _set_span(
                relay.reshape(input_0, newshape=new_shape),
                reshape_si,
            )
            return relay.Function([input_0], reshape_out)
        self._verify(_res, _golden)
    def test_dense_bias_add(self):
        """Spans of a converted Linear (dense + bias_add) must match the golden IR."""
        in_f, out_f = 10, 7
        input_shape = [in_f, in_f]
        def _res():
            # model definition
            class Dense(Module):
                def __init__(self):
                    super(Dense, self).__init__()
                    self.linear = torch.nn.Linear(in_f, out_f, bias=True)
                def forward(self, *args):
                    return self.linear(args[0])
            # get frontend model
            mod = gen_ir_module(Dense(), [torch.rand(input_shape)])
            return mod["main"]
        def _golden():
            dense_si = "aten::linear_0"
            input_name = "input0"
            input_0 = relay.var(
                input_name,
                shape=tuple(input_shape),
                span=_create_span(f"{dense_si}.{input_name}"),
            )
            weight_name = f"{dense_si}.weight"
            dense_weight = relay.var(
                weight_name,
                shape=(out_f, in_f),
                span=_create_span(weight_name),
            )
            bias_name = f"{dense_si}.bias"
            dense_bias = relay.var(
                bias_name,
                shape=(out_f,),
                span=_create_span(bias_name),
            )
            dense_out = _set_span(
                relay.nn.dense(input_0, dense_weight),
                dense_si,
            )
            bias_out = _set_span(
                relay.nn.bias_add(dense_out, dense_bias, axis=-1),
                dense_si,
            )
            return relay.Function([input_0, dense_weight, dense_bias], bias_out)
        self._verify(_res, _golden)
# Run all tests in this file through TVM's pytest entry point.
if __name__ == "__main__":
    tvm.testing.main()
| 181,726 | 32.005267 | 108 | py |
tvm | tvm-main/tests/python/frontend/pytorch/qnn_test.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Tests on quantized torch model conversion """
import os
import numpy as np
import torch
import tvm
import tvm.testing
from PIL import Image
from torch import nn
from torch.quantization import (
DeQuantStub,
QuantStub,
QuantWrapper,
fuse_modules,
get_default_qat_qconfig,
prepare_qat,
)
from tvm import relay
from tvm.contrib.download import download_testdata
from tvm.relay.frontend.pytorch_utils import is_version_greater_than
from tvm.relay.op.contrib.register import get_pattern_table, register_pattern_table
def torch_version_check():
    """Return True iff the installed PyTorch release is strictly newer than 1.4.0."""
    from packaging.version import parse as _parse

    return _parse(torch.__version__) > _parse("1.4.0")
def get_tvm_runtime(script_module, input_name, ishape, keep_quantized_weight=False, target="llvm"):
    """Convert a TorchScript module to Relay, build it, and return a GraphModule runtime.

    Also converts a second time with span filling enabled and asserts both
    modules are structurally equal, so span tagging never changes the IR.
    """
    input_shapes = [(input_name, ishape)]
    with tvm.testing.disable_span_filling():
        mod, params = relay.frontend.from_pytorch(
            script_module, input_shapes, keep_quantized_weight=keep_quantized_weight
        )
    with tvm.testing.enable_span_filling():
        mod_with_span, _ = relay.frontend.from_pytorch(
            script_module, input_shapes, keep_quantized_weight=keep_quantized_weight
        )
    assert tvm.ir.structural_equal(mod, mod_with_span, map_free_vars=True)
    if keep_quantized_weight:
        # When weights stay quantized, every param must be an integer tensor.
        for p in params.values():
            assert p.dtype in ["int8", "int32"]
    with tvm.transform.PassContext(opt_level=3):
        # test on only cpu for now, torch cannot run quant models on cuda
        # also not to make CI too slow
        lib = relay.build(mod, target=target, params=params)
    runtime = tvm.contrib.graph_executor.GraphModule(lib["default"](tvm.device(target, 0)))
    return runtime
def get_qconfig(per_channel):
    """Return a torch QConfig: fbgemm (per-channel) or a per-tensor min/max config."""
    from torch.quantization.observer import (
        MovingAverageMinMaxObserver,
        default_weight_observer,
    )
    if per_channel:
        return torch.quantization.get_default_qconfig("fbgemm")
    else:
        # reduce_range=False keeps the full int8 range for activations.
        act = MovingAverageMinMaxObserver.with_args(reduce_range=False)
        return torch.quantization.QConfig(activation=act, weight=default_weight_observer)
def quantize_model(model, inp, per_channel=False):
    """Post-training static quantization of ``model``, in place.

    Fuses the model, attaches a qconfig, inserts observers, calibrates with a
    single forward pass on ``inp``, then converts to a quantized model.
    """
    model.fuse_model()
    model.qconfig = get_qconfig(per_channel)
    torch.quantization.prepare(model, inplace=True)
    # One forward pass is enough calibration for these unit tests.
    model(inp)
    torch.quantization.convert(model, inplace=True)
class ConvBn(nn.Module):
    """Conv2d(3, 32, 3) + BatchNorm (optionally + ReLU), wrapped for quantization."""

    def __init__(self, with_relu=False):
        super().__init__()
        modules = [nn.Conv2d(3, 32, 3, bias=True), nn.BatchNorm2d(32)]
        if with_relu:
            modules.append(nn.ReLU())
        self.conv = nn.Sequential(*modules)
        self.quant_wrap = QuantWrapper(self.conv)
        self.with_relu = with_relu

    def forward(self, x):
        return self.quant_wrap(x)

    def fuse_model(self):
        # Fuse conv+bn, plus the relu when present, by submodule index.
        to_fuse = ["0", "1", "2"] if self.with_relu else ["0", "1"]
        fuse_modules(self.conv, to_fuse, inplace=True)
class ConvTranspose(nn.Module):
    """A single ConvTranspose2d(3, 32, 3), wrapped for quantization."""

    def __init__(self):
        super().__init__()
        self.conv = nn.Sequential(nn.ConvTranspose2d(3, 32, 3, bias=True))
        self.quant_wrap = QuantWrapper(self.conv)

    def forward(self, x):
        return self.quant_wrap(x)

    def fuse_model(self):
        # Nothing to fuse for a lone transposed convolution.
        pass
class Linear(nn.Module):
    """Linear(16, 32), optionally followed by ReLU, wrapped for quantization."""

    def __init__(self, with_relu=False):
        super().__init__()
        modules = [nn.Linear(16, 32)]
        if with_relu:
            modules.append(nn.ReLU())
        self.fc = nn.Sequential(*modules)
        self.quant_wrap = QuantWrapper(self.fc)
        self.with_relu = with_relu

    def forward(self, x):
        return self.quant_wrap(x)

    def fuse_model(self):
        # linear+relu can be fused; a lone linear cannot.
        if self.with_relu:
            fuse_modules(self.fc, ["0", "1"], inplace=True)
class ReLU(nn.Module):
    """Standalone ReLU wrapped with quant/dequant stubs."""

    def __init__(self):
        super().__init__()
        self.relu = QuantWrapper(nn.ReLU())

    def forward(self, x):
        return self.relu(x)

    def fuse_model(self):
        # Single op: nothing to fuse.
        pass
class LeakyReLU(nn.Module):
    """Standalone LeakyReLU (default slope) wrapped with quant/dequant stubs."""

    def __init__(self):
        super().__init__()
        self.leaky_relu = QuantWrapper(nn.LeakyReLU())

    def forward(self, x):
        return self.leaky_relu(x)

    def fuse_model(self):
        # Single op: nothing to fuse.
        pass
# Mobilenet V3 related modules
class Hsigmoid(nn.Module):
    """Hardsigmoid with optional explicit quant/dequant stubs around it."""

    def __init__(self, add_stub=False):
        super().__init__()
        self.quant = QuantStub()
        self.dequant = DeQuantStub()
        self.add_stub = add_stub
        self.hsigmoid = nn.Hardsigmoid()

    def forward(self, x):
        if not self.add_stub:
            return self.hsigmoid(x)
        return self.dequant(self.hsigmoid(self.quant(x)))

    def fuse_model(self):
        # Single op: nothing to fuse.
        pass
class Hswish(nn.Module):
    """Hardswish wrapped for quantization.

    NOTE(review): ``add_stub`` is accepted for API symmetry with Hsigmoid but
    is unused here — QuantWrapper always supplies the stubs.
    """

    def __init__(self, add_stub=False):
        super().__init__()
        self.hswish = QuantWrapper(nn.Hardswish())

    def forward(self, x):
        return self.hswish(x)

    def fuse_model(self):
        # Single op: nothing to fuse.
        pass
class SqueezeExcite(nn.Module):
    """Squeeze-and-Excitation block with a quantizable elementwise multiply."""

    def __init__(self, channel, reduction=4, add_stub=False):
        super().__init__()
        squeezed = channel // reduction
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Sequential(
            nn.Linear(channel, squeezed, bias=False),
            nn.ReLU(inplace=True),
            nn.Linear(squeezed, channel, bias=False),
            Hsigmoid(add_stub=False),
        )
        self.fmul = nn.quantized.FloatFunctional()
        self.quant = QuantStub()
        self.dequant = DeQuantStub()
        self.add_stub = add_stub

    def forward(self, x):
        batch, channels = x.size(0), x.size(1)
        if self.add_stub:
            x = self.quant(x)
        # Squeeze to (N, C), excite through the FC bottleneck, rescale input.
        scale = self.avg_pool(x).view(batch, channels)
        scale = self.fc(scale).view(batch, channels, 1, 1)
        out = self.fmul.mul(x, scale.expand_as(x))
        return self.dequant(out) if self.add_stub else out

    def fuse_model(self):
        # Fuse the linear+relu pair inside the bottleneck.
        fuse_modules(self.fc, ["0", "1"], inplace=True)
# test on quantized::mul_scalar with negative scale
class MulScalarNegative(nn.Module):
    """Exercises quantized::mul_scalar with a negative scale factor."""

    def __init__(self):
        super().__init__()
        self.float_op = nn.quantized.FloatFunctional()
        self.quant = QuantStub()
        self.dequant = DeQuantStub()

    def forward(self, x):
        scaled = self.float_op.mul_scalar(self.quant(x), -0.3)
        return self.dequant(scaled)

    def fuse_model(self):
        # Single op: nothing to fuse.
        pass
class UpsamplingBilinear(nn.Module):
def __init__(self):
super().__init__()
self.quant = QuantStub()
self.dequant = DeQuantStub()
def forward(self, x):
x = self.quant(x)
upsample = nn.functional.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True)
return self.dequant(upsample)
def fuse_model(self):
pass
class AvgPool2d(nn.Module):
    """2x2 average pooling wrapped for quantization."""

    def __init__(self):
        super().__init__()
        self.pool = QuantWrapper(nn.AvgPool2d(kernel_size=2))

    def forward(self, x):
        return self.pool(x)

    def fuse_model(self):
        # Single op: nothing to fuse.
        pass
class AdaptiveAvgPool2d(nn.Module):
    """Global (1x1 output) adaptive average pooling wrapped for quantization."""

    def __init__(self):
        super().__init__()
        self.pool = QuantWrapper(nn.AdaptiveAvgPool2d((1, 1)))

    def forward(self, x):
        return self.pool(x)

    def fuse_model(self):
        # Single op: nothing to fuse.
        pass
def test_quantized_modules():
    """Quantize each test module, import into Relay, and compare against
    PyTorch by printing error statistics.

    No hard accuracy assertion is made here — see the note at the bottom of
    this block; quantized outputs cannot be guaranteed to match closely.
    """
    imagenet_ishape = (1, 3, 224, 224)
    # Entries are (name, input shape, module, use per-channel quantization).
    # These three are only tested with per-tensor quantization.
    qmodules = [
        ("relu", imagenet_ishape, ReLU(), False),
        ("upsample bilinear", (1, 3, 64, 64), UpsamplingBilinear(), False),
        ("avgpool", imagenet_ishape, AvgPool2d(), False),
    ]
    # The modules below are added twice, once per quantization scheme (some
    # entries hard-code their per_channel flag regardless of the loop value).
    for per_channel in [False, True]:
        if per_channel:
            postfix = ", per_channel"
        else:
            postfix = ""
        qmodules += [
            ("conv_bn" + postfix, imagenet_ishape, ConvBn(), per_channel),
            ("conv_bn_relu" + postfix, imagenet_ishape, ConvBn(with_relu=True), per_channel),
            ("linear" + postfix, (16, 16), Linear(), per_channel),
            ("linear_relu" + postfix, (16, 16), Linear(with_relu=True), per_channel),
            ("conv_transpose", imagenet_ishape, ConvTranspose(), False),
            ("hsigmoid", imagenet_ishape, Hsigmoid(add_stub=True), False),
            ("hswish", imagenet_ishape, Hswish(), False),
            ("semodule", (1, 16, 64, 64), SqueezeExcite(16, add_stub=True), False),
            ("semodule, per_channel", (1, 16, 64, 64), SqueezeExcite(16, add_stub=True), True),
            ("mul_scalar negative", imagenet_ishape, MulScalarNegative(), False),
            ("leaky_relu", imagenet_ishape, LeakyReLU(), False),
        ]
    for (module_name, ishape, raw_module, per_channel) in qmodules:
        raw_module.eval()
        inp = torch.rand(ishape)
        # quantized conv_transpose2d is supported only with qnnpack engine before torch v1.8.0.
        if module_name == "conv_transpose" and not is_version_greater_than("1.7.1"):
            # Temporarily switch the global quantization engine; restore after.
            prev_engine = torch.backends.quantized.engine
            torch.backends.quantized.engine = "qnnpack"
            quantize_model(raw_module, inp, per_channel=per_channel)
            torch.backends.quantized.engine = prev_engine
        else:
            quantize_model(raw_module, inp, per_channel=per_channel)
        script_module = torch.jit.trace(raw_module, inp).eval()
        with torch.no_grad():
            pt_result = script_module(inp.clone()).numpy()
        input_name = "input"
        runtime = get_tvm_runtime(script_module, input_name, ishape)
        runtime.set_input(input_name, inp.numpy().copy())
        runtime.run()
        tvm_result = runtime.get_output(0).numpy()
        # Error statistics vs. PyTorch — printed, not asserted (see note below).
        max_abs_diff = np.max(np.abs(tvm_result - pt_result))
        mean_abs_diff = np.mean(np.abs(tvm_result - pt_result))
        num_identical = np.sum(tvm_result == pt_result)
        match_ratio = num_identical / float(np.prod(tvm_result.shape))
        print(module_name, max_abs_diff, mean_abs_diff, match_ratio)
        # Additionally exercise the cuBLAS dense path when it is available.
        if "linear" in module_name and tvm.get_global_func("tvm.contrib.cublas.matmul", True):
            runtime = get_tvm_runtime(script_module, input_name, ishape, target="cuda -libs=cublas")
            runtime.set_input(input_name, inp.numpy().copy())
            runtime.run()
            cublas_result = runtime.get_output(0).numpy()
            # It is generally safe to enable this assertion, but disabled for CI
            # tvm.testing.assert_allclose(cublas_result, pt_result, atol=1e-5, rtol=1e-5)
            print(np.max(np.abs(cublas_result - pt_result)))
# sample outputs
"""
relu 0.0039215684 2.6052087e-08 0.9999933567176871
leaky_relu 0.0 0.0 1.0
upsample bilinear 0.0 0.0 1.0
conv_bn 0.22062653 0.011478779 0.6909348115006899
conv_bn_relu 0.3700896 0.010921672 0.7489366477964451
linear 0.15987062 0.009231662 0.794921875
linear_relu 0.14180502 0.0053220326 0.8828125
conv_transpose 0.0033792555 4.4658788e-07 0.9998678439971806
conv_bn, per_channel 0.01654929 2.9486866e-06 0.9998218235127019
conv_bn_relu, per_channel 0.009089053 1.4926576e-06 0.9998357732732732
linear, per_channel 0.0 0.0 1.0
linear_relu, per_channel 0.0 0.0 1.0
hsigmoid 0.002614379 0.00020525524 0.9214896896258503
hswish 0.0026143193 1.7367661e-08 0.9999933567176871
hswish, per_channel 0.0 0.0 1.0
semodule, per_channel 0.0039885044 0.0008620687 0.7838592529296875
mul_scalar negative 0.0011764616 7.815566e-09 0.9999933567176871
"""
# we cannot make any guarantee on how close the raw output is to torch
# tvm.testing.assert_allclose(tvm_result, pt_result, rtol=1e-1, atol=1e-1)
def test_quantized_imagenet():
    """End-to-end check of quantized torchvision classifiers.

    Each model is post-training quantized, imported into Relay, and the
    TVM top-3 ImageNet labels must match PyTorch's.
    """
    def get_transform():
        # Standard ImageNet preprocessing pipeline.
        import torchvision.transforms as transforms
        normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        return transforms.Compose(
            [transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), normalize]
        )
    def get_real_image(im_height, im_width):
        # Fixed test image so the comparison is reproducible.
        repo_base = "https://github.com/dmlc/web-data/raw/main/tensorflow/models/InceptionV1/"
        img_name = "elephant-299.jpg"
        image_url = os.path.join(repo_base, img_name)
        img_path = download_testdata(image_url, img_name, module="data")
        return Image.open(img_path).resize((im_height, im_width))
    def get_imagenet_input():
        # Returns a preprocessed NCHW numpy batch of size 1.
        im = get_real_image(224, 224)
        preprocess = get_transform()
        pt_tensor = preprocess(im)
        return np.expand_dims(pt_tensor.numpy(), 0)
    from torchvision.models.quantization import googlenet as qgooglenet
    from torchvision.models.quantization import inception as qinception
    from torchvision.models.quantization import mobilenet as qmobilenet
    from torchvision.models.quantization import (
        mobilenet_v3_large as qmobilenet_v3_large,
    )
    from torchvision.models.quantization import resnet as qresnet
    per_channel = True
    # Entries are (name, model, per_channel); disabled models documented below.
    qmodels = [
        ("resnet18", qresnet.resnet18(pretrained=True), per_channel),
        ("mobilenet_v2", qmobilenet.mobilenet_v2(pretrained=True), per_channel),
        ("inception_v3", qinception.inception_v3(pretrained=True), per_channel),
        # tracing quantized googlenet broken as of v1.6
        # ("googlenet", qgooglenet(pretrained=True), per_channel),
        # As of v1.10, quantized mobilenet v3 has a weird segfault issue
        # during make_conv_packed_param
        # See https://ci.tlcpack.ai/blue/organizations/jenkins/tvm/detail/ci-docker-staging/192
        # ("mobilenet_v3_large", qmobilenet_v3_large(pretrained=True, quantize=True).eval(), True)
    ]
    results = []
    for (model_name, raw_model, per_channel) in qmodels:
        raw_model.eval()
        if per_channel:
            model_name += ", per channel quantization"
        else:
            model_name += ", per tensor quantization"
        inp = get_imagenet_input()
        pt_inp = torch.from_numpy(inp)
        if "mobilenet_v3_large" not in model_name:
            # mv3 was qat-ed, quantize=True option above makes it already quantized
            quantize_model(raw_model, pt_inp, per_channel=per_channel)
        script_module = torch.jit.trace(raw_model, pt_inp).eval()
        with torch.no_grad():
            pt_result = script_module(pt_inp).numpy()
        input_name = "image"
        runtime = get_tvm_runtime(script_module, input_name, (1, 3, 224, 224))
        runtime.set_input(input_name, inp)
        runtime.run()
        tvm_result = runtime.get_output(0).numpy()
        # Keep only the first (and only) batch element for comparison.
        results.append((model_name, pt_result[0], tvm_result[0]))
    for (model_name, pt_result, tvm_result) in results:
        max_abs_diff = np.max(np.abs(tvm_result - pt_result))
        mean_abs_diff = np.mean(np.abs(tvm_result - pt_result))
        num_identical = np.sum(tvm_result == pt_result)
        pt_top3_labels = np.argsort(pt_result)[::-1][:3]
        tvm_top3_labels = np.argsort(tvm_result)[::-1][:3]
        print("\nModel name: %s" % model_name)
        print("PyTorch top3 label:", pt_top3_labels)
        print("TVM top3 label:", tvm_top3_labels)
        print("max abs diff:", max_abs_diff)
        print("mean abs_diff:", mean_abs_diff)
        print("%d in 1000 raw outputs identical." % num_identical)
        # The top-3 predicted classes must agree between TVM and PyTorch.
        assert set(pt_top3_labels) == set(tvm_top3_labels)
# sample outputs
"""
Model name: resnet18, per tensor quantization
PyTorch top3 label: [386 101 385]
TVM top3 label: [386 101 385]
max abs diff: 0.65681696
mean abs_diff: 0.14055882
236 in 1000 raw outputs identical.
Model name: mobilenet_v2, per tensor quantization
PyTorch top3 label: [101 386 385]
TVM top3 label: [101 386 385]
max abs diff: 2.1262953
mean abs_diff: 0.41025686
101 in 1000 raw outputs identical.
Model name: inception_v3, per tensor quantization
PyTorch top3 label: [101 386 385]
TVM top3 label: [101 386 385]
max abs diff: 0.9994669
mean abs_diff: 0.098697364
272 in 1000 raw outputs identical.
Model name: googlenet, per tensor quantization
PyTorch top3 label: [101 386 385]
TVM top3 label: [101 386 385]
max abs diff: 0.28248847
mean abs_diff: 0.0634469
274 in 1000 raw outputs identical.
Model name: resnet18, per channel quantization
PyTorch top3 label: [101 386 385]
TVM top3 label: [101 386 385]
max abs diff: 0.65908074
mean abs_diff: 0.1274223
469 in 1000 raw outputs identical.
Model name: mobilenet_v2, per channel quantization
PyTorch top3 label: [101 386 385]
TVM top3 label: [101 386 385]
max abs diff: 0.71120834
mean abs_diff: 0.15883648
423 in 1000 raw outputs identical.
Model name: inception_v3, per channel quantization
PyTorch top3 label: [386 101 385]
TVM top3 label: [386 101 385]
max abs diff: 1.3372154
mean abs_diff: 0.1225224
401 in 1000 raw outputs identical.
Model name: googlenet, per channel quantization
PyTorch top3 label: [101 386 385]
TVM top3 label: [101 386 385]
max abs diff: 0.34015465
mean abs_diff: 0.054197952
558 in 1000 raw outputs identical.
"""
def test_serialized_modules():
    """Check that a quantized model survives a torch.jit save/load round trip.

    The loaded module is imported into Relay and its output compared against
    PyTorch with a loose 90% element-match requirement.
    """
    import tempfile

    ishape = (1, 16, 64, 64)
    raw_module = AdaptiveAvgPool2d().eval()
    inp = torch.rand(ishape)

    quantize_model(raw_module, inp)
    script_module = torch.jit.trace(raw_module, inp).eval()

    # Serialize into a private temporary directory so parallel test runs
    # cannot collide on a shared filename, and the file is always cleaned up
    # even if loading fails (the old code left "tmp.pt" behind on error).
    with tempfile.TemporaryDirectory() as tmp_dir:
        fname = os.path.join(tmp_dir, "tmp.pt")
        torch.jit.save(script_module, fname)
        loaded = torch.jit.load(fname)

    with torch.no_grad():
        pt_result = loaded(inp.clone()).numpy()

    input_name = "input"
    runtime = get_tvm_runtime(loaded, input_name, ishape)
    runtime.set_input(input_name, inp.numpy().copy())
    runtime.run()
    tvm_result = runtime.get_output(0).numpy()

    # with 0.5ish results, 1e-2 is relative accuracy close to 2**-6.
    # for simple layers like here this should be achievable
    # with 8 bit quantization
    # we only require 90% match just to be sure
    num_identical = np.sum(np.abs(tvm_result - pt_result) < 1e-2)
    match_ratio = num_identical / float(np.prod(tvm_result.shape))
    assert match_ratio > 0.90
def test_quantize_dynamic():
    """Test dynamic quantization (torch.quantization.quantize_dynamic) of a
    linear layer, for both per-channel and per-tensor dynamic qconfigs and
    for 2D and 3D inputs."""
    # A wrapper is required for quantize_dynamic to work correctly
    class LinearWrapper(nn.Module):
        def __init__(self, in_dim, hidden_dim):
            super().__init__()
            self.linear = nn.Linear(in_dim, hidden_dim)

        def forward(self, inp):
            return self.linear(inp)

    torch.manual_seed(0)
    mod = LinearWrapper(16, 32)

    for qconfig in [
        torch.quantization.per_channel_dynamic_qconfig,
        torch.quantization.default_dynamic_qconfig,
    ]:
        for ishape in [(16, 16), (10, 16, 16)]:
            qspec = {nn.Linear: qconfig}
            qmod = torch.quantization.quantize_dynamic(mod, qconfig_spec=qspec, dtype=torch.qint8)

            inp = torch.randn(*ishape)
            script_module = torch.jit.trace(qmod, inp).eval()

            with torch.no_grad():
                pt_result = script_module(inp.clone()).numpy()

            input_name = "input"
            # Consistency fix: pass input_name instead of a duplicated literal.
            runtime = get_tvm_runtime(script_module, input_name, inp.shape)
            runtime.set_input(input_name, inp.numpy().copy())
            runtime.run()
            tvm_result = runtime.get_output(0).numpy()

            # Only compare with the PyTorch result for version v1.6 or newer
            # Have seen a strange accuracy problem from PyTorch 1.4 and 1.5
            # Even with the manual random seed set, the same PyTorch
            # version can outputs slightly different results depending on an environment.
            # Outputs from v1.6 seem reliable. TVM's outputs are always the same
            if is_version_greater_than("1.5.1"):
                tvm.testing.assert_allclose(tvm_result, pt_result, rtol=1e-4, atol=1e-4)
def make_qnn_add_pattern():
    """Build a dataflow pattern matching qnn.add optionally followed by clip."""
    from tvm.relay.dataflow_pattern import is_op, wildcard

    # qnn.add takes (lhs, rhs) plus scale/zero-point pairs for both inputs
    # and for the output — 8 arguments in total, all matched by wildcards.
    operands = [wildcard() for _ in range(8)]
    add_pattern = is_op("qnn.add")(*operands)
    return add_pattern.optional(is_op("clip"))
@register_pattern_table("test_table")
def pattern_table():
    """Pattern table containing the single qnn_add composite pattern."""
    return [("qnn_add", make_qnn_add_pattern())]
def run_qnn_mergecomposite(script_module, input_name, ishape):
    """Import a TorchScript module and run MergeComposite with the qnn_add pattern.

    Returns the partitioned module (previously computed but discarded) so
    callers can inspect the result; existing callers that ignore the return
    value are unaffected.
    """
    input_shapes = [(input_name, ishape)]
    # Import with and without span filling and require identical IR.
    with tvm.testing.disable_span_filling():
        mod, params = relay.frontend.from_pytorch(script_module, input_shapes)
    with tvm.testing.enable_span_filling():
        mod_with_span, _ = relay.frontend.from_pytorch(script_module, input_shapes)
    assert tvm.ir.structural_equal(mod, mod_with_span, map_free_vars=True)

    pattern_table = get_pattern_table("test_table")
    with tvm.transform.PassContext(opt_level=3):
        pass_list = [
            tvm.relay.transform.SimplifyInference(),
            tvm.relay.transform.MergeComposite(pattern_table),
        ]
        composite_partition = tvm.transform.Sequential(pass_list)
        partitioned = composite_partition(mod)
    return partitioned
def test_qnn_mergecomposite():
    """Trace a statically quantized resnet18 and run the MergeComposite pass."""
    from torchvision.models.quantization import resnet as qresnet

    model = qresnet.resnet18(pretrained=True)
    model.eval()
    inp = torch.zeros((1, 3, 224, 224))

    # Standard post-training static quantization flow.
    model.fuse_model()
    model.qconfig = torch.quantization.get_default_qconfig("fbgemm")
    torch.quantization.prepare(model, inplace=True)
    model(inp)
    torch.quantization.convert(model, inplace=True)

    traced = torch.jit.trace(model, inp).eval()
    input_name = "image"
    run_qnn_mergecomposite(traced, input_name, inp.shape)
def test_keep_quantized_weight():
    """Results must be identical whether quantized weights are kept at import
    time (keep_quantized_weight=True) or dequantized (False)."""
    qmodules = []
    for per_channel in (False, True):
        qmodules.append(((1, 3, 224, 224), ConvBn(), per_channel))
        qmodules.append(((16, 16), Linear(), per_channel))

    for ishape, raw_module, per_channel in qmodules:
        raw_module.eval()
        inp = torch.rand(ishape)
        quantize_model(raw_module, inp, per_channel=per_channel)
        script_module = torch.jit.trace(raw_module, inp).eval()

        input_name = "input"
        input_data = inp.numpy().copy()

        def run(keep_quantized_weight):
            # Build and execute a runtime for the requested weight handling.
            runtime = get_tvm_runtime(
                script_module, input_name, ishape, keep_quantized_weight=keep_quantized_weight
            )
            runtime.set_input(input_name, input_data)
            runtime.run()
            return runtime.get_output(0).numpy()

        tvm.testing.assert_allclose(run(False), run(True))
def test_tuple_lowered():
    """QAT-quantize a two-output backbone and check the traced tuple output
    lowers to two qnn.dequantize nodes with distinct scales."""
    # See the following discuss thread for details
    # https://discuss.tvm.apache.org/t/bug-frontend-pytorch-relay-ir-is-inconsistent-with-that-of-the-original-model/12010
    class ConvBnRelu(nn.Module):
        # Conv2d + BatchNorm2d + ReLU; acts as a depthwise conv when groups > 1.
        def __init__(self, inp, oup, kernel_size=3, stride=1, padding=1, bias=True, groups=1):
            super(ConvBnRelu, self).__init__()
            if groups > 1:
                self.conv = nn.Conv2d(
                    inp, inp, kernel_size, stride, padding, bias=bias, groups=groups
                )
                self.bn = nn.BatchNorm2d(inp)
            else:
                self.conv = nn.Conv2d(
                    inp, oup, kernel_size, stride, padding, bias=bias, groups=groups
                )
                self.bn = nn.BatchNorm2d(oup)
            self.relu = nn.ReLU(inplace=True)
        def forward(self, inputs):
            x = self.conv(inputs)
            x = self.bn(x)
            x = self.relu(x)
            return x
    def conv_bn(inp, oup, stride=1, width_multiplier=1):
        # Plain 3x3 conv block (width_multiplier is accepted but unused).
        return ConvBnRelu(inp, oup, kernel_size=3, stride=stride, padding=1, bias=False)
    def conv_dw(inp, oup, stride, width_multiplier=1, padding=1):
        # Depthwise-separable block: depthwise 3x3 followed by pointwise 1x1.
        dw_block = nn.Sequential()
        depth_wise = ConvBnRelu(
            inp, oup, kernel_size=3, stride=stride, padding=padding, bias=False, groups=inp
        )
        point_wise = ConvBnRelu(inp, oup, kernel_size=1, stride=1, padding=0, bias=False)
        dw_block.add_module("depth_wise", depth_wise)
        dw_block.add_module("point_wise", point_wise)
        return dw_block
    class Backbone(nn.Module):
        def __init__(self, width_multiplier=1):
            super(Backbone, self).__init__()
            self.width_multiplier = width_multiplier
            self.conv1 = conv_bn(3, 16, 2, self.width_multiplier)
            self.conv2 = conv_dw(16, 32, 1, self.width_multiplier)
        def forward(self, inputs):
            # Both intermediate feature maps are returned → tuple output.
            x1 = self.conv1(inputs)
            x2 = self.conv2(x1)
            return [x1, x2]
    class QuantizableBackbone(nn.Module):
        def __init__(self, inputsize=(128, 128)):
            super(QuantizableBackbone, self).__init__()
            self.quant = QuantStub()
            self.dequant = DeQuantStub()
            self.backbone = Backbone()
        def fuse_model(self):
            # fuse_modules_qat is the QAT fusion API in newer torch; fall back
            # to plain fuse_modules on older versions.
            fuse_modules_qat = getattr(torch.ao.quantization, "fuse_modules_qat", fuse_modules)
            for idx, m in enumerate(self.modules()):
                if type(m) == ConvBnRelu:
                    fuse_modules_qat(m, ["conv", "bn", "relu"], inplace=True)
        def forward(self, input):
            input = self.quant(input)
            y0, y1 = self.backbone(input)
            y0 = self.dequant(y0)
            y1 = self.dequant(y1)
            return y0, y1
    # QAT flow: fuse, prepare, run one calibration batch, convert to int8.
    fp32_input = torch.randn(1, 3, 128, 128)
    model = QuantizableBackbone()
    model.train()
    model.fuse_model()
    model.qconfig = get_default_qat_qconfig("qnnpack")
    prepare_qat(model, inplace=True)
    model.eval()
    model(fp32_input)
    model_int8 = torch.quantization.convert(model, inplace=True)
    script_module = torch.jit.trace(model_int8, fp32_input).eval()
    input_infos = [("input", (fp32_input.shape, "float32"))]
    with tvm.testing.disable_span_filling():
        mod, _ = relay.frontend.from_pytorch(script_module, input_infos)
    with tvm.testing.enable_span_filling():
        mod_with_span, _ = relay.frontend.from_pytorch(script_module, input_infos)
    assert tvm.ir.structural_equal(mod, mod_with_span, map_free_vars=True)
    output = mod["main"].body
    # The lowered output must be a 2-tuple of qnn.dequantize calls, each
    # carrying its own (distinct) scale constant.
    assert isinstance(output, relay.Tuple) and len(output) == 2
    dq1, dq2 = output
    assert dq1.op.name == "qnn.dequantize" and dq2.op.name == "qnn.dequantize"
    scale1 = dq1.args[1].data.numpy().item()
    scale2 = dq2.args[1].data.numpy().item()
    assert scale1 != scale2
| 28,002 | 33.829602 | 122 | py |
tvm | tvm-main/tests/python/frontend/pytorch/test_object_detection.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=import-self, invalid-name, unused-argument
"""Test torch vision fasterrcnn and maskrcnn models"""
import numpy as np
import cv2
import torch
import torchvision
import tvm
import tvm.testing
from tvm import relay
from tvm.runtime.vm import VirtualMachine
from tvm.relay.frontend.pytorch_utils import (
rewrite_nms_to_batched_nms,
rewrite_batched_nms_with_max_out_size,
rewrite_scatter_to_gather,
)
from tvm.contrib.download import download
in_size = 300  # test images are resized to in_size x in_size before inference
def process_image(img):
    """Load an image file and return it as a normalized NCHW float tensor."""
    bgr = cv2.imread(img).astype("float32")
    bgr = cv2.resize(bgr, (in_size, in_size))
    # OpenCV loads BGR; the models expect RGB.
    rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
    tensor = torch.from_numpy(rgb / 255.0).permute(2, 0, 1).float()
    return torch.unsqueeze(tensor, axis=0)
def do_trace(model, inp, in_size=in_size):
    """Trace ``model`` with ``inp`` and return the traced module in eval mode."""
    traced = torch.jit.trace(model, inp)
    traced.eval()
    return traced
def dict_to_tuple(out_dict):
    """Flatten a detection output dict into a tuple; masks appended if present."""
    keys = ("boxes", "scores", "labels")
    if "masks" in out_dict:
        keys = ("boxes", "scores", "labels", "masks")
    return tuple(out_dict[key] for key in keys)
class TraceWrapper(torch.nn.Module):
    """Wraps a detection model so tracing sees a tuple output instead of a
    list of dicts (torch.jit.trace cannot handle dict outputs)."""

    def __init__(self, model):
        super().__init__()
        self.model = model

    def forward(self, inp):
        detections = self.model(inp)
        # Only the first image's detections are returned, as a flat tuple.
        return dict_to_tuple(detections[0])
def generate_jit_model(index):
    """Build and trace a torchvision detection model.

    ``index`` selects the architecture: 0 for FasterRCNN, 1 for MaskRCNN.
    Returns the traced TorchScript module.
    """
    model_funcs = [
        torchvision.models.detection.fasterrcnn_resnet50_fpn,
        torchvision.models.detection.maskrcnn_resnet50_fpn,
    ]
    model = TraceWrapper(model_funcs[index](pretrained=True, rpn_pre_nms_top_n_test=1000))
    model.eval()

    inp = torch.Tensor(np.random.uniform(0.0, 250.0, size=(1, 3, in_size, in_size)))

    with torch.no_grad():
        out = model(inp)
        script_module = do_trace(model, inp)
        script_out = script_module(inp)
        # Both the eager and the traced model must detect at least one object.
        assert len(out[0]) > 0 and len(script_out[0]) > 0
    return script_module
def test_detection_models():
    """Run torchvision MaskRCNN through the Relay VM, compare with PyTorch,
    then verify the NMS/scatter rewrite passes change the IR but keep the
    results identical."""
    img = "test_street_small.jpg"
    img_url = (
        "https://raw.githubusercontent.com/dmlc/web-data/master/gluoncv/detection/street_small.jpg"
    )
    download(img_url, img)
    input_shape = (1, 3, in_size, in_size)
    input_name = "input0"
    shape_list = [(input_name, input_shape)]
    # index 1 selects MaskRCNN (see generate_jit_model).
    scripted_model = generate_jit_model(1)
    # Import with and without span filling and require identical IR.
    with tvm.testing.disable_span_filling():
        mod, params = relay.frontend.from_pytorch(scripted_model, shape_list)
    with tvm.testing.enable_span_filling():
        mod_with_span, _ = relay.frontend.from_pytorch(scripted_model, shape_list)
    assert tvm.ir.structural_equal(mod, mod_with_span, map_free_vars=True)
    data = process_image(img)
    data_np = data.detach().numpy()
    with torch.no_grad():
        pt_res = scripted_model(data)
    def compile_and_run_vm(mod, params, data_np, target):
        # Detection models contain dynamic shapes, so use the Relay VM.
        with tvm.transform.PassContext(opt_level=3):
            vm_exec = relay.vm.compile(mod, target=target, params=params)
        dev = tvm.device(target, 0)
        vm = VirtualMachine(vm_exec, dev)
        vm.set_input("main", **{input_name: data_np})
        return vm.run()
    for target in ["llvm"]:
        tvm_res = compile_and_run_vm(mod, params, data_np, target)
        # Bounding boxes
        tvm.testing.assert_allclose(
            pt_res[0].cpu().numpy(), tvm_res[0].numpy(), rtol=1e-5, atol=1e-5
        )
        # Scores
        tvm.testing.assert_allclose(
            pt_res[1].cpu().numpy(), tvm_res[1].numpy(), rtol=1e-5, atol=1e-5
        )
        # Class ids
        np.testing.assert_equal(pt_res[2].cpu().numpy(), tvm_res[2].numpy())
        score_threshold = 0.9
        print("Num boxes:", pt_res[0].cpu().numpy().shape[0])
        print("Num valid boxes:", np.sum(pt_res[1].cpu().numpy() >= score_threshold))
    # Each rewrite pass must actually change the IR.
    before = mod["main"]
    mod = rewrite_nms_to_batched_nms(mod)
    after = mod["main"]
    assert not tvm.ir.structural_equal(after, before)
    # TODO(masahi): It seems this rewrite causes flaky segfaults on CI
    # See https://github.com/apache/tvm/issues/7363
    # before = mod["main"]
    # mod = rewrite_batched_nms_with_max_out_size(mod)
    # after = mod["main"]
    # assert not tvm.ir.structural_equal(after, before)
    before = mod["main"]
    mod = rewrite_scatter_to_gather(mod, 4)  # num_scales is 4 for maskrcnn_resnet50_fpn
    after = mod["main"]
    assert not tvm.ir.structural_equal(after, before)
    tvm_res_after_rewrite = compile_and_run_vm(mod, params, data_np, "llvm")
    # Results should be equivalent after rewriting
    for res1, res2 in zip(tvm_res, tvm_res_after_rewrite):
        tvm.testing.assert_allclose(res1.numpy(), res2.numpy())
| 5,488 | 31.672619 | 99 | py |
tvm | tvm-main/tests/python/frontend/pytorch/test_rnns.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import torch
import tvm
import tvm.testing
import onnx
import io
import sys
from tvm import relay
from tvm.contrib import graph_executor
from torch import nn
## LSTM parameters
lstm_feature_size = 16
lstm_hidden_size = 32
lstm_projection_size = 20
## GRU parameters
gru_feature_size = 8
gru_hidden_size = 16
# Common sequence/batch settings shared by all RNN test models below
num_layers = 2
seqs_length = 2
batch_size = 2
##RNN parameters
rnn_feature_size = 8
rnn_hidden_size = 16
class RNN_Model(nn.Module):
    """
    Common base class for the RNN/GRU/LSTM test models.

    Subclasses must assign ``self.model`` (the wrapped torch recurrent layer)
    and implement the input-description helpers; the base class provides the
    shared forward pass and random weight initialization.
    """

    def __init__(self):
        super().__init__()
        # The concrete layer (nn.RNN / nn.GRU / nn.LSTM) is set by subclasses.
        self.model = None

    def forward(self, input, hidden_init=None):
        """
        Run inference along the wrapped recurrent layer.

        :param input: tensor of shape (seqs_length, batch_size, feature_size),
            or (batch_size, seqs_length, feature_size) when batch_first=True
        :param hidden_init: optional initial hidden state(s) of shape
            (num_layers, batch_size, hidden_size); zeros when None
        :return: the layer's output tensor
        """
        if self.model is None:
            raise NotImplementedError("self.model must be defined in subclasses!")
        output, _ = self.model(input, hidden_init)
        return output

    def gen_rnd_weights(self):
        """Randomize every weight tensor of the wrapped layer."""
        if self.model is None:
            raise NotImplementedError("self.model must be defined in subclasses!")
        with torch.no_grad():
            for weight_group in self.model.all_weights:
                for tensor in weight_group:
                    tensor.data = torch.rand(tensor.shape)

    def get_dummy_inputs(self):
        raise NotImplementedError("subclasses must override get_dummy_inputs()!")

    def get_input_names(self):
        raise NotImplementedError("subclasses must override get_input_names()!")

    def get_shape_desc(self, frontend_type):
        raise NotImplementedError("subclasses must override get_shape_desc(frontend_type)!")

    def get_tvm_inputs(self, dtype):
        raise NotImplementedError("subclasses must override get_tvm_inputs(dtype)!")
class RNN_Model_Impl(RNN_Model):
    """Vanilla nn.RNN wrapped with the common test-model interface."""

    def __init__(
        self,
        seq_len=seqs_length,
        batch_size=batch_size,
        feature_size=rnn_feature_size,
        hidden_size=rnn_hidden_size,
        batch_first=False,
        layer_num=1,
        bidirectional=False,
        use_bias=True,
        rnd_weights_init=False,
        nonlinearity="tanh",
        dropout=0.0,
    ):
        super().__init__()
        # Input shape depends on the batch_first layout.
        if batch_first:
            self.shape = [batch_size, seq_len, feature_size]
        else:
            self.shape = [seq_len, batch_size, feature_size]
        # Hidden state has one group per layer and direction.
        directions = 2 if bidirectional else 1
        self.h0_shape = [directions * layer_num, batch_size, hidden_size]
        # Dummy inputs used for tracing and TVM inference.
        self.dummy_inputs = (torch.rand(self.shape), torch.zeros(self.h0_shape))
        self.model = nn.RNN(
            input_size=feature_size,
            hidden_size=hidden_size,
            num_layers=layer_num,
            nonlinearity=nonlinearity,
            bias=use_bias,
            batch_first=batch_first,
            dropout=dropout,
            bidirectional=bidirectional,
        )
        if rnd_weights_init:
            self.gen_rnd_weights()

    def gen_rnd_weights(self):
        super().gen_rnd_weights()

    def get_dummy_inputs(self):
        return self.dummy_inputs

    def get_input_names(self):
        return ["input", "h0"]

    def get_shape_desc(self, frontend_type):
        if frontend_type == "pt":  # PyTorch
            return [("input", self.shape)]
        if frontend_type == "onnx":  # ONNX
            return {"input": self.shape, "h0": self.h0_shape}
        return None

    def get_tvm_inputs(self, dtype):
        inp, h0 = self.dummy_inputs
        return {
            "input": tvm.nd.array(inp.numpy().astype(dtype)),
            "h0": tvm.nd.array(h0.numpy().astype(dtype)),
        }
class GRU_Model(RNN_Model):
    """nn.GRU wrapped with the common test-model interface."""

    def __init__(
        self,
        seq_len=seqs_length,
        batch_size=batch_size,
        feature_size=gru_feature_size,
        hidden_size=gru_hidden_size,
        batch_first=False,
        layer_num=1,
        bidirectional=False,
        use_bias=True,
        rnd_weights_init=False,
    ):
        super().__init__()
        # Input shape depends on the batch_first layout.
        if batch_first:
            self.shape = [batch_size, seq_len, feature_size]
        else:
            self.shape = [seq_len, batch_size, feature_size]
        # Hidden state has one group per layer and direction.
        directions = 2 if bidirectional else 1
        self.h0_shape = [directions * layer_num, batch_size, hidden_size]
        # Dummy inputs used for tracing and TVM inference.
        self.dummy_inputs = (torch.rand(self.shape), torch.zeros(self.h0_shape))
        self.model = nn.GRU(
            input_size=feature_size,
            hidden_size=hidden_size,
            num_layers=layer_num,
            bidirectional=bidirectional,
            batch_first=batch_first,
            bias=use_bias,
        )
        if rnd_weights_init:
            self.gen_rnd_weights()

    def gen_rnd_weights(self):
        """Randomize all GRU weights.

        With biases, the first (and first reverse) weight group holds
        Wi (3*hidden_size, feature_size), Wh (3*hidden_size, hidden_size),
        Bi (3*hidden_size) and Bh (3*hidden_size); deeper groups take
        hidden-size inputs instead of feature-size. Without biases the
        Bi/Bh tensors are simply absent and skipped.
        """
        super().gen_rnd_weights()

    def get_dummy_inputs(self):
        return self.dummy_inputs

    def get_input_names(self):
        return ["input", "h0"]

    def get_shape_desc(self, frontend_type):
        if frontend_type == "pt":  # PyTorch
            return [("input", self.shape)]
        if frontend_type == "onnx":  # ONNX
            return {"input": self.shape, "h0": self.h0_shape}
        return None

    def get_tvm_inputs(self, dtype):
        inp, h0 = self.dummy_inputs
        return {
            "input": tvm.nd.array(inp.numpy().astype(dtype)),
            "h0": tvm.nd.array(h0.numpy().astype(dtype)),
        }
def check_torch_version_for_proj_in_lstm():
    """Return True when the installed torch supports ``proj_size`` in torch.nn.LSTM.

    The ``proj_size`` parameter was added in torch 1.8.0.

    The previous implementation unpacked ``version.split(".")`` into exactly
    three names, which raised ``ValueError`` for common version strings with
    extra components such as ``1.13.0.dev20220926``; this version only looks
    at the leading numeric part of the first two components, so dev/local
    suffixes (e.g. ``+cu117``) cannot break the check either.
    """
    version = torch.__version__
    parts = []
    for comp in version.split(".")[:2]:
        # Keep only the leading digits of the component ("8rc1" -> "8").
        digits = ""
        for ch in comp:
            if not ch.isdigit():
                break
            digits += ch
        parts.append(int(digits) if digits else 0)
    major, minor = (parts + [0, 0])[:2]
    return major > 1 or (major == 1 and minor >= 8)
class LSTM_Model(RNN_Model):
    """torch.nn.LSTM wrapped with the helpers the frontend conversion tests expect."""

    def __init__(
        self,
        seq_len=seqs_length,
        batch_size=batch_size,
        feature_size=lstm_feature_size,
        hidden_size=lstm_hidden_size,
        batch_first=False,
        layer_num=1,
        bidirectional=False,
        proj_size=0,
        use_bias=True,
        rnd_weights_init=False,
    ):
        super().__init__()
        # Shapes
        self.shape = [seq_len, batch_size, feature_size]
        if batch_first:
            self.shape = [batch_size, seq_len, feature_size]
        layers_num = 2 * layer_num if bidirectional else layer_num
        # With projection enabled, h carries proj_size features; c always keeps hidden_size.
        self.h0_shape = [layers_num, batch_size, hidden_size]
        if proj_size > 0:
            self.h0_shape = [layers_num, batch_size, proj_size]
        self.c0_shape = [layers_num, batch_size, hidden_size]
        # Dummy inputs: random input and zero (h0, c0) state.
        self.dummy_inputs = (
            torch.rand(self.shape),
            (torch.zeros(self.h0_shape), torch.zeros(self.c0_shape)),
        )
        # BUGFIX: construct nn.LSTM from the constructor arguments instead of
        # the module-level lstm_feature_size/lstm_hidden_size globals, which
        # silently ignored any feature_size/hidden_size passed by the caller
        # (GRU_Model already uses its parameters). Defaults are unchanged, so
        # behavior for existing callers is identical.
        if check_torch_version_for_proj_in_lstm():
            self.model = nn.LSTM(
                input_size=feature_size,
                hidden_size=hidden_size,
                num_layers=layer_num,
                bidirectional=bidirectional,
                proj_size=proj_size,
                batch_first=batch_first,
                bias=use_bias,
            )
        else:
            if proj_size > 0:
                print(
                    "WARNING: projection is not supported for torch version less than 1.8.0! ",
                    "LSTM was constructed without projection!",
                )
                # sys.exit()
            self.model = nn.LSTM(
                input_size=feature_size,
                hidden_size=hidden_size,
                num_layers=layer_num,
                bidirectional=bidirectional,
                batch_first=batch_first,
                bias=use_bias,
            )
        if rnd_weights_init:
            self.gen_rnd_weights()

    def gen_rnd_weights(self):
        """
        Generate random weights for the model with biases
        Without projection:
            For first weights group:
                Wi (4*hidden_size, feature_size)
                Wh (4*hidden_size, hidden_size)
                Bi (4*hidden_size)
                Bh (4*hidden_size)
            For first bidirectional weights group:
                Wi (4*hidden_size, feature_size)
                Wh (4*hidden_size, hidden_size)
                Bi (4*hidden_size)
                Bh (4*hidden_size)
            For other weights group:
                Wi (4*hidden_size, hidden_size)
                Wh (4*hidden_size, hidden_size)
                Bi (4*hidden_size)
                Bh (4*hidden_size)
        With projection:
            For first weights group:
                Wi (4*hidden_size, feature_size)
                Wh (4*hidden_size, proj_size)
                Bi (4*hidden_size)
                Bh (4*hidden_size)
                P  (proj_size, hidden_size)
            For first bidirectional weights group:
                Wi (4*hidden_size, feature_size)
                Wh (4*hidden_size, proj_size)
                Bi (4*hidden_size)
                Bh (4*hidden_size)
                P  (proj_size, hidden_size)
            For other weights group:
                Wi (4*hidden_size, proj_size * num_directions)
                Wh (4*hidden_size, proj_size)
                Bi (4*hidden_size)
                Bh (4*hidden_size)
                P  (proj_size, hidden_size)
        For generation of random weights for the model without biases Bi and Bh are skipped
        """
        super().gen_rnd_weights()

    def get_dummy_inputs(self):
        """Return the (input, (h0, c0)) dummy tensors created in the constructor."""
        return self.dummy_inputs

    def get_input_names(self):
        """Names of the graph-level inputs."""
        return ["input", "h0", "c0"]

    def get_shape_desc(self, frontend_type):
        """Shape description in the format expected by the given frontend."""
        shape_desc = None
        if frontend_type == "pt":  # PyTorch
            shape_desc = [("input", self.shape)]
        elif frontend_type == "onnx":  # ONNX
            shape_desc = {
                "input": self.shape,
                "h0": self.h0_shape,
                "c0": self.c0_shape,
            }
        return shape_desc

    def get_tvm_inputs(self, dtype):
        """Dummy inputs converted to TVM NDArrays with the requested dtype."""
        return {
            "input": tvm.nd.array(self.dummy_inputs[0].numpy().astype(dtype)),
            "h0": tvm.nd.array(self.dummy_inputs[1][0].numpy().astype(dtype)),
            "c0": tvm.nd.array(self.dummy_inputs[1][1].numpy().astype(dtype)),
        }
def compare(input, gold_data, rtol=1e-5, atol=1e-5):
    """Assert that `input` matches `gold_data` within the given relative/absolute tolerances."""
    tvm.testing.assert_allclose(input, gold_data, rtol=rtol, atol=atol)
def check_rnn(rnn_type, rnn_mod, target=tvm.target.Target("llvm -mcpu=core-avx2"), dev=tvm.cpu(0)):
    """Build the requested RNN variant, convert it to Relay and compare outputs.

    Parameters
    ----------
    rnn_type : str
        One of "RNN", "GRU", "LSTM".
    rnn_mod : str
        Modifier string, matched by substring: "b" -> bidirectional,
        "s" -> stacked (num_layers), "p" -> LSTM projection,
        "tanh"/"relu" -> nonlinearity for the plain RNN.
    target : tvm.target.Target
        Compilation target.
    dev : tvm.runtime.Device
        Device used for execution.
    """

    def get_model(rnn_type, rnn_mod, args):
        # Fill args from the modifier string.
        if "b" in rnn_mod:
            args["bidirectional"] = True
        if "s" in rnn_mod:
            args["layer_num"] = num_layers
        if "tanh" in rnn_mod:
            args["nonlinearity"] = "tanh"
        if "relu" in rnn_mod:
            args["nonlinearity"] = "relu"
        if rnn_type == "GRU":
            RNN_Model_selector = GRU_Model
        elif rnn_type == "LSTM":
            RNN_Model_selector = LSTM_Model
            if "p" in rnn_mod:
                args["proj_size"] = lstm_projection_size
        elif rnn_type == "RNN":
            RNN_Model_selector = RNN_Model_Impl
        else:
            # Previously an unknown type crashed later with an UnboundLocalError.
            raise ValueError(f"Unsupported rnn_type: {rnn_type}")
        return RNN_Model_selector(**args)

    def get_onnx_model(model):
        # Export the model to an in-memory ONNX protobuf.
        onnx_io = io.BytesIO()
        with torch.no_grad():
            input_names = model.get_input_names()
            inputs = model.get_dummy_inputs()
            # default export (without dynamic input)
            torch.onnx.export(model, inputs, onnx_io, input_names=input_names)
        onnx_io.seek(0, 0)
        return onnx.load_model(onnx_io)

    dtype = "float32"
    device = torch.device("cpu")
    for batch_first in (True, False):
        for use_bias in (True, False):
            for rnd_weights in [True]:  # (True, False):
                model_inputs = {
                    "batch_first": batch_first,
                    "use_bias": use_bias,
                    "rnd_weights_init": rnd_weights,
                }
                model = get_model(rnn_type, rnn_mod, model_inputs)
                model.to(device)
                model.eval()

                # Get golden output from original model
                dummy_inputs = model.get_dummy_inputs()
                golden_output = model.forward(dummy_inputs[0].to(device)).detach().cpu().numpy()

                tvm_output = None
                # `fmt` instead of `format`: avoid shadowing the builtin.
                for fmt in ["pt"]:  # ["pt", "onnx"]:
                    shape_desc = model.get_shape_desc(fmt)
                    if fmt == "pt":
                        # Use torch.jit.trace to generate a torch.jit.ScriptModule via tracing.
                        traced_script_module = torch.jit.trace(model, dummy_inputs[0]).eval()
                        # Import model to Relay
                        with tvm.testing.disable_span_filling():
                            mod, params = relay.frontend.from_pytorch(
                                traced_script_module, shape_desc
                            )
                        with tvm.testing.enable_span_filling():
                            mod_with_span, _ = relay.frontend.from_pytorch(
                                traced_script_module, shape_desc
                            )
                        assert tvm.ir.structural_equal(mod, mod_with_span, map_free_vars=True)
                    elif fmt == "onnx":
                        try:
                            onnx_model = get_onnx_model(model)
                        # Was a bare `except:`, which also swallowed SystemExit
                        # and KeyboardInterrupt.
                        except Exception:
                            print(
                                "WARNING: torch.onnx.export does not support conversion LSTM with projection "
                                "from pytorch! TODO: waiting for the support and correct test after that."
                            )
                            continue
                        # Import model to Relay
                        with tvm.testing.disable_span_filling():
                            mod, params = relay.frontend.from_onnx(onnx_model, shape_desc)
                        with tvm.testing.enable_span_filling():
                            mod_with_span, _ = relay.frontend.from_onnx(onnx_model, shape_desc)
                        assert tvm.ir.structural_equal(mod, mod_with_span, map_free_vars=True)

                    # Model compilation by tvm
                    with tvm.transform.PassContext(opt_level=3):
                        lib = relay.build(mod, target=target, params=params)
                    # Inference of the model with given input data
                    m = graph_executor.GraphModule(lib["default"](dev))
                    # Set inputs
                    tvm_inputs = model.get_tvm_inputs(dtype)
                    m.set_input(**tvm_inputs)
                    # Execute
                    m.run()
                    # Get outputs (converted to numpy array)
                    tvm_output = m.get_output(0).numpy()
                    compare(tvm_output, golden_output)
@tvm.testing.uses_gpu
def test_rnns():
    """Run conversion checks for GRU, LSTM and plain RNN variants on all enabled targets."""
    # Modifier legend: "uni" unidirectional, "s" stacked, "b" bidirectional,
    # "sb" stacked bidirectional. LSTM projection variants ("p", "sp", "bp",
    # "sbp") are skipped for test acceleration.
    configs = [
        ("GRU", ["uni", "s", "b", "sb"]),
        ("LSTM", ["uni", "s", "b", "sb"]),
        ("RNN", ["uni", "s", "b", "sb", "tanh", "relu"]),
    ]
    for target, dev in tvm.testing.enabled_targets():
        for rnn_type, mod_types in configs:
            for mod_type in mod_types:
                check_rnn(rnn_type, mod_type, target, dev)
if __name__ == "__main__":
    # Allow running this test file directly as a script.
    test_rnns()
| 18,137 | 33.747126 | 168 | py |
tvm | tvm-main/tests/python/frontend/tensorflow/test_forward.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=import-self, invalid-name, unused-argument, ungrouped-imports, wrong-import-order
"""
Tensorflow testcases
====================
This article is a test script to test tensorflow operator with Relay.
"""
from __future__ import print_function
from distutils.version import LooseVersion
import threading
import platform
import os.path
from packaging import version as package_version
import numpy as np
import pytest
from PIL import Image
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import graph_util
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.ops import init_ops
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import gen_functional_ops
from tensorflow.python.client import device_lib
try:
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
except ImportError:
import tensorflow as tf
import tvm
from tvm import relay, ir
from tvm.runtime.vm import VirtualMachine
from tvm.relay.frontend.tensorflow import from_tensorflow
from tvm.contrib import graph_executor
from tvm.contrib import utils
import tvm.testing
import tvm.relay.testing.tf as tf_testing
from relay.utils.tag_span import _set_span, _create_span, _verify_structural_equal_with_span
# Only allow TF to run on half the GPU RAM to save the other half
# For TVM
# NOTE(review): the session is opened and immediately closed, presumably so
# TF initializes its GPU allocator with the capped memory fraction before any
# test creates its own session — confirm.
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.5)
gpu_sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
gpu_sess.close()
#######################################################################
# Generic run functions for TVM & tensorflow
# ------------------------------------------
def convert_to_list(x):
    """Wrap `x` in a single-element list unless it already is a list."""
    return x if isinstance(x, list) else [x]
# Mapping from the dtype-name strings used throughout these tests to the
# corresponding TensorFlow dtype objects.
tf_dtypes = {
    "float32": tf.float32,
    "float16": tf.float16,
    "float64": tf.float64,
    "int32": tf.int32,
    "uint8": tf.uint8,
    "int8": tf.int8,
    "int16": tf.int16,
    "uint16": tf.uint16,
    "int64": tf.int64,
}
def vmobj_to_list(o):
    """Converts TVM objects returned by VM execution to Python List."""
    if isinstance(o, tvm.nd.NDArray):
        return [o.numpy()]
    if isinstance(o, tvm.runtime.container.ADT):
        # Recursively flatten every field of the ADT into one list.
        flattened = []
        for field in o:
            flattened += vmobj_to_list(field)
        return flattened
    if isinstance(o, tvm.relay.backend.interpreter.ConstructorValue):
        name = o.constructor.name_hint
        if name == "Cons":
            # Head elements first, then the flattened tail.
            result = vmobj_to_list(o.fields[0])
            result.extend(vmobj_to_list(o.fields[1]))
            return result
        if name == "Nil":
            return []
        if "tensor_nil" in name:
            return [0]
        if "tensor" in name:
            return [o.fields[0].numpy()]
        raise RuntimeError(f"Unknown object type: {name}")
    raise RuntimeError(f"Unknown object type: {type(o)}")
def run_tvm_graph(
    graph_def,
    input_data,
    input_node,
    num_output=1,
    target="llvm",
    out_names=None,
    opt_level=3,
    mode="graph_executor",
    cuda_layout="NCHW",
    layout=None,
    disabled_pass=None,
    ignore_in_shape=False,
    serialize=False,
    convert_config=None,
):
    """Generic function to compile on relay and execute on tvm.

    The graph is imported twice — once with span filling disabled and once
    enabled — and both imports are checked for structural equality. It is then
    executed by one of three executors selected by ``mode``: "debug"
    (interpreter), "vm" (Relay VM, optionally round-tripped through
    save/load when ``serialize`` is True), or the default graph executor.
    Returns a list of numpy outputs.
    """
    input_data = convert_to_list(input_data)
    input_node = convert_to_list(input_node)
    if target == "cuda":
        layout = cuda_layout
    target_host = None
    # shape_dict=None lets the frontend infer shapes (used for dynamic dims).
    if ignore_in_shape:
        shape_dict = None
    else:
        shape_dict = {
            e: i.shape if hasattr(i, "shape") else () for e, i in zip(input_node, input_data)
        }
    with tvm.testing.disable_span_filling():
        mod, params = relay.frontend.from_tensorflow(
            graph_def,
            layout=layout,
            shape=shape_dict,
            outputs=out_names,
            convert_config=convert_config,
        )
    with tvm.testing.enable_span_filling():
        mod_with_span, _ = relay.frontend.from_tensorflow(
            graph_def,
            layout=layout,
            shape=shape_dict,
            outputs=out_names,
            convert_config=convert_config,
        )
    assert tvm.ir.structural_equal(mod["main"], mod_with_span["main"], map_free_vars=True)
    dev = tvm.device(target, 0)
    if mode == "debug":
        inputs = []
        for param in mod["main"].params:
            found = False
            for i, n in enumerate(input_node):
                if n == param.name_hint:
                    found = True
                    inputs.append(tvm.nd.array(input_data[i]))
                    break
            # Interpreter doesn't bind constants, so still need to find in params
            if not found:
                inputs.append(tvm.nd.array(params[param.name_hint]))
        result = relay.create_executor(mode, mod=mod, device=tvm.cpu(), target="llvm").evaluate()(
            *inputs
        )
        return vmobj_to_list(result)
    elif mode == "vm":
        with tvm.transform.PassContext(opt_level=opt_level, disabled_pass=disabled_pass):
            mod = relay.transform.InferType()(mod)
            vm_exec = relay.vm.compile(mod, target="llvm", params=params)
        if serialize:
            # Round-trip through save/load to also exercise serialization.
            code, lib = vm_exec.save()
            vm_exec = tvm.runtime.vm.Executable.load_exec(code, lib)
        vm = VirtualMachine(vm_exec, tvm.cpu())
        inputs = {}
        for e, i in zip(input_node, input_data):
            inputs[e] = tvm.nd.array(i)
        result = vm.invoke("main", **inputs)
        return vmobj_to_list(result)
    else:
        with tvm.transform.PassContext(opt_level=opt_level, disabled_pass=disabled_pass):
            target = tvm.target.Target(target, target_host)
            graph, lib, params = relay.build(mod, target=target, params=params)
        m = graph_executor.create(graph, lib, dev)
        # set inputs (an empty name means the graph is driven by constants)
        for e, i in zip(input_node, input_data):
            if e != "":
                m.set_input(e, tvm.nd.array(i))
        m.set_input(**params)
        # execute
        m.run()
        # get outputs
        assert out_names is None or num_output == len(
            out_names
        ), f"out_names: {out_names} num_output: {num_output}"
        tvm_output_list = [m.get_output(i).numpy() for i in range(num_output)]
        return tvm_output_list
def run_tf_graph(sess, input_data, input_node, output_node):
    """Run the TensorFlow session and return the fetched output tensors."""
    inputs = convert_to_list(input_data)
    in_names = convert_to_list(input_node)
    out_names = convert_to_list(output_node)
    fetch_tensors = [sess.graph.get_tensor_by_name(name) for name in out_names]
    # A single empty input name means the graph is fed by constants only.
    if in_names == [""]:
        return sess.run(fetch_tensors)
    feed_dict = {name: inputs[i] for i, name in enumerate(in_names)}
    return sess.run(fetch_tensors, feed_dict)
def compare_tf_with_tvm(
    in_data,
    in_name,
    out_name,
    init_global_variables=False,
    no_gpu=False,
    opt_level=3,
    mode="graph_executor",
    cuda_layout="NCHW",
    add_shapes_to_graph_def=True,
    targets=None,
    ignore_in_shape=False,
    convert_config=None,
    atol=1e-5,
    rtol=1e-5,
):
    """Generic function to generate and compare tensorflow and TVM output.

    Runs the current default TF graph in a session to get the reference
    output, then runs the same graph through ``run_tvm_graph`` for each
    enabled target (default "llvm" and "cuda") and asserts the outputs agree
    within ``atol``/``rtol``.
    """

    def name_without_num(name):
        # "tensor:0" -> "tensor"; node names have no output index.
        return name.split(":")[0] if ":" in name else name

    out_name = convert_to_list(out_name)
    out_node = [name_without_num(name) for name in out_name]
    in_data = convert_to_list(in_data)
    in_name = convert_to_list(in_name)
    in_node = [name_without_num(name) for name in in_name]
    with tf.Session() as sess:
        if init_global_variables:
            sess.run(variables.global_variables_initializer())
        final_graph_def = (
            tf_testing.AddShapesToGraphDef(sess, out_node)
            if add_shapes_to_graph_def
            else tf.get_default_graph().as_graph_def()
        )
        tf_output = run_tf_graph(sess, in_data, in_name, out_name)
        devices = targets if targets else ["llvm", "cuda"]
        for device in devices:
            _ = tvm.device(device, 0)
            if not tvm.testing.device_enabled(device):
                print(f"Skip because {device} is not enabled")
                continue
            if no_gpu and device == "cuda":
                continue
            if "cublas" in device and not tvm.get_global_func("tvm.contrib.cublas.matmul", True):
                print(f"Skip because cublas is not enabled: {device}")
                continue
            tvm_output = run_tvm_graph(
                final_graph_def,
                in_data,
                in_node,
                target=device,
                out_names=out_name,
                num_output=len(out_name),
                opt_level=opt_level,
                mode=mode,
                cuda_layout=cuda_layout,
                ignore_in_shape=ignore_in_shape,
                convert_config=convert_config,
            )
            # since the names from tensorflow and relay runs are not exactly same,
            # first len(tf_output) will be compared
            for i, tf_out in enumerate(tf_output):
                if not isinstance(tf_out, np.ndarray):
                    assert len(tvm_output[i].shape) == 0  # pylint: disable=len-as-condition
                tvm.testing.assert_allclose(tf_out, tvm_output[i], atol=atol, rtol=rtol)
        sess.close()
def is_gpu_available():
    """Verify gpu is available"""
    devices = device_lib.list_local_devices()
    gpus = [d.name for d in devices if d.device_type == "GPU"]
    if not gpus:
        return False
    print("Tensorflow GPU:", gpus)
    return True
#######################################################################
# Pooling
# -------
def _test_pooling_iteration(input_shape, **kwargs):
    """Build a single tf.nn pool op and compare TF vs TVM outputs."""
    # Deterministic strictly-negative input: -1, -2, -3, ...
    num_elements = np.prod(input_shape)
    x = -np.arange(num_elements, dtype=np.float32).reshape(input_shape) - 1
    with tf.Graph().as_default():
        in_data = array_ops.placeholder(shape=input_shape, dtype="float32")
        nn_ops.pool(in_data, **kwargs)
        out_name = "max_pool:0" if kwargs["pooling_type"] == "MAX" else "avg_pool:0"
        compare_tf_with_tvm(x, "Placeholder:0", out_name)
def _test_pooling(input_shape, **kwargs):
    """Run the pooling comparison in the given layout and, for 4-D inputs on GPU, in NCHW too."""
    _test_pooling_iteration(input_shape, **kwargs)
    if is_gpu_available() and len(input_shape) == 4:
        # Permute NHWC -> NCHW (and an explicit-padding list alongside it).
        nchw_order = (0, 3, 1, 2)
        input_shape = [input_shape[axis] for axis in nchw_order]
        if isinstance(kwargs["padding"], list):
            kwargs["padding"] = [kwargs["padding"][axis] for axis in nchw_order]
        kwargs["data_format"] = "NCHW"
        _test_pooling_iteration(input_shape, **kwargs)
def _test_pooling_dynamic(input_shape, np_shape, **kwargs):
    """Pooling with dynamic height and width dimensions."""
    # The placeholder carries None dims (input_shape); the data uses np_shape.
    x = -np.arange(np.prod(np_shape), dtype=np.float32).reshape(np_shape) - 1
    with tf.Graph().as_default():
        in_data = array_ops.placeholder(shape=input_shape, dtype="float32")
        nn_ops.pool(in_data, **kwargs)
        out_name = "max_pool:0" if kwargs["pooling_type"] == "MAX" else "avg_pool:0"
        # The VM executor is required to handle the dynamic dimensions.
        compare_tf_with_tvm(x, "Placeholder:0", out_name, mode="vm", ignore_in_shape=True)
@tvm.testing.uses_gpu
def test_forward_pooling():
    """Pooling: AVG and MAX over 3-D (pool3d), 2-D and 1-D windows, dynamic
    shapes, dilation, and (TF >= 2.4.1) explicit padding."""
    # TensorFlow only supports NDHWC for max_pool3d on CPU
    for pool_type in ["AVG", "MAX"]:
        # NDHWC is the default layout for max_pool3d and avg_pool3d in TensorFlow
        _test_pooling(
            input_shape=[1, 3, 32, 32, 32],
            window_shape=[2, 2, 2],
            padding="VALID",
            pooling_type=pool_type,
            dilation_rate=[1, 1, 1],
            strides=[2, 2, 2],
        )
        _test_pooling(
            input_shape=[1, 3, 32, 32, 32],
            window_shape=[1, 1, 1],
            padding="SAME",
            pooling_type=pool_type,
            dilation_rate=[1, 1, 1],
            strides=[1, 1, 1],
        )
        _test_pooling(
            input_shape=[1, 3, 32, 32, 32],
            window_shape=[2, 2, 2],
            padding="SAME",
            pooling_type=pool_type,
            dilation_rate=[1, 1, 1],
            strides=[2, 2, 2],
        )
        _test_pooling_dynamic(
            input_shape=[1, None, None, 3],
            np_shape=[1, 32, 32, 3],
            window_shape=[2, 2],
            padding="SAME",
            pooling_type=pool_type,
            dilation_rate=[1, 1],
            strides=[1, 1],
        )
        # test cases for max_pool3d & avg_pool3d with layout NCDHW
        # TensorFlow pool3d  doesn't support NCDHW on cpu
        if is_gpu_available():
            _test_pooling(
                input_shape=[1, 3, 32, 32, 32],
                window_shape=[1, 1, 1],
                padding="SAME",
                pooling_type=pool_type,
                dilation_rate=[1, 1, 1],
                strides=[1, 1, 1],
                data_format="NCDHW",
            )
            _test_pooling(
                input_shape=[1, 3, 32, 32, 32],
                window_shape=[2, 2, 2],
                padding="VALID",
                pooling_type=pool_type,
                dilation_rate=[1, 1, 1],
                strides=[2, 2, 2],
                data_format="NCDHW",
            )
        _test_pooling(
            input_shape=[2, 9, 10, 2],
            window_shape=[1, 1],
            padding="SAME",
            pooling_type=pool_type,
            dilation_rate=[1, 1],
            strides=[1, 1],
        )
        _test_pooling(
            input_shape=[2, 10, 9, 2],
            window_shape=[1, 1],
            padding="SAME",
            pooling_type=pool_type,
            dilation_rate=[1, 1],
            strides=[1, 1],
        )
        _test_pooling(
            input_shape=[2, 9, 10, 2],
            window_shape=[2, 1],
            padding="SAME",
            pooling_type=pool_type,
            dilation_rate=[1, 1],
            strides=[1, 1],
        )
        _test_pooling(
            input_shape=[2, 10, 9, 2],
            window_shape=[2, 3],
            padding="SAME",
            pooling_type=pool_type,
            dilation_rate=[1, 1],
            strides=[2, 1],
        )
        # Tests involving SpaceToBatchND
        _test_pooling(
            input_shape=[1, 1, 2, 1],
            window_shape=[1, 1],
            padding="VALID",
            pooling_type=pool_type,
            dilation_rate=[1, 2],
        )
        _test_pooling(
            input_shape=[1, 2, 1],
            window_shape=[1],
            padding="VALID",
            pooling_type=pool_type,
            dilation_rate=[2],
        )
    # Explicit padding
    if package_version.parse(tf.VERSION) >= package_version.parse("2.4.1"):
        _test_pooling(
            input_shape=[2, 9, 10, 2],
            window_shape=[4, 4],
            padding=[[0, 0], [0, 1], [2, 3], [0, 0]],
            pooling_type="MAX",
            dilation_rate=[1, 1],
            strides=[1, 1],
        )
#######################################################################
# Convolution
# -----------
def _test_convolution(
    opname,
    tensor_in_sizes,
    filter_in_sizes,
    dilations,
    strides,
    padding,
    data_format,
    deconv_output_shape=None,
    add_shapes_to_graph_def=True,
):
    """One iteration of convolution with given shapes and attributes.

    ``opname`` selects the op: "conv" (conv2d), "conv_transpose"
    (conv2d_transpose, which uses ``deconv_output_shape``), or anything else
    for depthwise_conv2d_native. The built graph is compared against TVM via
    ``compare_tf_with_tvm``.
    """
    deconv_output_shape = deconv_output_shape or []
    total_size_1 = np.prod(tensor_in_sizes)
    total_size_2 = np.prod(filter_in_sizes)
    # Initializes the input tensor with array containing incrementing
    # numbers from 1.
    data_array = [f * 1.0 for f in range(1, total_size_1 + 1)]
    filter_array = [f * 1.0 for f in range(1, total_size_2 + 1)]
    with tf.Graph().as_default():
        in_data = array_ops.placeholder(shape=tensor_in_sizes, dtype="float32")
        in_filter = constant_op.constant(filter_array, shape=filter_in_sizes, dtype="float32")
        # TF expects strides/dilations expanded to 4 entries, batch and
        # channel positions depending on the layout.
        if data_format == "NHWC":
            strides = [1] + strides + [1]
            dilations = [1] + dilations + [1]
        else:
            strides = [1, 1] + strides
            dilations = [1, 1] + dilations
        if opname == "conv":
            nn_ops.conv2d(
                in_data,
                in_filter,
                strides=strides,
                dilations=dilations,
                padding=padding,
                data_format=data_format,
            )
            compare_tf_with_tvm(
                np.reshape(data_array, tensor_in_sizes).astype("float32"),
                "Placeholder:0",
                "Conv2D:0",
                add_shapes_to_graph_def=add_shapes_to_graph_def,
            )
        elif opname == "conv_transpose":
            nn_ops.conv2d_transpose(
                in_data,
                in_filter,
                output_shape=deconv_output_shape,
                strides=strides,
                padding=padding,
                data_format=data_format,
            )
            compare_tf_with_tvm(
                np.reshape(data_array, tensor_in_sizes).astype("float32"),
                "Placeholder:0",
                "conv2d_transpose:0",
                add_shapes_to_graph_def=add_shapes_to_graph_def,
            )
        else:
            nn_ops.depthwise_conv2d_native(
                in_data,
                in_filter,
                strides=strides,
                dilations=dilations,
                padding=padding,
                data_format=data_format,
            )
            compare_tf_with_tvm(
                np.reshape(data_array, tensor_in_sizes).astype("float32"),
                "Placeholder:0",
                "DepthwiseConv2dNative:0",
                add_shapes_to_graph_def=add_shapes_to_graph_def,
            )
@pytest.mark.skip(reason="See https://github.com/apache/tvm/issues/10275")
@tvm.testing.uses_gpu
def test_forward_convolution():
    """Convolution: conv2d / depthwise / conv2d_transpose in NCHW (GPU only)
    and NHWC layouts, plus (TF >= 2.4.1) explicit padding."""
    # NCHW variants need a GPU because TF conv ops don't support NCHW on CPU.
    if is_gpu_available():
        _test_convolution("conv", [4, 176, 8, 8], [1, 1, 176, 32], [1, 1], [1, 1], "SAME", "NCHW")
        _test_convolution("conv", [4, 19, 17, 17], [3, 3, 19, 19], [1, 1], [2, 2], "VALID", "NCHW")
        _test_convolution("conv", [4, 124, 17, 17], [1, 1, 124, 19], [1, 1], [1, 1], "SAME", "NCHW")
        _test_convolution("conv", [4, 12, 17, 17], [3, 3, 12, 32], [1, 1], [2, 2], "VALID", "NCHW")
        _test_convolution(
            "depthwise", [4, 176, 8, 8], [1, 1, 176, 1], [1, 1], [1, 1], "SAME", "NCHW"
        )
        _test_convolution(
            "depthwise", [4, 19, 17, 17], [3, 3, 19, 1], [1, 1], [2, 2], "VALID", "NCHW"
        )
        _test_convolution(
            "depthwise", [4, 124, 17, 17], [1, 1, 124, 1], [1, 1], [1, 1], "SAME", "NCHW"
        )
        _test_convolution(
            "depthwise", [4, 12, 17, 17], [3, 3, 12, 1], [1, 1], [2, 2], "VALID", "NCHW"
        )
        _test_convolution(
            "depthwise", [4, 12, 17, 17], [3, 3, 12, 2], [1, 1], [2, 2], "VALID", "NCHW"
        )
        _test_convolution(
            "conv_transpose",
            [4, 32, 8, 8],
            [1, 1, 176, 32],
            [1, 1],
            [1, 1],
            "SAME",
            "NCHW",
            [4, 176, 8, 8],
        )
        _test_convolution(
            "conv_transpose",
            [4, 32, 8, 8],
            [2, 2, 176, 32],
            [1, 1],
            [1, 1],
            "SAME",
            "NCHW",
            [4, 176, 8, 8],
        )
        _test_convolution(
            "conv_transpose",
            [4, 32, 8, 8],
            [2, 2, 176, 32],
            [1, 1],
            [2, 2],
            "SAME",
            "NCHW",
            [4, 176, 15, 15],
        )
        _test_convolution(
            "conv_transpose",
            [4, 32, 8, 8],
            [3, 3, 176, 32],
            [1, 1],
            [1, 1],
            "SAME",
            "NCHW",
            [4, 176, 8, 8],
        )
        _test_convolution(
            "conv_transpose",
            [4, 32, 8, 8],
            [3, 3, 176, 32],
            [1, 1],
            [2, 2],
            "SAME",
            "NCHW",
            [4, 176, 15, 15],
        )
        _test_convolution(
            "conv_transpose",
            [4, 32, 8, 8],
            [3, 3, 176, 32],
            [1, 1],
            [2, 2],
            "SAME",
            "NCHW",
            [4, 176, 16, 16],
        )
        _test_convolution(
            "conv_transpose",
            [4, 19, 8, 8],
            [3, 3, 19, 19],
            [1, 1],
            [2, 2],
            "VALID",
            "NCHW",
            [4, 19, 17, 17],
        )
        _test_convolution(
            "conv_transpose",
            [4, 19, 17, 17],
            [1, 1, 124, 19],
            [1, 1],
            [1, 1],
            "SAME",
            "NCHW",
            [4, 124, 17, 17],
        )
        _test_convolution(
            "conv_transpose",
            [4, 19, 17, 17],
            [3, 3, 124, 19],
            [1, 1],
            [1, 1],
            "SAME",
            "NCHW",
            [4, 124, 17, 17],
        )
        _test_convolution(
            "conv_transpose",
            [4, 32, 8, 8],
            [3, 3, 12, 32],
            [1, 1],
            [2, 2],
            "VALID",
            "NCHW",
            [4, 12, 17, 17],
        )
        # kernel 2x2, strides (2,2)
        _test_convolution(
            "conv_transpose",
            [4, 19, 8, 8],
            [2, 2, 19, 19],
            [1, 1],
            [2, 2],
            "VALID",
            "NCHW",
            [4, 19, 16, 16],
        )
        _test_convolution(
            "conv_transpose",
            [4, 32, 8, 8],
            [2, 2, 12, 32],
            [1, 1],
            [2, 2],
            "VALID",
            "NCHW",
            [4, 12, 16, 16],
        )
        # output channel is 1
        _test_convolution(
            "conv_transpose",
            [1, 19, 8, 8],
            [1, 1, 1, 19],
            [1, 1],
            [1, 1],
            "VALID",
            "NCHW",
            [1, 1, 8, 8],
        )
        _test_convolution(
            "conv_transpose",
            [4, 19, 8, 8],
            [2, 2, 66, 19],
            [1, 1],
            [2, 2],
            "VALID",
            "NCHW",
            [4, 66, 16, 16],
        )
    _test_convolution("conv", [4, 8, 8, 176], [1, 1, 176, 32], [1, 1], [1, 1], "SAME", "NHWC")
    _test_convolution("conv", [4, 17, 17, 19], [3, 3, 19, 19], [1, 1], [2, 2], "VALID", "NHWC")
    _test_convolution("conv", [4, 17, 17, 124], [1, 1, 124, 19], [1, 1], [1, 1], "SAME", "NHWC")
    _test_convolution("conv", [4, 17, 17, 12], [3, 3, 12, 32], [1, 1], [2, 2], "VALID", "NHWC")
    _test_convolution(
        "conv",
        [4, 17, 17, 12],
        [3, 3, 12, 32],
        [1, 1],
        [2, 2],
        "VALID",
        "NHWC",
        add_shapes_to_graph_def=False,
    )
    _test_convolution("depthwise", [4, 8, 8, 176], [1, 1, 176, 1], [1, 1], [1, 1], "SAME", "NHWC")
    _test_convolution("depthwise", [4, 17, 17, 19], [3, 3, 19, 1], [1, 1], [2, 2], "VALID", "NHWC")
    _test_convolution("depthwise", [4, 17, 17, 124], [1, 1, 124, 1], [1, 1], [1, 1], "SAME", "NHWC")
    _test_convolution("depthwise", [4, 17, 17, 12], [3, 3, 12, 1], [1, 1], [2, 2], "VALID", "NHWC")
    _test_convolution("depthwise", [4, 17, 17, 12], [3, 3, 12, 2], [1, 1], [2, 2], "VALID", "NHWC")
    _test_convolution(
        "depthwise",
        [4, 17, 17, 12],
        [3, 3, 12, 2],
        [1, 1],
        [2, 2],
        "VALID",
        "NHWC",
        add_shapes_to_graph_def=False,
    )
    _test_convolution(
        "conv_transpose",
        [4, 8, 8, 32],
        [1, 1, 176, 32],
        [1, 1],
        [1, 1],
        "SAME",
        "NHWC",
        [4, 8, 8, 176],
    )
    _test_convolution(
        "conv_transpose",
        [4, 8, 8, 32],
        [2, 2, 176, 32],
        [1, 1],
        [1, 1],
        "SAME",
        "NHWC",
        [4, 8, 8, 176],
    )
    _test_convolution(
        "conv_transpose",
        [4, 8, 8, 32],
        [2, 2, 176, 32],
        [1, 1],
        [2, 2],
        "SAME",
        "NHWC",
        [4, 15, 15, 176],
    )
    _test_convolution(
        "conv_transpose",
        [4, 8, 8, 32],
        [3, 3, 176, 32],
        [1, 1],
        [1, 1],
        "SAME",
        "NHWC",
        [4, 8, 8, 176],
    )
    _test_convolution(
        "conv_transpose",
        [4, 8, 8, 32],
        [3, 3, 176, 32],
        [1, 1],
        [2, 2],
        "SAME",
        "NHWC",
        [4, 15, 15, 176],
    )
    _test_convolution(
        "conv_transpose",
        [4, 8, 8, 32],
        [3, 3, 176, 32],
        [1, 1],
        [2, 2],
        "SAME",
        "NHWC",
        [4, 16, 16, 176],
    )
    _test_convolution(
        "conv_transpose",
        [4, 8, 8, 19],
        [3, 3, 19, 19],
        [1, 1],
        [2, 2],
        "VALID",
        "NHWC",
        [4, 17, 17, 19],
    )
    _test_convolution(
        "conv_transpose",
        [4, 17, 17, 19],
        [1, 1, 124, 19],
        [1, 1],
        [1, 1],
        "SAME",
        "NHWC",
        [4, 17, 17, 124],
    )
    _test_convolution(
        "conv_transpose",
        [4, 17, 17, 19],
        [3, 3, 124, 19],
        [1, 1],
        [1, 1],
        "SAME",
        "NHWC",
        [4, 17, 17, 124],
    )
    _test_convolution(
        "conv_transpose",
        [4, 8, 8, 32],
        [3, 3, 12, 32],
        [1, 1],
        [2, 2],
        "VALID",
        "NHWC",
        [4, 17, 17, 12],
    )
    # kernel 2x2, strides (2,2)
    _test_convolution(
        "conv_transpose",
        [4, 8, 8, 19],
        [2, 2, 19, 19],
        [1, 1],
        [2, 2],
        "VALID",
        "NHWC",
        [4, 16, 16, 19],
    )
    _test_convolution(
        "conv_transpose",
        [4, 8, 8, 32],
        [2, 2, 12, 32],
        [1, 1],
        [2, 2],
        "VALID",
        "NHWC",
        [4, 16, 16, 12],
    )
    # output channel is 1
    _test_convolution(
        "conv_transpose",
        [1, 8, 8, 19],
        [1, 1, 1, 19],
        [1, 1],
        [1, 1],
        "VALID",
        "NHWC",
        [1, 8, 8, 1],
    )
    # Test without adding shapes to graph def
    _test_convolution(
        "conv_transpose",
        [4, 8, 8, 32],
        [1, 1, 176, 32],
        [1, 1],
        [1, 1],
        "SAME",
        "NHWC",
        [4, 8, 8, 176],
        add_shapes_to_graph_def=False,
    )
    _test_convolution(
        "conv_transpose",
        [4, 8, 8, 19],
        [2, 2, 66, 19],
        [1, 1],
        [2, 2],
        "VALID",
        "NHWC",
        [4, 16, 16, 66],
    )
    # Explicit padding
    if package_version.parse(tf.VERSION) >= package_version.parse("2.4.1"):
        _test_convolution(
            "conv",
            [4, 8, 8, 16],
            [1, 1, 16, 32],
            [1, 1],
            [1, 1],
            [[0, 0], [2, 3], [0, 1], [0, 0]],
            "NHWC",
        )
        _test_convolution(
            "depthwise",
            [4, 8, 8, 16],
            [1, 1, 16, 1],
            [1, 1],
            [1, 1],
            [[0, 0], [2, 3], [0, 1], [0, 0]],
            "NHWC",
        )
        _test_convolution(
            "conv_transpose",
            [4, 8, 8, 32],
            [3, 3, 176, 32],
            [1, 1],
            [2, 2],
            [[0, 0], [1, 0], [1, 0], [0, 0]],
            "NHWC",
            [4, 16, 16, 176],
        )
#######################################################################
# Convolution3D
# -------------
def _test_convolution3d(
    opname,
    tensor_in_sizes,
    filter_in_sizes,
    dilations,
    strides,
    padding,
    data_format,
    deconv_output_shape=None,
    add_shapes_to_graph_def=True,
):
    """One iteration of 3D convolution with given shapes and attributes.

    Only ``opname == "conv"`` builds a graph; other values are silently
    ignored. The built conv3d graph is compared against TVM via
    ``compare_tf_with_tvm``.
    """
    deconv_output_shape = deconv_output_shape or []
    total_size_1 = np.prod(tensor_in_sizes)
    total_size_2 = np.prod(filter_in_sizes)
    # Initializes the input tensor with array containing incrementing
    # numbers from 1.
    data_array = [f * 1.0 for f in range(1, total_size_1 + 1)]
    filter_array = [f * 1.0 for f in range(1, total_size_2 + 1)]
    with tf.Graph().as_default():
        in_data = array_ops.placeholder(shape=tensor_in_sizes, dtype="float32")
        in_filter = constant_op.constant(filter_array, shape=filter_in_sizes, dtype="float32")
        # Strides/dilations expanded to 5 entries, positions depending on layout.
        if data_format == "NDHWC":
            strides = [1] + strides + [1]
            dilations = [1] + dilations + [1]
        else:
            strides = [1, 1] + strides
            dilations = [1, 1] + dilations
        if opname == "conv":
            nn_ops.conv3d(
                in_data,
                in_filter,
                strides=strides,
                dilations=dilations,
                padding=padding,
                data_format=data_format,
            )
            compare_tf_with_tvm(
                np.reshape(data_array, tensor_in_sizes).astype("float32"),
                "Placeholder:0",
                "Conv3D:0",
                cuda_layout="NCDHW",
                add_shapes_to_graph_def=add_shapes_to_graph_def,
            )
@tvm.testing.uses_gpu
def test_forward_convolution3d():
    """Convolution3d: conv3d in NCDHW (GPU only) and NDHWC layouts."""
    # NCDHW variants need a GPU because TF conv3d doesn't support NCDHW on CPU.
    if is_gpu_available():
        _test_convolution3d(
            "conv", [4, 176, 8, 8, 8], [1, 1, 1, 176, 32], [1, 1, 1], [1, 1, 1], "SAME", "NCDHW"
        )
        _test_convolution3d(
            "conv", [4, 19, 17, 17, 17], [3, 3, 3, 19, 19], [1, 1, 1], [2, 2, 2], "VALID", "NCDHW"
        )
        _test_convolution3d(
            "conv", [4, 124, 17, 17, 17], [1, 1, 1, 124, 19], [1, 1, 1], [1, 1, 1], "SAME", "NCDHW"
        )
        _test_convolution3d(
            "conv", [4, 12, 17, 17, 17], [3, 3, 3, 12, 32], [1, 1, 1], [2, 2, 2], "VALID", "NCDHW"
        )
    _test_convolution3d(
        "conv", [4, 8, 8, 8, 176], [1, 1, 1, 176, 32], [1, 1, 1], [1, 1, 1], "SAME", "NDHWC"
    )
    _test_convolution3d(
        "conv", [4, 17, 17, 17, 19], [3, 3, 3, 19, 19], [1, 1, 1], [2, 2, 2], "VALID", "NDHWC"
    )
    _test_convolution3d(
        "conv", [4, 17, 17, 17, 124], [1, 1, 1, 124, 19], [1, 1, 1], [1, 1, 1], "SAME", "NDHWC"
    )
    _test_convolution3d(
        "conv", [4, 17, 17, 17, 12], [3, 3, 3, 12, 32], [1, 1, 1], [2, 2, 2], "VALID", "NDHWC"
    )
    # Test without adding shapes to graph def
    _test_convolution3d(
        "conv",
        [4, 17, 17, 17, 12],
        [3, 3, 3, 12, 32],
        [1, 1, 1],
        [2, 2, 2],
        "VALID",
        "NDHWC",
        add_shapes_to_graph_def=False,
    )
#######################################################################
# Convolution3D Transpose
# -----------------------
def _test_convolution3d_transpose(
    data_shape,
    filter_shape,
    strides,
    padding,
    output_shape,
    data_format="NCDHW",
    add_shapes_to_graph_def=True,
):
    """Build one conv3d_transpose op and compare TF vs TVM outputs."""
    dtype = "float32"
    # Random input and filter values; shapes carry the interesting cases.
    input_values = np.random.uniform(size=data_shape).astype(dtype)
    filter_values = np.random.uniform(size=filter_shape).astype(dtype)
    # Expand strides to 5 entries; batch/channel positions depend on layout.
    strides = [1] + strides + [1] if data_format == "NDHWC" else [1, 1] + strides
    with tf.Graph().as_default():
        placeholder = array_ops.placeholder(shape=data_shape, dtype=dtype)
        filter_const = constant_op.constant(filter_values, shape=filter_shape, dtype=dtype)
        nn_ops.conv3d_transpose(
            placeholder,
            filter_const,
            output_shape=output_shape,
            strides=strides,
            padding=padding,
            data_format=data_format,
        )
        compare_tf_with_tvm(
            input_values,
            "Placeholder:0",
            "conv3d_transpose:0",
            cuda_layout="NDHWC",
            add_shapes_to_graph_def=add_shapes_to_graph_def,
        )
@tvm.testing.uses_gpu
def test_forward_convolution3d_transpose():
    """Convolution3d transpose"""
    # NCDHW layouts are exercised only when a GPU is present.
    if is_gpu_available():
        for data_shape, filter_shape, strides, padding, output_shape in (
            ([1, 10, 8, 8, 8], [1, 1, 1, 6, 10], [1, 1, 1], "VALID", [1, 6, 8, 8, 8]),
            ([4, 9, 8, 8, 8], [1, 1, 1, 6, 9], [1, 1, 1], "VALID", [4, 6, 8, 8, 8]),
            ([1, 3, 8, 8, 8], [1, 1, 1, 6, 3], [2, 2, 2], "SAME", [1, 6, 15, 15, 15]),
            ([1, 16, 8, 8, 8], [3, 3, 3, 6, 16], [3, 3, 3], "VALID", [1, 6, 24, 24, 24]),
        ):
            _test_convolution3d_transpose(
                data_shape=data_shape,
                filter_shape=filter_shape,
                strides=strides,
                padding=padding,
                output_shape=output_shape,
            )
    # NDHWC layouts run on every target.
    for data_shape, filter_shape, strides, padding, output_shape in (
        ([1, 8, 8, 8, 10], [1, 1, 1, 6, 10], [1, 1, 1], "VALID", [1, 8, 8, 8, 6]),
        ([4, 8, 8, 8, 9], [1, 1, 1, 6, 9], [1, 1, 1], "VALID", [4, 8, 8, 8, 6]),
        ([1, 8, 8, 8, 3], [1, 1, 1, 6, 3], [2, 2, 2], "SAME", [1, 15, 15, 15, 6]),
        ([1, 8, 8, 8, 16], [3, 3, 3, 6, 16], [3, 3, 3], "VALID", [1, 24, 24, 24, 6]),
    ):
        _test_convolution3d_transpose(
            data_shape=data_shape,
            filter_shape=filter_shape,
            strides=strides,
            padding=padding,
            output_shape=output_shape,
            data_format="NDHWC",
        )
    # Same as the last case but without shape annotations in the graph def.
    _test_convolution3d_transpose(
        data_shape=[1, 8, 8, 8, 16],
        filter_shape=[3, 3, 3, 6, 16],
        strides=[3, 3, 3],
        padding="VALID",
        output_shape=[1, 24, 24, 24, 6],
        data_format="NDHWC",
        add_shapes_to_graph_def=False,
    )
#######################################################################
# BiasAdd
# -----------
def _test_biasadd(tensor_in_sizes, data_format):
    """One iteration of BiasAdd with the given input shape and layout."""
    # Channel axis is 1 for NCHW and 3 for NHWC; the bias is per-channel.
    channel_axis = 1 if data_format == "NCHW" else 3
    tensor_bias_sizes = [tensor_in_sizes[channel_axis]]
    total_size_1 = 1
    for dim in tensor_in_sizes:
        total_size_1 *= dim
    # Fill inputs with 1, 2, 3, ... so failures are reproducible.
    data_array = [float(v) for v in range(1, total_size_1 + 1)]
    bias_array = [float(v) for v in range(1, tensor_bias_sizes[0] + 1)]
    with tf.Graph().as_default():
        in_data = array_ops.placeholder(shape=tensor_in_sizes, dtype="float32")
        in_bias = constant_op.constant(bias_array, shape=tensor_bias_sizes, dtype="float32")
        nn_ops.bias_add(in_data, in_bias, data_format=data_format)
        compare_tf_with_tvm(
            np.reshape(data_array, tensor_in_sizes).astype("float32"), "Placeholder:0", "BiasAdd:0"
        )
@tvm.testing.uses_gpu
def test_forward_biasadd():
    """Bias add"""
    # NCHW layouts are exercised only when a GPU is present.
    if is_gpu_available():
        for nchw_shape in ([4, 176, 8, 8], [1, 100, 1, 1], [4, 19, 17, 17], [4, 124, 3, 3]):
            _test_biasadd(nchw_shape, "NCHW")
    for nhwc_shape in ([4, 8, 8, 176], [1, 1, 1, 100], [4, 17, 17, 19], [4, 3, 3, 124]):
        _test_biasadd(nhwc_shape, "NHWC")
def _test_forward_where(input_shape):
    """tf.where in its argwhere (single-argument) form on random data."""
    with tf.Graph().as_default():
        dtype = tf.float32
        values = np.random.choice([0, 1, -2, 3, -1, 0.1, -0.2], size=input_shape)
        t = tf.constant(values.astype(dtype.name))
        out = tf.where(t)
        # The output size is data dependent, so only debug/vm executors apply.
        for mode in ("debug", "vm"):
            compare_tf_with_tvm([], [], out.name, mode=mode)
def test_forward_argwhere():
    """Argwhere across ranks 1 through 5."""
    for rank in range(1, 6):
        _test_forward_where((5,) * rank)
def _test_forward_where_with_broadcast(in_shape, cond_shape):
    """tf.where whose condition shape broadcasts against the operand shapes."""
    pool = list(np.arange(10).astype("float32"))
    cond_lhs = np.random.choice(pool, size=cond_shape)
    cond_rhs = np.random.choice(pool, size=cond_shape)
    x_np = np.random.choice(pool, size=in_shape)
    y_np = np.random.choice(pool, size=in_shape)
    with tf.Graph().as_default():
        in1 = tf.placeholder(shape=cond_shape, dtype="float32", name="in1")
        in2 = tf.placeholder(shape=cond_shape, dtype="float32", name="in2")
        condition = math_ops.less(in1, in2, name="less")
        lhs = tf.placeholder(shape=in_shape, dtype="float32", name="x")
        rhs = tf.placeholder(shape=in_shape, dtype="float32", name="y")
        out = tf.where(condition, lhs, rhs)
        compare_tf_with_tvm(
            [cond_lhs, cond_rhs, x_np, y_np], ["in1:0", "in2:0", "x:0", "y:0"], out.name
        )
def test_forward_where_with_broadcast():
    """Where with broadcasting condition shapes."""
    for in_shape, cond_shape in (((5, 2), (5,)), ((5, 7), (5,)), ((3, 2, 5), (3,))):
        _test_forward_where_with_broadcast(in_shape, cond_shape)
#######################################################################
# SpaceToBatchND
# --------------
def _test_space_to_batch_nd(input_shape, block_shape, paddings, dtype="int32"):
    """One iteration of SpaceToBatchND with explicit paddings."""
    values = np.random.uniform(0, 5, size=input_shape).astype(dtype)
    with tf.Graph().as_default():
        placeholder = tf.placeholder(shape=input_shape, dtype=dtype)
        result = tf.space_to_batch_nd(placeholder, block_shape, paddings)
        compare_tf_with_tvm(values, placeholder.name, result.name)
def _test_space_to_batch_nd_infer_paddings(input_shape, block_shape, dtype="int32"):
    """SpaceToBatchND whose paddings must be inferred from a sub-graph."""
    values = np.random.uniform(0, 5, size=input_shape).astype(dtype)
    padding_np = np.array([0, 1]).astype(np.int32).reshape((1, 2))
    with tf.Graph().as_default():
        placeholder = tf.placeholder(shape=input_shape, dtype=dtype)
        const1 = tf.constant(padding_np, dtype=tf.int32)
        # Route paddings through tf.reverse so they are not a direct graph
        # input and must be recovered with infer_value_simulated.
        paddings = tf.reverse(const1, axis=[-1])
        result = tf.space_to_batch_nd(placeholder, block_shape, paddings)
        compare_tf_with_tvm(values, placeholder.name, result.name)
def test_forward_space_to_batch_nd():
    """SpaceToBatchNd"""
    # test cases: https://www.tensorflow.org/api_docs/cc/class/tensorflow/ops/space-to-batch-n-d
    # pylint: disable=line-too-long
    # https://github.com/tensorflow/tensorflow/blob/24f578/tensorflow/python/kernel_tests/spacetobatch_op_test.py
    for input_shape, block_shape, paddings, dtype in (
        ([1, 2, 2, 1], [2, 2], [[0, 0], [0, 0]], "int32"),
        ([1, 2, 2, 3], [2, 2], [[0, 0], [0, 0]], "int32"),
        ([1, 4, 4, 1], [2, 2], [[0, 0], [0, 0]], "int32"),
        ([2, 2, 4, 1], [2, 2], [[0, 0], [2, 0]], "int64"),
        ([2, 3], [2], [[1, 0]], "float32"),
        ([2, 3, 2], [2], [[1, 0]], "float64"),
    ):
        _test_space_to_batch_nd(input_shape, block_shape, paddings, dtype=dtype)
    _test_space_to_batch_nd_infer_paddings(input_shape=[2, 3, 2], block_shape=[2])
#######################################################################
# BatchToSpaceND
# --------------
def _test_batch_to_space_nd(input_shape, block_shape, crops, dtype="int32"):
    """One iteration of BatchToSpaceND with explicit crops."""
    values = np.random.uniform(0, 5, size=input_shape).astype(dtype)
    with tf.Graph().as_default():
        placeholder = tf.placeholder(shape=input_shape, dtype=dtype)
        result = tf.batch_to_space_nd(placeholder, block_shape, crops)
        compare_tf_with_tvm(values, placeholder.name, result.name)
def test_forward_batch_to_space_nd():
    """BatchToSpaceNd"""
    # test cases: https://www.tensorflow.org/api_docs/cc/class/tensorflow/ops/batch-to-space-n-d
    # pylint: disable=line-too-long
    # https://github.com/tensorflow/tensorflow/blob/24f578/tensorflow/python/kernel_tests/batchtospace_op_test.py
    for input_shape, block_shape, crops, dtype in (
        ([4, 1, 1, 1], [2, 2], [[0, 0], [0, 0]], "int32"),
        ([4, 1, 1, 3], [2, 2], [[0, 0], [0, 0]], "int32"),
        ([4, 2, 2, 1], [2, 2], [[0, 0], [0, 0]], "int32"),
        ([8, 1, 3, 1], [2, 2], [[0, 0], [2, 0]], "int64"),
        ([18, 2, 1, 2], [2, 3], [[1, 1], [0, 0]], "float32"),
        ([20, 5, 8, 7], [2, 2], [[1, 1], [1, 1]], "float64"),
    ):
        _test_batch_to_space_nd(input_shape, block_shape, crops, dtype=dtype)
#######################################################################
# Reshape
# -------
def _test_reshape(data, out_shape):
    """One iteration of Reshape with a static target shape."""
    with tf.Graph().as_default():
        placeholder = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
        array_ops.reshape(placeholder, out_shape)
        compare_tf_with_tvm(data, "Placeholder:0", "Reshape:0")
def _test_reshape_with_call():
    """Reshape whose target shape is a relay.expr.Call (constant times two)."""
    data = np.zeros((6, 4, 2))
    with tf.Graph().as_default():
        placeholder = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
        # The shape is computed in-graph, so the frontend sees a Call node.
        shape_expr = tf.multiply(tf.constant([1, 2, 3], dtype="int32"), 2)
        array_ops.reshape(placeholder, shape_expr)
        compare_tf_with_tvm(data, "Placeholder:0", "Reshape:0")
def _test_reshape_like(data, shape_like):
    """Reshape driven by the runtime shape of another tensor."""
    with tf.Graph().as_default():
        placeholder = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
        like = array_ops.placeholder(shape=shape_like.shape, dtype=data.dtype)
        target_shape = array_ops.shape(like)
        array_ops.reshape(placeholder, target_shape)
        compare_tf_with_tvm(data, "Placeholder:0", "Reshape:0")
def _test_reshape_symbolic(data, a_data, b_data):
    """Reshape whose target shape is computed symbolically as a + b."""
    with tf.Graph().as_default():
        in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
        a = array_ops.placeholder(shape=a_data.shape, dtype=a_data.dtype)
        b = array_ops.placeholder(shape=b_data.shape, dtype=b_data.dtype)
        out = array_ops.reshape(in_data, tf.add(a, b))
        # Dynamic shapes require the debug/vm executors.
        for mode in ("debug", "vm"):
            compare_tf_with_tvm(
                [data, a_data, b_data], [in_data.name, a.name, b.name], out.name, mode=mode
            )
def test_forward_reshape():
    """Reshape"""
    for arr, new_shape in (
        (np.arange(6.0), [2, 3]),
        (np.arange(6), [-1, 2]),
        (np.arange(6), [3, -1]),
        (np.arange(6), [-1]),
    ):
        _test_reshape(arr, new_shape)
    _test_reshape_with_call()
    _test_reshape_like(np.zeros((3, 6)), np.zeros((9, 2)))
    for arr, a, b in (
        (np.arange(6.0), np.array([2, 0]), np.array([0, 3])),
        (np.arange(6), np.array([-1, 0]), np.array([0, 2])),
        (np.arange(6), np.array([3, 0]), np.array([3, -1])),
        (np.arange(6), np.array([0]), np.array([-1])),
    ):
        _test_reshape_symbolic(arr, a, b)
#######################################################################
# DepthToSpace
# ------------
def _test_depthtospace(data, block_size):
    """One iteration of depth_to_space with the given data and block size."""
    with tf.Graph().as_default():
        placeholder = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
        array_ops.depth_to_space(placeholder, block_size)
        compare_tf_with_tvm(data, "Placeholder:0", "DepthToSpace:0")
def test_forward_depthtospace():
    """DepthToSpace with two block sizes."""
    for shape, block in (([1, 32, 32, 4], 2), ([1, 16, 8, 32], 4)):
        _test_depthtospace(np.random.normal(size=shape), block)
#######################################################################
# SpaceToDepth
# ------------
def _test_spacetodepth(data, block_size):
    """One iteration of space_to_depth with the given data and block size."""
    with tf.Graph().as_default():
        placeholder = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
        array_ops.space_to_depth(placeholder, block_size)
        compare_tf_with_tvm(data, "Placeholder:0", "SpaceToDepth:0")
def test_forward_spacetodepth():
    """SpaceToDepth with two block sizes."""
    for shape, block in (([1, 32, 32, 4], 2), ([1, 16, 8, 32], 4)):
        _test_spacetodepth(np.random.normal(size=shape), block)
#######################################################################
# Squeeze
# -------
def _test_squeeze(data, squeeze_dims=None):
    """One iteration of Squeeze, optionally restricted to specific axes."""
    dims = squeeze_dims or []
    with tf.Graph().as_default():
        placeholder = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
        # Only pass the axis list when it is non-empty, matching how callers
        # use the TF API.
        if dims:
            array_ops.squeeze(placeholder, dims)
        else:
            array_ops.squeeze(placeholder)
        compare_tf_with_tvm(data, "Placeholder:0", "Squeeze:0")
def test_forward_squeeze():
    """Squeeze"""
    # Nothing to squeeze.
    _test_squeeze(np.arange(2).reshape((2)))
    _test_squeeze(np.arange(6).reshape((2, 3)))
    # Squeeze the middle element away.
    _test_squeeze(np.arange(4).reshape((2, 1, 2)))
    # Squeeze on both ends.
    _test_squeeze(np.arange(6).reshape((1, 2, 1, 3, 1)))
    # Positive and negative axis indices on a (1, 2, 1, 3, 1) tensor.
    for dims in ([0], [2, 4], [0, 4, 2], [-1], [-3, -5], [-3, -5, -1]):
        _test_squeeze(np.arange(6).reshape((1, 2, 1, 3, 1)), dims)
#######################################################################
# TensorArray
# -----------
def test_tensor_array_write_read():
    """Tensor array write read"""
    def run(dtype_str, infer_shape, element_shape):
        with tf.Graph().as_default():
            dtype = tf_dtypes[dtype_str]
            np_data = np.array([[1.0, 2.0], [3.0, 4.0]]).astype(dtype_str)
            _ = [np_data, np_data]
            first = tf.constant(np_data, dtype=dtype)
            second = tf.constant(np_data, dtype=dtype)
            arr = tf.TensorArray(
                dtype=dtype, size=2, infer_shape=infer_shape, element_shape=element_shape
            )
            arr = arr.write(0, first)
            arr = arr.write(1, second)
            _ = arr.read(0)
            _ = tf.get_default_graph()
            compare_tf_with_tvm([], [], "TensorArrayReadV3:0", mode="vm")
    for dtype in ["float32", "int8"]:
        run(dtype, False, None)
        run(dtype, False, tf.TensorShape([None, 2]))
        run(dtype, True, None)
def test_tensor_array_scatter():
    """Tensor array scatter: scatter values then read them back one by one."""
    def run(dtype_str, infer_shape):
        with tf.Graph().as_default():
            dtype = tf_dtypes[dtype_str]
            if infer_shape:
                element_shape = tf.TensorShape([tf.Dimension(None)])
            else:
                element_shape = None
            # The reads below create TensorArrayReadV3 ops in order; the
            # "_1"/"_2" name suffixes used in compare_tf_with_tvm correspond
            # to that creation order, so do not reorder these statements.
            ta0 = _construct_scatter(dtype, dtype_str, element_shape, infer_shape, 3)
            _ = ta0.read(0)
            _ = ta0.read(1)
            _ = ta0.read(2)
            # A second, size-4 scatter whose first read is also checked.
            ta1 = _construct_scatter(dtype, dtype_str, element_shape, infer_shape, 4)
            out4 = ta1.read(0)
            _ = tf.get_default_graph()
            compare_tf_with_tvm([], [], ["TensorArrayReadV3:0"], mode="vm")
            compare_tf_with_tvm([], [], ["TensorArrayReadV3_1:0"], mode="vm")
            compare_tf_with_tvm([], [], ["TensorArrayReadV3_2:0"], mode="vm")
            compare_tf_with_tvm([], [], ["TensorArrayReadV3_2:0", out4.name], mode="vm")
    def _construct_scatter(dtype, dtype_str, element_shape, infer_shape, size):
        # Scatters column vectors [[0.0], [1.0], ...] into the array using
        # reversed indices (size-1 ... 0).
        arr = [[float(i)] for i in range(size)]  # pylint: disable=unnecessary-comprehension
        indices_arr = list(range(size - 1, -1, -1))
        t = tf.constant(np.array(arr).astype(dtype_str), dtype=dtype)
        indices = tf.constant(indices_arr)
        ta1 = tf.TensorArray(
            dtype=dtype, size=size, infer_shape=infer_shape, element_shape=element_shape
        )
        ta2 = ta1.scatter(indices, t)
        return ta2
    for dtype in ["float32", "int8"]:
        run(dtype, False)
        run(dtype, True)
def test_tensor_array_gather():
    """tensor array gather"""
    def run(dtype_str, infer_shape):
        with tf.Graph().as_default():
            dtype = tf_dtypes[dtype_str]
            source = tf.constant(np.array([[1.0], [2.0], [3.0]]).astype(dtype_str))
            scatter_indices = tf.constant([2, 1, 0])
            gather_indices = tf.constant([1, 2])
            arr = tf.TensorArray(dtype=dtype, size=3, infer_shape=infer_shape)
            arr = arr.scatter(scatter_indices, source)
            _ = arr.gather(gather_indices)
            _ = tf.get_default_graph()
            compare_tf_with_tvm([], [], ["TensorArrayGatherV3:0"], mode="vm")
    for dtype in ["float32", "int8"]:
        run(dtype, True)
def test_tensor_array_split():
    """tensor array split: an 8x1 tensor split into four 2-row chunks."""
    def run(dtype_str, infer_shape):
        with tf.Graph().as_default():
            dtype = tf_dtypes[dtype_str]
            t = tf.constant(
                np.array([[1.0], [2.0], [3.0], [4.0], [5.0], [6.0], [7.0], [8.0]]).astype(
                    dtype_str
                ),
                dtype=dtype,
            )
            split_length = tf.constant([2, 2, 2, 2], dtype=tf.int32)
            ta1 = tf.TensorArray(dtype=dtype, size=4, infer_shape=infer_shape)
            ta2 = ta1.split(t, split_length)
            # One read per chunk; the "_N" op-name suffixes checked below
            # follow this read order, so do not reorder these statements.
            _ = ta2.read(0)
            _ = ta2.read(1)
            _ = ta2.read(2)
            _ = ta2.read(3)
            _ = tf.get_default_graph()
            compare_tf_with_tvm([], [], ["TensorArrayReadV3:0"], mode="debug")
            compare_tf_with_tvm([], [], ["TensorArrayReadV3_1:0"], mode="debug")
            compare_tf_with_tvm([], [], ["TensorArrayReadV3_2:0"], mode="debug")
            compare_tf_with_tvm([], [], ["TensorArrayReadV3_3:0"], mode="debug")
    for dtype in ["float32", "int8"]:
        run(dtype, False)
        run(dtype, True)
def test_tensor_array_concat():
    """Tensor array concat"""
    def run(dtype_str, infer_shape):
        with tf.Graph().as_default():
            dtype = tf_dtypes[dtype_str]
            # Split an 8x1 tensor into four chunks, then concat them back.
            source = tf.constant(
                np.array([[1.0], [2.0], [3.0], [4.0], [5.0], [6.0], [7.0], [8.0]]).astype(
                    dtype_str
                ),
                dtype=dtype,
            )
            lengths = tf.constant([2, 2, 2, 2], dtype=tf.int32)
            arr = tf.TensorArray(dtype=dtype, size=4, infer_shape=infer_shape)
            arr = arr.split(source, lengths)
            merged = arr.concat()
            _ = tf.identity(merged)
            compare_tf_with_tvm([], [], ["Identity:0"], mode="debug")
    for dtype in ["float32", "int8"]:
        run(dtype, False)
        run(dtype, True)
def test_tensor_array_size():
    """Tensor array size"""
    if package_version.parse(tf.VERSION) >= package_version.parse("1.15.0"):
        pytest.skip("Needs fixing for tflite >= 1.15.0")
    def run(dtype_str, infer_shape):
        with tf.Graph().as_default():
            dtype = tf_dtypes[dtype_str]
            np_data = np.array([[1.0, 2.0], [3.0, 4.0]]).astype(dtype_str)
            _ = [np_data, np_data]
            first = tf.constant(np_data, dtype=dtype)
            second = tf.constant(np_data, dtype=dtype)
            arr = tf.TensorArray(dtype=dtype, size=2, infer_shape=infer_shape)
            arr = arr.write(0, first)
            arr = arr.write(1, second)
            _ = arr.size()
            _ = tf.get_default_graph()
            compare_tf_with_tvm([], [], "TensorArraySizeV3:0", mode="debug")
    for dtype in ["float32", "int8"]:
        run(dtype, False)
        run(dtype, True)
def test_tensor_array_stack():
    """Tensor array stack: scatter values, stack them, compare the gather op.

    Fix: removed a stray debug ``print(t1)`` that only added noise to the
    test output and had no effect on the graph under test.
    """
    def run(dtype_str, infer_shape):
        if package_version.parse(tf.VERSION) >= package_version.parse("1.15.0"):
            pytest.skip("Needs fixing for tflite >= 1.15.0")
        with tf.Graph().as_default():
            dtype = tf_dtypes[dtype_str]
            t = tf.constant(np.array([[1.0], [2.0], [3.0]]).astype(dtype_str))
            scatter_indices = tf.constant([2, 1, 0])
            ta1 = tf.TensorArray(dtype=dtype, size=3, infer_shape=infer_shape)
            ta2 = ta1.scatter(scatter_indices, t)
            # stack() lowers to a TensorArrayGatherV3 op under the
            # "TensorArrayStack" name scope, which is what we compare below.
            _ = ta2.stack()
            _ = tf.get_default_graph()
            compare_tf_with_tvm([], [], ["TensorArrayStack/TensorArrayGatherV3:0"], mode="vm")
    for dtype in ["float32", "int8"]:
        run(dtype, True)
def test_tensor_array_unstack():
    """Tensor array unstack"""
    def run(dtype_str, input_shape, infer_shape):
        if package_version.parse(tf.VERSION) >= package_version.parse("1.15.0"):
            pytest.skip("Needs fixing for tflite >= 1.15.0")
        with tf.Graph().as_default():
            dtype = tf_dtypes[dtype_str]
            source = tf.constant(
                np.random.choice([0, 1, 2, 3], size=input_shape).astype(dtype.name)
            )
            arr = tf.TensorArray(dtype=dtype, infer_shape=infer_shape, size=input_shape[0])
            arr = arr.unstack(source)
            _ = arr.size()
            _ = arr.read(0)
            compare_tf_with_tvm([], [], "TensorArraySizeV3:0", mode="debug")
            compare_tf_with_tvm([], [], "TensorArrayReadV3:0", mode="debug")
    for dtype in ["float32", "int8"]:
        for shape, infer in (
            ((5,), False),
            ((5, 5), True),
            ((5, 5, 5), False),
            ((5, 5, 5, 5), True),
        ):
            run(dtype, shape, infer)
#######################################################################
# ConcatV2
# --------
def _test_concat_v2(shape1, shape2, dim):
    """One iteration of ConcatV2"""
    with tf.Graph().as_default():
        dtype = "float32"
        lhs = tf.placeholder(shape=shape1, dtype=dtype, name="in1")
        rhs = tf.placeholder(shape=shape2, dtype=dtype, name="in2")
        array_ops.concat_v2([lhs, rhs], dim)
        lhs_np = np.random.uniform(size=shape1).astype(dtype)
        rhs_np = np.random.uniform(size=shape2).astype(dtype)
        compare_tf_with_tvm([lhs_np, rhs_np], ["in1:0", "in2:0"], "ConcatV2:0")
def test_forward_concat_v2():
    """ConcatV2 along positive and negative axes."""
    # ConcatV2 only exists in TF >= 1.4.1.
    if tf.__version__ < LooseVersion("1.4.1"):
        return
    for shape1, shape2, axis in (
        ([2, 3], [2, 3], 0),
        ([10, 3, 5], [2, 3, 5], 0),
        ([2, 3], [2, 3], 1),
        ([5, 8], [5, 4], 1),
        ([2, 8, 5], [2, 8, 6], -1),
    ):
        _test_concat_v2(shape1, shape2, axis)
#######################################################################
# Sigmoid
# -------
def _test_sigmoid(data):
    """One iteration of sigmoid"""
    with tf.Graph().as_default():
        placeholder = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
        _ = math_ops.sigmoid(placeholder)
        compare_tf_with_tvm(data, "Placeholder:0", "Sigmoid:0")
def test_forward_sigmoid():
    """Sigmoid"""
    data = np.random.uniform(size=(3, 4, 4, 3)).astype("float32")
    _test_sigmoid(data)
#######################################################################
# Argmin/Argmax
# -------------
def _test_argx(func, data, **kwargs):
    """Run an argmin/argmax-style *func* on *data* and compare with TVM."""
    with tf.Graph().as_default():
        placeholder = array_ops.placeholder(shape=data.shape, dtype=data.dtype, name="c0")
        func(placeholder, name="argx0", **kwargs)
        compare_tf_with_tvm(data, "c0:0", "argx0:0")
def test_forward_argminmax():
    """Argmin/argmax over every axis and both output types."""
    for output_type in [tf.int64, tf.int32]:
        for axis in [None, 0, 1, 2]:
            data = np.random.uniform(size=(8, 4, 9)).astype("float32")
            for func in (tf.argmax, tf.argmin):
                _test_argx(func, data=data, axis=axis, output_type=output_type)
#######################################################################
# Variable
# --------
def _test_variable(data):
    """One iteration of a graph whose weight matrix is a tf.Variable."""
    tf.reset_default_graph()
    with tf.Graph().as_default():
        input_op = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
        input_tensor = array_ops.reshape(input_op, data.shape)
        size = input_tensor.shape.dims[1]
        with variable_scope.variable_scope("linear", reuse=None):
            weights = variable_scope.get_variable(
                "w", shape=[size, size], dtype=input_tensor.dtype
            )
        # The matmul lives outside the "linear" scope, so its op name is
        # plain "MatMul".
        math_ops.matmul(input_tensor, weights)
        compare_tf_with_tvm(data, "Placeholder:0", "MatMul:0", init_global_variables=True)
def test_forward_variable():
    """Variable type op test"""
    data = np.random.uniform(size=(32, 100)).astype("float32")
    _test_variable(data)
@tvm.testing.parametrize_targets("llvm", "cuda")
def test_read_variable_op(target, dev):
    """Read Variable op test: an unfrozen graph must be rejected, then the
    frozen version must match TF output."""
    tf.reset_default_graph()
    data = np.random.uniform(size=(32, 100)).astype("float32")
    input_tensor = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
    size = input_tensor.shape.dims[1]
    var_data = np.random.uniform(-5, 5, size=[size, size]).astype(np.float32)
    # use_resource=True creates a resource variable, which shows up as a
    # ReadVariableOp in the graph def.
    input_var = tf.Variable(var_data, name="var1", use_resource=True)
    math_ops.matmul(input_tensor, input_var)
    out_name = ["MatMul:0"]
    out_node = ["MatMul"]
    in_name = ["Placeholder:0"]
    in_node = ["Placeholder"]
    in_data = [data]
    with tf.Session() as sess:
        sess.run(variables.global_variables_initializer())
        final_graph_def = sess.graph.as_graph_def(add_shapes=True)
        tf_output = run_tf_graph(sess, in_data, in_name, out_name)
        shape_dict = {e: i.shape for e, i in zip(in_name, in_data)}
        # The graph still contains live variables, so from_tensorflow must
        # raise. NOTE(review): the first conversion raises, so the span
        # structural-equality check below it is never reached.
        with pytest.raises(Exception) as execinfo:
            with tvm.testing.disable_span_filling():
                mod, _ = relay.frontend.from_tensorflow(
                    final_graph_def, layout=None, shape=shape_dict, outputs=None
                )
            with tvm.testing.enable_span_filling():
                mod_with_span, _ = relay.frontend.from_tensorflow(
                    final_graph_def, layout=None, shape=shape_dict, outputs=None
                )
            assert tvm.ir.structural_equal(mod["main"], mod_with_span["main"])
        assert execinfo.value.args[0].startswith("Graph is not frozen. Provide a frozen graph")
        # Now convert the variables to constant and run inference on the converted graph
        final_graph_def = tf.graph_util.convert_variables_to_constants(
            sess,
            sess.graph.as_graph_def(add_shapes=True),
            out_node,
        )
        tvm_output = run_tvm_graph(
            final_graph_def,
            in_data,
            in_node,
            target=target,
            out_names=out_name,
            num_output=len(out_name),
        )
        for i, tf_out in enumerate(tf_output):
            tvm.testing.assert_allclose(tf_out, tvm_output[i], atol=1e-4, rtol=1e-5)
        sess.close()
#######################################################################
# MatMul, BatchMatMul, BatchMatMulV2
# ----------------------------------
def _test_matmul(i, j, k, dtype, outer=None):
    """One iteration of matmul over all transpose_a/transpose_b combinations."""
    base_a = [i, j]
    base_b = [j, k]
    batch_dims = outer or []
    for transpose_a in (False, True):
        for transpose_b in (False, True):
            A_shape = batch_dims + (base_a[::-1] if transpose_a else base_a)
            B_shape = batch_dims + (base_b[::-1] if transpose_b else base_b)
            with tf.Graph().as_default():
                A = tf.placeholder(shape=A_shape, dtype=dtype, name="A")
                B = tf.placeholder(shape=B_shape, dtype=dtype, name="B")
                result = tf.matmul(A, B, transpose_a=transpose_a, transpose_b=transpose_b)
                A_np = np.random.uniform(high=5.0, size=A_shape).astype(dtype)
                B_np = np.random.uniform(high=5.0, size=B_shape).astype(dtype)
                # Exercise both converter paths: dense and matmul lowering.
                for use_dense in (True, False):
                    compare_tf_with_tvm(
                        [A_np, B_np],
                        [A.name, B.name],
                        result.name,
                        convert_config={"use_dense": use_dense},
                    )
def test_forward_matmul():
    """MatMul op test"""
    for i, j, k, dtype in ((1, 3, 6, "int32"), (5, 3, 1, "float64")):
        _test_matmul(i, j, k, dtype)
def _test_batch_matmul(A_shape, B_shape, dtype, adjoint_a=False, adjoint_b=False):
    """One iteration of batched matmul, checked with both converter paths."""
    with tf.Graph().as_default():
        A = tf.placeholder(shape=A_shape, dtype=dtype, name="A")
        B = tf.placeholder(shape=B_shape, dtype=dtype, name="B")
        result = tf.matmul(A, B, adjoint_a=adjoint_a, adjoint_b=adjoint_b, name="batchmatmul")
        A_np = np.random.uniform(high=5.0, size=A_shape).astype(dtype)
        B_np = np.random.uniform(high=5.0, size=B_shape).astype(dtype)
        # Exercise both the NT and the plain batch_matmul lowering.
        for use_nt in (True, False):
            compare_tf_with_tvm(
                [A_np, B_np],
                [A.name, B.name],
                result.name,
                convert_config={"use_nt_batch_matmul": use_nt},
            )
def _test_batch_matmul_dynamic(
    A_shape, B_shape, A_np_shape, B_np_shape, dtype, adjoint_a=False, adjoint_b=False
):
    """Batched matmul with symbolic (None) placeholder dims, concrete data."""
    with tf.Graph().as_default():
        A = tf.placeholder(shape=A_shape, dtype=dtype, name="A")
        B = tf.placeholder(shape=B_shape, dtype=dtype, name="B")
        result = tf.matmul(A, B, adjoint_a=adjoint_a, adjoint_b=adjoint_b, name="batchmatmul")
        A_np = np.random.uniform(high=5.0, size=A_np_shape).astype(dtype)
        B_np = np.random.uniform(high=5.0, size=B_np_shape).astype(dtype)
        # for now, in TOPI, only llvm & cublas's implementation support dynamic shape
        # TODO add more backends support in TOPI
        for use_nt in (True, False):
            compare_tf_with_tvm(
                [A_np, B_np],
                [A.name, B.name],
                result.name,
                mode="vm",
                targets=["llvm", "cuda -libs=cublas"],
                convert_config={"use_nt_batch_matmul": use_nt},
            )
def test_forward_batch_matmul():
    """TF op BatchMatMul, BatchMatMulV2 test"""
    cases = (
        ((3, 5, 4), (3, 4, 5), "int32", False, False),
        ((3, 5, 4), (3, 4, 5), "float32", True, True),
        ((3, 5, 4), (3, 5, 4), "int32", True, False),
        ((3, 5, 4), (3, 5, 4), "float32", False, True),
        ((2, 3, 4, 5, 6), (2, 3, 4, 6, 5), "int32", False, False),
        ((1, 2, 3, 4, 5, 6), (1, 2, 3, 4, 6, 5), "float32", True, True),
        ((3, 4, 5, 6), (3, 4, 5, 6), "int32", True, False),
        ((2, 3, 4, 2, 3, 4, 5, 6), (2, 3, 4, 2, 3, 4, 5, 6), "float32", False, True),
        ((1, 8, 64, 2), (2, 1), "float32", False, False),
        ((1, 8, 8, 64), (64, 1), "float32", False, False),
        ((1, 8, 64), (64, 1), "float32", False, False),
    )
    for a_shape, b_shape, dtype, adj_a, adj_b in cases:
        _test_batch_matmul(a_shape, b_shape, dtype, adj_a, adj_b)
def test_forward_batch_matmul_dynamic():
    """Dynamic batch matmul"""
    # (placeholder A, placeholder B, concrete A, concrete B, dtype, adj_a, adj_b)
    cases = (
        ((None, 5, 4), (None, 4, 5), (3, 5, 4), (3, 4, 5), "int32", False, False),
        ((None, 5, 4), (None, 4, 5), (3, 5, 4), (3, 4, 5), "float32", True, True),
        ((None, 5, 4), (None, 5, 4), (3, 5, 4), (3, 5, 4), "int32", True, False),
        ((None, 5, 4), (None, 5, 4), (3, 5, 4), (3, 5, 4), "float32", False, True),
        ((None, 4, 5, 6), (None, 4, 6, 5), (3, 4, 5, 6), (3, 4, 6, 5), "float32", False, False),
        (
            (None, None, 5, 6),
            (None, None, 6, 5),
            (3, 4, 5, 6),
            (3, 4, 6, 5),
            "float32",
            False,
            False,
        ),
        (
            (None, None, None, 5, 6),
            (None, None, None, 6, 5),
            (2, 3, 4, 5, 6),
            (2, 3, 4, 6, 5),
            "float32",
            False,
            False,
        ),
        ((None, None, None, 5, 6), (6, None), (2, 3, 4, 5, 6), (6, 1), "float32", False, False),
        ((None, 5, 6), (6, None), (24, 5, 6), (6, 1), "float32", False, False),
    )
    for a_shape, b_shape, a_np_shape, b_np_shape, dtype, adj_a, adj_b in cases:
        _test_batch_matmul_dynamic(a_shape, b_shape, a_np_shape, b_np_shape, dtype, adj_a, adj_b)
#######################################################################
# SparseTensorDenseMatMul
# ----------------------------------
def _test_sparse_dense_matmul(indices, values, A_inp_shape, B_inp_shape, dtype, flip=False):
    """One iteration of sparse_dense_matmul"""
    for adjoint_a in (False, True):
        for adjoint_b in (False, True):
            sparse_shape = A_inp_shape[::-1] if adjoint_a else A_inp_shape
            dense_shape = B_inp_shape[::-1] if adjoint_b else B_inp_shape
            with tf.Graph().as_default():
                A_sp = tf.sparse.SparseTensor(
                    indices=indices, values=values, dense_shape=sparse_shape
                )
                B = tf.placeholder(shape=dense_shape, dtype=dtype, name="B")
                # flip multiplies dense x sparse instead of sparse x dense,
                # swapping which adjoint flag applies to which operand.
                if flip:
                    result = tf.sparse.sparse_dense_matmul(
                        B, A_sp, adjoint_a=adjoint_b, adjoint_b=adjoint_a
                    )
                else:
                    result = tf.sparse.sparse_dense_matmul(
                        A_sp, B, adjoint_a=adjoint_a, adjoint_b=adjoint_b
                    )
                B_np = np.random.uniform(high=5.0, size=dense_shape).astype(dtype)
                compare_tf_with_tvm([B_np], [B.name], result.name)
def test_forward_sparse_dense_matmul():
    """sparse_dense_matmul op test"""
    # A SparseTensor is built from indices/values/dense_shape, e.g.
    # SparseTensor(indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])
    # is the dense matrix [[1, 0, 0, 0], [0, 0, 2, 0], [0, 0, 0, 0]].
    for indices, values, a_shape, b_shape, flip in (
        ([[0, 0], [1, 2]], [4.0, 8.0], [3, 4], [4, 3], False),
        ([[0, 0], [1, 2]], [4.0, 8.0], [3, 3], [3, 3], False),
        ([[0, 0], [1, 3], [4, 3]], [3.0, 6.0, 9.0], [5, 5], [5, 5], False),
        ([[0, 0], [1, 3], [4, 3]], [3.0, 6.0, 9.0], [7, 9], [9, 5], False),
        ([[0, 0], [1, 2]], [4.0, 8.0], [4, 3], [3, 4], True),
        ([[0, 0], [1, 2]], [4.0, 8.0], [3, 3], [3, 3], True),
        ([[0, 0], [1, 3], [4, 3]], [3.0, 6.0, 9.0], [5, 5], [5, 5], True),
        ([[0, 0], [1, 3], [4, 3]], [3.0, 6.0, 9.0], [9, 5], [7, 9], True),
    ):
        _test_sparse_dense_matmul(indices, values, a_shape, b_shape, "float32", flip)
#######################################################################
# SparseFillEmptyRows
# ------------
def _test_sparse_fill_empty_rows(indices_np, values_np, dense_shape_np, default_value_int, use_dyn):
    """One iteration of sparse.fill_empty_rows with static or dynamic shapes."""
    with tf.Graph().as_default():
        if use_dyn:
            # NOTE: shape=(None) is just None here (a fully unknown shape),
            # not a rank-1 shape -- preserved deliberately.
            indices = tf.placeholder(shape=(None, None), dtype=indices_np.dtype, name="indices")
            values = tf.placeholder(shape=(None), dtype=values_np.dtype, name="values")
            dense_shape = tf.placeholder(
                shape=(None), dtype=dense_shape_np.dtype, name="dense_shape"
            )
        else:
            indices = tf.placeholder(shape=indices_np.shape, dtype=indices_np.dtype, name="indices")
            values = tf.placeholder(shape=values_np.shape, dtype=values_np.dtype, name="values")
            dense_shape = tf.placeholder(
                shape=dense_shape_np.shape, dtype=dense_shape_np.dtype, name="dense_shape"
            )
        default_value = tf.placeholder(shape=(), dtype=values_np.dtype, name="default_value")
        sp_input = tf.sparse.SparseTensor(indices=indices, values=values, dense_shape=dense_shape)
        _ = tf.sparse.fill_empty_rows(sp_input, default_value, name="sparse_fill_empty_rows")
        # The op has three outputs: new indices, new values, empty-row indicator.
        compare_tf_with_tvm(
            [indices_np, values_np, dense_shape_np, default_value_int],
            [indices.name, values.name, dense_shape.name, default_value.name],
            [
                "sparse_fill_empty_rows/SparseFillEmptyRows:0",
                "sparse_fill_empty_rows/SparseFillEmptyRows:1",
                "sparse_fill_empty_rows/SparseFillEmptyRows:2",
            ],
            mode="vm",
        )
@pytest.mark.parametrize(
    "sparse_indices_np, sparse_values_np, dense_shape_np, default_value_int",
    [
        # 2-D, unordered indices, one empty row (row 4).
        (
            np.array([[1, 1], [0, 3], [0, 1], [2, 0], [3, 1]], dtype=np.int64),
            np.array([1, 2, 3, 4, 5], dtype=np.int64),
            np.array([5, 6], dtype=np.int64),
            10,
        ),
        # 2-D, one entry per occupied row.
        (
            np.array([[1, 1], [0, 3], [2, 0], [3, 1]], dtype=np.int64),
            np.array([1, 2, 3, 4], dtype=np.int64),
            np.array([5, 6], dtype=np.int64),
            10,
        ),
        # 2-D with two entries in row 0 and more than one empty row.
        (
            np.array([[0, 1], [0, 3], [2, 0], [3, 1]], dtype=np.int64),
            np.array([1, 2, 3, 4], dtype=np.int64),
            np.array([5, 6], dtype=np.int64),
            10,
        ),
        # 3-D indices.
        (
            np.array([[1, 1, 1], [1, 3, 1], [2, 0, 5], [3, 1, 6]], dtype=np.int64),
            np.array([1, 2, 3, 4], dtype=np.int64),
            np.array([7, 7, 7], dtype=np.int64),
            5,
        ),
        # 1-D indices.
        (
            np.array([[1], [2]], dtype=np.int64),
            np.array([7, 8], dtype=np.int64),
            np.array([5], dtype=np.int64),
            4,
        ),
        # Completely empty sparse tensors (rank 1 and rank 3): every row is
        # filled with the default value.
        (
            np.ones((0, 1), dtype=np.int64),
            np.array([], dtype=np.int64),
            np.array([5], dtype=np.int64),
            4,
        ),
        (
            np.ones((0, 3), dtype=np.int64),
            np.array([], dtype=np.int64),
            np.array([9, 3, 7], dtype=np.int64),
            100,
        ),
    ],
)
@pytest.mark.parametrize("use_dyn", [True, False])
def test_forward_sparse_fill_empty_rows(
    sparse_indices_np, sparse_values_np, dense_shape_np, default_value_int, use_dyn
):
    """sparse_fill_empty_rows op test"""
    ###################################################################
    #
    # In order to create a SparseTensor, it requires 3 input as below:
    #    SparseTensor(indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])
    #
    # Above Sparse can be represented in Dense as below :
    #    [[1, 0, 0, 0]
    #    [0, 0, 2, 0]
    #    [0, 0, 0, 0]]
    #
    # ------------------------------------------------------------------
    _test_sparse_fill_empty_rows(
        sparse_indices_np, sparse_values_np, dense_shape_np, default_value_int, use_dyn
    )
#######################################################################
# SparseReshape
# ------------
def _test_sparse_reshape(indices_np, values_np, prev_shape_np, new_shape_np, use_dyn=False):
    """One iteration of tf.sparse.reshape, compared between TF and TVM (VM mode)."""
    with tf.Graph().as_default():
        # With use_dyn the placeholders carry unknown dimensions so shape
        # inference has to happen at run time.
        if use_dyn:
            ind_shape, val_shape, prev_sh_shape, new_sh_shape = (None, None), None, None, None
        else:
            ind_shape = indices_np.shape
            val_shape = values_np.shape
            prev_sh_shape = prev_shape_np.shape
            new_sh_shape = new_shape_np.shape
        indices = tf.placeholder(shape=ind_shape, dtype=indices_np.dtype, name="indices")
        values = tf.placeholder(shape=val_shape, dtype=values_np.dtype, name="values")
        prev_shape = tf.placeholder(
            shape=prev_sh_shape, dtype=prev_shape_np.dtype, name="prev_shape"
        )
        new_shape = tf.placeholder(shape=new_sh_shape, dtype=new_shape_np.dtype, name="new_shape")
        sparse_in = tf.sparse.SparseTensor(
            indices=indices, values=values, dense_shape=prev_shape
        )
        _ = tf.sparse.reshape(sparse_in, new_shape, name="sparse_reshape")
        compare_tf_with_tvm(
            [indices_np, values_np, prev_shape_np, new_shape_np],
            [indices.name, values.name, prev_shape.name, new_shape.name],
            ["sparse_reshape:0", "sparse_reshape:1", "sparse_reshape/Identity:0"],
            mode="vm",
        )
# Each tuple is (sparse_indices, sparse_values, prev_shape, new_shape); -1 in a
# new_shape entry asks SparseReshape to infer that dimension. Cases cover empty
# sparse inputs, rank changes in both directions, and identity reshapes.
@pytest.mark.parametrize(
    "sparse_indices_np, sparse_values_np, prev_shape_np, new_shape_np",
    [
        (
            np.ones((0, 1), dtype=np.int64),
            np.array([], dtype=np.int64),
            np.array([4], dtype=np.int64),
            np.array([2, -1], dtype=np.int64),
        ),
        (
            np.ones((0, 1), dtype=np.int64),
            np.array([], dtype=np.int64),
            np.array([4], dtype=np.int64),
            np.array([2, 2], dtype=np.int64),
        ),
        (
            np.ones((0, 2), dtype=np.int64),
            np.array([], dtype=np.int64),
            np.array([3, 6], dtype=np.int64),
            np.array([-1, 2], dtype=np.int64),
        ),
        (
            np.array([[0, 0, 0], [0, 0, 1], [0, 1, 0], [1, 0, 0], [1, 2, 3]], dtype=np.int64),
            np.array([7, 5, 6, 3, 9], dtype=np.int64),
            np.array([2, 3, 6], dtype=np.int64),
            np.array([-1, 9], dtype=np.int64),
        ),
        (
            np.array(
                [
                    [0, 0, 0, 0, 0],
                    [0, 0, 1, 2, 3],
                    [0, 1, 0, 3, 5],
                    [1, 0, 0, 4, 6],
                    [1, 2, 3, 6, 8],
                ],
                dtype=np.int64,
            ),
            np.array([7, 5, 6, 3, 9], dtype=np.int64),
            np.array([2, 3, 6, 7, 9], dtype=np.int64),
            np.array([9, -1, 7], dtype=np.int64),
        ),
        (
            np.array([[0, 0], [0, 1], [3, 4], [4, 3], [7, 3]], dtype=np.int64),
            np.array([7, 5, 6, 3, 9], dtype=np.int64),
            np.array([9, 4], dtype=np.int64),
            np.array([-1], dtype=np.int64),
        ),
        (
            np.array([[0], [5], [10], [20], [24]], dtype=np.int64),
            np.array([7, 5, 6, 3, 9], dtype=np.int64),
            np.array([25], dtype=np.int64),
            np.array([5, 5], dtype=np.int64),
        ),
        (
            np.array([[0, 100], [200, 100], [300, 400], [50, 20], [400, 50]], dtype=np.int64),
            np.array([7, 5, 6, 3, 9], dtype=np.int64),
            np.array([500, 20], dtype=np.int64),
            np.array([500, 20], dtype=np.int64),
        ),
        (
            np.array([[0, 100], [200, 100], [300, 400], [50, 20], [400, 50]], dtype=np.int64),
            np.array([7, 5, 6, 3, 9], dtype=np.int64),
            np.array([500, 20], dtype=np.int64),
            np.array([500, -1], dtype=np.int64),
        ),
        (
            np.array([[0, 100], [200, 100], [300, 400], [50, 20], [400, 50]], dtype=np.int64),
            np.array([7, 5, 6, 3, 9], dtype=np.int64),
            np.array([500, 20], dtype=np.int64),
            np.array([250, 40], dtype=np.int64),
        ),
    ],
)
@pytest.mark.parametrize("use_dyn", [True, False])
def test_forward_sparse_reshape(
    sparse_indices_np, sparse_values_np, prev_shape_np, new_shape_np, use_dyn
):
    """sparse_reshape op test: delegates to the helper with both static and
    dynamic (use_dyn) placeholder shapes."""
    ###################################################################
    #
    # In order to create a SparseTensor, it requires 3 input as below:
    # SparseTensor(indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])
    #
    # Above Sparse can be represented in Dense as below :
    # [[1, 0, 0, 0]
    # [0, 0, 2, 0]
    # [0, 0, 0, 0]]
    #
    # ------------------------------------------------------------------
    _test_sparse_reshape(sparse_indices_np, sparse_values_np, prev_shape_np, new_shape_np, use_dyn)
#######################################################################
# Sparse Segment Variants
# ------------
def _test_sparse_segment_variant(
    tf_op, data_np, indices_np, segment_ids_np, num_segments, use_dyn=False
):
    """Run one sparse-segment op (`tf_op`, e.g. tf.sparse.segment_sum) and
    compare the TF result with TVM under the VM executor."""
    with tf.Graph().as_default():
        if use_dyn:
            # Fully dynamic placeholder shapes force run-time shape inference.
            data = tf.placeholder(
                shape=[None] * len(data_np.shape), dtype=data_np.dtype, name="data"
            )
            indices = tf.placeholder(shape=[None], dtype=indices_np.dtype, name="indices")
            segment_ids = tf.placeholder(
                shape=None, dtype=segment_ids_np.dtype, name="segment_ids"
            )
        else:
            data = tf.placeholder(shape=data_np.shape, dtype=data_np.dtype, name="data")
            indices = tf.placeholder(
                shape=indices_np.shape, dtype=indices_np.dtype, name="indices"
            )
            segment_ids = tf.placeholder(
                shape=segment_ids_np.shape, dtype=segment_ids_np.dtype, name="segment_ids"
            )
        _ = tf_op(
            data, indices, segment_ids, num_segments=num_segments, name="sparse_segment_variant"
        )
        compare_tf_with_tvm(
            [data_np, indices_np, segment_ids_np],
            [data.name, indices.name, segment_ids.name],
            ["sparse_segment_variant:0"],
            mode="vm",
        )
# Each tuple is (data, indices, segment_ids, num_segments); num_segments=None
# exercises the op variant that infers the segment count. Cases include
# segment ids with gaps and rank-1 through rank-4 data.
@pytest.mark.parametrize(
    "data_np, indices_np, segment_ids_np, num_segments",
    [
        (
            np.array([5, 1, 7, 2, 3, 4], dtype=np.float32),
            np.array([0, 3, 4], dtype=np.int32),
            np.array([0, 1, 1], dtype=np.int32),
            None,
        ),
        (
            np.array([[1, 2, 3, 4], [-1, -2, -3, -4], [5, 6, 7, 8]], dtype=np.float64),
            np.array([0, 1], dtype=np.int32),
            np.array([0, 2], dtype=np.int32),
            4,
        ),
        (
            np.random.random((6, 4, 5)),
            np.array([0, 2, 4, 3, 1], dtype=np.int32),
            np.array([0, 0, 1, 5, 5], dtype=np.int32),
            100,
        ),
        (
            np.random.random((6, 4, 5)),
            np.array([0, 2, 4, 3, 1], dtype=np.int32),
            np.array([0, 0, 1, 5, 5], dtype=np.int32),
            None,
        ),
        (
            np.array([[[1, 7]], [[3, 8]], [[2, 9]]], dtype=np.float64),
            np.array([0, 1, 2], dtype=np.int32),
            np.array([0, 0, 1], dtype=np.int32),
            None,
        ),
        (
            np.random.random((9, 4, 5, 7)),
            np.array([0, 1, 2, 3, 4, 5, 6, 7, 8], dtype=np.int32),
            np.array([0, 0, 1, 3, 5, 6, 7, 7, 8], dtype=np.int32),
            9,
        ),
        (
            np.random.random((9, 4, 5, 7)),
            np.array([0, 1, 2, 3, 4, 5, 6, 7, 8], dtype=np.int32),
            np.array([0, 0, 1, 3, 5, 6, 7, 7, 8], dtype=np.int32),
            None,
        ),
        (
            np.array([[1, 2, 3, 4], [-1, -2, -3, -4], [5, 6, 7, 8]], dtype=np.float64),
            np.array([0, 1], dtype=np.int32),
            np.array([0, 2], dtype=np.int32),
            None,
        ),
        (
            np.random.random((9, 4, 5, 7)),
            np.array([0, 1, 2, 3, 4, 5, 6, 7, 8], dtype=np.int32),
            np.array([0, 0, 1, 3, 5, 5, 5, 5, 5], dtype=np.int32),
            6,
        ),
    ],
)
@pytest.mark.parametrize("use_dyn", [True, False])
@pytest.mark.parametrize(
    "tf_op",
    [
        tf.sparse.segment_sum,
        tf.sparse.segment_sqrt_n,
        tf.sparse.segment_mean,
    ],
)
def test_forward_sparse_segment_sum_variants(
    tf_op,
    data_np,
    indices_np,
    segment_ids_np,
    num_segments,
    use_dyn,
):
    """sparse segment sum variants tests: runs segment_sum / segment_sqrt_n /
    segment_mean over every data case, with static and dynamic shapes."""
    _test_sparse_segment_variant(tf_op, data_np, indices_np, segment_ids_np, num_segments, use_dyn)
#######################################################################
# Math SegmentSum
# ------------
def _test_math_segment_sum(data_np, segment_ids_np, use_dyn=False):
    """Build tf.math.segment_sum and compare TF output with TVM (VM mode)."""
    with tf.Graph().as_default():
        # Choose static or fully dynamic placeholder shapes up front.
        if use_dyn:
            data_shape = [None] * len(data_np.shape)
            ids_shape = None
        else:
            data_shape = data_np.shape
            ids_shape = segment_ids_np.shape
        data = tf.placeholder(shape=data_shape, dtype=data_np.dtype, name="data")
        segment_ids = tf.placeholder(
            shape=ids_shape, dtype=segment_ids_np.dtype, name="segment_ids"
        )
        _ = tf.math.segment_sum(data, segment_ids, name="segment_sum")
        compare_tf_with_tvm(
            [data_np, segment_ids_np],
            [data.name, segment_ids.name],
            ["segment_sum:0"],
            mode="vm",
        )
# Each tuple is (data, segment_ids); segment ids are sorted and contiguous as
# required by tf.math.segment_sum. Cases cover rank-1 through rank-4 data.
@pytest.mark.parametrize(
    "data_np, segment_ids_np",
    [
        (
            np.array([5, 1, 7, 2, 3, 4], dtype=np.float32),
            np.array([0, 0, 0, 1, 1, 1], dtype=np.int32),
        ),
        (
            np.array([[1, 2, 3, 4], [-1, -2, -3, -4], [5, 6, 7, 8]], dtype=np.float64),
            np.array([0, 0, 1], dtype=np.int32),
        ),
        (
            np.random.random((6, 4, 5)),
            np.array([0, 0, 1, 2, 2, 3], dtype=np.int64),
        ),
        (
            np.array([[[1, 7]], [[3, 8]], [[2, 9]]], dtype=np.float32),
            np.array([0, 0, 1], dtype=np.int32),
        ),
        (
            np.random.random((9, 4, 5, 7)),
            np.array([0, 0, 0, 1, 2, 3, 4, 4, 5], dtype=np.int64),
        ),
    ],
)
@pytest.mark.parametrize("use_dyn", [True, False])
def test_forward_math_segment_sum(data_np, segment_ids_np, use_dyn):
    """math segment sum test: delegates to the helper with both static and
    dynamic (use_dyn) placeholder shapes."""
    _test_math_segment_sum(data_np, segment_ids_np, use_dyn)
# tensorflow.compat.v1.sparse_to_dense
# ---------------
def _test_sparse_to_dense(sparse_indices, sparse_values, default_value, output_shape):
    """One iteration of the legacy tf.sparse_to_dense, compared against TVM.

    When default_value is None the op is built without an explicit default.
    """
    with tf.Graph().as_default():
        indices = tf.placeholder(
            shape=sparse_indices.shape, dtype=str(sparse_indices.dtype), name="indices"
        )
        values = tf.placeholder(
            shape=sparse_values.shape, dtype=str(sparse_values.dtype), name="values"
        )
        oshape = tf.constant(output_shape, shape=output_shape.shape, dtype=str(output_shape.dtype))
        if default_value is None:
            output = tf.sparse_to_dense(indices, oshape, values)
            feed_values = [sparse_indices, sparse_values]
            feed_names = ["indices:0", "values:0"]
        else:
            dv = tf.placeholder(shape=(), dtype=str(default_value.dtype), name="default_value")
            output = tf.sparse_to_dense(indices, oshape, values, dv)
            feed_values = [sparse_indices, sparse_values, default_value]
            feed_names = ["indices:0", "values:0", "default_value:0"]
        # Output shape depends on a dynamic input, so run through the VM.
        compare_tf_with_tvm(feed_values, feed_names, output.name, mode="vm")
def test_forward_sparse_to_dense():
    """Sparse to dense"""
    # Each tuple: (sparse_indices, sparse_values, default_value, output_shape).
    cases = [
        # scalar
        (np.int32(1), np.int32(3), np.int32(0), np.array([5]).astype("int32")),
        # vector
        (
            np.array([0, 1, 4]).astype("int32"),
            np.array([3, 3, 3]).astype("int32"),
            np.int32(0),
            np.array([5]).astype("int32"),
        ),
        # vector nXd
        (
            np.array([[0, 0], [1, 2]]).astype("int32"),
            np.array([1, 2]).astype("int32"),
            np.int32(0),
            np.array([3, 4]).astype("int32"),
        ),
        (
            np.array([[0, 0, 0], [1, 2, 3]]).astype("int32"),
            np.array([1, 2]).astype("int32"),
            np.int32(4),
            np.array([2, 3, 4]).astype("int32"),
        ),
        # floats
        (
            np.array([0, 1, 4]).astype("int32"),
            np.array([3.1, 3.1, 3.1]).astype("float32"),
            np.float32(3.5),
            np.array([5]).astype("int32"),
        ),
        # default value not specified
        (
            np.array([0, 1, 4]).astype("int32"),
            np.array([3.1, 3.1, 3.1]).astype("float32"),
            None,
            np.array([5]).astype("int32"),
        ),
    ]
    for sparse_indices, sparse_values, default_value, output_shape in cases:
        _test_sparse_to_dense(
            sparse_indices=sparse_indices,
            sparse_values=sparse_values,
            default_value=default_value,
            output_shape=output_shape,
        )
#######################################################################
# tensorflow.sparse.to_dense
# ---------------
def _test_sparse_to_dense_v2(indices, values, A_shape, dtype, default_value=None):
    """One iteration of tf.sparse.to_dense, compared between TF and TVM."""
    with tf.Graph().as_default():
        sparse_in = tf.sparse.SparseTensor(indices=indices, values=values, dense_shape=A_shape)
        dense_out = tf.sparse.to_dense(sparse_in, default_value=default_value)
        # The output shape depends on a dynamic input, so use the VM executor.
        compare_tf_with_tvm([], [], dense_out.name, mode="vm")
def test_forward_sparse_to_dense_v2():
    """tf.sparse.to_dense tests, each sparse input run without and with an
    explicit default value."""
    for (indices, values, shape), default in [
        (([[1]], [3.0], [5]), None),
        (([[1]], [3.0], [5]), 0.3),
        (([[0, 0], [1, 2]], [4.0, 8.0], [3, 4]), None),
        (([[0, 0], [1, 2]], [4.0, 8.0], [3, 4]), 1.3),
        (([[0, 0], [1, 3], [4, 3]], [3.0, 6.0, 9.0], [5, 5]), None),
        (([[0, 0], [1, 3], [4, 3]], [3.0, 6.0, 9.0], [5, 5]), 1.9),
    ]:
        _test_sparse_to_dense_v2(indices, values, shape, "float32", default)
#######################################################################
# tensorflow.sparse.add
# ----------------------------------
def _test_sparse_add(indices, values, A_shape, B_shape, dtype, flip=False):
    """One iteration of tf.sparse.add (sparse + dense), compared against TVM.

    With flip=True the dense operand is passed first (B + A_sp).
    """
    # TODO(ANSHUMAN87): support cuda
    # TODO(ANSHUMAN87): support both sparse input case
    with tf.Graph().as_default():
        A_sp = tf.sparse.SparseTensor(
            indices=indices, values=np.array(values).astype(dtype), dense_shape=A_shape
        )
        B = tf.placeholder(shape=B_shape, dtype=dtype, name="B")
        # TODO(ANSHUMAN87): support user input threshold values
        # TF renamed the `thresh` kwarg to `threshold` in 1.13; select the
        # right spelling once instead of duplicating the call in each branch.
        if package_version.parse(tf.VERSION) < package_version.parse("1.13.0"):
            threshold_kwarg = {"thresh": 0}
        else:
            threshold_kwarg = {"threshold": 0}
        lhs, rhs = (B, A_sp) if flip else (A_sp, B)
        result = tf.sparse.add(lhs, rhs, **threshold_kwarg)
        B_np = np.random.uniform(high=5.0, size=B_shape).astype(dtype)
        compare_tf_with_tvm([B_np], [B.name], result.name, no_gpu=True)
def test_sparse_add():
    """sparse.add op test"""
    ###################################################################
    #
    # In order to create a SparseTensor, it requires 3 input as below:
    # SparseTensor(indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])
    #
    # Above Sparse can be represented in Dense as below :
    # [[1, 0, 0, 0]
    # [0, 0, 2, 0]
    # [0, 0, 0, 0]]
    #
    # ------------------------------------------------------------------
    for dtype_inp in ["float32", "float64", "int32"]:
        for indices, values, shape in (
            ([[0, 0], [1, 2]], [4.0, 8.0], [3, 4]),
            ([[0, 0], [1, 3], [4, 3]], [3.0, 6.0, 9.0], [5, 5]),
        ):
            # Run each case both ways: sparse + dense and dense + sparse.
            for flip in (False, True):
                _test_sparse_add(indices, values, shape, shape, dtype_inp, flip)
#######################################################################
# StridedSlice
# ------------
def _test_stridedslice(
    ip_shape,
    begin,
    end,
    stride,
    dtype,
    begin_mask=0,
    end_mask=0,
    new_axis_mask=0,
    shrink_axis_mask=0,
    ellipsis_mask=0,
):
    """One iteration of a Stridedslice, compared between TF and TVM."""
    tf.reset_default_graph()
    np_data = np.random.uniform(size=ip_shape).astype(dtype)
    # A rank-0 input cannot be fed through the placeholder path used below,
    # so it is baked in as a constant and no inputs are fed. Compute the flag
    # once; `not ip_shape` is the idiomatic emptiness test (was len(...) == 0).
    is_scalar_input = not ip_shape
    with tf.Graph().as_default():
        if is_scalar_input:
            in_data = tf.constant(np_data, dtype)
        else:
            in_data = tf.placeholder(dtype, ip_shape, name="in_data")
        tf.strided_slice(
            in_data,
            begin,
            end,
            stride,
            begin_mask=begin_mask,
            end_mask=end_mask,
            new_axis_mask=new_axis_mask,
            shrink_axis_mask=shrink_axis_mask,
            ellipsis_mask=ellipsis_mask,
            name="strided_slice",
        )
        if is_scalar_input:
            compare_tf_with_tvm(None, "", "strided_slice:0")
        else:
            compare_tf_with_tvm(np_data, "in_data:0", "strided_slice:0")
def test_forward_stridedslice():
    """test StridedSlice over many combinations of begin/end/stride and the
    begin/end/new_axis/shrink_axis/ellipsis mask bits (masks are bitfields,
    one bit per slice dimension)."""
    # Scalar and rank-1 inputs, shrink_axis cases.
    _test_stridedslice([], [0], [0], [1], "float32", new_axis_mask=1)
    _test_stridedslice([2], [1], [1], [1], "float32", shrink_axis_mask=1)
    _test_stridedslice([4], [-1], [0], [1], "float32", shrink_axis_mask=1)
    _test_stridedslice([2, 1], [0], [1], [1], "float32", shrink_axis_mask=1)
    _test_stridedslice([2, 3, 4], [-2], [0], [1], "float32", shrink_axis_mask=8)
    _test_stridedslice([2, 3, 4], [0], [1], [1], "float32", shrink_axis_mask=8)
    # Negative strides and ellipsis handling.
    _test_stridedslice([3, 4, 3], [1, -1, 0], [4, -5, 3], [2, -1, 1], "float32")
    _test_stridedslice([3, 4, 3], [1, 0], [4, 3], [2, 1], "float32", ellipsis_mask=8)
    _test_stridedslice([3, 4, 3], [1, 0], [4, 2], [2, 1], "float32", ellipsis_mask=2)
    _test_stridedslice([3, 4, 5, 3], [1, 0], [4, 2], [2, 1], "float32", ellipsis_mask=2)
    _test_stridedslice([3, 4, 5, 3], [1, 0, 1], [4, 2, 2], [2, 1, 1], "float32", ellipsis_mask=2)
    # new_axis_mask alone and combined with ellipsis/shrink_axis.
    _test_stridedslice([3, 4, 3], [1, 1, 0], [4, 4, 2], [2, 1, 1], "float32", new_axis_mask=5)
    _test_stridedslice(
        [3, 4, 3], [1, 1, 1], [4, 4, 1], [2, 1, 1], "float32", ellipsis_mask=2, new_axis_mask=4
    )
    _test_stridedslice(
        [6, 4, 5], [1, 1, 1], [6, 3, 4], [2, 1, 1], "float32", ellipsis_mask=2, new_axis_mask=5
    )
    _test_stridedslice(
        [3, 4, 3], [1, 1, 2], [4, 4, 3], [2, 1, 1], "float32", ellipsis_mask=4, new_axis_mask=2
    )
    _test_stridedslice(
        [3, 4, 3], [1, 1, 2], [4, 4, 3], [2, 1, 1], "float32", ellipsis_mask=2, new_axis_mask=3
    )
    _test_stridedslice(
        [3, 4, 3], [1, 1, 0], [4, 4, 1], [2, 1, 1], "float32", ellipsis_mask=2, new_axis_mask=3
    )
    _test_stridedslice(
        [3, 4, 3], [1, 1, 2], [4, 4, 3], [2, 1, 1], "float32", ellipsis_mask=2, new_axis_mask=2
    )
    _test_stridedslice((3, 4), [1, 0], [4, 4], [1, 1], "float32", shrink_axis_mask=2)
    _test_stridedslice(
        [3, 4, 3], [1, 1, 0], [4, 4, 3], [2, 1, 1], "float32", shrink_axis_mask=2, new_axis_mask=2
    )
    _test_stridedslice(
        [3, 4, 3], [1, 1, 0], [4, 4, 3], [2, 1, 1], "float32", shrink_axis_mask=1, new_axis_mask=2
    )
    _test_stridedslice(
        [3, 4, 3], [1, 1, 0], [4, 4, 3], [2, 1, 1], "float32", shrink_axis_mask=2, new_axis_mask=1
    )
    _test_stridedslice(
        [3, 4, 5, 4, 5, 6], [0, 0], [2, 3], [1, 1], "float32", shrink_axis_mask=5, new_axis_mask=1
    )
    # High-rank inputs combining all mask kinds at once.
    _test_stridedslice(
        [3, 4, 5, 4, 5, 6],
        [0, 0, 1, 2, 1],
        [2, 3, 4, 5, 3],
        [1, 1, 2, 2, 1],
        "float32",
        shrink_axis_mask=5,
        new_axis_mask=1,
        ellipsis_mask=2,
        begin_mask=8,
        end_mask=8,
    )
    _test_stridedslice(
        [3, 4, 5, 4, 5, 6],
        [0, 0, 1, 2, 1],
        [2, 3, 4, 5, 3],
        [1, 1, 2, 2, 1],
        "float32",
        shrink_axis_mask=8,
        new_axis_mask=1,
        ellipsis_mask=2,
        begin_mask=5,
        end_mask=5,
    )
    _test_stridedslice(
        [3, 4, 5, 4, 5, 6],
        [0, 0, 1, 2, 1],
        [2, 3, 4, 5, 3],
        [1, 1, 2, 2, 1],
        "float32",
        shrink_axis_mask=16,
        new_axis_mask=1,
        ellipsis_mask=2,
        begin_mask=5,
        end_mask=5,
    )
    _test_stridedslice(
        [3, 4, 5, 4, 5, 6],
        [1, 2, 0, -3],
        [4, 5, 3, 3],
        [2, 2, 1, 1],
        "float32",
        shrink_axis_mask=8,
        new_axis_mask=1,
        ellipsis_mask=2,
        begin_mask=5,
        end_mask=8,
    )
    _test_stridedslice(
        [1, 13, 13, 3, 2],
        [0, 0],
        [1, 1],
        [1, -1],
        "float32",
        ellipsis_mask=1,
        begin_mask=2,
        end_mask=2,
    )
#######################################################################
# FloorDiv, RealDiv
# -----------------
def _test_forward_divide(ip_shape, dtype):
    """One iteration of tf.math.divide (RealDiv), compared between TF and TVM."""
    # Denominator drawn from [1, 100) so division by zero cannot occur.
    numer_np = np.random.uniform(-100, 100, size=ip_shape).astype(dtype)
    denom_np = np.random.uniform(1, 100, size=ip_shape).astype(dtype)
    tf.reset_default_graph()
    with tf.Graph().as_default():
        lhs = tf.placeholder(dtype, ip_shape, name="numer")
        rhs = tf.placeholder(dtype, ip_shape, name="denomin")
        tf.math.divide(lhs, rhs, name="RealDiv")
        compare_tf_with_tvm([numer_np, denom_np], ["numer:0", "denomin:0"], "RealDiv:0")
def _test_forward_floordiv(ip_shape, dtype):
    """One iteration of tf.math.floordiv (FloorDiv) by a constant 5."""
    numer_np = np.random.uniform(1, 100, size=ip_shape).astype(dtype)
    tf.reset_default_graph()
    with tf.Graph().as_default():
        lhs = tf.placeholder(dtype, ip_shape, name="numer")
        tf.math.floordiv(lhs, tf.constant(5, dtype=dtype), name="FloorDiv")
        compare_tf_with_tvm([numer_np], ["numer:0"], "FloorDiv:0")
def test_forward_divide():
    """test FloorDiv, RealDiv"""
    for shape, dtype in (((4,), "int32"), ((4, 3, 7), "float32")):
        _test_forward_divide(shape, dtype)
    for dtype in ("float32", "int32"):
        _test_forward_floordiv((4, 3, 7), dtype)
#######################################################################
# FloorMod
# --------
def _test_forward_floormod(in_shape, if_shape, dtype):
    """One iteration of tf.floormod, compared between TF and TVM."""
    # Both operands drawn from [1, 100) so the modulus is never zero.
    numer_np = np.random.uniform(1, 100, size=in_shape).astype(dtype)
    factor_np = np.random.uniform(1, 100, size=if_shape).astype(dtype)
    tf.reset_default_graph()
    with tf.Graph().as_default():
        numer_ph = tf.placeholder(dtype, in_shape, name="numer")
        factor_ph = tf.placeholder(dtype, if_shape, name="factor")
        tf.floormod(numer_ph, factor_ph, name="FloorMod")
        compare_tf_with_tvm([numer_np, factor_np], ["numer:0", "factor:0"], "FloorMod:0")
def test_forward_floormod():
    """test FloorMod"""
    for in_shape, if_shape, dtype in (
        ((10,), (10,), "float32"),
        ((8, 2), (1,), "float32"),  # broadcasting divisor
        ((4, 3, 7), (4, 3, 7), "float32"),
        ((4, 3, 7), (4, 3, 7), "int32"),
    ):
        _test_forward_floormod(in_shape, if_shape, dtype)
#######################################################################
# TruncateMod
# -----------
def _test_forward_truncatemod(ip_shape, dtype):
    """One iteration of tf.truncatemod, compared between TF and TVM."""
    # Divisor drawn from [1, 10) so the modulus is never zero.
    lhs_np = np.random.uniform(-100, 100, size=ip_shape).astype(dtype)
    rhs_np = np.random.uniform(1, 10, size=ip_shape).astype(dtype)
    tf.reset_default_graph()
    with tf.Graph().as_default():
        lhs_ph = tf.placeholder(dtype, ip_shape, name="in_data_1")
        rhs_ph = tf.placeholder(dtype, ip_shape, name="in_data_2")
        tf.truncatemod(lhs_ph, rhs_ph, name="truncatemod")
        compare_tf_with_tvm([lhs_np, rhs_np], ["in_data_1:0", "in_data_2:0"], "truncatemod:0")
def test_forward_truncatemod():
    """TruncateMod op test (int32 only)."""
    shape = (4, 3, 7)
    _test_forward_truncatemod(shape, "int32")
#######################################################################
# Gather, GatherV2
# --------------------------
def _test_gather(ip_shape, indice_shape, indice_value, axis, batch_dims, dtype):
    """One iteration of a GatherV2, compared between TF and TVM.

    indice_value may be a single int or a (nested) list matching indice_shape.
    """
    tf.reset_default_graph()
    with tf.Graph().as_default():
        in_data = tf.placeholder(dtype, ip_shape, name="in_data")
        indices = tf.placeholder("int32", indice_shape, name="indices")
        out = tf.gather(in_data, indices, axis=axis, batch_dims=batch_dims)
        np_data = np.random.uniform(1, 10, size=ip_shape).astype(dtype)

        def _fill_indices(indice_value):
            # Normalize a scalar or nested-list index value to an int32 array.
            # (A dead `np.array(ip_shape, dtype=dtype)` assignment that was
            # immediately overwritten has been removed.)
            if isinstance(indice_value, int):
                return np.array([indice_value], dtype="int32")
            return np.asarray(indice_value, dtype="int32")

        np_indices = _fill_indices(indice_value)
        compare_tf_with_tvm([np_data, np_indices], ["in_data:0", "indices:0"], out.name)
def test_forward_gather():
    """test Gather/GatherV2 layer"""
    # Shared index tensor for the batched rank-4 cases below.
    batched_idx = [[[1, 1, 0], [0, 0, 1]], [[0, 1, 0], [1, 0, 1]]]
    # (ip_shape, indice_shape, indice_value, axis, batch_dims, dtype)
    cases = [
        ((4,), (1,), 1, 0, 1, "int32"),
        ((4,), (1,), 1, 0, 0, "float32"),
        ((1, 4), (1,), [0], 0, 0, "int32"),
        ((4,), (1, 2, 2), [[[1, 0], [0, 1]]], 0, 0, "float32"),
        ((2, 2), (1, 2, 2), [[[1, 0], [0, 1]]], 0, 0, "int32"),
        ((2, 2), (1, 2, 2), [[[1, 0], [0, 1]]], 1, 0, "int32"),
        ((2, 2), (1, 2, 2), [[[1, 0], [0, 1]]], 0, 0, "float32"),
        ((3, 3, 3), (1, 1, 2), [[[1, 0]]], 0, 0, "int32"),
        ((3, 3, 3), (1, 1, 2), [[[1, 0]]], 2, 0, "int32"),
        ((4, 3, 5, 6), (1, 4), [[2, 1, 0, 0]], 0, 0, "float32"),
        ((2, 2), (2, 2), [[0, 0], [0, 0]], 1, 1, "float32"),
        ((2, 2, 3, 6), (2, 2, 3), batched_idx, 2, 2, "float32"),
        ((2, 2, 3, 6), (2, 2, 3), batched_idx, 3, 1, "float32"),
        ((2, 2, 3, 6), (2, 2, 3), batched_idx, 3, 2, "float32"),
        ((2, 2, 3, 6), (2, 2, 3), batched_idx, 3, 0, "float32"),
    ]
    for ip_shape, indice_shape, indice_value, axis, batch_dims, dtype in cases:
        _test_gather(ip_shape, indice_shape, indice_value, axis, batch_dims, dtype)
#######################################################################
# GatherND
# --------------------------
def _test_gather_nd(ip_shape, indice_value, dtype):
    """One iteration of tf.gather_nd with constant (graph-baked) indices."""
    tf.reset_default_graph()
    input_np = np.random.uniform(1, 100, size=ip_shape).astype(dtype)
    with tf.Graph().as_default():
        placeholder = tf.placeholder(dtype, ip_shape, name="in_data")
        tf.gather_nd(placeholder, indices=indice_value, name="gather_nd")
        compare_tf_with_tvm([input_np], ["in_data:0"], "gather_nd:0")
def test_forward_gather_nd():
    """test operator GatherNd"""
    _test_gather_nd((2, 2), [[0, 0], [1, 1]], "float32")
    _test_gather_nd((2, 2, 2), [[1, 0, 0], [0, 0, 0]], "float32")
    _test_gather_nd((4,), [1], "float32")
    _test_gather_nd((4,), [1], "int32")
    _test_gather_nd((1, 4), [0, 3], "int32")
    _test_gather_nd((2, 2), [[[1, 0], [0, 1]]], "int32")
    _test_gather_nd((2, 2), [[[1, 0], [0, 1]]], "float32")
    # This case was previously listed twice verbatim; running it once is enough.
    _test_gather_nd((3, 3, 3), [[[1, 0]]], "int32")
    _test_gather_nd((4, 3, 5, 6), [[2, 1, 0, 0]], "float32")
    _test_gather_nd((3, 3, 3), [[[2, 1]]], "int32")
#######################################################################
# BiasAdd
# -------
def test_forward_bias_add():
    """test Op BiasAdd"""

    def check_bias_add(lh_shape, rh_shape, dtype):
        # Build BiasAdd(lhs, bias) and compare the TF result with TVM.
        # (Parameter was previously misspelled `lh_shpae`.)
        tf.reset_default_graph()
        lh_data = np.random.uniform(size=lh_shape).astype(dtype)
        rh_data = np.random.uniform(size=rh_shape).astype(dtype)
        with tf.Graph().as_default():
            lft_data = tf.placeholder(dtype, name="lft_data")
            rgt_data = tf.placeholder(dtype, name="rgt_data")
            tf.nn.bias_add(lft_data, rgt_data, name="BiasAdd")
            compare_tf_with_tvm([lh_data, rh_data], ["lft_data:0", "rgt_data:0"], "BiasAdd:0")

    check_bias_add((10, 8, 16, 32), (32,), dtype="int32")
    check_bias_add((10, 20), (20,), dtype="float32")
#######################################################################
# Split
# -----
def _test_split(in_shape, axis, num_or_size_splits, dtype):
    """One iteration of a Split: alone (with a ReLU on every piece) and
    round-tripped through a Concat."""
    np_data = np.random.uniform(-5, 5, size=in_shape).astype(dtype)

    # Split followed by a ReLU per output piece.
    # (A dead `_ = len(...) if isinstance(...) else ...` statement that
    # computed and discarded the split count has been removed.)
    tf.reset_default_graph()
    with tf.Graph().as_default():
        in_data = tf.placeholder(dtype, in_shape, name="in_data")
        split = tf.split(in_data, num_or_size_splits, axis=axis)
        relu = [tf.nn.relu(i) for i in split]
        compare_tf_with_tvm([np_data], ["in_data:0"], [n.name for n in relu])

    # and now test together with concat
    tf.reset_default_graph()
    with tf.Graph().as_default():
        in_data = tf.placeholder(dtype, in_shape, name="in_data")
        splitted = tf.split(in_data, num_or_size_splits, axis=axis)
        concat = tf.concat(splitted, axis)
        compare_tf_with_tvm([np_data], "in_data:0", concat.name)
def test_forward_split():
    """test split layer"""
    # (in_shape, axis, num_or_size_splits, dtype)
    cases = [
        # rank 1
        ((3,), 0, 1, "float32"),
        ((3,), 0, 3, "float32"),
        ((6,), 0, 3, "float32"),
        # rank 2
        ((6, 2), 0, 3, "float32"),
        ((2, 6), 1, 6, "float32"),
        # rank 3
        ((6, 2, 4), 0, 2, "int32"),
        ((2, 6, 4), 1, 3, "float32"),
        ((2, 4, 6), 2, 1, "float32"),
        # rank 4
        ((6, 1, 3, 5), 0, 3, "float32"),
        ((1, 6, 3, 5), 1, 3, "float32"),
        ((1, 3, 6, 5), 2, 3, "float32"),
        ((1, 3, 5, 6), 3, 3, "float32"),
        # split along negative axis
        ((6, 1, 3, 5), -4, 3, "float32"),
        ((1, 6, 3, 5), -3, 3, "float32"),
        ((1, 3, 6, 5), -2, 3, "float32"),
        ((1, 3, 5, 6), -1, 3, "float32"),
        # size_splits list
        ((6,), 0, [1, 2, 3], "int32"),
        ((3, 6, 4), -2, [1, 4, 1], "float32"),
    ]
    for in_shape, axis, num_or_size_splits, dtype in cases:
        _test_split(in_shape, axis, num_or_size_splits, dtype)
######################################################################
# TopKV2
# ------
def _test_forward_top_k_v2(in_shape, k):
    """One iteration of tf.math.top_k (values output), compared with TVM."""
    tf.reset_default_graph()
    input_np = np.random.uniform(-100, 100, size=in_shape).astype("float32")
    with tf.Graph().as_default():
        placeholder = tf.placeholder("float32", in_shape, name="in_data")
        tf.math.top_k(placeholder, k, name="TopK")
        compare_tf_with_tvm([input_np], ["in_data:0"], "TopK:0")
def test_forward_top_k_v2():
    """test TopKV2"""
    _test_forward_top_k_v2((3,), 1)
    _test_forward_top_k_v2((3,), 3)
    # The (3, 5, 7) case was previously duplicated verbatim; once is enough.
    _test_forward_top_k_v2((3, 5, 7), 3)
#######################################################################
# Unstack
# -------
def _test_unstack(ip_shape, axis, dtype):
    """Unstack along `axis`, alone and round-tripped through tf.stack."""
    np_data = np.random.uniform(-5, 5, size=ip_shape).astype(dtype)

    # Unstack alone: every resulting slice is a graph output.
    tf.reset_default_graph()
    with tf.Graph().as_default():
        in_data = tf.placeholder(dtype, ip_shape, name="in_data")
        pieces = tf.unstack(in_data, axis=axis)
        compare_tf_with_tvm([np_data], ["in_data:0"], [piece.name for piece in pieces])

    # Unstack followed by stack should reproduce the input.
    tf.reset_default_graph()
    with tf.Graph().as_default():
        in_data = tf.placeholder(dtype, ip_shape, name="in_data")
        tf.stack(tf.unstack(in_data, axis=axis), axis=axis)
        compare_tf_with_tvm([np_data], ["in_data:0"], "stack:0")
def test_forward_unstack():
    """test unstack layer"""
    for ip_shape, axis, dtype in (
        ((6,), 0, "int32"),
        ((2, 6), 1, "float64"),
        # negative axis
        ((1, 4), -1, "int32"),
        ((3, 6, 4), -2, "float32"),
    ):
        _test_unstack(ip_shape, axis, dtype)
#######################################################################
# Tile
# ----
def _test_tile(in_shape, multiples, dtype):
    """One iteration of tf.tile, compared between TF and TVM."""
    tf.reset_default_graph()
    input_np = np.random.uniform(-5, 5, size=in_shape).astype(dtype)
    with tf.Graph().as_default():
        placeholder = tf.placeholder(dtype, in_shape, name="in_data")
        tf.tile(placeholder, multiples=multiples, name="tile")
        compare_tf_with_tvm([input_np], ["in_data:0"], "tile:0")
def test_forward_tile():
    """test Tile"""
    for in_shape, multiples, dtype in (
        ((2,), (3,), "int32"),
        ((2, 2), (2, 3), "float32"),
        ((2, 4, 6), (6, 7, 8), "float64"),
    ):
        _test_tile(in_shape, multiples, dtype)
#######################################################################
# ClipByValue
# -----------
def _test_forward_clip_by_value(ip_shape, clip_value_min, clip_value_max, dtype):
    """One iteration of tf.clip_by_value, compared between TF and TVM."""
    tf.reset_default_graph()
    input_np = np.random.uniform(-100, 100, size=ip_shape).astype(dtype)
    with tf.Graph().as_default():
        placeholder = tf.placeholder(dtype, ip_shape, name="in_data")
        tf.clip_by_value(placeholder, clip_value_min, clip_value_max, name="ClipByValue")
        compare_tf_with_tvm([input_np], ["in_data:0"], "ClipByValue:0")
def test_forward_clip_by_value():
    """test ClipByValue op"""
    # Compare LooseVersion to LooseVersion explicitly instead of relying on
    # the reflected comparison of a raw version string against a LooseVersion
    # instance, which is fragile.
    # NOTE(review): this guard runs the cases only on TF *older* than 1.9,
    # which looks inverted (it skips them on modern TF) — confirm the intended
    # direction before changing it; behavior is preserved here.
    if LooseVersion(tf.__version__) < LooseVersion("1.9"):
        _test_forward_clip_by_value((4,), 0.1, 5.0, "float32")
        _test_forward_clip_by_value((4, 4), 1, 5, "int32")
#######################################################################
# Multi Input to graph
# --------------------
def test_forward_multi_input():
    """Multi Input: out = (in1 + in2) * (in3 - in4) over four int32 inputs."""
    with tf.Graph().as_default():
        inputs = [
            tf.placeholder(tf.int32, shape=[3, 3], name="in{}".format(idx))
            for idx in range(1, 5)
        ]
        sum_node = tf.add(inputs[0], inputs[1], name="out1")
        diff_node = tf.subtract(inputs[2], inputs[3], name="out2")
        _ = tf.multiply(sum_node, diff_node, name="out")
        in_data = np.arange(9, dtype="int32").reshape([3, 3])
        compare_tf_with_tvm(
            [in_data] * 4, ["in1:0", "in2:0", "in3:0", "in4:0"], "out:0"
        )
#######################################################################
# Multi Output to Graph
# ---------------------
def test_forward_multi_output():
    """Multi Output: a graph with two independent outputs (add and subtract),
    compared between TF and TVM."""
    with tf.Graph().as_default():
        in1 = tf.placeholder(tf.int32, shape=[3, 3], name="in1")
        in2 = tf.placeholder(tf.int32, shape=[3, 3], name="in2")
        in3 = tf.placeholder(tf.int32, shape=[3, 3], name="in3")
        in4 = tf.placeholder(tf.int32, shape=[3, 3], name="in4")
        _ = tf.add(in1, in2, name="out1")
        _ = tf.subtract(in3, in4, name="out2")
        in_data = np.arange(9, dtype="int32").reshape([3, 3])
        in_data = [in_data] * 4
        in_name = ["in1:0", "in2:0", "in3:0", "in4:0"]
        out_name = ["out1:0", "out2:0"]
        # str.strip(":0") removes *characters* from the set {':', '0'} at both
        # ends rather than the ":0" suffix — it would corrupt a name like
        # "out10". Split on ':' to derive the node name from a tensor name.
        out_node = [out.split(":")[0] for out in out_name]
        in_node = [inp.split(":")[0] for inp in in_name]
        with tf.Session() as sess:
            final_graph_def = tf.graph_util.convert_variables_to_constants(
                sess,
                sess.graph.as_graph_def(add_shapes=True),
                out_node,
            )
            tf_output = run_tf_graph(sess, in_data, in_name, out_name)
            tvm_output = run_tvm_graph(
                final_graph_def, in_data, in_node, target="llvm", out_names=out_node, num_output=2
            )
            for i, tf_out in enumerate(tf_output):
                tvm.testing.assert_allclose(tf_out, tvm_output[i], atol=1e-5, rtol=1e-5)
#######################################################################
# Resize Bilinear, Nearest_Neighbor
# ---------------------------------
def _test_resize_bilinear(in_shape, to_shape, align_corners):
    """One iteration of resize bilinear with a constant target shape."""
    data = np.random.uniform(size=in_shape).astype("float32")
    target_np = np.array(to_shape).astype("int32")
    with tf.Graph().as_default():
        in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
        target = constant_op.constant(
            target_np, shape=target_np.shape, dtype=target_np.dtype
        )
        tf.image.resize_bilinear(in_data, target, align_corners=align_corners)
        compare_tf_with_tvm(data, "Placeholder:0", "ResizeBilinear:0")
def _test_resize_bilinear_from_tensor(in_shape, align_corners):
    """Bilinear resize whose output size is a tensor, not a constant; exercises
    value inference to recover the proper output shape."""
    input_values = np.random.uniform(size=in_shape).astype("float32")
    with tf.Graph().as_default():
        batch, channels = in_shape[0], in_shape[3]
        placeholder = array_ops.placeholder(
            shape=[batch, None, None, channels], dtype=input_values.dtype
        )
        dynamic_size = tf.shape(placeholder)[1:3]
        tf.image.resize_bilinear(placeholder, dynamic_size, align_corners=align_corners)
        compare_tf_with_tvm(input_values, "Placeholder:0", "ResizeBilinear:0")
def _test_resize_nearest_neighbor(in_shape, to_shape):
    """One nearest-neighbor resize comparison with a constant target size."""
    input_values = np.random.uniform(size=in_shape).astype("float32")
    target_size = np.array(to_shape).astype("int32")
    with tf.Graph().as_default():
        placeholder = array_ops.placeholder(shape=input_values.shape, dtype=input_values.dtype)
        size_const = constant_op.constant(
            target_size, shape=target_size.shape, dtype=target_size.dtype
        )
        tf.image.resize_nearest_neighbor(placeholder, size_const, name="resize_nearest_neighbor")
        compare_tf_with_tvm(input_values, "Placeholder:0", "resize_nearest_neighbor:0")
def _test_resize_nearest_neighbor_dynamic_shape(in_shape, scale):
    """Nearest-neighbor resize where the input shape is fully dynamic."""
    input_values = np.random.uniform(size=in_shape).astype("float32")
    with tf.Graph().as_default():
        placeholder = array_ops.placeholder(shape=None, dtype=input_values.dtype)
        # Output size = spatial dims (H, W) multiplied by the scale factors.
        scaled_size = tf.shape(placeholder)[1:3] * tf.constant(scale, dtype=tf.int32)
        tf.image.resize_nearest_neighbor(placeholder, scaled_size, name="resize_nearest_neighbor")
        compare_tf_with_tvm(input_values, "Placeholder:0", "resize_nearest_neighbor:0")
def test_forward_resize():
    """Resize Bilinear, Nearest_Neighbor"""
    # TensorFlow's default layout is NHWC.
    for shape, size, align in [
        ((4, 32, 32, 3), [50, 50], False),
        ((6, 32, 32, 3), [20, 20], True),
    ]:
        _test_resize_bilinear(shape, size, align)
    for shape, align in [((4, 32, 32, 3), False), ((6, 50, 50, 3), True)]:
        _test_resize_bilinear_from_tensor(shape, align)
    _test_resize_nearest_neighbor((6, 32, 32, 3), [20, 20])
    _test_resize_nearest_neighbor_dynamic_shape((1, 16, 16, 3), scale=[2, 2])
#######################################################################
# BroadcastArgs
# -----------
def _test_broadcast_args(in_shape_1, in_shape_2):
    """One BroadcastArgs comparison: broadcast two constant shape vectors."""
    s0_np = np.array(in_shape_1).astype("int32")
    s1_np = np.array(in_shape_2).astype("int32")
    with tf.Graph().as_default():
        s0 = constant_op.constant(s0_np, shape=s0_np.shape, dtype=s0_np.dtype)
        s1 = constant_op.constant(s1_np, shape=s1_np.shape, dtype=s1_np.dtype)
        tf.raw_ops.BroadcastArgs(s0=s0, s1=s1)
        # No placeholders: the graph is all constants, hence the empty input name.
        compare_tf_with_tvm(None, "", "BroadcastArgs:0", opt_level=0)
def test_forward_broadcast_args():
    """BroadcastArgs"""
    # (The docstring previously said "Resize Bilinear" — a copy/paste slip.)
    _test_broadcast_args((4, 1, 32, 32), [4, 8, 32, 32])
    _test_broadcast_args((6, 32, 32, 1), [6, 32, 32, 16])
    _test_broadcast_args((32, 32, 16), [6, 32, 32, 16])
#######################################################################
# BroadcastTo
# -----------
def _test_broadcast_to(in_shape, to_shape):
    """One BroadcastTo comparison with a constant target shape."""
    input_values = np.random.uniform(size=in_shape).astype("float32")
    target_shape = np.array(to_shape).astype("int32")
    with tf.Graph().as_default():
        placeholder = array_ops.placeholder(shape=input_values.shape, dtype=input_values.dtype)
        shape_const = constant_op.constant(
            target_shape, shape=target_shape.shape, dtype=target_shape.dtype
        )
        tf.broadcast_to(placeholder, shape_const)
        compare_tf_with_tvm(input_values, "Placeholder:0", "BroadcastTo:0", opt_level=0)
def _test_broadcast_to_from_tensor(in_shape):
    """BroadcastTo where the target shape is only known at runtime."""
    input_values = np.random.uniform(size=in_shape).astype("float32")
    with tf.Graph().as_default():
        placeholder = array_ops.placeholder(shape=[None], dtype=input_values.dtype)
        # Target shape is derived from the (dynamic) input shape.
        dynamic_shape = tf.multiply(tf.shape(placeholder), 32)
        tf.broadcast_to(placeholder, dynamic_shape)
        compare_tf_with_tvm(input_values, "Placeholder:0", "BroadcastTo:0")
def test_forward_broadcast_to():
    """BroadcastTo"""
    # (The docstring previously said "Resize Bilinear" — a copy/paste slip.)
    _test_broadcast_to((4, 1, 32, 32), [4, 8, 32, 32])
    _test_broadcast_to((6, 32, 32, 1), [6, 32, 32, 16])
    # "(1)" without a trailing comma is just the int 1 (numpy accepts an int
    # size, so behavior is unchanged) — spell the tuple explicitly.
    _test_broadcast_to_from_tensor((1,))
#######################################################################
# Fill
# ----
def _test_fill(in_shape):
    """Use the fill op to create a tensor of ones with non-constant shape."""
    with tf.Graph().as_default():
        tf.ones(shape=in_shape, dtype="float32")
        # NOTE(review): the input-name list is empty, so `in_shape` appears to
        # be passed only to satisfy compare_tf_with_tvm's signature — confirm.
        compare_tf_with_tvm(in_shape, [], "ones:0", opt_level=1)
def _test_fill_from_tensor(in_shape):
    """Use the fill op to create a tensor of ones with non-constant shape.
    Some extra ops need to be added here to prevent the graph from
    being fully constant and folded away."""
    input_values = np.random.uniform(size=in_shape).astype("float32")
    with tf.Graph().as_default():
        placeholder = array_ops.placeholder(
            shape=[in_shape[0], in_shape[1], None, None], dtype=input_values.dtype
        )
        ones = tf.ones(shape=2 * tf.shape(placeholder), dtype=input_values.dtype)
        _ = tf.math.add(placeholder, tf.reduce_mean(ones), name="out1")
        compare_tf_with_tvm(input_values, "Placeholder:0", "out1:0")
def _test_fill_symbolic_inputs(in_shape_data, in_value_data, dtype):
    """Fill where both the target shape and the fill value are graph inputs."""
    with tf.Graph().as_default():
        shape_ph = tf.placeholder(shape=[in_shape_data.shape[0]], dtype=in_shape_data.dtype)
        value_ph = tf.placeholder(shape=(), dtype=dtype)
        filled = tf.fill(shape_ph, value_ph)
        for mode in ["debug", "vm"]:
            compare_tf_with_tvm(
                [in_shape_data, in_value_data],
                [shape_ph.name, value_ph.name],
                filled.name,
                mode=mode,
            )
def test_forward_fill():
    """Fill"""
    # (The docstring previously said "Resize Bilinear" — a copy/paste slip.)
    _test_fill((32))
    _test_fill((6, 32, 64, 64))
    _test_fill_from_tensor((6, 32, 64, 64))
    _test_fill_symbolic_inputs(np.array((2,)), np.int32(9), tf.int32)
    _test_fill_symbolic_inputs(np.array((2, 3)), 9, tf.int64)
    _test_fill_symbolic_inputs(np.array((2, 3, 4)), np.float32(9.0), tf.float32)
#######################################################################
# Crop to bounding box
# --------------------
def _test_crop(in_shape, off_h, off_w, tar_h, tar_w):
    """Crop to bounding box"""
    input_values = np.random.uniform(size=in_shape).astype("float32")
    with tf.Graph().as_default():
        placeholder = array_ops.placeholder(shape=input_values.shape, dtype=input_values.dtype)
        tf.image.crop_to_bounding_box(placeholder, off_h, off_w, tar_h, tar_w)
        compare_tf_with_tvm(input_values, "Placeholder:0", "crop_to_bounding_box/Slice:0")
def test_forward_crop():
    """Crop to bounding box"""
    # NHWC input; crop a 120x120 window at row/col offset (20, 20).
    _test_crop((1, 224, 224, 3), 20, 20, 120, 120)
#######################################################################
# CropAndResize
# -------------
def _test_forward_crop_and_resize(
    img_shape,
    boxes,
    box_idx,
    crop_size,
    extrapolation_value=0.0,
    method="bilinear",
    dtype="float32",
    atol=1e-4,
    rtol=1e-4,
):
    """One CropAndResize comparison between TF and TVM.

    `boxes` are normalized [y1, x1, y2, x2] coordinates; `box_idx` maps each
    box to an image in the batch.
    """
    image = np.random.uniform(0, 10, size=img_shape).astype(dtype)
    tf.reset_default_graph()
    with tf.Graph().as_default():
        img_placeholder = array_ops.placeholder(dtype, image.shape, name="in_data")
        tf.image.crop_and_resize(
            img_placeholder,
            boxes=boxes,
            box_ind=box_idx,
            crop_size=crop_size,
            extrapolation_value=extrapolation_value,
            method=method,
            name="crop_and_resize",
        )
        compare_tf_with_tvm([image], ["in_data:0"], "crop_and_resize:0", atol=atol, rtol=rtol)
def test_forward_crop_and_resize():
    """CropAndResize"""
    # Whole-image box, various crop sizes, methods, extrapolation values.
    _test_forward_crop_and_resize([1, 6, 6, 3], [[0, 0, 1, 1]], [0], [3, 3])
    _test_forward_crop_and_resize([1, 6, 6, 3], [[0, 0, 1, 1]], [0], [3, 3], 0.2)
    _test_forward_crop_and_resize([1, 6, 6, 3], [[0, 0, 1, 1]], [0], [3, 3], 0.2, "nearest")
    # Partial boxes (normalized coords) and asymmetric crop sizes.
    _test_forward_crop_and_resize([1, 11, 11, 3], [[0.3, 0.3, 1, 1]], [0], [21, 21])
    _test_forward_crop_and_resize([1, 41, 41, 3], [[0.2, 0.4, 0.8, 0.8]], [0], [21, 11])
    _test_forward_crop_and_resize([1, 100, 100, 3], [[0, 0, 0.9, 0.9]], [0], [30, 30])
    _test_forward_crop_and_resize([1, 249, 249, 3], [[0, 0, 1, 1]], [0], [9, 9])
    _test_forward_crop_and_resize([1, 201, 301, 3], [[0.2, 0.3, 0.7, 0.8]], [0], [51, 51])
    # Batched images with several boxes mapped by box_idx.
    _test_forward_crop_and_resize(
        img_shape=[10, 11, 11, 3],
        boxes=[[0, 0, 0.9, 0.9], [0.2, 0.2, 0.8, 0.8]],
        box_idx=[0, 1],
        crop_size=[5, 5],
    )
    # The remaining (larger) workloads are skipped on AArch64.
    if platform.machine() == "aarch64":
        pytest.skip("Currently failing on AArch64")
    _test_forward_crop_and_resize([1, 224, 224, 3], [[0.1, 0.2, 1, 1]], [0], [9, 9])
    _test_forward_crop_and_resize(
        img_shape=[20, 576, 576, 3],
        boxes=[[0, 0, 1, 1], [0, 0, 0.8, 0.8], [0.1, 0.2, 0.9, 1], [0.2, 0, 1, 1]],
        box_idx=[1, 0, 2, 3],
        crop_size=[24, 24],
        extrapolation_value=0.3,
        atol=1e-3,
        rtol=1e-3,
    )
    _test_forward_crop_and_resize(
        img_shape=[20, 229, 229, 3],
        boxes=[[0, 0, 0.9, 0.9], [0.3, 0.3, 1, 1], [0.2, 0.1, 0.7, 0.8], [0, 0, 1, 1]],
        box_idx=[3, 0, 2, 1],
        crop_size=[58, 58],
        extrapolation_value=0.2,
        method="nearest",
        atol=1e-3,
        rtol=1e-3,
    )
#######################################################################
# Non Max Suppression
# -------------------
def _test_forward_nms_v3(
    bx_shape, score_shape, iou_threshold, score_threshold, out_size, dtype="float32"
):
    """NonMaxSuppressionV3 compared between TF and TVM in vm and debug modes."""
    boxes = np.random.uniform(0, 10, size=bx_shape).astype(dtype)
    scores = np.random.uniform(size=score_shape).astype(dtype)
    max_output_size = np.int32(out_size)
    tf.reset_default_graph()
    in_data_1 = tf.placeholder(dtype, boxes.shape, name="in_data_1")
    in_data_2 = tf.placeholder(dtype, scores.shape, name="in_data_2")
    in_data_3 = tf.placeholder(tf.int32, name="in_data_3")
    tf.image.non_max_suppression(
        boxes=in_data_1,
        scores=in_data_2,
        max_output_size=in_data_3,
        iou_threshold=iou_threshold,
        score_threshold=score_threshold,
        name="nms",
    )
    # Run the identical comparison once per executor mode.
    for mode in ("vm", "debug"):
        compare_tf_with_tvm(
            [boxes, scores, max_output_size],
            ["in_data_1:0", "in_data_2:0", "in_data_3:0"],
            "nms/NonMaxSuppressionV3:0",
            mode=mode,
        )
def _test_forward_nms_v4(
    bx_shape, score_shape, iou_threshold, score_threshold, out_size, dtype="float32"
):
    """NonMaxSuppressionV4 (padded NMS) compared between TF and TVM."""
    boxes = np.random.uniform(0, 10, size=bx_shape).astype(dtype)
    scores = np.random.uniform(size=score_shape).astype(dtype)
    max_output_size = np.int32(out_size)
    tf.reset_default_graph()
    in_data_1 = tf.placeholder(dtype, boxes.shape, name="in_data_1")
    in_data_2 = tf.placeholder(dtype, scores.shape, name="in_data_2")
    in_data_3 = tf.placeholder(tf.int32, name="in_data_3")
    indices_padded, num_valid = tf.image.non_max_suppression_padded(
        boxes=in_data_1,
        scores=in_data_2,
        max_output_size=in_data_3,
        iou_threshold=iou_threshold,
        score_threshold=score_threshold,
        name="nms",
        pad_to_max_output_size=True,
    )
    # Trim the padded index vector down to its valid prefix.
    num_valid = tf.reshape(num_valid, shape=(-1,))
    indices_padded = tf.reshape(indices_padded, shape=(-1,))
    tf.slice(indices_padded, tf.constant([0]), num_valid, name="SlicedIndices")
    # Run the identical comparison once per executor mode.
    for mode in ("vm", "debug"):
        compare_tf_with_tvm(
            [boxes, scores, max_output_size],
            ["in_data_1:0", "in_data_2:0", "in_data_3:0"],
            ["nms/NonMaxSuppressionV4:1", "SlicedIndices:0"],
            mode=mode,
        )
def _test_forward_nms_v5(
    bx_shape, score_shape, iou_threshold, score_threshold, out_size, dtype="float32"
):
    """NonMaxSuppressionV5 (with scores) compared between TF and TVM (vm only)."""
    boxes = np.random.uniform(0, 10, size=bx_shape).astype(dtype)
    scores = np.random.uniform(size=score_shape).astype(dtype)
    max_output_size = np.int32(out_size)
    tf.reset_default_graph()
    in_data_1 = tf.placeholder(dtype, boxes.shape, name="in_data_1")
    in_data_2 = tf.placeholder(dtype, scores.shape, name="in_data_2")
    in_data_3 = tf.placeholder(tf.int32, name="in_data_3")
    nms_args = dict(
        boxes=in_data_1,
        scores=in_data_2,
        max_output_size=in_data_3,
        iou_threshold=iou_threshold,
        score_threshold=score_threshold,
        name="nms",
    )
    tf.image.non_max_suppression_with_scores(**nms_args)
    compare_tf_with_tvm(
        [boxes, scores, max_output_size],
        ["in_data_1:0", "in_data_2:0", "in_data_3:0"],
        ["nms/NonMaxSuppressionV5:0", "nms/NonMaxSuppressionV5:1"],
        mode="vm",
    )
def test_forward_nms():
    """NonMaxSuppressionV3,5"""
    # NOTE(review): _test_forward_nms_v4 is defined above but not included in
    # this list, so the V4 kernel is never exercised — confirm whether that is
    # intentional.
    for _test_forward_nms in [_test_forward_nms_v3, _test_forward_nms_v5]:
        _test_forward_nms((5, 4), (5,), 0.7, 0.5, 5)
        _test_forward_nms((20, 4), (20,), 0.5, 0.6, 10)
        _test_forward_nms((1000, 4), (1000,), 0.3, 0.7, 1000)
        _test_forward_nms((2000, 4), (2000,), 0.4, 0.6, 7)
def _test_forward_combined_nms(
    bx_shape,
    score_shape,
    iou_threshold,
    score_threshold,
    out_size,
    total_size,
    clip_boxes=False,
    dtype="float32",
):
    """CombinedNonMaxSuppression compared between TF and TVM on all four
    outputs (boxes, scores, classes, valid-count)."""
    def get_random_scores(size, dtype):
        # Shuffled linspace yields all-distinct scores — presumably to avoid
        # tie-breaking differences between TF's and TVM's sorts (confirm).
        size1d = np.prod(size)
        scores = np.linspace(0, 1, num=size1d)
        np.random.shuffle(scores)
        return scores.reshape(size).astype(dtype)
    boxes = np.random.uniform(-1, 2, size=bx_shape).astype(dtype)
    scores = get_random_scores(score_shape, dtype)
    max_output_size = np.int32(out_size)
    tf.reset_default_graph()
    in_data_1 = tf.placeholder(dtype, boxes.shape, name="in_data_1")
    in_data_2 = tf.placeholder(dtype, scores.shape, name="in_data_2")
    in_data_3 = tf.placeholder(tf.int32, name="in_data_3")
    tf.image.combined_non_max_suppression(
        boxes=in_data_1,
        scores=in_data_2,
        max_output_size_per_class=in_data_3,
        max_total_size=total_size,
        iou_threshold=iou_threshold,
        score_threshold=score_threshold,
        pad_per_class=False,
        clip_boxes=clip_boxes,
        name="nms",
    )
    compare_tf_with_tvm(
        [boxes, scores, max_output_size],
        ["in_data_1:0", "in_data_2:0", "in_data_3:0"],
        [
            "nms/CombinedNonMaxSuppression:0",
            "nms/CombinedNonMaxSuppression:1",
            "nms/CombinedNonMaxSuppression:2",
            "nms/CombinedNonMaxSuppression:3",
        ],
    )
def test_forward_combined_nms():
    """CombinedNonMaxSuppression"""
    # Args: box shape, score shape, iou_thresh, score_thresh,
    #       per-class output size, total output size.
    _test_forward_combined_nms((1, 64, 1, 4), (1, 64, 1), 0.7, 0.5, 64, 64)
    _test_forward_combined_nms((1, 32, 1, 4), (1, 32, 1), 0.7, 0.5, 10, 64)
    _test_forward_combined_nms((1, 32, 1, 4), (1, 32, 2), 0.7, 0.5, 32, 64)
    _test_forward_combined_nms((1, 64, 1, 4), (1, 64, 20), 0.7, 0.5, 64, 10)
    # This workload seems flaky on CI.
    # See https://github.com/apache/tvm/issues/8140
    # _test_forward_combined_nms((1, 64, 20, 4), (1, 64, 20), 0.7, 0.5, 64, 64, clip_boxes=True)
    _test_forward_combined_nms((2, 200, 1, 4), (2, 200, 1), 0.4, 0.6, 100, 100)
    _test_forward_combined_nms((2, 200, 1, 4), (2, 200, 10), 0.4, 0.2, 150, 1000)
#######################################################################
# LSTM
# ----
def _test_lstm_cell(batch_size, num_hidden, num_layers, forget_bias, dtype):
    """One iteration of a LSTM cell"""
    # NOTE(review): `num_layers` is accepted but never used — a single
    # LSTMBlockCell is built below.
    tf.reset_default_graph()
    input_size = num_hidden
    input_data = np.full((batch_size, input_size), 1.0, dtype=dtype)
    in_state_c = np.full((batch_size, num_hidden), 0.1, dtype=dtype)
    in_state_h = np.full((batch_size, num_hidden), 0.1, dtype=dtype)
    def _get_tensorflow_output():
        # Build the cell, run one step in a session, then freeze the graph
        # (variables -> constants) so TVM can import it.
        with tf.Session() as sess:
            with variable_scope.variable_scope(
                "root", initializer=init_ops.constant_initializer(0.5)
            ):
                m0 = tf.placeholder(dtype, [batch_size, num_hidden], name="m0")
                m1 = tf.placeholder(dtype, [batch_size, num_hidden], name="m1")
                x = tf.placeholder(shape=(batch_size, input_size), dtype=dtype, name="input")
                g, ((out_m0, out_m1)) = tensorflow.contrib.rnn.LSTMBlockCell(
                    num_hidden, forget_bias=forget_bias
                )(x, (m0, m1))
                sess.run([variables.global_variables_initializer()])
                # NOTE(review): the feed hard-codes a (1, 2) input, which only
                # matches batch_size=1, input_size=2 — confirm callers.
                res = sess.run(
                    [g, out_m0, out_m1],
                    {
                        x.name: np.array([[1.0, 1.0]]),
                        m0.name: in_state_c,
                        m1.name: in_state_h,
                    },
                )
            graph_def = sess.graph.as_graph_def(add_shapes=True)
            final_graph_def = graph_util.convert_variables_to_constants(
                sess, graph_def, ["root/lstm_cell/LSTMBlockCell"]
            )
            return final_graph_def, res
    graph_def, tf_out = _get_tensorflow_output()
    tvm_output = run_tvm_graph(
        graph_def,
        [input_data, in_state_c, in_state_h],
        ["root/input", "root/m0", "root/m1"],
        num_output=7,
    )
    assert isinstance(tvm_output, list)
    # tf_out is [g, out_m0, out_m1] from the sess.run above; compare g with
    # TVM output 6 and out_m0 with TVM output 1.
    tvm.testing.assert_allclose(tf_out[0], tvm_output[6], rtol=1e-3, atol=1e-3)
    tvm.testing.assert_allclose(tf_out[1], tvm_output[1], rtol=1e-3, atol=1e-3)
def test_forward_lstm():
    """test LSTM block cell"""
    if package_version.parse(tf.VERSION) >= package_version.parse("2.0.0"):
        # tf.contrib.rnn.LSTMBlockCell was removed in TF 2.0, so skip there.
        return
    _test_lstm_cell(1, 2, 1, 0.5, "float32")
#######################################################################
# Pack
# ---
def _test_pack(axis, shape, **kwargs):
    """Stack two identical arange tensors along `axis` and compare with TVM."""
    elem_count = np.prod(shape)
    first = np.arange(elem_count, dtype=np.float32).reshape(shape)
    second = np.arange(elem_count, dtype=np.float32).reshape(shape)
    with tf.Graph().as_default():
        ph_a = array_ops.placeholder(shape=shape, dtype="float32", name="pl_a")
        ph_b = array_ops.placeholder(shape=shape, dtype="float32", name="pl_b")
        stacked = tf.stack([ph_a, ph_b], axis=axis, **kwargs)
        assert stacked.op.op_def.name == "Pack", "tf.stack() is expected to produce 'Pack' operation"
        compare_tf_with_tvm([first, second], ["pl_a:0", "pl_b:0"], "stack:0")
def test_forward_pack():
    """Pack (tf.stack) over every valid axis, including negative axes and
    scalar inputs."""
    for axis in range(-3, 3):
        _test_pack(axis, [3, 2, 1])
    for axis in range(-1, 1):
        _test_pack(axis, [3])
    # Stacking scalars (rank-0 inputs).
    _test_pack(0, [])
#######################################################################
# Unpack
# ------
def _test_forward_unpack(in_shape, axis, dtype):
    """test operator Unpack"""
    sample = np.random.uniform(-100, 100, size=in_shape).astype(dtype)
    tf.reset_default_graph()
    with tf.Graph().as_default():
        placeholder = tf.placeholder(dtype, in_shape, name="in_data")
        tf.unstack(placeholder, axis=axis, name="Unpack")
        compare_tf_with_tvm([sample], ["in_data:0"], "Unpack:0")
def test_forward_unpack():
    """Unpack (tf.unstack) along positive and negative axes, several dtypes."""
    _test_forward_unpack((3,), 0, "int32")
    _test_forward_unpack((3,), -1, "int16")
    _test_forward_unpack((21, 23, 3), 2, "float32")
#######################################################################
# Range
# -----
def test_forward_range():
    """test operator Range"""
    # Explicit dtype: int32 and int64 ranges.
    for dtype in (tf.int32, tf.int64):
        tf.reset_default_graph()
        with tf.Graph().as_default():
            tf.range(1, 18, 3, name="range", dtype=dtype)
            compare_tf_with_tvm([], [], "range:0")
    # Float range, exercising dtype assignment for Range.
    tf.reset_default_graph()
    with tf.Graph().as_default():
        tf.range(1, 256 + 1, 1, dtype=tf.float32)
        compare_tf_with_tvm([], [], "range:0")
#######################################################################
# Einsum
# -----
def _test_einsum(equation, dtype, *shape_of_input_tensors):
    """Test Einsum Op"""
    with tf.Graph().as_default():
        placeholders = []
        feed_values = []
        for idx, shape in enumerate(shape_of_input_tensors):
            ph = tf.placeholder(shape=shape, dtype=dtype, name=f"input_{idx}")
            placeholders.append(ph)
            feed_values.append(np.random.normal(size=shape).astype(dtype))
        result = tf.einsum(equation, *placeholders)
        compare_tf_with_tvm(feed_values, [p.name for p in placeholders], result.name)
def test_forward_einsum():
    """Einsum over a representative set of contraction equations."""
    for dtype in ["float32"]:
        _test_einsum("ij,jk->ik", dtype, [2, 3], [3, 5])  # Matmul
        _test_einsum("ij,jk", dtype, [2, 3], [3, 5])  # Matmul (implicit output)
        _test_einsum("i,i->", dtype, [2], [2])  # Dot product
        _test_einsum("i,j->ij", dtype, [3], [5])  # Outer product
        _test_einsum("ij->ji", dtype, [2, 3])  # Transpose
        _test_einsum("ii->i", dtype, [3, 3])  # Diag
        _test_einsum("ii", dtype, [3, 3])  # Trace of a square matrix
        _test_einsum("bij,bjk->bik", dtype, [7, 5, 3], [7, 3, 2])  # Batch matmul
#######################################################################
# Pad
# ---
def _test_pad(input_shape, paddings, mode, **kwargs):
    """One iteration of pad operation with given shape"""
    data = np.arange(np.prod(input_shape), dtype=np.float32).reshape(input_shape)
    with tf.Graph().as_default():
        placeholder = array_ops.placeholder(shape=input_shape, dtype="float32")
        pad_values = constant_op.constant(paddings)
        _ = tf.pad(placeholder, paddings=pad_values, mode=mode, **kwargs)
        # TF emits different op names depending on the mode/arguments.
        if mode != "CONSTANT":
            out_name = "MirrorPad:0"
        elif "constant_values" in kwargs:
            out_name = "PadV2:0"
        else:
            out_name = "Pad:0"
        compare_tf_with_tvm(data, "Placeholder:0", out_name)
def test_forward_pad():
    """Pad"""
    # (mode, extra kwargs) pairs covering Pad, PadV2 and MirrorPad.
    cases = [
        ("CONSTANT", {}),
        ("CONSTANT", {"constant_values": 1.0}),
        ("SYMMETRIC", {}),
        ("REFLECT", {}),
    ]
    for mode, extra in cases:
        _test_pad((2, 3), [[1, 1], [2, 2]], mode=mode, **extra)
#######################################################################
# Logical operators
# --------------------
def test_logical_and():
    """Elementwise tf.logical_and on boolean tensors."""
    shape = (1, 4, 4, 3)
    with tf.Graph().as_default():
        lhs = tf.placeholder(tf.bool, shape=list(shape), name="in1")
        rhs = tf.placeholder(tf.bool, shape=list(shape), name="in2")
        _ = tf.logical_and(lhs, rhs, name="out")
        lhs_vals = np.random.choice(a=[False, True], size=shape).astype("bool")
        rhs_vals = np.random.choice(a=[False, True], size=shape).astype("bool")
        compare_tf_with_tvm([lhs_vals, rhs_vals], ["in1:0", "in2:0"], "out:0")
def test_logical_or():
    """Elementwise tf.logical_or on boolean tensors."""
    shape = (1, 4, 4, 3)
    with tf.Graph().as_default():
        lhs = tf.placeholder(tf.bool, shape=list(shape), name="in1")
        rhs = tf.placeholder(tf.bool, shape=list(shape), name="in2")
        _ = tf.logical_or(lhs, rhs, name="out")
        lhs_vals = np.random.choice(a=[False, True], size=shape).astype("bool")
        rhs_vals = np.random.choice(a=[False, True], size=shape).astype("bool")
        compare_tf_with_tvm([lhs_vals, rhs_vals], ["in1:0", "in2:0"], "out:0")
def test_logical_xor():
    """Elementwise tf.logical_xor on boolean tensors."""
    shape = (1, 4, 4, 3)
    with tf.Graph().as_default():
        lhs = tf.placeholder(tf.bool, shape=list(shape), name="in1")
        rhs = tf.placeholder(tf.bool, shape=list(shape), name="in2")
        _ = tf.logical_xor(lhs, rhs, name="out")
        lhs_vals = np.random.choice(a=[False, True], size=shape).astype("bool")
        rhs_vals = np.random.choice(a=[False, True], size=shape).astype("bool")
        compare_tf_with_tvm([lhs_vals, rhs_vals], ["in1:0", "in2:0"], "out:0")
def test_logical_not():
    """Elementwise tf.logical_not on a boolean tensor."""
    shape = (1, 4, 4, 3)
    with tf.Graph().as_default():
        operand = tf.placeholder(tf.bool, shape=list(shape), name="in1")
        _ = tf.logical_not(operand, name="out")
        operand_vals = np.random.choice(a=[False, True], size=shape).astype("bool")
        compare_tf_with_tvm(operand_vals, "in1:0", "out:0")
def test_forward_logical():
    """Run all elementwise logical-operator comparisons."""
    for case in (test_logical_and, test_logical_or, test_logical_xor, test_logical_not):
        case()
#######################################################################
# Where, Select, SelectV2
# -------------
def test_forward_where():
    """Where: return elements depending on conditions"""
    with tf.Graph().as_default():
        with tf.Session() as _:
            input1 = tf.placeholder(tf.int32, shape=[1, 4, 4, 3], name="input1")
            input2 = tf.placeholder(tf.int32, shape=[1, 4, 4, 3], name="input2")
            mask = input1 > input2
            # Selects input1+1 where mask is true, else input2*2 (op name "Select").
            tf.where(mask, input1 + 1, input2 * 2)
            # NOTE(review): feed data is uint32 while the placeholders are
            # int32 — values in [0, 10) fit either way, but confirm intended.
            in_data1 = np.random.uniform(0, 10, size=(1, 4, 4, 3)).astype("uint32")
            in_data2 = np.random.uniform(0, 10, size=(1, 4, 4, 3)).astype("uint32")
            compare_tf_with_tvm([in_data1, in_data2], ["input1:0", "input2:0"], "Select:0")
#######################################################################
# Inception V3
# ------------
@pytest.mark.skip(reason="See https://github.com/apache/tvm/issues/10275")
def test_forward_inception_v3():
    """test inception V3 model"""
    with tf.Graph().as_default():
        graph_def = tf_testing.get_workload(
            "InceptionV3/inception_v3_2016_08_28_frozen-with_shapes.pb"
        )
        # Call the utility to import the graph definition into default graph.
        graph_def = tf_testing.ProcessGraphDefParam(graph_def)
        # Random NHWC input at 299x299 (the resolution this frozen graph takes).
        data = np.random.uniform(size=(1, 299, 299, 3)).astype("float32")
        with tf.Session() as sess:
            tf_output = run_tf_graph(sess, data, "input:0", "InceptionV3/Predictions/Reshape_1:0")
            tvm_output = run_tvm_graph(graph_def, data, "input")
            tvm.testing.assert_allclose(tf_output[0], tvm_output[0], rtol=1e-5, atol=1e-5)
#######################################################################
# Inception V1
# ------------
def test_forward_inception_v1():
    """test inception V1 model"""
    with tf.Graph().as_default():
        graph_def = tf_testing.get_workload("InceptionV1/classify_image_graph_def-with_shapes.pb")
        # Call the utility to import the graph definition into default graph.
        graph_def = tf_testing.ProcessGraphDefParam(graph_def)
        # Build an image from random data, save it as a JPEG, and read the
        # raw bytes back so the graph's DecodeJpeg node gets real input.
        img_array = np.random.uniform(size=(1, 600, 600, 3)).astype("uint8")
        # ndarray.tostring() is a deprecated alias of tobytes(); use the
        # non-deprecated spelling (the produced bytes are identical).
        img = Image.frombuffer("RGB", (600, 600), img_array.tobytes(), "raw", "RGB", 0, 1)
        temp = utils.tempdir()
        img_path = temp.relpath("tf-test.jpg")
        img.save(img_path)
        if not tf.gfile.Exists(os.path.join(img_path)):
            tf.logging.fatal("File does not exist %s", img_path)
        data = tf.gfile.FastGFile(os.path.join(img_path), "rb").read()
        temp.remove()
        # Extract tensorflow decoded image frame for tvm input
        with tf.Session() as sess:
            tvm_data = run_tf_graph(sess, data, "DecodeJpeg/contents:0", "DecodeJpeg:0")
        with tf.Session() as sess:
            tf_output = run_tf_graph(sess, data, "DecodeJpeg/contents:0", "softmax:0")
            tvm_output = run_tvm_graph(graph_def, tvm_data, "DecodeJpeg/contents")
            tvm.testing.assert_allclose(tf_output[0], tvm_output[0], rtol=1e-5, atol=1e-5)
#######################################################################
# Mobilenet
# ---------
def test_forward_mobilenet():
    """test mobilenet model"""
    # MobilenetV2
    with tf.Graph().as_default():
        graph_def = tf_testing.get_workload(
            "https://storage.googleapis.com/mobilenet_v2/checkpoints/mobilenet_v2_1.4_224.tgz",
            "mobilenet_v2_1.4_224_frozen.pb",
        )
        # Call the utility to import the graph definition into default graph.
        graph_def = tf_testing.ProcessGraphDefParam(graph_def)
        # Random NHWC input at the model's 224x224 resolution.
        data = np.random.uniform(size=(1, 224, 224, 3)).astype("float32")
        out_node = "MobilenetV2/Predictions/Reshape_1"
        with tf.Session() as sess:
            # Add shapes to the graph.
            graph_def = tf_testing.AddShapesToGraphDef(sess, out_node)
            tf_output = run_tf_graph(sess, data, "input:0", out_node + ":0")
            tvm_output = run_tvm_graph(graph_def, data, "input")
            tvm.testing.assert_allclose(
                np.squeeze(tvm_output[0]), np.squeeze(tf_output[0]), rtol=1e-5, atol=1e-5
            )
#######################################################################
# ResnetV2
# --------
@tvm.testing.requires_gpu
def test_forward_resnetv2():
    """test resnet model"""
    if is_gpu_available():
        with tf.Graph().as_default():
            graph_def = tf_testing.get_workload(
                "ResnetV2/resnet-20180601_resnet_v2_imagenet-shapes.pb"
            )
            # Call the utility to import the graph definition into default graph.
            graph_def = tf_testing.ProcessGraphDefParam(graph_def)
            data = np.random.uniform(size=(128, 224, 224, 3)).astype("float32")
            out_node = "ArgMax"
            with tf.Session() as sess:
                tf_output = run_tf_graph(sess, data, "input_tensor:0", out_node + ":0")
                # Compare on every enabled device (CPU and CUDA).
                for device in ["llvm", "cuda"]:
                    _ = tvm.device(device, 0)
                    if not tvm.testing.device_enabled(device):
                        print(f"Skip because {device} is not enabled")
                        continue
                    tvm_output = run_tvm_graph(
                        graph_def, data, "input_tensor", len(tf_output), target=device
                    )
                    tvm.testing.assert_allclose(
                        np.squeeze(tvm_output[0]), np.squeeze(tf_output[0]), rtol=1e-5, atol=1e-5
                    )
#######################################################################
# SSD
# ---
def _test_ssd_impl():
    """Test SSD with backbone MobileNet V1"""
    with tf.Graph().as_default():
        graph_def = tf_testing.get_workload(
            "object_detection/ssd_mobilenet_v1_ppn_shared_"
            "box_predictor_300x300_coco14_sync_2018_07_03.pb"
        )
        # Call the utility to import the graph definition into default graph.
        graph_def = tf_testing.ProcessGraphDefParam(graph_def)
        # uint8 image tensor, as the object-detection graph expects.
        data = np.random.uniform(0.0, 255.0, size=(1, 512, 512, 3)).astype("uint8")
        in_node = "image_tensor"
        out_node = ["detection_boxes", "detection_scores", "detection_classes"]
        with tf.Session() as sess:
            tf_output = run_tf_graph(
                sess, data, f"{in_node}:0", [f"{oname}:0" for oname in out_node]
            )
            # TODO(kevinthesun): enable gpu test when VM heterogeneous execution is ready.
            for device in ["llvm"]:
                _ = tvm.device(device, 0)
                if not tvm.testing.device_enabled(device):
                    print(f"Skip because {device} is not enabled")
                    continue
                tvm_output = run_tvm_graph(
                    graph_def,
                    data,
                    in_node,
                    len(out_node),
                    target=device,
                    layout="NCHW",
                    out_names=out_node,
                    mode="vm",
                    disabled_pass=["FoldScaleAxis"],
                    serialize=True,
                )
                for i in range(len(out_node)):
                    tvm.testing.assert_allclose(tvm_output[i], tf_output[i], rtol=1e-3, atol=1e-3)
@pytest.mark.skip(
    reason="Use of threading module here hides errors, see https://github.com/apache/tvm/pull/10231"
)
def test_forward_ssd():
    """Run the SSD test in a worker thread with a 100 MB stack."""
    run_thread = threading.Thread(target=_test_ssd_impl, args=())
    # stack_size() returns the previous size, which is restored after join().
    old_stack_size = threading.stack_size(100 * 1024 * 1024)
    run_thread.start()
    run_thread.join()
    threading.stack_size(old_stack_size)
#######################################################################
# Placeholder
# -----------
def test_forward_placeholder():
    """test a simple pb with Placeholder node in the end of GraphDef"""
    with tf.Graph().as_default():
        graph_def = tf_testing.get_workload("Custom/placeholder.pb")
        # Call the utility to import the graph definition into default graph.
        graph_def = tf_testing.ProcessGraphDefParam(graph_def)
        data = np.random.uniform(size=(1, 224, 224, 3)).astype("float32")
        out_node = "mul"
        with tf.Session() as sess:
            # Add shapes to the graph.
            graph_def = tf_testing.AddShapesToGraphDef(sess, out_node)
            tf_output = run_tf_graph(sess, data, "Placeholder:0", out_node + ":0")
            tvm_output = run_tvm_graph(graph_def, data, "Placeholder")
            tvm.testing.assert_allclose(
                np.squeeze(tvm_output[0]), np.squeeze(tf_output[0]), rtol=1e-5, atol=1e-5
            )
#######################################################################
# PTB
# ---
try:
# Load contrib for running ptb model in tf version before 2.0
import tensorflow.contrib
except ImportError:
pass
def test_forward_ptb():
    """test ptb model"""
    config = tf_testing.get_config()
    num_steps = config.num_steps
    num_hidden = config.hidden_size
    num_layers = config.num_layers
    batch_size = config.batch_size
    vocab_size = config.vocab_size
    out_sample_shape = (batch_size, vocab_size)
    out_state_shape = (batch_size, num_hidden)
    # Sample input
    inpt = "we have no useful information on"
    cnt_sample = 20
    def _pretty_print(items, is_char_model, id2word):
        # Join sampled word ids back into text ("_" stands for space in
        # character models).
        if not is_char_model:
            return " ".join([id2word[x] for x in items])
        else:
            return "".join([id2word[x] for x in items]).replace("_", " ")
    def _get_tvm_graph_module(graph_def):
        # Cell inputs 'c and 'h' consist of all layers values
        shape_dict = {"Model/Placeholder": (batch_size, num_steps)}
        # Import twice — once with span filling disabled, once enabled — and
        # assert both yield structurally identical IR.
        with tvm.testing.disable_span_filling():
            mod, params = relay.frontend.from_tensorflow(
                graph_def,
                shape=shape_dict,
                outputs=[
                    "Model/Softmax:0",
                    "Model/RNN/RNN/multi_rnn_cell/cell_0/lstm_cell/LSTMBlockCell:1",
                    "Model/RNN/RNN/multi_rnn_cell/cell_0/lstm_cell/LSTMBlockCell:6",
                    "Model/RNN/RNN/multi_rnn_cell/cell_0/lstm_cell/LSTMBlockCell_1:1",
                    "Model/RNN/RNN/multi_rnn_cell/cell_0/lstm_cell/LSTMBlockCell_1:6",
                ],
            )
        with tvm.testing.enable_span_filling():
            mod_with_span, _ = relay.frontend.from_tensorflow(
                graph_def,
                shape=shape_dict,
                outputs=[
                    "Model/Softmax:0",
                    "Model/RNN/RNN/multi_rnn_cell/cell_0/lstm_cell/LSTMBlockCell:1",
                    "Model/RNN/RNN/multi_rnn_cell/cell_0/lstm_cell/LSTMBlockCell:6",
                    "Model/RNN/RNN/multi_rnn_cell/cell_0/lstm_cell/LSTMBlockCell_1:1",
                    "Model/RNN/RNN/multi_rnn_cell/cell_0/lstm_cell/LSTMBlockCell_1:6",
                ],
            )
        assert tvm.ir.structural_equal(mod["main"], mod_with_span["main"])
        target = "llvm"
        with tvm.transform.PassContext(opt_level=0):
            graph, lib, params = relay.build(mod, target, params=params)
        dev = tvm.cpu(0)
        return params, graph_executor.create(graph, lib, dev)
    def _do_tvm_sample(model, data, in_states, params, num_samples):
        """Sampled from the model"""
        samples = []
        state = in_states
        sample = None
        def _get_sample(data, state):
            # One forward step: feed the word id plus the four LSTM state
            # tensors, read the logits and the four updated states back.
            input_data = np.full((batch_size, num_steps), data, dtype="int32")
            model.set_input("Model/Placeholder", tvm.nd.array(input_data.astype("int32")))
            model.set_input(
                "Model/MultiRNNCellZeroState/LSTMBlockCellZeroState/zeros",
                tvm.nd.array(state[0].astype("float32")),
            )
            model.set_input(
                "Model/MultiRNNCellZeroState/LSTMBlockCellZeroState/zeros_1",
                tvm.nd.array(state[1].astype("float32")),
            )
            model.set_input(
                "Model/MultiRNNCellZeroState/LSTMBlockCellZeroState_1/zeros",
                tvm.nd.array(state[2].astype("float32")),
            )
            model.set_input(
                "Model/MultiRNNCellZeroState/LSTMBlockCellZeroState_1/zeros_1",
                tvm.nd.array(state[3].astype("float32")),
            )
            model.set_input(**params)
            model.run()
            tvm_output = model.get_output(0, tvm.nd.empty(out_sample_shape, "float32")).numpy()
            state_output = []
            for i in range(4):
                state_output.append(
                    model.get_output(i + 1, tvm.nd.empty(out_state_shape, "float32")).numpy()
                )
            sample = tf_testing.pick_from_weight(tvm_output[0])
            return sample, state_output
        # Prime the state with the seed words, then keep sampling by feeding
        # each sampled id back in until num_samples ids have been produced.
        for x in data:
            sample, state = _get_sample(x, state)
        if sample is not None:
            samples.append(sample)
        else:
            samples.append(0)
        k = 1
        while k < num_samples:
            sample, state = _get_sample(samples[-1], state)
            samples.append(sample)
            k += 1
        return samples, state
    with tf.Graph().as_default():
        word_to_id, id_to_word, graph_def = tf_testing.get_workload_ptb()
        vocab_size = len(word_to_id)
        # Call the utility to import the graph definition into default graph.
        graph_def = tf_testing.ProcessGraphDefParam(graph_def)
        sess = tf.Session()
    # TVM graph module creation
    params, m = _get_tvm_graph_module(graph_def)
    # Create 10 predicted statments of 20 words
    cnt_stm = 0
    while cnt_stm < 10:
        cnt_stm += 1
        # Zero initial (c, h) state for every layer.
        in_state = [np.full((batch_size, num_hidden), 0, dtype="float32")] * 2 * num_layers
        seed_for_sample = inpt.split()
        tvm_samples, _ = _do_tvm_sample(
            m, [word_to_id[word] for word in seed_for_sample], in_state, params, cnt_sample
        )
        tvm_sample_str = _pretty_print(tvm_samples, False, id_to_word)
        tf_samples, _ = tf_testing.do_tf_sample(
            sess, [word_to_id[word] for word in seed_for_sample], in_state, cnt_sample
        )
        tf_sample_str = _pretty_print(tf_samples, False, id_to_word)
        # Feed TVM's output back in as the next seed sentence.
        inpt = tvm_sample_str
        tvm.testing.assert_allclose(tf_samples, tvm_samples, rtol=1e-5, atol=1e-5)
        assert tvm_sample_str == tf_sample_str
#######################################################################
# LRN (Local Response Normalization)
# ----------------------------------
def _test_lrn(ishape, size, axis, bias, alpha, beta):
    """testing local response normalization"""
    # NOTE(review): `size / 2` is float division under Python 3, so an odd
    # `size` yields a fractional depth_radius that TF coerces — confirm the
    # intended radius for odd window sizes.
    lrn_depth_radius = size / 2
    inp_array = np.random.uniform(size=ishape).astype(np.float32)
    with tf.Graph().as_default():
        in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype, name="lrn0_data")
        nn_ops.local_response_normalization(
            in1, name="lrn", depth_radius=lrn_depth_radius, bias=bias, alpha=alpha, beta=beta
        )
        compare_tf_with_tvm(inp_array, "lrn0_data:0", "lrn:0")
def test_forward_lrn():
    """LRN: compare TF and TVM on one 4-D input."""
    _test_lrn((1, 3, 20, 20), 3, 1, 1.0, 1.0, 0.5)
#######################################################################
# l2_normalize
# ------------
def _test_l2_normalize(ishape, eps, axis):
    """testing l2 normalize (uses max, sum, square, sqrt frontend operators)"""
    values = np.random.uniform(size=ishape).astype(np.float32)
    with tf.Graph().as_default():
        in1 = tf.placeholder(shape=values.shape, dtype=values.dtype)
        nn.l2_normalize(in1, axis=axis, epsilon=eps, name=None, dim=None)
        compare_tf_with_tvm(values, "Placeholder:0", "l2_normalize:0")
def test_forward_l2_normalize():
    """L2 normalization along the batch axis."""
    _test_l2_normalize((1, 3, 20, 20), 0.001, (0,))
#######################################################################
# transpose
# ---------
def _test_forward_transpose(ishape, axes=None):
    """Transpose with a static `perm` attribute (or default full reversal)."""
    data = np.random.uniform(size=ishape).astype(np.float32)
    with tf.Graph().as_default():
        in1 = tf.placeholder(shape=data.shape, dtype=data.dtype, name="transpose_data")
        if axes is None:
            # default tf.transpose reverses all axes
            tf.transpose(in1)
        else:
            tf.transpose(in1, perm=axes)
        compare_tf_with_tvm(data, "transpose_data:0", "transpose:0")
def _test_forward_tranapose_axes_input(ishape, axes):
    # NOTE(review): "tranapose" is a typo kept for compatibility with callers.
    """Transpose whose `perm` is a computed tensor rather than an attribute."""
    data = np.random.uniform(size=ishape).astype(np.float32)
    axes_np = np.array(axes).astype(np.int32)
    with tf.Graph().as_default():
        in1 = tf.placeholder(shape=data.shape, dtype=data.dtype, name="transpose_data")
        const1 = tf.constant(axes_np, dtype=tf.int32)
        # make axes an input to tf.transpose, but not an input to the graph,
        # so it can be extracted with infer_value_simulated
        axes = tf.reverse(const1, axis=[-1])
        tf.transpose(in1, axes)
        compare_tf_with_tvm([data], ["transpose_data:0"], "transpose:0")
def test_forward_transpose():
    """Transpose over several ranks, with and without explicit perm."""
    _test_forward_transpose((2, 3, 4), (1, 2, 0))
    _test_forward_transpose((2, 3, 4))
    _test_forward_transpose((7, 8, 8, 10))
    _test_forward_transpose((2, 3, 4), (1, 2, 0))
    _test_forward_transpose((2, 3, 4), (0, 1, 2))
    _test_forward_transpose((2, 3, 4, 5), (3, 0, 1, 2))
    _test_forward_tranapose_axes_input((2, 3, 4), (1, 2, 0))
    _test_forward_tranapose_axes_input((2, 3, 4, 5), (3, 0, 1, 2))
def _test_forward_slice_operation_input(input_value, begin_value, size_value):
    """Slice: compare TF and TVM for one (input, begin, size) combination."""
    source = np.array(input_value, dtype=np.float32)
    with tf.Graph().as_default():
        source_ph = tf.placeholder(shape=source.shape, dtype=source.dtype, name="input")
        tf.slice(source_ph, begin_value, size_value, name="slice_output")
        compare_tf_with_tvm([source], ["input:0"], "slice_output:0")
def test_forward_slice():
    """Slice, including size=-1 (take the remainder of the axis)."""
    _test_forward_slice_operation_input([1, 1], [0], [2])
    _test_forward_slice_operation_input([0, 1, 2, 3], [3], [-1])
    _test_forward_slice_operation_input(
        [[0, 1, 2, 3], [4, 5, 6, 7]], begin_value=[0, 1], size_value=[-1, -1]
    )
def test_forward_ceil():
    """Ceil elementwise op."""
    shape = (1, 3, 10, 10)
    values = np.random.uniform(size=shape).astype(np.float32)
    with tf.Graph().as_default():
        inp = tf.placeholder(shape=values.shape, dtype=values.dtype)
        tf.ceil(inp)
        compare_tf_with_tvm(values, "Placeholder:0", "Ceil:0")
def test_forward_floor():
    """Floor elementwise op."""
    shape = (1, 3, 10, 10)
    values = np.random.uniform(size=shape).astype(np.float32)
    with tf.Graph().as_default():
        inp = tf.placeholder(shape=values.shape, dtype=values.dtype)
        tf.floor(inp)
        compare_tf_with_tvm(values, "Placeholder:0", "Floor:0")
def test_forward_relu():
    """ReLU under both graph_executor and vm execution modes."""
    shape = (1, 3, 10, 10)
    values = np.random.uniform(-5, 5, size=shape).astype(np.float32)
    for mode in ["graph_executor", "vm"]:
        with tf.Graph().as_default():
            inp = tf.placeholder(shape=values.shape, dtype=values.dtype)
            tf.nn.relu(inp)
            compare_tf_with_tvm(values, "Placeholder:0", "Relu:0", mode=mode)
def test_forward_leaky_relu():
    """LeakyReLU (alpha=0.4) under both execution modes."""
    shape = (1, 3, 10, 10)
    values = np.random.uniform(-5, 5, size=shape).astype(np.float32)
    for mode in ["graph_executor", "vm"]:
        with tf.Graph().as_default():
            inp = tf.placeholder(shape=values.shape, dtype=values.dtype)
            tf.nn.leaky_relu(inp, alpha=0.4)
            compare_tf_with_tvm(values, "Placeholder:0", "LeakyRelu:0", mode=mode)
def test_forward_elu():
    """ELU elementwise op."""
    shape = (1, 3, 10, 10)
    values = np.random.uniform(-5, 5, size=shape).astype(np.float32)
    with tf.Graph().as_default():
        inp = tf.placeholder(shape=values.shape, dtype=values.dtype)
        tf.nn.elu(inp)
        compare_tf_with_tvm(values, "Placeholder:0", "Elu:0")
def test_forward_selu():
    """SELU elementwise op."""
    shape = (1, 3, 10, 10)
    values = np.random.uniform(-5, 5, size=shape).astype(np.float32)
    with tf.Graph().as_default():
        inp = tf.placeholder(shape=values.shape, dtype=values.dtype)
        tf.nn.selu(inp)
        compare_tf_with_tvm(values, "Placeholder:0", "Selu:0")
def test_forward_tanh():
    """Tanh elementwise op."""
    shape = (1, 3, 10, 10)
    values = np.random.uniform(-5, 5, size=shape).astype(np.float32)
    with tf.Graph().as_default():
        inp = tf.placeholder(shape=values.shape, dtype=values.dtype)
        tf.nn.tanh(inp)
        compare_tf_with_tvm(values, "Placeholder:0", "Tanh:0")
#######################################################################
# Softmax
# -------
def test_forward_softmax():
    """test operator Softmax"""
    def check_softmax(in_shape, axis, dtype):
        # Wide value range (-100, 100) stresses the numerically-stabilized path.
        np_data = np.random.uniform(-100, 100, size=in_shape).astype(dtype)
        tf.reset_default_graph()
        with tf.Graph().as_default():
            in_data = tf.placeholder(dtype, in_shape, name="in_data")
            tf.nn.softmax(in_data, axis=axis, name="Softmax")
            compare_tf_with_tvm([np_data], ["in_data:0"], "Softmax:0")
    # positive and negative axis forms of the same reduction
    check_softmax((2, 3, 5), 2, "float32")
    check_softmax((2, 3, 5), -1, "float32")
#######################################################################
# Tensor
# ------
def test_forward_round():
    """test Round"""
    values = np.random.uniform(-10, 10, size=(5, 7)).astype(np.float32)
    tf.reset_default_graph()
    with tf.Graph().as_default():
        inp = tf.placeholder(tf.float32, (5, 7), name="in_data")
        tf.round(inp, name="round")
        compare_tf_with_tvm([values], ["in_data:0"], "round:0")
def test_forward_abs():
    """test operator Abs"""
    values = np.random.uniform(1, 100, size=(9, 11)).astype(np.float32)
    tf.reset_default_graph()
    with tf.Graph().as_default():
        inp = tf.placeholder(tf.float32, (9, 11), name="in_data")
        tf.math.abs(inp, name="abs")
        compare_tf_with_tvm([values], ["in_data:0"], "abs:0")
def _test_forward_zeros_like(in_shape, dtype):
    """ZerosLike: compare TF and TVM for one shape/dtype."""
    np_data = np.random.uniform(-10, 10, size=in_shape).astype(dtype)
    tf.reset_default_graph()
    with tf.Graph().as_default():
        in_data = tf.placeholder(dtype, in_shape, name="in_data")
        tf.zeros_like(in_data, name="zeros_like")
        compare_tf_with_tvm([np_data], ["in_data:0"], "zeros_like:0")
def test_forward_zeros_like():
    """ZerosLike over integer and float dtypes."""
    # NOTE(review): the int dtypes are gated on TF older than 1.2 —
    # confirm this guard (it looks inverted relative to typical version gates).
    if tf.__version__ < LooseVersion("1.2"):
        _test_forward_zeros_like((2, 3), "int32")
        _test_forward_zeros_like((2, 3, 5), "int8")
        _test_forward_zeros_like((2, 3, 5, 7), "uint16")
    _test_forward_zeros_like((2, 3, 11), "float32")
    _test_forward_zeros_like((2, 3, 11), "float64")
def test_forward_squared_difference():
    """SquaredDifference: (a - b)**2 elementwise."""
    shape = (1, 3, 10, 14)
    lhs = np.random.uniform(-5, 5, size=shape).astype(np.float32)
    rhs = np.random.uniform(-5, 5, size=shape).astype(np.float32)
    with tf.Graph().as_default():
        in1 = tf.placeholder(shape=lhs.shape, dtype=lhs.dtype, name="in1")
        in2 = tf.placeholder(shape=rhs.shape, dtype=rhs.dtype, name="in2")
        result = tf.math.squared_difference(in1, in2)
        compare_tf_with_tvm([lhs, rhs], [in1.name, in2.name], result.name)
def _test_forward_reverse_v2(in_shape, axis, dtype):
    """ReverseV2: reverse one axis and compare TF with TVM."""
    np_data = np.random.uniform(-10, 10, size=in_shape).astype(dtype)
    tf.reset_default_graph()
    with tf.Graph().as_default():
        in_data = tf.placeholder(dtype, in_shape, name="in_data")
        tf.reverse(in_data, axis=[axis], name="reverse")
        compare_tf_with_tvm([np_data], ["in_data:0"], "reverse:0")
def test_forward_reverse_v2():
    """test ReverseV2"""
    # positive and negative axis indices across several ranks/dtypes
    _test_forward_reverse_v2((2, 3), 0, "int32")
    _test_forward_reverse_v2((2, 3, 5), 2, "float32")
    _test_forward_reverse_v2((2, 3, 5, 7), 1, "float32")
    _test_forward_reverse_v2((2, 3, 5), -1, "float64")
    _test_forward_reverse_v2((2, 3, 5), -3, "float64")
def test_forward_sign():
    """test Sign"""
    values = np.random.uniform(-10, 10, size=(5, 7, 11)).astype(np.float32)
    tf.reset_default_graph()
    with tf.Graph().as_default():
        inp = tf.placeholder(tf.float32, (5, 7, 11), name="in_data")
        tf.sign(inp, name="sign")
        compare_tf_with_tvm([values], ["in_data:0"], "sign:0")
def test_forward_square():
    """test operator Square"""
    values = np.random.uniform(1, 100, size=(2, 3, 5)).astype(np.float32)
    tf.reset_default_graph()
    with tf.Graph().as_default():
        inp = tf.placeholder(tf.float32, (2, 3, 5), name="in_data")
        tf.square(inp, name="square")
        compare_tf_with_tvm([values], ["in_data:0"], "square:0")
def test_forward_pow_exp():
    """test Pow and Exp"""
    np_in1 = np.random.uniform(-2, 2, size=(5, 7, 11)).astype(np.float32)
    np_in2 = np.random.uniform(-2, 2, size=(5, 7, 11)).astype(np.float32)
    tf.reset_default_graph()
    with tf.Graph().as_default():
        in1 = tf.placeholder(tf.float32, (5, 7, 11), name="in1")
        in2 = tf.placeholder(tf.float32, (5, 7, 11), name="in2")
        _ = tf.pow(in1, in2, name="pow")
        _ = tf.exp(in1, name="exp")
        # both ops share one graph; each comparison fetches its own output
        compare_tf_with_tvm([np_in1, np_in2], ["in1:0", "in2:0"], "pow:0")
        compare_tf_with_tvm([np_in1], ["in1:0"], "exp:0")
def test_forward_unary():
    """Unary"""
    def _test_forward_unary(op, a_min=1, a_max=5, dtype=np.float32):
        """test unary operators"""
        # a_min/a_max restrict inputs to the op's valid domain
        np_data = np.random.uniform(a_min, a_max, size=(2, 3, 5)).astype(dtype)
        tf.reset_default_graph()
        with tf.Graph().as_default():
            in_data = tf.placeholder(dtype, (2, 3, 5), name="in_data")
            out = op(in_data)
            compare_tf_with_tvm([np_data], ["in_data:0"], out.name)
    # domain-restricted ops first ([-1, 1] inputs)
    _test_forward_unary(tf.acos, -1, 1)
    _test_forward_unary(tf.asin, -1, 1)
    _test_forward_unary(tf.atanh, -1, 1)
    _test_forward_unary(tf.sinh)
    _test_forward_unary(tf.cosh)
    _test_forward_unary(tf.acosh)
    _test_forward_unary(tf.asinh)
    _test_forward_unary(tf.atan)
    _test_forward_unary(tf.sin)
    _test_forward_unary(tf.cos)
    _test_forward_unary(tf.tan)
    _test_forward_unary(tf.tanh)
    _test_forward_unary(tf.erf)
    _test_forward_unary(tf.log)
    _test_forward_unary(tf.log1p)
def test_forward_atan2():
    """test operator atan2"""
    tf.disable_eager_execution()
    np_data_1 = np.random.uniform(1, 100, size=(2, 3, 5)).astype(np.float32)
    np_data_2 = np.random.uniform(1, 100, size=(2, 3, 5)).astype(np.float32)
    tf.reset_default_graph()
    # built directly on the default graph (no `with tf.Graph()` scope)
    in_data_1 = tf.placeholder(tf.float32, (2, 3, 5), name="in_data_1")
    in_data_2 = tf.placeholder(tf.float32, (2, 3, 5), name="in_data_2")
    tf.atan2(in_data_1, in_data_2, name="atan2")
    compare_tf_with_tvm([np_data_1, np_data_2], ["in_data_1:0", "in_data_2:0"], "atan2:0")
def test_forward_expm1():
    """test operator expm1"""
    def _test_forward_expm1(shape):
        tf.disable_eager_execution()
        np_data = np.random.uniform(1, 10, size=shape).astype(np.float32)
        tf.reset_default_graph()
        # ops are created on the (freshly reset) default graph
        in_data = tf.placeholder(tf.float32, shape, name="in_data")
        tf.expm1(in_data, name="expm1")
        compare_tf_with_tvm([np_data], ["in_data:0"], "expm1:0")
    _test_forward_expm1([1, 100])
    _test_forward_expm1([1, 10, 10])
    _test_forward_expm1([2, 5, 2, 5])
def test_forward_softsign():
    """test operator softsign"""
    def _test_forward_softsign(shape):
        tf.disable_eager_execution()
        np_data = np.random.uniform(1, 100, size=shape).astype(np.float32)
        tf.reset_default_graph()
        in_data = tf.placeholder(tf.float32, shape, name="in_data")
        tf.nn.softsign(in_data, name="softsign")
        compare_tf_with_tvm([np_data], ["in_data:0"], "softsign:0")
    _test_forward_softsign([1, 100])
    _test_forward_softsign([1, 10, 10])
    _test_forward_softsign([2, 5, 2, 5])
def test_forward_rint():
    """test operator rint"""
    def _test_forward_rint(shape):
        tf.disable_eager_execution()
        np_data = np.random.uniform(-100, 100, size=shape).astype(np.float32)
        tf.reset_default_graph()
        in_data = tf.placeholder(tf.float32, shape, name="in_data")
        tf.math.rint(in_data, name="rint")
        compare_tf_with_tvm([np_data], ["in_data:0"], "rint:0")
    _test_forward_rint([100])
    _test_forward_rint([1, 100])
    _test_forward_rint([1, 10, 10])
    _test_forward_rint([2, 5, 2, 5])
def test_forward_negative():
    """test tf operator Neg"""
    values = np.random.uniform(-100, 255, size=(224, 224, 3)).astype(np.float32)
    tf.reset_default_graph()
    with tf.Graph().as_default():
        inp = tf.placeholder(tf.float32, (224, 224, 3), name="in_data")
        tf.negative(inp, name="negative")
        compare_tf_with_tvm([values], ["in_data:0"], "negative:0")
def test_forward_log_softmax():
    """test operator LogSoftmax"""
    values = np.random.uniform(1, 100, size=(9, 11)).astype(np.float32)
    tf.reset_default_graph()
    with tf.Graph().as_default():
        inp = tf.placeholder(tf.float32, (9, 11), name="in_data")
        tf.math.log_softmax(inp, name="LogSoftmax")
        compare_tf_with_tvm([values], ["in_data:0"], "LogSoftmax:0")
def test_forward_softplus():
    """test operator Softplus"""
    values = np.random.uniform(1, 10, size=(2, 3, 5)).astype(np.float32)
    tf.reset_default_graph()
    with tf.Graph().as_default():
        inp = tf.placeholder(tf.float32, (2, 3, 5), name="in_data")
        tf.nn.softplus(inp, name="softplus")
        compare_tf_with_tvm([values], ["in_data:0"], "softplus:0")
def test_forward_rsqrt():
    """test Rsqrt"""
    values = np.random.uniform(1, 100, size=(5, 7, 11)).astype(np.float32)
    tf.reset_default_graph()
    with tf.Graph().as_default():
        inp = tf.placeholder(tf.float32, (5, 7, 11), name="in_data")
        tf.rsqrt(inp, name="rsqrt")
        compare_tf_with_tvm([values], ["in_data:0"], "rsqrt:0")
def test_forward_sqrt():
    """test Sqrt"""
    values = np.random.uniform(1, 100, size=(5, 7, 11)).astype(np.float32)
    tf.reset_default_graph()
    with tf.Graph().as_default():
        inp = tf.placeholder(tf.float32, (5, 7, 11), name="in_data")
        tf.sqrt(inp, name="sqrt")
        compare_tf_with_tvm([values], ["in_data:0"], "sqrt:0")
def _test_forward_right_shift(in_shape, dtype):
    """test operator RightShift"""
    # small left operands, shift amounts in [1, 8)
    lh_data = np.random.randint(1, 3, size=in_shape).astype(dtype)
    rh_data = np.random.randint(1, 8, size=in_shape).astype(dtype)
    tf.reset_default_graph()
    with tf.Graph().as_default():
        lft_data = tf.placeholder(dtype, in_shape, name="lft_data")
        rgt_data = tf.placeholder(dtype, in_shape, name="rgt_data")
        tf.bitwise.right_shift(lft_data, rgt_data, name="RightShift")
        compare_tf_with_tvm([lh_data, rh_data], ["lft_data:0", "rgt_data:0"], "RightShift:0")
def test_forward_right_shift():
    """RightShift over int32 and int16."""
    _test_forward_right_shift((7,), "int32")
    _test_forward_right_shift((3, 11), "int16")
def _test_forward_left_shift(in_shape, dtype):
    """test operator LeftShift"""
    # larger left operands with tiny shift amounts so results stay in range
    lh_data = np.random.randint(100, 1000000, size=in_shape).astype(dtype)
    rh_data = np.random.randint(1, 3, size=in_shape).astype(dtype)
    tf.reset_default_graph()
    with tf.Graph().as_default():
        lft_data = tf.placeholder(dtype, in_shape, name="lft_data")
        rgt_data = tf.placeholder(dtype, in_shape, name="rgt_data")
        tf.bitwise.left_shift(lft_data, rgt_data, name="LeftShift")
        compare_tf_with_tvm([lh_data, rh_data], ["lft_data:0", "rgt_data:0"], "LeftShift:0")
def test_forward_left_shift():
    """LeftShift over int32 and int16."""
    _test_forward_left_shift((10,), "int32")
    _test_forward_left_shift((224, 224, 3), "int16")
#######################################################################
# Mean
# ----
def test_forward_mean():
    """Mean"""
    def check_mean(ishape, **kwargs):
        inp_array = np.random.uniform(size=ishape).astype(np.float32)
        with tf.Graph().as_default():
            in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype)
            tf.keras.backend.mean(in1, **kwargs)
            compare_tf_with_tvm(inp_array, "Placeholder:0", "Mean:0", no_gpu=True)
    check_mean((10, 8, 16, 32))
    check_mean((10, 8, 16, 32), axis=(2, 3))
    check_mean((10, 8, 16, 32), axis=(1, 2), keepdims=True)
def test_forward_size():
    """Size"""
    def check_size(ishape):
        np_input = np.random.uniform(size=ishape).astype(np.float32)
        # if all dimensions are constant, TF will optimize away size operator into constant
        tf_input_shape = list(np_input.shape)
        tf_input_shape[0] = None
        with tf.Graph().as_default():
            tf_input = tf.placeholder(shape=tf_input_shape, dtype=np_input.dtype, name="input")
            tf.size(tf_input, name="size")
            compare_tf_with_tvm([np_input], ["input:0"], "size:0")
    check_size((10, 8, 16, 32))
    check_size((10,))
#######################################################################
# All, Any, Max, Min, Prod, variance, std, logsumexp, euclidean_norm
# ------------------------------------------------------------------
def test_forward_reduce():
    """Reduce"""
    def _check_op(tf_op, ishape, axis, keepdims, dtype="float32"):
        tf.reset_default_graph()
        if dtype == "bool":
            np_data = np.random.choice([True, False], size=ishape)
        else:
            np_data = np.random.uniform(size=ishape).astype(dtype)
        if tf_op == tf.math.reduce_prod:
            # reduce_prod is special-cased: flatten to (1, N) and reduce axis 1
            # — presumably to keep the product numerically tame; TODO confirm
            axis = 1
            np_data = np_data.reshape(1, -1)
        with tf.Graph().as_default():
            # placeholder has no static shape; the op name is always "reduce_std"
            in_data = tf.placeholder(dtype, name="in_data")
            reduce_op = tf_op(in_data, axis=axis, keepdims=keepdims, name="reduce_std")
            compare_tf_with_tvm([np_data], ["in_data:0"], reduce_op.name)
    def _test_math_op(op, d_types=None):
        d_types = d_types or ["int32", "float32"]
        for dtype in d_types:
            _check_op(op, (3, 10), axis=(-1), keepdims=False, dtype=dtype)
            _check_op(op, (8, 16, 32), axis=(-1), keepdims=False, dtype=dtype)
            _check_op(op, (1, 8, 8, 3), axis=(2, 3), keepdims=True, dtype=dtype)
            _check_op(op, (2, 3, 10, 10), axis=(1, 2), keepdims=True, dtype=dtype)
    _test_math_op(tf.math.reduce_all, d_types=["bool"])
    _test_math_op(tf.math.reduce_any, d_types=["bool"])
    _test_math_op(tf.math.reduce_max)
    _test_math_op(tf.math.reduce_min)
    _test_math_op(tf.math.reduce_prod)
    _test_math_op(tf.math.reduce_variance, d_types=["float32"])
    _test_math_op(tf.math.reduce_std, d_types=["float32"])
    _test_math_op(tf.math.reduce_logsumexp, d_types=["float32"])
    if package_version.parse(tf.VERSION) >= package_version.parse("1.15.0"):
        _test_math_op(tf.math.reduce_euclidean_norm)
#######################################################################
# All, Max, Min
# ------------------------------------------------------------------
def test_forward_raw_reduce():
    """Raw reduce"""
    def _check_op(tf_op, ishape, axis, keepdims, range_axis=False, dtype="float32"):
        tf.reset_default_graph()
        if dtype == "bool":
            np_data = np.random.choice([True, False], size=ishape)
        else:
            np_data = np.random.uniform(size=ishape).astype(dtype)
        if tf_op == tf.math.reduce_prod:
            axis = 1
            np_data = np_data.reshape(1, -1)
        with tf.Graph().as_default():
            if range_axis:
                # axis is (start, limit, delta) — turn it into a tf.range tensor
                axis = tf.range(axis[0], axis[1], axis[2], name="range", dtype="int32")
            in_data = tf.placeholder(dtype, name="in_data")
            # raw ops use the legacy keep_dims spelling
            reduce_op = tf_op(input=in_data, axis=axis, keep_dims=keepdims, name="reduce_std")
            compare_tf_with_tvm([np_data], ["in_data:0"], reduce_op.name)
    def _test_raw_reduce_op(op, d_types=None):
        d_types = d_types or ["int32", "float32"]
        for dtype in d_types:
            _check_op(op, (3, 10), axis=(-1), keepdims=False, dtype=dtype)
            _check_op(op, (8, 16, 32), axis=(-1), keepdims=False, dtype=dtype)
            _check_op(op, (1, 8, 8, 3), axis=(2, 3), keepdims=True, dtype=dtype)
            _check_op(op, (2, 3, 10, 10), axis=(1, 2), keepdims=True, dtype=dtype)
            _check_op(op, (1, 8, 8, 3), axis=(2, 4, 1), keepdims=True, range_axis=True, dtype=dtype)
            _check_op(
                op, (2, 3, 10, 10), axis=(1, 3, 1), keepdims=True, range_axis=True, dtype=dtype
            )
    if package_version.parse(tf.VERSION) >= package_version.parse("2.4.1"):
        _test_raw_reduce_op(tf.raw_ops.All, d_types=["bool"])
        _test_raw_reduce_op(tf.raw_ops.Max)
        _test_raw_reduce_op(tf.raw_ops.Min)
#######################################################################
# Relational operators
# --------------------
def _test_forward_rel_op(data, func):
    """Build `func(in1, in2)` cast to int32 and compare TF with TVM."""
    with tf.Graph().as_default():
        lhs = tf.placeholder(shape=data[0].shape, dtype=data[0].dtype, name="in1")
        rhs = tf.placeholder(shape=data[1].shape, dtype=data[1].dtype, name="in2")
        predicate = func(lhs, rhs, name="op")
        _ = tf.cast(predicate, tf.int32, name="out1")
        compare_tf_with_tvm([data[0], data[1]], ["in1:0", "in2:0"], "out1:0")
def test_forward_rel_ops():
    """Relational operators: <, >, <=, >=, ==, !=."""
    t1 = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    t2 = np.array([[9, 8, 7], [6, 5, 4], [3, 2, 1]])
    for rel_fn in (
        math_ops.less,
        math_ops.greater,
        math_ops.less_equal,
        math_ops.greater_equal,
        math_ops.equal,
        math_ops.not_equal,
    ):
        _test_forward_rel_op([t1, t2], rel_fn)
#######################################################################
# ExpandDims
# ----------
def _test_forward_expand_dims(data, axis):
    """ExpandDims: insert a length-one axis and compare TF with TVM."""
    with tf.Graph().as_default():
        inp = tf.placeholder(shape=data.shape, dtype=data.dtype, name="in1")
        expanded = tf.expand_dims(inp, axis)
        compare_tf_with_tvm([data], [inp.name], expanded.name)
def test_forward_expand_dims():
    """ExpandDims over scalar, 1-D, and 2-D inputs at several axes."""
    cases = [
        (np.int32(1), 0),
        (np.array([1]), 0),
        (np.array([1]), -1),
        (np.array([[1], [2]]), 0),
        (np.array([[1], [2]]), 1),
        (np.array([[1], [2]]), -1),
    ]
    for value, axis in cases:
        _test_forward_expand_dims(value, axis)
#######################################################################
# Maximum, Minimum
# ----------------
def test_forward_maximum():
    """test Op Maximum"""
    def check_maximum(lh_shape, rh_shape, dtype):
        tf.reset_default_graph()
        lhs = np.random.uniform(size=lh_shape).astype(dtype)
        rhs = np.random.uniform(size=rh_shape).astype(dtype)
        with tf.Graph().as_default():
            lft_data = tf.placeholder(dtype, name="lft_data")
            rgt_data = tf.placeholder(dtype, name="rgt_data")
            tf.math.maximum(lft_data, rgt_data, name="maximum")
            compare_tf_with_tvm([lhs, rhs], ["lft_data:0", "rgt_data:0"], "maximum:0")
    # broadcast case and same-shape case
    check_maximum((10, 8, 16, 32), (1,), dtype="int32")
    check_maximum((10, 8, 16, 32), (10, 8, 16, 32), dtype="float32")
def test_forward_minimum():
    """test Op Minimum"""
    def check_minimum(lh_shape, rh_shape, dtype):
        tf.reset_default_graph()
        lhs = np.random.uniform(size=lh_shape).astype(dtype)
        rhs = np.random.uniform(size=rh_shape).astype(dtype)
        with tf.Graph().as_default():
            lft_data = tf.placeholder(dtype, name="lft_data")
            rgt_data = tf.placeholder(dtype, name="rgt_data")
            tf.math.minimum(lft_data, rgt_data, name="minimum")
            compare_tf_with_tvm([lhs, rhs], ["lft_data:0", "rgt_data:0"], "minimum:0")
    # broadcast case and same-shape case
    check_minimum((10, 8, 16, 32), (1,), dtype="int32")
    check_minimum((10, 8, 16, 32), (10, 8, 16, 32), dtype="float32")
#######################################################################
# PlaceholderWithDefault
# ----------------------
def test_placeholder():
    """Placeholder"""
    with tf.Graph().as_default():
        in_data1 = np.random.uniform(-5, 5, size=(3, 4, 5)).astype(np.float32)
        # var1 backs a PlaceholderWithDefault; requires variable initialization
        var1 = tf.Variable(in_data1, name="in1")
        var2 = array_ops.placeholder_with_default(var1, None, name="place1")
        in_data2 = np.random.uniform(-5, 5, size=(3, 4, 5)).astype(np.float32)
        place1 = array_ops.placeholder(shape=in_data1.shape, dtype=in_data1.dtype, name="in2")
        out1 = tf.math.add(var1, var2, name="out1")
        _ = tf.math.add(out1, place1, name="out2")
        # feed in_data1 to the PlaceholderWithDefault and in_data2 to "in2"
        compare_tf_with_tvm(
            [in_data1, in_data2], ["place1:0", "in2:0"], "out2:0", init_global_variables=True
        )
#######################################################################
# OneHot
# ----------------------
def _test_forward_one_hot(indices_shape, depth, on_value, off_value, axis, out_dtype):
    """OneHot: compare TF and TVM for one indices/depth/axis combination."""
    inp_array1 = np.random.randint(0, 5, size=indices_shape)
    with tf.Graph().as_default():
        in1 = tf.placeholder(shape=inp_array1.shape, dtype=inp_array1.dtype)
        out = tf.one_hot(in1, depth, on_value, off_value, axis, dtype=out_dtype)
        compare_tf_with_tvm(inp_array1, in1.name, out.name)
def test_forward_one_hot():
    """OneHot over int/float outputs and several axes."""
    _test_forward_one_hot((3,), 3, 1, 0, -1, "int32")
    _test_forward_one_hot((3,), 3, 1.0, 0.0, -1, "float32")
    _test_forward_one_hot((2, 2), 5, 2, -2, 0, "int32")
    _test_forward_one_hot((2, 2), 5, 0.5, -0.5, 1, "float32")
    _test_forward_one_hot((3, 2, 4, 5), 6, 1, 0, 1, "int32")
    _test_forward_one_hot((3, 2, 4, 5), 6, 1.0, 0.0, 0, "float32")
#######################################################################
# AddN
# ----------------------
def _test_forward_add_n(inputs):
    """AddN: sum a collection of tensors and compare TF with TVM."""
    tf.reset_default_graph()
    with tf.Graph().as_default():
        temp = []
        # NOTE: iterating a single 3-D ndarray yields its 2-D slices, so the
        # scalar-array cases below still produce multiple add_n operands
        for each in inputs:
            temp.append(tf.placeholder(shape=each.shape, dtype=each.dtype))
        output = tf.add_n(temp)
        compare_tf_with_tvm(list(inputs), [each.name for each in temp], output.name)
def test_forward_add_n():
    """Add n"""
    x = np.random.randint(1, 100, size=(3, 3, 3), dtype=np.int32)
    y = np.random.randint(1, 100, size=(3, 3, 3), dtype=np.int32)
    z = np.random.randint(1, 100, size=(3, 3, 3), dtype=np.int32)
    m, n, o = x.astype(np.float32), y.astype(np.float32), z.astype(np.float32)
    in0 = x
    in1 = [x, y]
    in2 = (x, y, z)
    in3 = m
    in4 = [m, n]
    in5 = (m, n, o)
    _test_forward_add_n(in0)
    _test_forward_add_n(in1)
    _test_forward_add_n(in2)
    _test_forward_add_n(in3)
    _test_forward_add_n(in4)
    _test_forward_add_n(in5)
#######################################################################
# Sharing params case
# ----------------------
def test_sharing_node():
    """Test the sharing params case."""
    values = np.random.uniform(size=(2, 2, 2)).astype("float32")
    with tf.Graph().as_default():
        inp = tf.placeholder(tf.float32, shape=(2, 2, 2), name="in_data")
        # one axis constant shared by two reduce_mean consumers
        axis = tf.constant([-1], dtype=tf.int32, name="axis")
        first = tf.reduce_mean(inp, axis=axis, keepdims=False, name="mean0")
        second = tf.reduce_mean(inp, axis=axis, keepdims=False, name="mean1")
        _ = tf.add(first, second, name="out")
        compare_tf_with_tvm([values], ["in_data:0"], "out:0")
#######################################################################
# Unravel Index
# ----------------------
def _test_forward_unravel_index(inputs):
    """UnravelIndex with placeholder indices and dims."""
    tf.reset_default_graph()
    with tf.Graph().as_default():
        temp = []
        for each in inputs:
            temp.append(tf.placeholder(shape=each.shape, dtype=each.dtype))
        output = tf.unravel_index(temp[0], temp[1])
        compare_tf_with_tvm(list(inputs), [each.name for each in temp], output.name)
def _test_forward_unravel_index_scalar(x, y, dtype="int32"):
    """UnravelIndex with constant scalar indices (no graph inputs)."""
    tf.reset_default_graph()
    with tf.Graph().as_default():
        indices_1 = constant_op.constant(x, dtype=dtype)
        dims_1 = constant_op.constant(y, dtype=dtype)
        out_1 = array_ops.unravel_index(indices_1, dims_1)
        compare_tf_with_tvm([], [], out_1.name)
def test_forward_unravel_index():
    """Unravel index"""
    x = np.array([0, 1, 2, 3])
    y = np.array([2, 2])
    _test_forward_unravel_index([x, y])
    x = np.array([0, 1, 2, 5])
    y = np.array([2, 3])
    _test_forward_unravel_index([x, y])
    x = np.array([0, 1, 2, 5])
    y = np.array([6])
    _test_forward_unravel_index([x, y])
    x = np.array([102, 300, 16])
    y = np.array([10, 10, 9, 6])
    _test_forward_unravel_index([x, y])
    x = np.array([100])
    y = np.array([10, 10, 9, 6])
    _test_forward_unravel_index([x, y])
    # Test scalar input
    _test_forward_unravel_index_scalar(13, [1, 4, 5, 2])
#######################################################################
# Dilation2d
# ----------------------
def _test_dilation2d(tensor_in_sizes, filter_in_sizes, strides, dilations, padding):
    """One iteration of dilation2d with given shapes and attributes"""
    total_size_1 = np.prod(tensor_in_sizes)
    total_size_2 = np.prod(filter_in_sizes)
    # Initializes the input tensor with array containing incrementing
    # numbers from 1.
    data_array = [f * 1.0 for f in range(1, total_size_1 + 1)]
    filter_array = [f * 1.0 for f in range(1, total_size_2 + 1)]
    with tf.Graph().as_default():
        in_data = array_ops.placeholder(shape=tensor_in_sizes, dtype="float32")
        in_filter = constant_op.constant(filter_array, shape=filter_in_sizes, dtype="float32")
        # rates == dilations in the v1 dilation2d API
        nn_ops.dilation2d(in_data, in_filter, strides=strides, rates=dilations, padding=padding)
        compare_tf_with_tvm(
            np.reshape(data_array, tensor_in_sizes).astype("float32"),
            "Placeholder:0",
            "Dilation2D:0",
            no_gpu=True,
        )
def test_forward_dilation():
    """Dilation2d"""
    # combinations of stride/dilation/padding, VALID and SAME
    _test_dilation2d([1, 18, 18, 32], [4, 4, 32], [1, 1, 1, 1], [1, 2, 1, 1], "VALID")
    _test_dilation2d([1, 15, 15, 32], [4, 4, 32], [1, 1, 1, 1], [1, 2, 1, 1], "SAME")
    _test_dilation2d([1, 5, 5, 1], [2, 2, 1], [1, 1, 1, 1], [1, 1, 1, 1], "VALID")
    _test_dilation2d([1, 5, 5, 1], [3, 3, 1], [1, 1, 1, 1], [1, 2, 2, 1], "VALID")
    _test_dilation2d([1, 5, 5, 3], [3, 3, 3], [1, 1, 1, 1], [1, 1, 1, 1], "SAME")
    _test_dilation2d([1, 28, 28, 3], [5, 5, 3], [1, 2, 2, 1], [1, 1, 1, 1], "VALID")
    _test_dilation2d([1, 224, 224, 10], [8, 8, 10], [1, 1, 1, 1], [1, 1, 1, 1], "VALID")
    _test_dilation2d([1, 18, 18, 32], [4, 4, 32], [1, 1, 1, 1], [1, 2, 1, 1], "SAME")
    _test_dilation2d([1, 15, 15, 32], [4, 4, 32], [1, 1, 1, 1], [1, 2, 1, 1], "VALID")
    _test_dilation2d([1, 5, 5, 1], [7, 2, 1], [1, 3, 1, 1], [1, 1, 1, 1], "SAME")
    _test_dilation2d([1, 5, 5, 1], [3, 4, 1], [1, 2, 1, 1], [1, 2, 2, 1], "SAME")
    _test_dilation2d([1, 5, 5, 3], [3, 3, 3], [1, 1, 4, 1], [1, 1, 1, 1], "VALID")
    _test_dilation2d([1, 28, 28, 3], [5, 6, 3], [1, 1, 2, 1], [1, 1, 1, 1], "SAME")
    _test_dilation2d([1, 224, 224, 10], [8, 8, 10], [1, 3, 1, 1], [1, 1, 1, 1], "SAME")
    _test_dilation2d([1, 3, 3, 1], [2, 2, 1], [1, 1, 1, 1], [1, 2, 2, 1], "SAME")
    _test_dilation2d([1, 3, 3, 1], [2, 2, 1], [1, 1, 1, 1], [1, 1, 2, 1], "VALID")
def _test_identityn(data_np_list):
    """IdentityN: pass a list of tensors through and compare each output."""
    with tf.Graph().as_default():
        data_tensors = []
        data_tensors_name = []
        for index, data_np in enumerate(data_np_list):
            tensor_name = f"data_{index}"
            data_tensors_name.append(tensor_name + ":0")
            data_tensors.append(
                tf.placeholder(shape=data_np.shape, dtype=str(data_np.dtype), name=tensor_name)
            )
        output = tf.identity_n(data_tensors)
        output_names = [out.name for out in output]
        compare_tf_with_tvm(
            data_np_list,
            data_tensors_name,
            output_names,
        )
@pytest.mark.parametrize(
    "data_np_list",
    [
        (
            [
                np.array([[1, 1], [0, 3], [0, 1], [2, 0], [3, 1]], dtype=np.int64),
                np.array([1, 2, 3, 4, 5], dtype=np.int64),
                np.array([5, 6], dtype=np.int64),
            ]
        ),
        (
            [
                np.array([[1, 1], [0, 3], [2, 0], [3, 1]], dtype=np.int64),
                np.array([1, 2, 3, 4], dtype=np.int64),
                np.array([5, 6], dtype=np.int64),
                np.array([True, False, True]),
            ]
        ),
        (
            [
                np.array([]),
                np.array([[]]),
            ]
        ),
    ],
)
def test_forward_identityn(data_np_list):
    """Identityn"""
    _test_identityn(data_np_list)
#######################################################################
# infinity ops
# ------------
def _verify_infiniteness_ops(tf_op, name):
    """Check a TF infiniteness predicate (is_inf/is_finite/is_nan) against TVM.

    Builds an (8, 8) random float tensor, overwrites half of the entries with
    +inf and half with NaN (the two random picks may overlap, in which case
    NaN wins), then compares the TF graph output named ``name`` with TVM's.
    """
    # Only float types are allowed in Tensorflow for isfinite and isinf
    # float16 is failing on cuda
    tf_dtypes = ["float32", "float64"]  # pylint: disable=redefined-outer-name
    for tf_dtype in tf_dtypes:
        shape = (8, 8)
        data = np.random.uniform(size=shape).astype(tf_dtype)
        # np.infty was deprecated in NumPy 1.25 and removed in 2.0; np.inf is
        # the canonical, value-identical spelling.
        data.ravel()[np.random.choice(data.size, int(data.size * 0.5), replace=False)] = np.inf
        data.ravel()[np.random.choice(data.size, int(data.size * 0.5), replace=False)] = np.nan
        tf.reset_default_graph()
        in_data = tf.placeholder(tf_dtype, shape, name="in_data")
        tf_op(in_data, name=name)
        compare_tf_with_tvm([data], ["in_data:0"], f"{name}:0")
def test_forward_isinf():
    """IsInf elementwise predicate."""
    _verify_infiniteness_ops(tf.is_inf, "isinf")
def test_forward_isfinite():
    """IsFinite elementwise predicate."""
    _verify_infiniteness_ops(tf.is_finite, "isfinite")
def test_forward_isnan():
    """IsNan elementwise predicate."""
    _verify_infiniteness_ops(tf.is_nan, "isnan")
def _test_spop_placeholder_without_shape_info():
    """StatefulPartitionedCall fed from placeholders that carry no shape."""
    with tf.Graph().as_default():
        @function.Defun(*[tf.int32] * 2)
        def Forward(x, y):
            print(x.name)
            print(y.name)
            b = tf.add(x, y)
            return b
        pl1 = tf.placeholder(tf.int32, name="pl1")
        pl2 = tf.placeholder(tf.int32, name="pl2")
        pl3 = tf.placeholder(tf.int32, name="pl3")
        data = np.array([[-1, 1], [2, -2]], dtype=np.int32)
        data2 = np.array([[-2, 3], [4, -6]], dtype=np.int32)
        data3 = np.array([[-2, 3], [4, -6]], dtype=np.int32)
        # call output is consumed both directly and through a follow-on add
        z1 = gen_functional_ops.StatefulPartitionedCall(args=[pl1, pl2], Tout=[tf.int32], f=Forward)
        z2 = z1 + pl3
        compare_tf_with_tvm(
            [data, data2, data3],
            ["pl1:0", "pl2:0", "pl3:0"],
            ["StatefulPartitionedCall:0", z2.name],
            mode="vm",
            init_global_variables=True,
        )
def _test_spop_placeholder_with_shape_and_default_value():
    # placeholder_with_default backed by a tf.Variable, consumed through a
    # Defun invoked via StatefulPartitionedCall.
    with tf.Graph().as_default():
        data = np.ones([1], dtype=int).astype(np.int32)
        dataVar = tf.Variable(data, shape=data.shape)
        pl1 = array_ops.placeholder_with_default(dataVar, shape=data.shape, name="pl1")
        tpl = tf.convert_to_tensor(pl1, dtype=tf.int32)
        @function.Defun(*[tf.int32])
        def pl_with_default(pl):
            # Square the input and add a leading unit dimension.
            return tf.expand_dims(tf.multiply(pl, pl), 0)
        _ = gen_functional_ops.StatefulPartitionedCall(
            args=[tpl], Tout=[tf.int32], f=pl_with_default
        )
        compare_tf_with_tvm(
            data, ["pl1:0"], "StatefulPartitionedCall:0", mode="vm", init_global_variables=True
        )
def _test_spop_placeholder_numpy_arange_feed():
    # tf.function add over fully-shaped placeholders fed with np.arange data.
    with tf.Graph().as_default():
        t1 = tf.placeholder(tf.int32, (3, 3, 3), "t1")
        t1_data = np.arange(27, dtype=np.int32).reshape((3, 3, 3))
        t2 = tf.placeholder(tf.int32, (3, 3, 3), "t2")
        t2_data = np.arange(27, dtype=np.int32).reshape((3, 3, 3))
        @tf.function
        def add(x, y):
            return tf.add(x, y, "add_t1_t2")
        t3 = add(t1, t2)
        compare_tf_with_tvm(
            [t1_data, t2_data], ["t1:0", "t2:0"], [t3.name], mode="vm", init_global_variables=True
        )
def _test_spop_placeholder_numpy_array_feed():
    # Same as the arange variant but with shape-less placeholders fed from
    # explicit numpy arrays.
    with tf.Graph().as_default():
        t1_data = np.array([[-1, 1, 3], [2, -2, 4], [2, -3, 14]], dtype=np.int32)
        t2_data = np.array([[-2, 1, 2], [12, -2, 14], [12, -3, 4]], dtype=np.int32)
        t1 = tf.placeholder(tf.int32, name="t1")
        t2 = tf.placeholder(tf.int32, name="t2")
        @tf.function
        def add(x, y):
            return tf.add(x, y, "add_t1_t2")
        t3 = add(t1, t2)
        compare_tf_with_tvm(
            [t1_data, t2_data], ["t1:0", "t2:0"], [t3.name], mode="vm", init_global_variables=True
        )
def _test_spop_function_invocation_basic():
    # A tf.function that calls two plain Python helpers; checks that the
    # resulting PartitionedCall graph converts with constant inputs only.
    with tf.Graph().as_default():
        def fun1(a):
            return tf.multiply(a, a)
        def fun2(b):
            return tf.multiply(b, 10)
        @tf.function
        def fun3(x, y):
            x = fun2(x)
            y = fun1(y)
            z = tf.add(x, y)
            return z
        t3 = fun3(tf.constant(10.5), tf.constant(20.4))
        compare_tf_with_tvm([], [], [t3.name], mode="vm", init_global_variables=True)
def _test_spop_function_invocation_nested():
    # Nested tf.function invocation: myfunc2 calls myfunc three times and the
    # outputs of the two functions are chained at the graph level.
    with tf.Graph().as_default():
        t1 = tf.placeholder(tf.int32, (3, 3, 3), name="t1")
        t1_data = np.arange(27, dtype=np.int32).reshape((3, 3, 3))
        t2 = tf.placeholder(tf.int32, name="t2")
        t2_data = np.arange(27, dtype=np.int32).reshape((3, 3, 3))
        @tf.function
        def myfunc(x, y):
            return tf.add(x, y, "myfunc")
        @tf.function
        def myfunc2(x, y):
            z = myfunc(x, y)
            l = myfunc(z, y)
            m = myfunc(l, z)
            return tf.add(l, m, "myfunc2")
        res1 = myfunc(t1, t2)
        res2 = myfunc2(res1, t1)
        compare_tf_with_tvm(
            [t1_data, t2_data], ["t1:0", "t2:0"], [res2.name], mode="vm", init_global_variables=True
        )
def _test_spop_function_invocation_no_autograph():
    # Same shape as the basic invocation test, but the inner helpers are
    # tf.functions with autograph disabled.
    with tf.Graph().as_default():
        @tf.function(autograph=False)
        def fun1(a):
            return tf.multiply(a, a)
        @tf.function(autograph=False)
        def fun2(b):
            return tf.multiply(b, 10)
        @tf.function
        def fun3(x, y):
            x = fun2(x)
            y = fun1(y)
            z = tf.add(x, y)
            return z
        t3 = fun3(tf.constant(10.5), tf.constant(20.4))
        compare_tf_with_tvm([], [], [t3.name], mode="vm", init_global_variables=True)
def _test_spop_function_invocation_defun():
    # Defun-based function invoked explicitly through the raw
    # StatefulPartitionedCall op with a custom node name.
    with tf.Graph().as_default():
        def fun1(a):
            return tf.multiply(a, a)
        def fun2(b):
            return tf.multiply(b, b)
        @function.Defun(dtypes.float32, dtypes.float32, func_name="Fun3")
        def fun3(x, y):
            x = fun2(x)
            y = fun1(y)
            z = tf.add(x, y)
            return z
        _ = gen_functional_ops.StatefulPartitionedCall(
            args=[tf.constant(10.5), tf.constant(20.4)],
            Tout=[dtypes.float32],
            f=fun3,
            name="SpopFnInvocation",
        )
        compare_tf_with_tvm([], [], "SpopFnInvocation:0", mode="vm", init_global_variables=True)
def _test_spop_arithmetic():
    # Simple m*x + c arithmetic inside a Defun called via SPOP.
    with tf.Graph().as_default():
        @function.Defun(*[dtypes.int32] * 3)
        def arithmetic(m, x, c):
            z = tf.add(tf.multiply(m, x), c)
            return z
        m = tf.constant(10)
        x = tf.constant(20)
        c = tf.constant(2)
        _ = gen_functional_ops.StatefulPartitionedCall(
            args=[m, x, c], Tout=[tf.int32], f=arithmetic
        )
        compare_tf_with_tvm(
            [], [], "StatefulPartitionedCall:0", mode="vm", init_global_variables=True
        )
def _test_spop_control_flow():
    # Python-level control flow inside a Defun: the while/if below runs at
    # graph-construction (trace) time, so it is unrolled into plain ops.
    with tf.Graph().as_default():
        @function.Defun(*[dtypes.float32] * 2)
        def Body1(x, y):
            with ops.device("/job:localhost/replica:0/task:0/device:CPU:0"):
                z = math_ops.multiply(x, y)
                i = 0
                while i < 10:
                    i += 1
                    if i == 5:
                        continue
                    z = math_ops.multiply(x, y * i)
            return z
        _ = gen_functional_ops.StatefulPartitionedCall(
            args=[constant_op.constant(32.0), constant_op.constant(100.0)],
            Tout=[dtypes.float32],
            f=Body1,
        )
        compare_tf_with_tvm(
            [], [], "StatefulPartitionedCall:0", mode="vm", init_global_variables=True
        )
def _test_spop_variables():
    # Non-resource tf.Variables passed as SPOP arguments; requires global
    # variable initialization before comparison.
    with tf.Graph().as_default():
        const1 = tf.constant(10)
        const2 = tf.constant(20)
        var1 = tf.Variable(const1, dtype=tf.int32)
        var2 = tf.Variable(const2, dtype=tf.int32)
        @function.Defun(tf.int32, tf.int32)
        def Forward(x, y):
            return tf.multiply(x, y)
        _ = gen_functional_ops.StatefulPartitionedCall(
            args=[var1, var2], Tout=[tf.int32], f=Forward
        )
        compare_tf_with_tvm(
            [], [], "StatefulPartitionedCall:0", init_global_variables=True, mode="vm"
        )
def _test_spop_constants():
    # Constants defined both inside the Defun body and in the outer graph.
    with tf.Graph().as_default():
        @function.Defun(*[dtypes.int32] * 2)
        def constantsFn(x, y):
            vv = tf.constant([2, 3, 4], name="vv")
            z = tf.add(vv + x, y)
            return z
        a = tf.constant(20000, name="a")
        b = tf.constant(40000, name="b")
        _ = gen_functional_ops.StatefulPartitionedCall(args=[a, b], Tout=[tf.int32], f=constantsFn)
        compare_tf_with_tvm(
            [], [], "StatefulPartitionedCall:0", mode="vm", init_global_variables=True
        )
def _test_spop_stateful():
    # This test case is to test that TVM rejects any TF stateful operations
    # (including Resource Variables) except StatefulPartitionedCall/PartitionedCall
    # (as these two operators can still be used as container graphs to execute
    # "stateless" operations internally.
    tf.reset_default_graph()
    with tf.Graph().as_default():
        @tf.function
        def FunctionWithStatefulOp_One(i):
            # tf.random.uniform is a stateful op and must be rejected.
            b = tf.random.uniform(shape=[2, 4], maxval=10, dtype=tf.float32, seed=10)
            y = tf.multiply(b, i)
            return y
        @tf.function
        def FunctionWithStatefulOp(m, n):
            a = tf.random.uniform(shape=[2, 4], maxval=10, dtype=tf.float32, seed=10)
            x = tf.multiply(a, m)
            y = FunctionWithStatefulOp_One(n)
            z = tf.multiply(x, y)
            return z
        op = FunctionWithStatefulOp(constant_op.constant(1.0), constant_op.constant(2.0))
        with pytest.raises(Exception) as execinfo:
            compare_tf_with_tvm([], [], [op.name], init_global_variables=True, mode="vm")
        # The frontend must report the stateful ops as unimplemented.
        assert execinfo.value.args[0].startswith("The following operators are not implemented")
def _test_spop_device_assignment():
    # This test case is to test that TVM rejects inconsistent device assignment
    # while using StatefulPartitionedCall/PartitionedCall operators which in case of TVM will
    # be used as container graphs to internally execute "stateless" operations.
    tf.reset_default_graph()
    with tf.Graph().as_default():
        def fun1(a):
            with ops.device("/GPU:0"):
                return tf.multiply(a, a)
        def fun2(b):
            with ops.device("/job:localhost/replica:0/task:0/device:CPU:1"):
                return tf.multiply(b, b)
        @function.Defun(dtypes.float32, dtypes.float32, func_name="Fun3")
        def fun3(x, y):
            # Mix of GPU and several distinct CPU devices: deliberately
            # inconsistent so conversion must fail.
            with ops.device("/CPU:0"):
                x = fun2(x)
            with ops.device("/job:localhost/replica:0/task:0/device:CPU:2"):
                y = fun1(y)
            with ops.device("/job:localhost/replica:0/task:0/device:CPU:3"):
                z = tf.add(x, y)
                return z
        _ = gen_functional_ops.StatefulPartitionedCall(
            args=[tf.constant(10.5), tf.constant(20.4)], Tout=[dtypes.float32], f=fun3
        )
        with pytest.raises(Exception) as execinfo:
            compare_tf_with_tvm(
                [], [], "StatefulPartitionedCall:0", mode="vm", init_global_variables=True
            )
        assert execinfo.value.args[0].startswith("Found inconsistent Device assignment")
def _test_spop_resource_variables():
    # This test case is to test that TVM rejects any graph containing
    # resource variables with StatefulPartitionedOp.
    tf.reset_default_graph()
    with tf.Graph().as_default():
        const1 = tf.constant(10)
        const2 = tf.constant(20)
        # use_resource=True produces resource variables, which require a
        # frozen graph before conversion.
        var1 = tf.Variable(const1, dtype=tf.int32, use_resource=True)
        var2 = tf.Variable(const2, dtype=tf.int32, use_resource=True)
        @tf.function
        def resourceVariablesTest(x, y):
            return tf.multiply(x, y)
        _ = resourceVariablesTest(var1, var2)
        with pytest.raises(Exception) as execinfo:
            compare_tf_with_tvm(
                [], [], "StatefulPartitionedCall:0", mode="vm", init_global_variables=True
            )
        # pylint: disable=implicit-str-concat
        assert execinfo.value.args[0].startswith("Graph is not frozen." " Provide a frozen graph")
def test_forward_spop():
    """Driver for all StatefulPartitionedCall (Spop) test cases."""
    _test_spop_stateful()
    _test_spop_device_assignment()
    # tensorflow version upgrade support
    # This test is expected to fail in TF version >= 2.6
    # as the generated graph will be considered frozen, hence
    # not passing the criteria for the test below.
    if tf.__version__ < LooseVersion("2.6.1"):
        _test_spop_resource_variables()
    # Placeholder test cases
    _test_spop_placeholder_without_shape_info()
    _test_spop_placeholder_with_shape_and_default_value()
    _test_spop_placeholder_numpy_arange_feed()
    _test_spop_placeholder_numpy_array_feed()
    # Function Invocation test cases
    _test_spop_function_invocation_basic()
    _test_spop_function_invocation_nested()
    _test_spop_function_invocation_no_autograph()
    _test_spop_function_invocation_defun()
    # Test cases for various other TF constructs
    _test_spop_arithmetic()
    _test_spop_control_flow()
    _test_spop_variables()
    _test_spop_constants()
#######################################################################
# Dynamic input shape
# -------------------
def test_forward_dynamic_input_shape():
    """Dynamic input shape"""
    tf.reset_default_graph()
    with tf.Graph().as_default():
        # Placeholder with an unknown leading dimension.
        data = tf.placeholder(tf.float32, name="data", shape=(None,))
        _ = data + 1
        np_data = np.random.uniform(size=(2,)).astype("float32")
        out_name = "add"
        with tf.Session() as sess:
            graph_def = tf_testing.AddShapesToGraphDef(sess, out_name)
            tf_output = run_tf_graph(sess, np_data, "data:0", [f"{out_name}:0"])
            # TODO(kevinthesun): enable gpu test when VM heterogeneous execution is ready.
            for device in ["llvm"]:
                _ = tvm.device(device, 0)
                if not tvm.testing.device_enabled(device):
                    print(f"Skip because {device} is not enabled")
                    continue
                # ignore_in_shape=True forces TVM to treat the input as dynamic.
                tvm_output = run_tvm_graph(
                    graph_def,
                    np_data,
                    ["data"],
                    1,
                    target=device,
                    layout="NCHW",
                    out_names=[out_name],
                    mode="vm",
                    ignore_in_shape=True,
                )
                tvm.testing.assert_allclose(tvm_output[0], tf_output[0], rtol=1e-5, atol=1e-5)
def test_forward_dynmaic_rnn_lstmblockcell():
    """Dynamic RNN built from stacked LSTMBlockCells: compare TF and TVM.

    TF1-only (returns immediately on TF >= 2.0). Runs one truncated-backprop
    step through a 5-layer LSTM and checks the state series plus every
    layer's (c, h) state against TVM's VM execution.
    """
    if package_version.parse(tf.VERSION) >= package_version.parse("2.0.0"):
        return
    total_series_length = 50000
    truncated_backprop_length = 15
    state_size = 4
    echo_step = 3
    batch_size = 5
    num_layers = 5
    def generateData():
        # Random binary series; the target is the input shifted by echo_step.
        x = np.array(np.random.choice(2, total_series_length, p=[0.5, 0.5]))
        y = np.roll(x, echo_step)
        y[0:echo_step] = 0
        x = x.reshape((batch_size, -1))  # The first index changing slowest, subseries as rows
        y = y.reshape((batch_size, -1))
        return (x, y)
    batchX_placeholder = tf.placeholder(tf.float32, [batch_size, truncated_backprop_length])
    init_state = tf.placeholder(tf.float32, [num_layers, 2, batch_size, state_size])
    state_per_layer_list = tf.unstack(init_state, axis=0)
    rnn_tuple_state = tuple(
        list(
            tf.nn.rnn_cell.LSTMStateTuple(
                state_per_layer_list[idx][0], state_per_layer_list[idx][1]
            )
            for idx in range(num_layers)
        )
    )
    # Forward passes
    def lstm_cell():
        return tensorflow.contrib.rnn.LSTMBlockCell(state_size)
    cell = tf.nn.rnn_cell.MultiRNNCell(
        [lstm_cell() for _ in range(num_layers)], state_is_tuple=True
    )
    states_series, current_state = tf.nn.dynamic_rnn(
        cell, tf.expand_dims(batchX_placeholder, -1), initial_state=rnn_tuple_state
    )
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        x, _ = generateData()
        _current_state = np.zeros((num_layers, 2, batch_size, state_size))
        start_idx = 0
        end_idx = start_idx + truncated_backprop_length
        batchX = x[:, start_idx:end_idx]
        # Save current state for TVM
        current_state_tvm = _current_state
        _current_state, _states_series = sess.run(
            [current_state, states_series],
            feed_dict={batchX_placeholder: batchX, init_state: _current_state},
        )
        # Organize results and corresponding names
        tf_output = [_states_series]
        for c in _current_state:
            tf_output.append(c.c)
            tf_output.append(c.h)
        name = [states_series.name.split(":")[0]]
        for t in current_state:
            name.append(t.c.name.split(":")[0])
            name.append(t.h.name.split(":")[0])
        graph_def = sess.graph.as_graph_def(add_shapes=True)
        final_graph_def = graph_util.convert_variables_to_constants(sess, graph_def, name)
        tvm_output = run_tvm_graph(
            final_graph_def,
            [batchX.astype("float32"), current_state_tvm.astype("float32")],
            ["Placeholder", "Placeholder_1"],
            out_names=name,
            num_output=len(name),
            mode="vm",
            disabled_pass=["FoldScaleAxis"],
        )
        # Compare TVM's outputs against the corresponding TF outputs.
        # (Previously this compared tf_out against itself, asserting nothing.)
        for i, tf_out in enumerate(tf_output):
            tvm.testing.assert_allclose(tvm_output[i], tf_out, atol=1e-5, rtol=1e-5)
#######################################################################
# Unique
# ------------
def _test_unique(n, dtype, is_dyn):
    """Build a tf.unique graph over random data and compare TF against TVM."""
    tf.reset_default_graph()
    np_data = np.random.randint(100, size=n).astype(dtype)
    with tf.Graph().as_default():
        if is_dyn:
            in_data = tf.placeholder(dtype, [n], name="in_data")
        else:
            in_data = tf.constant(np_data, dtype, name="in_data")
        tf.unique(in_data)
        # Dynamic graphs feed through the placeholder; static ones embed the data.
        in_names = "in_data:0" if is_dyn else ""
        compare_tf_with_tvm(np_data, in_names, ["Unique:0", "Unique:1"], mode="vm")
def test_forward_unique():
    """test Unique"""
    for dtype in ("int32", "int64"):
        for is_dyn in (False, True):
            for size in (50, 100):
                _test_unique(size, dtype, is_dyn)
#######################################################################
# Unique with counts
# ------------
def _test_unique_with_counts(n, dtype, is_dyn):
    """Build a tf.unique_with_counts graph and compare TF against TVM."""
    tf.reset_default_graph()
    np_data = np.random.randint(100, size=n).astype(dtype)
    out_names = ["UniqueWithCounts:0", "UniqueWithCounts:1", "UniqueWithCounts:2"]
    with tf.Graph().as_default():
        if is_dyn:
            in_data = tf.placeholder(dtype, [n], name="in_data")
        else:
            in_data = tf.constant(np_data, dtype, name="in_data")
        tf.unique_with_counts(in_data)
        # Dynamic graphs feed through the placeholder; static ones embed the data.
        in_names = "in_data:0" if is_dyn else ""
        compare_tf_with_tvm(np_data, in_names, out_names, mode="vm")
def test_forward_unique_with_counts():
    """test UniqueWithCounts"""
    for dtype in ("int32", "int64"):
        for is_dyn in (False, True):
            for size in (10, 20):
                _test_unique_with_counts(size, dtype, is_dyn)
#######################################################################
# check graph ir for nn.moments
# ------------
def test_moments():
    """NN.moments"""
    g = tf.Graph()
    shape = [4, 176, 8, 8]
    dtype = "float32"
    with g.as_default():
        A = tf.placeholder(shape=shape, dtype=dtype, name="A")
        _ = tf.placeholder(shape=shape, dtype=dtype, name="B")
        mean, variance = tf.nn.moments(A, [1], keep_dims=True)
        _ = (A - mean) / tf.sqrt(variance + 0.0005)
    # Conversion must produce structurally identical IR with and without
    # span filling.
    with tvm.testing.disable_span_filling():
        mod, _ = from_tensorflow(g.as_graph_def(add_shapes=True))
    with tvm.testing.enable_span_filling():
        mod_with_span, _ = from_tensorflow(g.as_graph_def(add_shapes=True))
    assert tvm.ir.structural_equal(mod["main"], mod_with_span["main"], map_free_vars=True)
    # Golden Relay text the converted module must match structurally.
    program = """
    def @main(%A: Tensor[(4, 176, 8, 8), float32]) {
        %527 = mean(%A, axis=[1], keepdims=True) /* moments/mean */;
        %528 = subtract(%A, %527) /* sub */;
        %529 = subtract(%A, %527);
        %530 = multiply(%529, %529) /* moments/SquaredDifference */;
        %531 = mean(%530, axis=[1], keepdims=True) /* moments/variance */;
        %532 = add(%531, 0.0005f) /* add */;
        %533 = sqrt(%532) /* Sqrt */;
        divide(%528, %533) /* truediv */
    }
    """
    mod_golden = tvm.relay.parse('#[version = "0.0.5"]\n' + program)
    tvm.ir.assert_structural_equal(mod["main"].body, mod_golden["main"].body, map_free_vars=True)
#######################################################################
# invert_permutation
# --------------------
def test_invert_permutation():
    """test InvertPermutation"""
    tf.reset_default_graph()
    # perm[inverse[i]] == i for the expected output.
    perm = np.array([3, 4, 0, 2, 1, 5]).astype("int32")
    with tf.Graph().as_default():
        in_data = tf.placeholder(shape=[6], dtype="int32")
        tf.invert_permutation(in_data)
        compare_tf_with_tvm(perm, "Placeholder:0", "InvertPermutation:0", no_gpu=False)
#######################################################################
# Bincount
# ----
def _test_bincount(in_shape, size, weights):
    """Build a raw Bincount op and compare TF with TVM in VM mode."""
    with tf.Graph().as_default():
        arr_ph = tf.placeholder(shape=in_shape, dtype="int32", name="input0")
        arr_np = np.random.uniform(0, size, size=in_shape).astype("int32")
        size_ph = tf.placeholder(shape=(), dtype="int32", name="size")
        size_np = np.array(size, "int32")
        if weights:
            weights_ph = tf.placeholder(shape=in_shape, dtype="float32", name="weights")
            weights_np = np.reshape(weights, in_shape).astype("float32")
        else:
            # No weights: feed an empty float tensor.
            weights_ph = tf.placeholder(shape=(0,), dtype="float32", name="weights")
            weights_np = np.array([], "float32")
        inputs = [arr_ph, size_ph, weights_ph]
        data = [arr_np, size_np, weights_np]
        result = tf.raw_ops.Bincount(arr=data[0], size=data[1], weights=data[2])
        compare_tf_with_tvm(data, [a.name for a in inputs], result.name, mode="vm")
def test_forward_bincount():
    """Test Bincount Op"""
    # Exercise 2D then 1D inputs, each with uniform, non-uniform, and no weights.
    for shape, n_weights in (((3, 10), 30), ((10,), 10)):
        _test_bincount(shape, 20, [1.0] * n_weights)
        _test_bincount(shape, 20, [1.5] * n_weights)
        _test_bincount(shape, 20, None)
#######################################################################
# DenseBincount
# ----
def _test_dense_bincount(in_shape, size, weights, binary_output):
    """Build a raw DenseBincount op and compare TF with TVM in VM mode."""
    with tf.Graph().as_default():
        arr_ph = tf.placeholder(shape=in_shape, dtype="int32", name="input0")
        arr_np = np.random.uniform(0, size, size=in_shape).astype("int32")
        size_ph = tf.placeholder(shape=(), dtype="int32", name="size")
        size_np = np.array(size, "int32")
        if weights:
            weights_ph = tf.placeholder(shape=in_shape, dtype="float32", name="weights")
            weights_np = np.reshape(weights, in_shape).astype("float32")
        else:
            # No weights: feed an empty float tensor.
            weights_ph = tf.placeholder(shape=(0,), dtype="float32", name="weights")
            weights_np = np.array([], "float32")
        inputs = [arr_ph, size_ph, weights_ph]
        data = [arr_np, size_np, weights_np]
        result = tf.raw_ops.DenseBincount(
            input=data[0],
            size=data[1],
            weights=data[2],
            binary_output=binary_output,
        )
        compare_tf_with_tvm(data, [a.name for a in inputs], result.name, mode="vm")
def test_forward_dense_bincount():
    """Test DenseBincount Op"""
    # Exercise both binary_output modes over 2D then 1D inputs, each with
    # uniform, non-uniform, and no weights.
    for binary_output in (False, True):
        for shape, n_weights in (((3, 10), 30), ((10,), 10)):
            _test_dense_bincount(shape, 20, [1.0] * n_weights, binary_output)
            _test_dense_bincount(shape, 20, [1.5] * n_weights, binary_output)
            _test_dense_bincount(shape, 20, None, binary_output)
#######################################################################
# Test structural_equal and span of a model
# --------------------------------------
class TestSetSpan:
    """Test Structure and span of frequently-used models"""
    def _verify(self, res_fptr, golden_fptr):
        # The converted module must be structurally identical whether or not
        # span filling is enabled, and must match the hand-written golden
        # module including spans.
        with tvm.testing.enable_span_filling():
            with_span = res_fptr()
        with tvm.testing.disable_span_filling():
            without_span = res_fptr()
        assert tvm.ir.structural_equal(with_span, without_span)
        _verify_structural_equal_with_span(with_span, golden_fptr())
    def test_conv2d_bias_add_span(self):
        """Test Structure and span of conv2d and bias add model match to the expected result"""
        def _res():
            # Convert a TF conv2d + bias_add graph and return its main function.
            in_shape = (1, 5, 5, 1)
            kernel_shpae = (2, 2, 1, 2)
            kernel_in = np.ones(kernel_shpae)
            bias_val_shape = tuple([2])
            bias_val_in = np.ones(bias_val_shape)
            with tf.Graph().as_default() as g:
                x = array_ops.placeholder(shape=in_shape, dtype="float32", name="input")
                kernel = tf.constant(kernel_in, dtype=tf.float32, name="filter_weight")
                bias_val_tensor = tf.constant(bias_val_in, dtype=tf.float32, name="conv2d_bias")
                conv2d = tf.nn.conv2d(
                    x, kernel, strides=[1, 1, 1, 1], padding="VALID", name="conv2d"
                )
                _ = tf.nn.bias_add(conv2d, bias_val_tensor, name="bias_add")
                mod, _ = relay.frontend.from_tensorflow(
                    g.as_graph_def(), shape={"input": in_shape}, outputs=["bias_add"]
                )
            return mod["main"]
        def _golden():
            # Hand-built Relay equivalent with explicit spans per TF node name.
            model_in = relay.var(
                "input", relay.TensorType([1, 5, 5, 1]), span=_create_span("input")
            )
            weight = relay.var(
                "filter_weight", relay.TensorType([2, 2, 1, 2]), span=_create_span("filter_weight")
            )
            bias = relay.var("conv2d_bias", relay.TensorType([2]), span=_create_span("conv2d_bias"))
            conv2d = _set_span(
                relay.nn.conv2d(
                    model_in,
                    weight,
                    channels=2,
                    kernel_size=[2, 2],
                    data_layout="NHWC",
                    kernel_layout="HWIO",
                ),
                "conv2d",
            )
            add = _set_span(relay.op.add(conv2d, bias), "bias_add")
            mod = ir.IRModule.from_expr(add)
            return mod["main"]
        self._verify(_res, _golden)
    def test_fully_connected_bias_add_span(self):
        """Test Structure and span of fully connected model match to the expected result"""
        def _res():
            # Convert a TF matmul + bias_add graph (dense path) and return main.
            in_shape = (1, 10)
            kernel_shpae = (10, 10)
            kernel_in = np.ones(kernel_shpae)
            bias_val_shape = tuple([10])
            bias_val_in = np.ones(bias_val_shape)
            with tf.Graph().as_default() as g:
                x = array_ops.placeholder(shape=in_shape, dtype="float32", name="input")
                in_filter = tf.constant(kernel_in, dtype=tf.float32, name="filter_weight")
                bias_val_tensor = tf.constant(bias_val_in, dtype=tf.float32, name="dense_bias")
                mat_mul = math_ops.mat_mul(x, in_filter, name="dense")
                _ = tf.nn.bias_add(mat_mul, bias_val_tensor, name="bias_add")
                mod, _ = relay.frontend.from_tensorflow(
                    g.as_graph_def(),
                    shape={"input": in_shape},
                    outputs=["bias_add"],
                    convert_config={"use_dense": True},
                )
            return mod["main"]
        def _golden():
            # The dense lowering transposes the weight; both ops carry the
            # span of the originating "dense" TF node.
            model_in = relay.var("input", relay.TensorType([1, 10]), span=_create_span("input"))
            weight = relay.var(
                "filter_weight", relay.TensorType([10, 10]), span=_create_span("filter_weight")
            )
            bias = relay.var("dense_bias", relay.TensorType([10]), span=_create_span("dense_bias"))
            transpose = _set_span(relay.transpose(weight, [1, 0]), "dense")
            dense = _set_span(relay.nn.dense(model_in, transpose, units=10), "dense")
            add = _set_span(relay.op.add(dense, bias), "bias_add")
            mod = ir.IRModule.from_expr(add)
            return mod["main"]
        self._verify(_res, _golden)
    def test_reshape_span(self):
        """Test Structure and span of reshape model match to the expected result"""
        def _res():
            in_shape = (1, 10)
            output_shape = (2, 5)
            with tf.Graph().as_default() as g:
                x = array_ops.placeholder(shape=in_shape, dtype="float32", name="input")
                _ = array_ops.reshape(x, output_shape, "reshape")
                mod, _ = relay.frontend.from_tensorflow(
                    g.as_graph_def(), shape={"input": in_shape}, outputs=["reshape"]
                )
            return mod["main"]
        def _golden():
            model_in = relay.var("input", relay.TensorType([1, 10]), span=_create_span("input"))
            reshape = _set_span(relay.reshape(model_in, [2, 5]), "reshape")
            mod = ir.IRModule.from_expr(reshape)
            return mod["main"]
        self._verify(_res, _golden)
    def test_batch_norm_span(self):
        """Test Structure and span of batchnorm model match to the expected result"""
        def _res():
            # fused_batch_norm with constant scale/offset over an NHWC input.
            in_shape = (1, 12, 12, 32)
            with tf.Graph().as_default() as g:
                input_tensor = tf.placeholder(tf.float32, shape=in_shape, name="input")
                alpha = tf.constant(
                    np.ones(
                        in_shape[-1],
                    ),
                    dtype=tf.float32,
                    name="alpha",
                )
                beta = tf.constant(
                    np.ones(
                        in_shape[-1],
                    ),
                    dtype=tf.float32,
                    name="beta",
                )
                _ = tf.nn.fused_batch_norm(x=input_tensor, offset=beta, scale=alpha, name="bn")
                mod, _ = relay.frontend.from_tensorflow(
                    g.as_graph_def(), shape={"input": in_shape}, outputs=["bn"]
                )
            return mod["main"]
        def _golden():
            # Training-mode batch norm expands into mean/variance computation;
            # every derived op carries the "bn" span.
            model_in = relay.var(
                "input", relay.TensorType([1, 12, 12, 32]), span=_create_span("input")
            )
            alpha = relay.var("alpha", relay.TensorType([32]), span=_create_span("alpha"))
            beta = relay.var("beta", relay.TensorType([32]), span=_create_span("beta"))
            mean = _set_span(relay.op.mean(model_in, axis=[3], exclude=True), "bn")
            variance_mean = _set_span(
                relay.op.mean(model_in, axis=[3], keepdims=True, exclude=True), "bn"
            )
            variance = _set_span(
                relay.op._make._variance(model_in, variance_mean, [3], False, True, False), "bn"
            )
            bn = _set_span(
                relay.nn.batch_norm(model_in, alpha, beta, mean, variance, axis=3, epsilon=0.001),
                "bn",
            )
            mod = ir.IRModule.from_expr(bn[0])
            return mod["main"]
        self._verify(_res, _golden)
# Run all tests in this file when executed as a script.
if __name__ == "__main__":
    tvm.testing.main()
| 216,228 | 34.435759 | 113 | py |
tvm | tvm-main/tests/python/nightly/quantization/test_quantization_accuracy.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from collections import namedtuple
import tvm
from tvm import relay
from tvm.relay import quantize as qtz
import mxnet as mx
from mxnet import gluon
import logging
import os
import tvm.testing
# Emit INFO-level progress logs during the accuracy run.
logging.basicConfig(level=logging.INFO)
# One benchmark configuration: model name, quantization bit widths and dtypes
# for inputs/outputs, the global quantization scale, and the minimum top-1
# accuracy the quantized model is expected to reach.
Config = namedtuple(
    "Config",
    [
        "model",
        "nbit_input",
        "dtype_input",
        "nbit_output",
        "dtype_output",
        "global_scale",
        "expected_acc",
    ],
)
def get_val_data(model_name, rec_val, batch_size, num_workers=4):
    """Create an ImageNet validation-set iterator and a batch-split helper.

    Returns (val_data, batch_fn): an MXNet ImageRecordIter over `rec_val`
    and a function splitting a batch into (data, label) on the given ctx.
    Inception-v3 is resized to 299x299; all other models to 224x224.
    """
    rec_val = os.path.expanduser(rec_val)
    # Standard ImageNet per-channel normalization constants.
    mean_rgb = [123.68, 116.779, 103.939]
    std_rgb = [58.393, 57.12, 57.375]
    def batch_fn(batch, ctx):
        data = gluon.utils.split_and_load(batch.data[0], ctx_list=ctx, batch_axis=0)
        label = gluon.utils.split_and_load(batch.label[0], ctx_list=ctx, batch_axis=0)
        return data, label
    img_size = 299 if model_name == "inceptionv3" else 224
    val_data = mx.io.ImageRecordIter(
        path_imgrec=rec_val,
        preprocess_threads=num_workers,
        shuffle=False,
        batch_size=batch_size,
        resize=256,
        data_shape=(3, img_size, img_size),
        mean_r=mean_rgb[0],
        mean_g=mean_rgb[1],
        mean_b=mean_rgb[2],
        std_r=std_rgb[0],
        std_g=std_rgb[1],
        std_b=std_rgb[2],
    )
    return val_data, batch_fn
def get_model(model_name, batch_size, qconfig, original=False):
    """Load a Gluon model-zoo model and optionally quantize it.

    Returns (mod, params). When `original` is True the unquantized Relay
    module is returned; otherwise `qtz.quantize` runs under `qconfig`.
    """
    gluon_model = gluon.model_zoo.vision.get_model(model_name, pretrained=True)
    img_size = 299 if model_name == "inceptionv3" else 224
    data_shape = (batch_size, 3, img_size, img_size)
    mod, params = relay.frontend.from_mxnet(gluon_model, {"data": data_shape})
    logging.debug("original")
    logging.debug(mod.astext(show_meta_data=False))
    if original:
        return mod, params
    with qconfig:
        logging.debug("current quantize config")
        logging.debug(qtz.current_qconfig())
        qfunc = qtz.quantize(mod, params)
        logging.debug("after quantize")
        logging.debug(qfunc.astext(show_meta_data=False))
    return qfunc, params
def eval_acc(
    model, params, dataset, batch_fn, target=tvm.target.cuda(), device=tvm.cuda(), log_interval=500
):
    """Build `model` for `target`, run it over `dataset`, return top-1 accuracy.

    Logs running top-1/top-5 every `log_interval` batches and the final
    metrics once the dataset is exhausted.
    NOTE(review): the `target`/`device` defaults are evaluated once at import
    time; kept as-is for interface compatibility.
    """
    with tvm.transform.PassContext(opt_level=3):
        lib = relay.build(model, target, params=params)
    # create runtime module
    m = tvm.contrib.graph_executor.GraphModule(lib["default"](device))
    # setup evaluation metric
    dataset.reset()
    batch_size = dataset.batch_size
    acc_top1 = mx.metric.Accuracy()
    acc_top5 = mx.metric.TopKAccuracy(5)
    acc_top1.reset()
    acc_top5.reset()
    # Execute
    for i, batch in enumerate(dataset):
        data, label = batch_fn(batch, [mx.cpu(0)])
        m.set_input("data", tvm.nd.array(data[0].asnumpy()))
        m.run()
        out_arr = m.get_output(0)
        acc_top1.update(label, [mx.nd.array(out_arr.numpy())])
        acc_top5.update(label, [mx.nd.array(out_arr.numpy())])
        if not (i + 1) % log_interval:
            _, top1 = acc_top1.get()
            _, top5 = acc_top5.get()
            nsamples = (i + 1) * batch_size
            logging.info("[%d samples] validation: acc-top1=%f acc-top5=%f", nsamples, top1, top5)
    # Recompute the final metrics after the loop: the values bound inside the
    # periodic-log branch above are stale, and are never bound at all when the
    # dataset has fewer than `log_interval` batches (NameError previously).
    _, top1 = acc_top1.get()
    _, top5 = acc_top5.get()
    logging.info("[final] validation: acc-top1=%f acc-top5=%f", top1, top5)
    return top1
@tvm.testing.requires_gpu
def test_quantize_acc(cfg, rec_val):
    """Quantize cfg.model per cfg, evaluate on `rec_val`, and assert the
    measured top-1 accuracy exceeds cfg.expected_acc. Returns the accuracy."""
    qconfig = qtz.qconfig(
        # The first conv layer is kept in float for accuracy.
        skip_conv_layers=[0],
        nbit_input=cfg.nbit_input,
        nbit_weight=cfg.nbit_input,
        global_scale=cfg.global_scale,
        dtype_input=cfg.dtype_input,
        dtype_weight=cfg.dtype_input,
        dtype_activation=cfg.dtype_output,
        debug_enabled_ops=None,
    )
    batch_size = 1
    model, params = get_model(cfg.model, batch_size, qconfig)
    val_data, batch_fn = get_val_data(cfg.model, rec_val=rec_val, batch_size=batch_size)
    acc = eval_acc(model, params, val_data, batch_fn)
    assert acc > cfg.expected_acc
    return acc
# Script entry point: run every configuration against a local ImageNet
# validation record file and print (config, accuracy) pairs.
if __name__ == "__main__":
    # TODO(for user): replace the line with the path to imagenet validation dataset
    rec_val = "/scratch/tqchen/imagenet/val.rec"
    results = []
    configs = [
        # TODO: need to fix accuracy and add AutoTVM log
        Config(
            "mobilenetv2_1.0",
            nbit_input=8,
            dtype_input="int8",
            nbit_output=32,
            dtype_output="int32",
            global_scale=4.0,
            expected_acc=0.666,
        ),
        Config(
            "mobilenetv2_1.0",
            nbit_input=8,
            dtype_input="int8",
            nbit_output=16,
            dtype_output="int16",
            global_scale=4.0,
            expected_acc=0.666,
        ),
        Config(
            "resnet18_v1",
            nbit_input=8,
            dtype_input="int8",
            nbit_output=16,
            dtype_output="int16",
            global_scale=8.0,
            expected_acc=0.692,
        ),
        Config(
            "resnet18_v1",
            nbit_input=8,
            dtype_input="int8",
            nbit_output=32,
            dtype_output="int32",
            global_scale=8.0,
            expected_acc=0.692,
        ),
        Config(
            "resnet34_v1",
            nbit_input=8,
            dtype_input="int8",
            nbit_output=32,
            dtype_output="int32",
            global_scale=8.0,
            expected_acc=0.733,
        ),
        Config(
            "resnet50_v1",
            nbit_input=8,
            dtype_input="int8",
            nbit_output=32,
            dtype_output="int32",
            global_scale=8.0,
            expected_acc=0.747,
        ),
        Config(
            "resnet101_v1",
            nbit_input=8,
            dtype_input="int8",
            nbit_output=32,
            dtype_output="int32",
            global_scale=8.0,
            expected_acc=0.756,
        ),
    ]
    for config in configs:
        acc = test_quantize_acc(config, rec_val)
        results.append((config, acc))
    for res in results:
        print(res)
| 6,833 | 29.64574 | 99 | py |
tvm | tvm-main/tests/python/contrib/test_dnnl.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import itertools
import numpy as np
import sys
import subprocess
import math
import collections
import tvm
from tvm import relay
from tvm.relay import transform
from tvm.relay.build_module import bind_params_by_name
from tvm.relay.testing.temp_op_attr import TempOpAttr
from tvm.relay.op.contrib import dnnl
import tvm.testing
# Skip marker applied when the DNNL codegen was not compiled into this TVM build.
has_dnnl_codegen = pytest.mark.skipif(
    not tvm.get_global_func("relay.ext.dnnl", True), reason="DNNL codegen not available"
)
# Parametrized fixture: False compiles the module only, True also executes it.
# Both variants require the DNNL codegen and an LLVM-enabled build.
run_module = tvm.testing.parameter(
    pytest.param(False, marks=[has_dnnl_codegen, *tvm.testing.requires_llvm.marks()]),
    pytest.param(True, marks=[has_dnnl_codegen, *tvm.testing.requires_llvm.marks()]),
    ids=["compile", "run"],
)
# Cached result of the one-time host CPU capability probe (None = not probed).
_bf16_supported = None


def bf16_supported():
    """Return True if the host CPU reports AVX-512 support (probe once, cache).

    On macOS this queries ``sysctl -a`` for ``hw.optional.avx512f``; on Linux
    it scans ``/proc/cpuinfo`` for an ``avx512`` flag. Other platforms report
    False.
    """
    global _bf16_supported
    if _bf16_supported is None:
        _bf16_supported = False
        if sys.platform.startswith("darwin"):
            cpu_info = subprocess.check_output("sysctl -a", shell=True).strip().decode()
            for line in cpu_info.split("\n"):
                if line.startswith("hw.optional.avx512f"):
                    _bf16_supported = bool(int(line.split(":", 1)[1]))
        elif sys.platform.startswith("linux"):
            # Use a context manager so the file handle is closed deterministically
            # (the original left the handle to the garbage collector).
            with open("/proc/cpuinfo", "r") as cpuinfo:
                _bf16_supported = "avx512" in cpuinfo.read()
    return _bf16_supported
def partition_for_dnnl(mod, params=None, alter_layout=True, prune_subgraphs=True):
    """Partition the graph greedily offloading supported operators to DNNL.

    Parameters
    ----------
    mod : Module
        The module to run passes on.
    params : Optional[Dict[str, NDArray]]
        Constant input parameters.
    alter_layout : bool
        If True, additionally run AlterOpLayout with the DNNL-specific conv /
        conv_transpose layout rewrites before pattern matching.
    prune_subgraphs : bool
        If True, drop partitioned DNNL subgraphs deemed unprofitable by
        ``dnnl.prune_dnnl_subgraphs``.

    Returns
    -------
    mod : Module
        Annotated and partitioned module.
    """
    if params:
        mod["main"] = bind_params_by_name(mod["main"], params)
    # Legalize grouped conv / conv_transpose so their layouts match what DNNL expects.
    with TempOpAttr("nn.conv2d", "FTVMLegalize", dnnl.legalize_group_conv):
        with TempOpAttr("nn.conv2d_transpose", "FTVMLegalize", dnnl.legalize_group_conv):
            seq = tvm.transform.Sequential(
                [
                    transform.CanonicalizeOps(),
                    transform.InferType(),
                    transform.SimplifyInference(),
                    transform.FoldConstant(),
                    transform.FoldScaleAxis(),
                    # fold consecutive add ops to simplify pattern `conv2d-bias_add-bn-relu`
                    transform.SimplifyExpr(),
                    transform.FoldConstant(),
                    # alter group conv /conv_transpose layout to `GOIHW` / `GIOHW`
                    transform.Legalize(),
                    transform.FoldConstant(),
                ]
            )
            with tvm.transform.PassContext(opt_level=3):
                mod = seq(mod)
    if alter_layout:
        # Swap every conv flavor to its DNNL-preferred layout.
        with TempOpAttr("nn.conv1d", "FTVMAlterOpLayout", dnnl.alter_conv):
            with TempOpAttr("nn.conv2d", "FTVMAlterOpLayout", dnnl.alter_conv):
                with TempOpAttr("nn.conv3d", "FTVMAlterOpLayout", dnnl.alter_conv):
                    with TempOpAttr(
                        "nn.conv2d_transpose", "FTVMAlterOpLayout", dnnl.alter_conv_transpose
                    ):
                        with TempOpAttr(
                            "nn.conv3d_transpose", "FTVMAlterOpLayout", dnnl.alter_conv_transpose
                        ):
                            alter_layout_seq = tvm.transform.Sequential(
                                [
                                    transform.AlterOpLayout(),
                                    transform.FoldConstant(),
                                ]
                            )
                            with tvm.transform.PassContext(opt_level=3):
                                mod = alter_layout_seq(mod)
    # Pattern-based rewrites that expose fusible DNNL patterns.
    mod = dnnl.rewrite_layer_norm(mod)
    mod = dnnl.rewrite_dense_bias_gelu_reshape_last(mod)
    mod = dnnl.legalize_qnn_for_dnnl(mod)
    # Standard BYOC flow: merge composites, annotate, merge regions, partition.
    byoc_seq = tvm.transform.Sequential(
        [
            transform.MergeComposite(dnnl.pattern_table()),
            transform.AnnotateTarget("dnnl"),
            transform.MergeCompilerRegions(),
            transform.PartitionGraph(),
        ]
    )
    with tvm.transform.PassContext(opt_level=3):
        mod = byoc_seq(mod)
    if prune_subgraphs:
        mod = dnnl.prune_dnnl_subgraphs(mod)
    return mod
def vmobj_to_list(o):
    """Flatten a runtime result object into a (possibly nested) list of numpy arrays.

    NDArray results are converted with ``.numpy()``; uint16 payloads are
    re-interpreted as bfloat16 by shifting into the upper half of a float32
    word. ADT containers and Python lists are converted recursively.

    Raises
    ------
    RuntimeError
        If the object is not an NDArray, ADT, or list.
    """
    if isinstance(o, tvm.nd.NDArray):
        o_np = o.numpy()
        if o_np.dtype == np.uint16:
            # bf16 payload stored as uint16: widen to float32 via a 16-bit left shift.
            o_np = np.left_shift(o_np.astype("uint32"), 16).view("<f4")
        return [o_np]
    # Idiom fix: single isinstance call with a tuple instead of an `or` chain.
    if isinstance(o, (tvm.runtime.container.ADT, list)):
        return [vmobj_to_list(f) for f in o]
    raise RuntimeError("Unknown object type: %s" % type(o))
def assert_result_dict_holds(result_dict):
    """Check that every pair of results in ``result_dict`` agrees numerically.

    Pairs where exactly one side comes from a bf16 run are skipped, since
    bf16 results are not expected to match fp32 closely.
    """
    for key_a, key_b in itertools.combinations(result_dict, 2):
        # ignore the accuracy checking if only one bf16 result presents
        if ("bf16" in key_a) != ("bf16" in key_b):
            continue
        flat_a = vmobj_to_list(result_dict[key_a])
        flat_b = vmobj_to_list(result_dict[key_b])
        for lhs, rhs in zip(flat_a, flat_b):
            tvm.testing.assert_allclose(lhs, rhs, rtol=1e-3, atol=1e-3)
def check_dnnl_used(mod, subgraph_num=None):
    """Assert the module contains DNNL-partitioned subgraphs.

    When ``subgraph_num`` is given (and truthy) require exactly that many
    global functions whose name mentions "dnnl"; otherwise require at least one.
    """
    dnnl_count = sum(1 for gvar in mod.get_global_vars() if "dnnl" in gvar.name_hint)
    if subgraph_num:
        assert dnnl_count == subgraph_num
    else:
        assert dnnl_count >= 1
def run_and_verify(mod, input, params, target, run_module, subgraph_num=None, test_bf16=True):
    """Compile ``mod`` under several configurations and check all results agree.

    For each executor mode ("graph", "vm") the module is built plain, with
    DNNL partitioning, with DNNL + layout alteration, and (when the CPU
    supports it) the same two DNNL variants in bfloat16. When ``run_module``
    is True every variant is executed and the outputs are cross-compared.

    ``subgraph_num`` is accepted for interface parity with callers but unused here.
    """
    dev = tvm.cpu()
    result_dict = dict()
    for mode in ["graph", "vm"]:
        # Each tuple is (use_dnnl, alter_layout, use_bf16).
        configs = [
            (False, False, False),
            (True, False, False),
            (True, True, False),
        ]
        if test_bf16 and bf16_supported():
            configs += [(True, False, True), (True, True, True)]
        for use_dnnl, alter_layout, use_bf16 in configs:
            result_key = (
                mode
                + ("_dnnl" if use_dnnl else "")
                + ("_layout" if alter_layout else "")
                + ("_bf16" if use_bf16 else "_fp32")
            )
            processed_mod = mod
            if use_bf16:
                processed_mod = relay.transform.ToMixedPrecision("bfloat16")(processed_mod)
                # If the pass changed nothing, the model has no bf16-convertible ops.
                if tvm.ir.structural_equal(processed_mod, mod):
                    print("can not convert to bfloat16, skipping...")
                    continue
            if use_dnnl:
                processed_mod = partition_for_dnnl(processed_mod, params, alter_layout)
                check_dnnl_used(processed_mod)
            with tvm.transform.PassContext(opt_level=3):
                func = relay.create_executor(
                    mode, mod=processed_mod, device=dev, target=target
                ).evaluate()
            if run_module:
                if isinstance(input, dict):
                    result_dict[result_key] = func(**input, **params)
                else:
                    result_dict[result_key] = func(input, **params)
    if run_module:
        assert_result_dict_holds(result_dict)
def run_and_verify_func(
    config, run_module, subgraph_num=None, target="llvm", dtype="float32", test_bf16=True
):
    """Test a Relay func by compiling, running, and comparing TVM and DNNL outputs.

    Parameters
    ----------
    config : Tuple[relay.Function, Dict[str, NDArray], List[str]]
        A tuple containing 1) The function to test, 2) A dictionary of var names to input shapes and
        3) A list of which vars should be considered params.
    run_module: bool
        If True, the built module will be run after being compiled.
    """
    func, shapes, param_names = config
    # Bound params get random constant values; everything else is a runtime input.
    params = {
        name: np.random.uniform(-1, 1, shapes[name]).astype(dtype) for name in param_names
    }
    input_dict = {
        name: np.random.uniform(-1, 1, shape).astype(dtype)
        for name, shape in shapes.items()
        if name not in param_names
    }
    run_and_verify(
        func,
        input_dict,
        params,
        subgraph_num=subgraph_num,
        target=target,
        run_module=run_module,
        test_bf16=test_bf16,
    )
def add_activation(activation, out, dic, param_lst):
    """Wrap ``out`` in the named activation (or pass it through unchanged).

    ``dic`` and ``param_lst`` are returned untouched so builders can chain this
    as their final step.
    """
    if activation == "relu":
        out = relay.nn.relu(out)
    elif activation == "tanh":
        out = relay.tanh(out)
    elif activation == "sigmoid":
        out = relay.sigmoid(out)
    elif activation == "clip":
        out = relay.clip(out, 0.0, 6.0)
    elif activation == "swish":
        # swish(x) = x * sigmoid(x)
        out = relay.multiply(out, relay.sigmoid(out))
    elif activation == "gelu":
        out = gelu_helper(out)
    elif activation == "mish":
        # mish(x) = x * tanh(log(1 + exp(x)))
        softplus = relay.log(relay.add(relay.exp(out), relay.const(1.0)))
        out = relay.multiply(out, relay.tanh(softplus))
    return out, dic, param_lst
def get_conv1d(
    x_shape=((1, 3, 224)),
    k_shape=(16, 3, 3),
    groups=1,
    padding=(1, 1),
    strides=(1),
    dilation=(1),
    channels=None,
    activation=None,
    dtype="float32",
):
    """Build conv1d(data, kernel) plus its shape dict and param-name list."""
    data = relay.var("x", shape=x_shape, dtype=dtype)
    weight = relay.var("kernel", shape=k_shape, dtype=dtype)
    body = relay.nn.conv1d(
        data,
        weight,
        kernel_size=k_shape[2:3],
        groups=groups,
        padding=padding,
        strides=strides,
        dilation=dilation,
        channels=k_shape[0],
    )
    shapes = {"x": x_shape, "kernel": k_shape}
    return add_activation(activation, body, shapes, ["kernel"])
def get_conv1d_bias(x_shape=(1, 3, 224), k_shape=(10, 3, 3), activation=None, dtype="float32"):
    """conv1d followed by bias_add, optionally wrapped in an activation."""
    body, shapes, params = get_conv1d(x_shape=x_shape, k_shape=k_shape, dtype=dtype)
    bias = relay.var("bias", shape=(k_shape[0],), dtype=dtype)
    biased = relay.nn.bias_add(body, bias)
    shapes["bias"] = (k_shape[0],)
    params = params + ["bias"]
    return add_activation(activation, biased, shapes, params)
def get_conv1d_bias_bn_relu(x_shape=(1, 3, 224), k_shape=(10, 3, 3), dtype="float32"):
    """conv1d + bias + batch_norm (identity statistics) + relu."""
    body, shapes, params = get_conv1d_bias(x_shape, k_shape, dtype=dtype)
    channels = k_shape[0]
    bn_out, _, _ = relay.nn.batch_norm(
        body,
        gamma=relay.const(np.ones(channels).astype(dtype)),
        beta=relay.const(np.zeros(channels).astype(dtype)),
        moving_mean=relay.const(np.zeros(channels).astype(dtype)),
        moving_var=relay.const(np.ones(channels).astype(dtype)),
        axis=1,
        center=True,
        scale=True,
        epsilon=1e-5,
    )
    return relay.nn.relu(bn_out), shapes, params
def get_conv2d(
    x_shape=(1, 32, 8, 8),
    k_shape=(16, 32, 3, 3),
    groups=1,
    padding=(0, 0),
    strides=(1, 1),
    dilation=(1, 1),
    activation=None,
    dtype="float32",
):
    """Build conv2d(data, kernel) plus its shape dict and param-name list."""
    data = relay.var("x", shape=x_shape, dtype=dtype)
    weight = relay.var("kernel", shape=k_shape, dtype=dtype)
    body = relay.nn.conv2d(
        data,
        weight,
        kernel_size=k_shape[2:4],
        groups=groups,
        padding=padding,
        strides=strides,
        dilation=dilation,
        channels=k_shape[0],
    )
    shapes = {"x": x_shape, "kernel": k_shape}
    return add_activation(activation, body, shapes, ["kernel"])
def get_conv2d_transpose(
    x_shape=(1, 32, 8, 8),
    k_shape=(32, 16, 3, 3),
    groups=1,
    padding=(0, 0),
    strides=(1, 1),
    activation=None,
    dtype="float32",
):
    """conv2d_transpose (IOHW-style kernel shape) plus optional activation."""
    data = relay.var("x", shape=x_shape, dtype=dtype)
    weight = relay.var("kernel", shape=k_shape, dtype=dtype)
    body = relay.nn.conv2d_transpose(
        data,
        weight,
        channels=k_shape[1] * groups,
        kernel_size=k_shape[2:4],
        groups=groups,
        padding=padding,
        strides=strides,
    )
    shapes = {"x": x_shape, "kernel": k_shape}
    return add_activation(activation, body, shapes, ["kernel"])
def get_conv2d_weights_const(
    x_shape=(1, 32, 8, 8),
    k_shape=(16, 32, 3, 3),
    groups=1,
    padding=(0, 0),
    strides=(1, 1),
    dilation=(1, 1),
    dtype="float32",
):
    """conv2d whose weights are a constant (randint(0, 1) yields all zeros)."""
    data = relay.var("x", shape=x_shape, dtype=dtype)
    weight = relay.const(np.random.randint(0, 1, k_shape).astype(dtype))
    body = relay.nn.conv2d(
        data,
        weight,
        channels=k_shape[0],
        kernel_size=k_shape[2:4],
        groups=groups,
        padding=padding,
        strides=strides,
        dilation=dilation,
    )
    # Only "x" is a runtime input; the weights are baked into the graph.
    return body, {"x": x_shape}, []
def get_conv2d_bias(
    x_shape=(1, 32, 8, 8), k_shape=(16, 32, 3, 3), activation=None, dtype="float32"
):
    """conv2d (constant weights) + bias_add, optionally with an activation."""
    body, shapes, params = get_conv2d_weights_const(x_shape=x_shape, k_shape=k_shape, dtype=dtype)
    bias = relay.var("bias", shape=(k_shape[0],), dtype=dtype)
    biased = relay.nn.bias_add(body, bias)
    shapes["bias"] = (k_shape[0],)
    params = params + ["bias"]
    return add_activation(activation, biased, shapes, params)
def get_conv2d_transpose_bias(
    x_shape=(1, 32, 8, 8), k_shape=(32, 16, 3, 3), activation=None, dtype="float32"
):
    """conv2d_transpose + bias_add; bias length is k_shape[1] (output channels)."""
    body, shapes, params = get_conv2d_transpose(x_shape=x_shape, k_shape=k_shape, dtype=dtype)
    bias = relay.var("bias", shape=(k_shape[1],), dtype=dtype)
    biased = relay.nn.bias_add(body, bias)
    shapes["bias"] = (k_shape[1],)
    params = params + ["bias"]
    return add_activation(activation, biased, shapes, params)
def get_conv2d_bias_bn_relu(x_shape=(1, 32, 8, 8), k_shape=(16, 32, 3, 3), dtype="float32"):
    """conv2d + bias + batch_norm (identity statistics) + relu."""
    body, shapes, params = get_conv2d_bias(x_shape, k_shape, dtype=dtype)
    channels = k_shape[0]
    bn_out, _, _ = relay.nn.batch_norm(
        body,
        gamma=relay.const(np.ones(channels).astype(dtype)),
        beta=relay.const(np.zeros(channels).astype(dtype)),
        moving_mean=relay.const(np.zeros(channels).astype(dtype)),
        moving_var=relay.const(np.ones(channels).astype(dtype)),
        axis=1,
        center=True,
        scale=True,
        epsilon=1e-5,
    )
    return relay.nn.relu(bn_out), shapes, params
def get_layer_norm(x_shape=(1, 49, 64), dtype="float32"):
    """Build a layer_norm over the last axis with identity gamma/beta.

    Fixes: the data var previously ignored ``dtype`` (always float32) unlike
    every other builder in this file; the local name also shadowed the
    built-in ``input``.
    """
    dic = {"input": x_shape}
    param_lst = []
    data = relay.var("input", shape=x_shape, dtype=dtype)
    beta = relay.const(np.zeros(x_shape[2]).astype(dtype))
    gamma = relay.const(np.ones(x_shape[2]).astype(dtype))
    out = relay.nn.layer_norm(data, gamma=gamma, beta=beta)
    return out, dic, param_lst
def get_conv3d(
    x_shape=(1, 32, 8, 8, 8),
    k_shape=(16, 32, 3, 3, 3),
    groups=1,
    padding=(0, 0, 0),
    strides=(1, 1, 1),
    dilation=(1, 1, 1),
    activation=None,
    dtype="float32",
):
    """conv3d with constant (all-zero) weights, optionally with an activation."""
    data = relay.var("x", shape=x_shape, dtype=dtype)
    weight = relay.const(np.random.randint(0, 1, k_shape).astype(dtype))
    body = relay.nn.conv3d(
        data,
        weight,
        channels=k_shape[0],
        kernel_size=k_shape[2:],
        groups=groups,
        padding=padding,
        strides=strides,
        dilation=dilation,
    )
    # NOTE(review): "kernel" is a const here yet still appears in the shape
    # dict / param list — kept as-is to preserve the existing behavior.
    shapes = {"x": x_shape, "kernel": k_shape}
    return add_activation(activation, body, shapes, ["kernel"])
def get_conv3d_transpose(
    x_shape=(1, 32, 8, 8, 8),
    k_shape=(32, 16, 3, 3, 3),
    groups=1,
    padding=(0, 0, 0),
    strides=(1, 1, 1),
    output_padding=(0, 0, 0),
    activation=None,
    dtype="float32",
    data_layout="NCDHW",
    kernel_layout="IODHW",
):
    """conv3d_transpose with constant (all-zero) weights, optional activation."""
    data = relay.var("x", shape=x_shape, dtype=dtype)
    weight = relay.const(np.random.randint(0, 1, k_shape).astype(dtype))
    body = relay.nn.conv3d_transpose(
        data,
        weight,
        channels=k_shape[1],
        kernel_size=k_shape[2:5],
        groups=groups,
        padding=padding,
        strides=strides,
        output_padding=output_padding,
        data_layout=data_layout,
        kernel_layout=kernel_layout,
    )
    # NOTE(review): "kernel" is a const but is still listed below — kept as-is.
    shapes = {"x": x_shape, "kernel": k_shape}
    return add_activation(activation, body, shapes, ["kernel"])
def get_conv3d_bias(
    x_shape=(1, 32, 8, 8, 8), k_shape=(16, 32, 3, 3, 3), activation=None, dtype="float32"
):
    """conv3d + bias_add, optionally with an activation."""
    body, shapes, params = get_conv3d(x_shape=x_shape, k_shape=k_shape, dtype=dtype)
    bias = relay.var("bias", shape=(k_shape[0],), dtype=dtype)
    biased = relay.nn.bias_add(body, bias)
    shapes["bias"] = (k_shape[0],)
    params = params + ["bias"]
    return add_activation(activation, biased, shapes, params)
def get_conv3d_transpose_bias(
    x_shape=(1, 32, 8, 8, 8), k_shape=(32, 16, 3, 3, 3), activation=None, dtype="float32"
):
    """conv3d_transpose + bias_add; bias length is k_shape[1] (output channels)."""
    body, shapes, params = get_conv3d_transpose(x_shape=x_shape, k_shape=k_shape, dtype=dtype)
    bias = relay.var("bias", shape=(k_shape[1],), dtype=dtype)
    biased = relay.nn.bias_add(body, bias)
    shapes["bias"] = (k_shape[1],)
    params = params + ["bias"]
    return add_activation(activation, biased, shapes, params)
def gelu_helper(data):
    """Compose the erf-based GELU: 0.5 * x * (1 + erf(x / sqrt(2)))."""
    sqrt_two = relay.const(math.sqrt(2.0))
    one = relay.const(1.0)
    half = relay.const(0.5)
    erf_term = relay.op.erf(relay.op.divide(data, sqrt_two))
    scaled = relay.op.multiply(data, relay.op.add(erf_term, one))
    return relay.op.multiply(scaled, half)
def get_dense(
    x_shape=(1, 16), k_shape=(32, 16), activation=None, has_reshape=False, dtype="float32"
):
    """Build a dense op, optionally reshaped to rank 3 and/or gelu-wrapped.

    ``has_reshape`` inserts a reshape to (1, batch, out_units) after the matmul,
    matching the dense+reshape rewrite pattern tested elsewhere in this file.
    (Removed a commented-out dead-code variant of the dense call.)
    """
    x = relay.var("x", shape=(x_shape), dtype=dtype)
    kernel = relay.var("kernel", shape=(k_shape), dtype=dtype)
    out = relay.nn.dense(x, kernel, units=k_shape[0])
    if has_reshape:
        out = relay.reshape(out, newshape=(1, x_shape[0], k_shape[0]))
    dic = {"x": x_shape, "kernel": k_shape}
    param_lst = ["kernel"]
    if activation == "gelu":
        out = gelu_helper(out)
    return out, dic, param_lst
def get_bmm(
    x_shape=(1, 16, 8), k_shape=(1, 4, 8), dtype="float32", transpose_a=False, transpose_b=True
):
    """Build a batch_matmul between a data var ("x") and a weight var ("kernel")."""
    lhs = relay.var("x", shape=x_shape, dtype=dtype)
    rhs = relay.var("kernel", shape=k_shape, dtype=dtype)
    product = relay.nn.batch_matmul(
        lhs, rhs, out_dtype=dtype, transpose_a=transpose_a, transpose_b=transpose_b
    )
    return product, {"x": x_shape, "kernel": k_shape}, ["kernel"]
def test_bmm(run_module, dtype="float32"):
    """batch_matmul with and without a transposed right-hand side."""
    x_shape = (1, 2, 4)
    for k_shape, transpose_b in [((1, 3, 4), True), ((1, 4, 3), False)]:
        expr, dic, param_lst = get_bmm(x_shape, k_shape, dtype=dtype, transpose_b=transpose_b)
        mod = tvm.IRModule.from_expr(expr)
        run_and_verify_func((mod, dic, param_lst), run_module=run_module, dtype=dtype)
def get_dense_bias(
    x_shape=(1, 16),
    k_shape=(32, 16),
    activation=None,
    has_reshape=False,
    use_add=False,
    dtype="float32",
):
    """dense (+ optional reshape) plus bias via add or bias_add, optional gelu."""
    body, shapes, params = get_dense(
        x_shape=x_shape, k_shape=k_shape, has_reshape=has_reshape, dtype=dtype
    )
    bias = relay.var("bias", shape=(k_shape[0],), dtype=dtype)
    # `use_add` selects plain add (pattern used by the reshape rewrite tests).
    combine = relay.add if use_add else relay.nn.bias_add
    out = combine(body, bias)
    shapes["bias"] = (k_shape[0],)
    params = params + ["bias"]
    if activation == "gelu":
        out = gelu_helper(out)
    return out, shapes, params
def test_dnnl_not_compatible(run_module, target="llvm", dtype="float32"):
    """A graph with no DNNL-supported pattern still compiles and runs after partitioning.

    Fix: dropped the unused ``results`` binding — the output was never checked.
    """
    xshape = (1, 32, 14, 14)
    x_data = np.random.uniform(-1, 1, xshape).astype(dtype)
    x = relay.var("x", shape=(xshape), dtype=dtype)
    y = relay.add(x, x)
    # cast round-trip keeps the graph un-offloadable by DNNL patterns
    z = relay.cast(relay.cast(y, "int32"), "float32")
    out = relay.nn.relu(z)
    f = relay.Function([x], out)
    mod = tvm.IRModule()
    mod["main"] = f
    mod = partition_for_dnnl(mod)
    for mode in ["graph", "vm"]:
        with tvm.transform.PassContext(opt_level=3):
            func = relay.create_executor(mode, mod=mod, device=tvm.cpu(0), target=target).evaluate()
        if run_module:
            # Smoke-run only; the numeric output is not validated here.
            func(x_data)
def test_multiple_outputs(run_module, dtype="float32"):
    """A tuple-returning graph keeps both outputs through partitioning."""

    def build():
        lhs = relay.var("x", shape=(1, 3), dtype=dtype)
        rhs = relay.var("y", shape=(1, 3), dtype=dtype)
        first = relay.add(lhs, rhs)
        second = relay.add(first, rhs)
        mod = tvm.IRModule.from_expr(relay.Tuple((first, second)))
        return mod, {"x": (1, 3), "y": (1, 3)}, []

    run_and_verify_func(build(), run_module=run_module, dtype=dtype)
def test_elementwise(run_module, dtype="float32"):
    """Each supported unary elementwise op as a standalone graph."""
    x_shape = (1, 8, 3, 3)
    unary_ops = (
        relay.abs,
        relay.exp,
        relay.log,
        relay.sqrt,
        relay.nn.relu,
        relay.tanh,
        relay.sigmoid,
    )
    for op in unary_ops:
        data = relay.var("x", shape=x_shape, dtype=dtype)
        mod = tvm.IRModule.from_expr(op(data))
        run_and_verify_func((mod, {"x": x_shape}, []), run_module=run_module)
def test_clip(run_module, dtype="float32"):
    """Single clip op with non-trivial bounds."""
    x_shape = (1, 8, 3, 3)
    data = relay.var("x", shape=x_shape, dtype=dtype)
    mod = tvm.IRModule.from_expr(relay.clip(data, a_min=-0.2, a_max=0.4))
    run_and_verify_func((mod, {"x": x_shape}, []), run_module=run_module)
def test_leaky_relu(run_module, dtype="float32"):
    """Single leaky_relu op."""
    x_shape = (1, 8, 3, 3)
    data = relay.var("x", shape=x_shape, dtype=dtype)
    mod = tvm.IRModule.from_expr(relay.nn.leaky_relu(data, alpha=0.1))
    run_and_verify_func((mod, {"x": x_shape}, []), run_module=run_module)
def test_softmax(run_module, dtype="float32"):
    """softmax across several axes, including negative axis values."""

    def build(x_shape, axis):
        data = relay.var("x", shape=x_shape, dtype=dtype)
        mod = tvm.IRModule.from_expr(relay.nn.softmax(data, axis=axis))
        return mod, {"x": x_shape}, []

    for shape, axis in [((1, 1000), 1), ((1, 1000), -1), ((1, 3, 4), -2), ((1, 3, 4), 1)]:
        run_and_verify_func(build(shape, axis), run_module=run_module)
def test_conv1d(run_module, dtype="float32"):
    """Plain conv1d with default and explicitly sized input/kernel shapes."""
    expr, dic, param_lst = get_conv1d(channels=16, dtype=dtype)
    run_and_verify_func(
        (tvm.IRModule.from_expr(expr), dic, param_lst), run_module=run_module, dtype=dtype
    )
    expr, dic, param_lst = get_conv1d((1, 32, 224), (16, 32, 3), dtype=dtype)
    run_and_verify_func(
        (tvm.IRModule.from_expr(expr), dic, param_lst), run_module=run_module, dtype=dtype
    )
def test_conv1d_pattern(run_module, dtype="float32"):
    """conv1d and conv1d+bias fused with each supported activation."""
    x_shape = (1, 3, 224)
    k_shape = (16, 3, 3)
    for act in [None, "relu", "tanh", "sigmoid"]:
        for builder in (get_conv1d, get_conv1d_bias):
            expr, dic, param_lst = builder(x_shape, k_shape, activation=act, dtype=dtype)
            mod = tvm.IRModule.from_expr(expr)
            run_and_verify_func((mod, dic, param_lst), run_module=run_module, dtype=dtype)
def test_conv2d(run_module, dtype="float32"):
    """Sweep conv2d over group / padding / stride / dilation combinations."""
    x_shape = (1, 32, 8, 8)
    group_cases = [((16, 32, 3, 3), 1), ((32, 1, 3, 3), 32), ((32, 2, 3, 3), 16)]
    for (k_shape, groups), padding, strides, dilation in itertools.product(
        group_cases, [(0, 0), (1, 1)], [(1, 1), (2, 2)], [(1, 1), (2, 2)]
    ):
        expr, dic, param_lst = get_conv2d(
            x_shape=x_shape,
            k_shape=k_shape,
            groups=groups,
            padding=padding,
            strides=strides,
            dilation=dilation,
            dtype=dtype,
        )
        mod = tvm.IRModule.from_expr(expr)
        run_and_verify_func((mod, dic, param_lst), run_module=run_module, dtype=dtype)
def test_conv2d_weights_const(run_module, dtype="float32"):
    """conv2d with constant weights for both large and small channel counts."""
    for x_shape, k_shape in [((1, 32, 8, 8), (16, 32, 3, 3)), ((1, 3, 8, 8), (16, 3, 3, 3))]:
        expr, dic, param_lst = get_conv2d_weights_const(x_shape, k_shape, dtype=dtype)
        mod = tvm.IRModule.from_expr(expr)
        run_and_verify_func((mod, dic, param_lst), run_module=run_module, dtype=dtype)
def test_conv2d_pattern(run_module, dtype="float32"):
    """Fused conv2d patterns: activations, bias, and bias+bn+relu.

    Fix: the conv2d_bias_bn_relu case was previously duplicated verbatim;
    it is now exercised once.
    """
    x_shape = (1, 32, 8, 8)
    k_shape = (16, 32, 3, 3)
    activation_lst = [None, "relu", "tanh", "sigmoid", "clip", "swish", "gelu", "mish"]
    for a in activation_lst:
        conv2d, dic, param_lst = get_conv2d(x_shape, k_shape, activation=a, dtype=dtype)
        conv2d = tvm.IRModule.from_expr(conv2d)
        config = conv2d, dic, param_lst
        run_and_verify_func(config, run_module=run_module, dtype=dtype)
        conv2d_bias, dic, param_lst = get_conv2d_bias(x_shape, k_shape, activation=a, dtype=dtype)
        conv2d_bias = tvm.IRModule.from_expr(conv2d_bias)
        config = conv2d_bias, dic, param_lst
        run_and_verify_func(config, run_module=run_module, dtype=dtype)
    conv2d_bias_bn_relu, dic, param_lst = get_conv2d_bias_bn_relu(x_shape, k_shape, dtype=dtype)
    conv2d_bias_bn_relu = tvm.IRModule.from_expr(conv2d_bias_bn_relu)
    config = conv2d_bias_bn_relu, dic, param_lst
    run_and_verify_func(config, run_module=run_module, dtype=dtype)
def test_conv2d_bias_sum_relu(run_module, dtype="float32"):
    """conv2d+bias+bn summed with a second conv2d output, then relu."""
    x_shape = (1, 32, 8, 8)
    k_shape = (16, 32, 3, 3)

    def get_conv2d_bn_sum_relu(x_shape, k_shape, dtype="float32"):
        # First branch: conv2d (const weights) + bias, then batch_norm with
        # identity statistics (zero mean, unit variance).
        out, dic, param_lst = get_conv2d_bias(x_shape=x_shape, k_shape=k_shape, dtype=dtype)
        beta = relay.const(np.zeros(k_shape[0]).astype(dtype))
        gamma = relay.const(np.ones(k_shape[0]).astype(dtype))
        moving_mean = relay.const(np.zeros(k_shape[0]).astype(dtype))
        moving_var = relay.const(np.ones(k_shape[0]).astype(dtype))
        out, _, _ = relay.nn.batch_norm(
            out,
            gamma=gamma,
            beta=beta,
            moving_mean=moving_mean,
            moving_var=moving_var,
            axis=1,
            center=True,
            scale=True,
            epsilon=1e-5,
        )
        # Second branch: an independent conv2d whose output feeds the sum.
        sum_in = relay.var("sum_in", shape=x_shape, dtype=dtype)
        kernel = relay.const(np.random.randint(0, 1, k_shape).astype(dtype))
        conv_sum = relay.nn.conv2d(
            sum_in,
            kernel,
            channels=k_shape[0],
            kernel_size=k_shape[2:4],
            groups=1,
            padding=(0, 0),
            strides=(1, 1),
            dilation=(1, 1),
        )
        # sum over two conv2d outputs to meet inplace condition
        out = relay.add(out, conv_sum)
        dic["sum_in"] = x_shape
        return relay.nn.relu(out), dic, param_lst

    conv2d_bn_sum_relu, dic, param_lst = get_conv2d_bn_sum_relu(x_shape, k_shape, dtype=dtype)
    conv2d_bn_sum_relu = tvm.IRModule.from_expr(conv2d_bn_sum_relu)
    config = conv2d_bn_sum_relu, dic, param_lst
    run_and_verify_func(config, run_module=run_module, dtype=dtype)
def test_dense_bias_sum(run_module, dtype="float32"):
    """dense+bias summed with a second dense output (in-place sum pattern).

    Fix: removed leftover debug print statements ("hebi-dbg:" + module dump).
    """
    x_shape = (4, 32)
    k_shape = (16, 32)

    def get_dense_bias_sum(x_shape, k_shape, dtype="float32"):
        out, dic, param_lst = get_dense_bias(x_shape=x_shape, k_shape=k_shape, dtype=dtype)
        sum_in = relay.var("sum_in", shape=x_shape, dtype=dtype)
        ker = relay.var("ker", shape=(k_shape), dtype=dtype)
        dense_sum = relay.nn.dense(sum_in, ker, units=k_shape[0])
        # sum over two dense outputs to meet inplace condition
        out = relay.add(out, dense_sum)
        dic["sum_in"] = x_shape
        dic["ker"] = k_shape
        param_lst += ["ker"]
        return out, dic, param_lst

    dense_bias_sum, dic, param_lst = get_dense_bias_sum(x_shape, k_shape, dtype=dtype)
    dense_bias_sum = tvm.IRModule.from_expr(dense_bias_sum)
    config = dense_bias_sum, dic, param_lst
    run_and_verify_func(config, run_module=run_module, dtype=dtype)
def test_conv2d_transpose(run_module, dtype="float32"):
    """Sweep conv2d_transpose over group / padding / stride combinations."""
    x_shape = (1, 32, 8, 8)
    group_cases = [((32, 16, 3, 3), 1), ((32, 1, 3, 3), 32), ((32, 4, 3, 3), 16)]
    for (k_shape, groups), padding, strides in itertools.product(
        group_cases, [(0, 0), (1, 1)], [(1, 1), (2, 2)]
    ):
        expr, dic, param_lst = get_conv2d_transpose(
            x_shape=x_shape,
            k_shape=k_shape,
            groups=groups,
            padding=padding,
            strides=strides,
            dtype=dtype,
        )
        mod = tvm.IRModule.from_expr(expr)
        run_and_verify_func((mod, dic, param_lst), run_module=run_module, dtype=dtype)
def test_conv2d_transpose_pattern(run_module, dtype="float32"):
    """conv2d_transpose and conv2d_transpose+bias fused with each activation."""
    for act in [None, "relu", "tanh", "sigmoid", "clip", "swish", "gelu", "mish"]:
        for builder in (get_conv2d_transpose, get_conv2d_transpose_bias):
            expr, dic, param_lst = builder(activation=act, dtype=dtype)
            mod = tvm.IRModule.from_expr(expr)
            run_and_verify_func((mod, dic, param_lst), run_module=run_module, dtype=dtype)
def test_conv3d(run_module, dtype="float32"):
    """conv3d with default, asymmetric-padding, and small-channel configurations."""
    cases = [
        dict(),
        dict(padding=(0, 0, 0, 1, 1, 1)),
        dict(x_shape=(1, 3, 8, 8, 8), k_shape=(16, 3, 3, 3, 3)),
    ]
    for kwargs in cases:
        expr, dic, param_lst = get_conv3d(dtype=dtype, **kwargs)
        mod = tvm.IRModule.from_expr(expr)
        run_and_verify_func((mod, dic, param_lst), run_module=run_module, dtype=dtype)
def test_conv3d_pattern(run_module, dtype="float32"):
    """conv3d and conv3d+bias fused with each supported activation."""
    for act in [None, "relu", "tanh", "sigmoid", "clip", "swish", "gelu", "mish"]:
        for builder in (get_conv3d, get_conv3d_bias):
            expr, dic, param_lst = builder(activation=act, dtype=dtype)
            mod = tvm.IRModule.from_expr(expr)
            run_and_verify_func((mod, dic, param_lst), run_module=run_module, dtype=dtype)
def test_conv3d_transpose(run_module, dtype="float32"):
    """conv3d_transpose with default strides, strided, and strided+output_padding."""
    cases = [
        dict(),
        dict(strides=(2, 2, 2)),
        dict(strides=(2, 2, 2), output_padding=(1, 1, 1)),
    ]
    for kwargs in cases:
        expr, dic, param_lst = get_conv3d_transpose(dtype=dtype, **kwargs)
        mod = tvm.IRModule.from_expr(expr)
        run_and_verify_func((mod, dic, param_lst), run_module=run_module, dtype=dtype)
def test_conv3d_transpose_pattern(run_module, dtype="float32"):
    """conv3d_transpose and conv3d_transpose+bias fused with each activation."""
    for act in [None, "relu", "tanh", "sigmoid", "clip", "swish", "gelu", "mish"]:
        for builder in (get_conv3d_transpose, get_conv3d_transpose_bias):
            expr, dic, param_lst = builder(activation=act, dtype=dtype)
            mod = tvm.IRModule.from_expr(expr)
            run_and_verify_func((mod, dic, param_lst), run_module=run_module, dtype=dtype)
def test_dense(run_module, dtype="float32"):
    """dense with a regular kernel, a single-row kernel, and a gelu epilogue."""
    x_shape = (1, 16)
    k_shape = (32, 16)
    cases = [
        dict(x_shape=x_shape, k_shape=k_shape),
        dict(x_shape=x_shape, k_shape=(1, 16)),
        dict(x_shape=x_shape, k_shape=k_shape, activation="gelu"),
    ]
    for kwargs in cases:
        expr, dic, param_lst = get_dense(dtype=dtype, **kwargs)
        mod = tvm.IRModule.from_expr(expr)
        run_and_verify_func((mod, dic, param_lst), run_module=run_module, dtype=dtype)
def test_dense_pattern(run_module, dtype="float32"):
    """dense, dense+bias, and dense+bias+gelu fusion patterns."""
    x_shape = (1, 16)
    k_shape = (32, 16)
    expr, dic, param_lst = get_dense(x_shape, k_shape, dtype=dtype)
    run_and_verify_func(
        (tvm.IRModule.from_expr(expr), dic, param_lst), run_module=run_module, dtype=dtype
    )
    for act in (None, "gelu"):
        expr, dic, param_lst = get_dense_bias(x_shape, k_shape, activation=act, dtype=dtype)
        run_and_verify_func(
            (tvm.IRModule.from_expr(expr), dic, param_lst), run_module=run_module, dtype=dtype
        )
def test_pool2d(run_module, dtype="float32"):
    """avg/max pool2d across pool-size/stride/padding combos, incl. asymmetric padding."""

    def get_graph(
        op,
        x_shape=(1, 3, 32, 32),
        pool_size=(2, 2),
        strides=(2, 2),
        padding=(0, 0),
        ceil_mode=False,
        count_include_pad=None,
    ):
        # count_include_pad is only passed when set (max_pool2d has no such arg).
        x = relay.var("x", shape=(x_shape), dtype=dtype)
        if count_include_pad is not None:
            out = op(
                x,
                pool_size=pool_size,
                strides=strides,
                padding=padding,
                ceil_mode=ceil_mode,
                count_include_pad=count_include_pad,
            )
        else:
            out = op(
                x,
                pool_size=pool_size,
                strides=strides,
                padding=padding,
                ceil_mode=ceil_mode,
            )
        out = tvm.IRModule.from_expr(out)
        return out, {"x": x_shape}, []

    for pool_size in [(2, 2), (3, 3)]:
        for strides in [(1, 1), (2, 2)]:
            for padding in [(0, 0), (1, 1), (0, 0, 1, 1)]:
                for ceil_mode in [False]:
                    # Skip "the padding size is larger than or equal to the filter size for exclusive-counting pooling"
                    if pool_size == (2, 2) and padding == (0, 0, 1, 1):
                        continue
                    for count_include_pad in [False, True]:
                        # Skip "inclusive-counted blended or average pooling is not supported in combination with asymmetric padding"
                        if count_include_pad and (padding == (0, 0, 1, 1) or strides == (2, 2)):
                            continue
                        run_and_verify_func(
                            get_graph(
                                relay.nn.avg_pool2d,
                                pool_size=pool_size,
                                strides=strides,
                                padding=padding,
                                ceil_mode=ceil_mode,
                                count_include_pad=count_include_pad,
                            ),
                            run_module=run_module,
                        )
                        run_and_verify_func(
                            get_graph(
                                relay.nn.max_pool2d,
                                pool_size=pool_size,
                                strides=strides,
                                padding=padding,
                                ceil_mode=ceil_mode,
                            ),
                            run_module=run_module,
                        )
def test_global_avg_pooling2d(run_module, dtype="float32"):
    """Single global average pooling op."""
    x_shape = (1, 3, 32, 32)
    data = relay.var("x", shape=x_shape, dtype=dtype)
    mod = tvm.IRModule.from_expr(relay.nn.global_avg_pool2d(data))
    run_and_verify_func((mod, {"x": x_shape}, []), run_module=run_module)
def test_pool3d(run_module, dtype="float32"):
    """avg/max pool3d, incl. asymmetric padding and unit strides for max_pool3d."""

    def get_graph(
        op,
        x_shape=(1, 3, 8, 32, 32),
        pool_size=(2, 2, 2),
        strides=(2, 2, 2),
        padding=(0, 0, 0),
        ceil_mode=False,
        count_include_pad=None,
        dtype="float32",
    ):
        # count_include_pad is only passed when set (max_pool3d has no such arg).
        x = relay.var("x", shape=(x_shape), dtype=dtype)
        if count_include_pad is not None:
            out = op(
                x,
                pool_size=pool_size,
                strides=strides,
                padding=padding,
                ceil_mode=ceil_mode,
                count_include_pad=count_include_pad,
            )
        else:
            out = op(
                x,
                pool_size=pool_size,
                strides=strides,
                padding=padding,
                ceil_mode=ceil_mode,
            )
        out = tvm.IRModule.from_expr(out)
        return out, {"x": x_shape}, []

    run_and_verify_func(get_graph(relay.nn.avg_pool3d), run_module=run_module)
    run_and_verify_func(get_graph(relay.nn.max_pool3d), run_module=run_module)
    run_and_verify_func(
        get_graph(relay.nn.max_pool3d, padding=(0, 0, 0, 1, 1, 1)), run_module=run_module
    )
    run_and_verify_func(get_graph(relay.nn.max_pool3d, strides=(1, 1, 1)), run_module=run_module)
def test_prune_dnnl_subgraph(run_module):
    """In this test, OP "add" should be offloaded from dnnl codegen."""

    def get_graph():
        x1 = relay.var("x1", shape=(1, 32, 56, 56))
        x2 = relay.var("x2", shape=(1, 32, 56, 56))
        bias = relay.var("bias", shape=(32,))
        weight = relay.var("weight", shape=(32, 32, 3, 3))
        y = relay.nn.conv2d(
            x1,
            weight,
            channels=32,
            kernel_size=(3, 3),
            padding=(1, 1),
        )
        y = relay.nn.bias_add(y, bias)
        y = relay.nn.relu(y)
        y = relay.nn.global_max_pool2d(y)
        # Standalone add on the pooled output: expected to be pruned out of
        # the DNNL partition, leaving exactly one DNNL subgraph.
        y = relay.add(y, x2)
        dic = {
            "x1": (1, 32, 56, 56),
            "x2": (1, 32, 56, 56),
            "weight": (32, 32, 3, 3),
            "bias": (32,),
        }
        param_lst = ["weight", "bias"]
        out = tvm.IRModule.from_expr(y)
        return out, dic, param_lst

    run_and_verify_func(get_graph(), subgraph_num=1, run_module=run_module, test_bf16=False)
def test_layer_norm(run_module, dtype="float32"):
    """Run a layer-norm graph through DNNL and compare with the reference."""
    shape = (1, 49, 64)
    expr, shapes, params = get_layer_norm(shape, dtype=dtype)
    mod = tvm.IRModule.from_expr(expr)
    run_and_verify_func((mod, shapes, params), run_module=run_module, dtype=dtype)
def test_rewrite_dense_bias_gelu_reshape_last(run_module, dtype="float32"):
    """dense + bias (+ optional gelu) with a trailing reshape should end up in
    a single DNNL region after partitioning."""
    def get_graph(act=None):
        x_shape = (1, 16)
        k_shape = (32, 16)
        dense_bias, dic, param_lst = get_dense_bias(
            x_shape, k_shape, activation=act, has_reshape=True, use_add=True, dtype=dtype
        )
        dense_bias = tvm.IRModule.from_expr(dense_bias)
        # Partition without pruning so DNNL regions can be counted directly.
        processed_dense_bias = partition_for_dnnl(
            dense_bias, params=None, alter_layout=False, prune_subgraphs=False
        )
        # Exactly one DNNL function is expected after the rewrite.
        check_dnnl_used(processed_dense_bias, 1)
        return dense_bias, dic, param_lst
    run_and_verify_func(
        get_graph("gelu"), subgraph_num=1, run_module=run_module, dtype=dtype, test_bf16=False
    )
    run_and_verify_func(
        get_graph(), subgraph_num=1, run_module=run_module, dtype=dtype, test_bf16=False
    )
def test_resnetv1_rewrite(run_module, dtype="float32"):
    """Build a ResNet-v1-style pair of residual units and verify the DNNL rewrite
    produces the same results as the reference build."""
    def get_graph():
        data_shape = (1, 256, 56, 56)
        # (OC, IC, KH, KW) for each of the seven convolutions below.
        w_shapes = [
            (64, 256, 1, 1),
            (64, 64, 3, 3),
            (256, 64, 1, 1),
            (128, 256, 1, 1),
            (128, 128, 3, 3),
            (512, 128, 1, 1),
            (512, 256, 1, 1),
        ]
        x = relay.var("x", shape=data_shape, dtype=dtype)
        # NOTE(review): "wights" is a typo for "weights" (local name only).
        # randint(0, 1, ...) always yields zeros, so weights/biases are all zero.
        wights = [relay.const(np.random.randint(0, 1, w).astype(dtype)) for w in w_shapes]
        biases = [relay.const(np.random.randint(0, 1, w[0]).astype(dtype)) for w in w_shapes]
        # First residual unit: conv1 -> conv2 -> conv3, skip-add with x, relu.
        conv1 = relay.nn.conv2d(
            x,
            wights[0],
            channels=w_shapes[0][0],
            kernel_size=w_shapes[0][2:4],
            padding=(w_shapes[0][2] // 2, w_shapes[0][3] // 2),
        )
        conv1 = relay.nn.bias_add(conv1, biases[0])
        conv1 = relay.nn.relu(conv1)
        conv2 = relay.nn.conv2d(
            conv1,
            wights[1],
            channels=w_shapes[1][0],
            kernel_size=w_shapes[1][2:4],
            padding=(w_shapes[1][2] // 2, w_shapes[1][3] // 2),
        )
        conv2 = relay.nn.bias_add(conv2, biases[1])
        conv2 = relay.nn.relu(conv2)
        conv3 = relay.nn.conv2d(
            conv2,
            wights[2],
            channels=w_shapes[2][0],
            kernel_size=w_shapes[2][2:4],
            padding=(w_shapes[2][2] // 2, w_shapes[2][3] // 2),
        )
        conv3 = relay.nn.bias_add(conv3, biases[2])
        conv3 = relay.add(conv3, x)
        conv3 = relay.nn.relu(conv3)
        # Second (downsampling) unit: strided left branch of three convs plus a
        # strided 1x1 projection on the right, joined by add + relu.
        left_conv4 = relay.nn.conv2d(
            conv3,
            wights[3],
            channels=w_shapes[3][0],
            strides=(2, 2),
            kernel_size=w_shapes[3][2:4],
            padding=(w_shapes[3][2] // 2, w_shapes[3][3] // 2),
        )
        left_conv4 = relay.nn.bias_add(left_conv4, biases[3])
        left_conv4 = relay.nn.relu(left_conv4)
        left_conv5 = relay.nn.conv2d(
            left_conv4,
            wights[4],
            channels=w_shapes[4][0],
            kernel_size=w_shapes[4][2:4],
            padding=(w_shapes[4][2] // 2, w_shapes[4][3] // 2),
        )
        left_conv5 = relay.nn.bias_add(left_conv5, biases[4])
        left_conv5 = relay.nn.relu(left_conv5)
        left_conv6 = relay.nn.conv2d(
            left_conv5,
            wights[5],
            channels=w_shapes[5][0],
            kernel_size=w_shapes[5][2:4],
            padding=(w_shapes[5][2] // 2, w_shapes[5][3] // 2),
        )
        left_conv6 = relay.nn.bias_add(left_conv6, biases[5])
        right_conv7 = relay.nn.conv2d(
            conv3,
            wights[6],
            channels=w_shapes[6][0],
            strides=(2, 2),
            kernel_size=w_shapes[6][2:4],
            padding=(w_shapes[6][2] // 2, w_shapes[6][3] // 2),
        )
        right_conv7 = relay.nn.bias_add(right_conv7, biases[6])
        out = relay.add(left_conv6, right_conv7)
        out = relay.nn.relu(out)
        dic = {"x": data_shape}
        param_lst = []
        return out, dic, param_lst
    net, dic, param_lst = get_graph()
    net = tvm.IRModule.from_expr(net)
    config = net, dic, param_lst
    run_and_verify_func(config, run_module=run_module, dtype=dtype)
def test_fuse_pad_avg_pool(run_module, dtype="float32"):
    """nn.pad followed by avg_pool2d should be handled correctly by DNNL."""
    shape = (1, 768, 17, 17)
    data = relay.var("x", shape=shape, dtype=dtype)
    padded = relay.nn.pad(data, pad_width=[[0, 0], [0, 0], [1, 1], [1, 1]])
    pooled = relay.nn.avg_pool2d(padded, pool_size=[3, 3])
    mod = tvm.IRModule.from_expr(pooled)
    run_and_verify_func((mod, {"x": shape}, []), run_module=run_module, dtype=dtype)
def permute_shape(shape, l_from="", l_to=""):
    """Reorder ``shape`` from layout ``l_from`` to layout ``l_to``.

    Each label of ``l_to`` (e.g. "N", "C", "H", "W") is looked up in
    ``l_from`` and the matching dimension of ``shape`` is emitted in the
    new order.

    Raises
    ------
    ValueError
        If a label of ``l_to`` does not occur in ``l_from``.  (The previous
        ``str.find`` implementation returned -1 in that case and silently
        picked ``shape[-1]`` instead of failing.)
    """
    return [shape[l_from.index(label)] for label in l_to]
def expand_dim(shape, rank=0):
    """Append trailing unit dimensions to a length-1 shape up to ``rank`` dims."""
    assert len(shape) == 1
    trailing = [1] * (rank - 1)
    return shape + trailing
def filler_uni(low=0, high=1):
    """Return a filler callable mapping a shape to uniform samples in [low, high)."""
    return lambda shape: np.random.uniform(low, high, shape)
class QnnBuilder:
    """Helper for assembling small (Q)NN test graphs.

    Records every non-constant argument created via :py:meth:`arg` (both the
    relay.Var and its numpy value) so :py:meth:`finalize` can wrap the whole
    expression into a type-inferred IRModule plus an input-value dict.
    """

    def __init__(self, qnn_profile=None):
        # name -> numpy value for every non-const argument created via arg()
        self._args = {}
        # relay.Var objects in creation order; they become the function params
        self._args_op = []
        self._qp = qnn_profile

    def arg(self, shape=(), dtype="float32", filler=filler_uni(), is_const=True):
        """Create a graph argument filled by ``filler``.

        ``filler`` is either a scalar (broadcast with np.full) or a callable
        mapping shape -> ndarray.  Constants become relay.const; otherwise a
        relay.var is created and its value recorded for execution.
        NOTE: the default shape used to be a mutable list literal (``[]``);
        an empty tuple is equivalent for np.full/relay.var and safe.
        """
        if isinstance(filler, (int, float)):
            value = np.full(shape, filler).astype(dtype)
        else:
            value = filler(shape).astype(dtype)
        if is_const:
            res = relay.const(value, dtype=dtype)
        else:
            name = f"in_{len(self._args)}"
            res = relay.var(name, shape=shape, dtype=dtype)
            self._args[name] = value
            self._args_op.append(res)
        return res

    def make_zp(self, mean_val, num_ch=1, dispersion=0.2):
        """Zero-point constant: scalar, or per-channel values around ``mean_val``."""
        if num_ch == 1:
            return self.arg(shape=[], dtype="int32", filler=mean_val)
        low = int(mean_val * (1 - dispersion))
        high = int(mean_val * (1 + dispersion))
        return self.arg(shape=[num_ch], dtype="int32", filler=filler_uni(low, high))

    def make_scl(self, mean_val, num_ch=1, dispersion=0.2):
        """Scale constant: scalar, or per-channel values around ``mean_val``."""
        if num_ch == 1:
            return self.arg(shape=[], dtype="float32", filler=mean_val)
        low = mean_val * (1 - dispersion)
        high = mean_val * (1 + dispersion)
        return self.arg(shape=[num_ch], dtype="float32", filler=filler_uni(low, high))

    def make_zp_and_scl(self, name, num_ch=1, dispersion=0.2):
        """Build (zero-point, scale) for quantization-profile entry ``name``
        (reads the ``{name}_zp``, ``{name}_scl`` and ``{name}_pc`` fields)."""
        is_per_channel = getattr(self._qp, f"{name}_pc")
        zp_val = getattr(self._qp, f"{name}_zp")
        scl_val = getattr(self._qp, f"{name}_scl")
        zp = self.make_zp(zp_val, num_ch if is_per_channel else 1, dispersion)
        scl = self.make_scl(scl_val, num_ch if is_per_channel else 1, dispersion)
        return zp, scl

    def finalize(self, op):
        """Wrap ``op`` into a type-inferred IRModule; return (module, input values)."""
        func = relay.Function(self._args_op, op)
        mod = tvm.IRModule.from_expr(func)
        mod = relay.transform.InferType()(mod)
        return mod, self._args
def check_fully_annotated(mod, desired_compiler):
    """Assert that every call in ``mod["main"]`` was offloaded to ``desired_compiler``."""
    # Calls to partitioned functions tagged with the desired "Compiler" attr
    # land in matched_ops; any remaining plain-operator call lands in other_ops.
    matched_ops = []
    other_ops = []
    def _visit(node):
        if isinstance(node, tvm.relay.Call):
            op = node.op
            if isinstance(op, relay.GlobalVar):
                func = mod[op]
                if "Compiler" in func.attrs and func.attrs["Compiler"] == desired_compiler:
                    matched_ops.append(op)
                    return
            else:
                other_ops.append(op)
    tvm.relay.analysis.post_order_visit(mod["main"].body, _visit)
    assert len(other_ops) == 0 and len(matched_ops) != 0, "Model is not fully DNNL compiled"
def check_result(
    mod,
    ref_mod,
    map_inputs,
    tol=1e-5,
    target="llvm",
    device=tvm.cpu(),
    params=None,
    ref_result=None,
    atol=None,
    desired_compiler="dnnl",
):
    """Compile and run ``mod`` on both the VM and the graph executor and
    compare against a reference (``ref_mod``'s output, or an explicit
    ``ref_result`` when provided).

    When ``desired_compiler`` is set, the partitioned module must be fully
    annotated for that external codegen before anything is executed.
    """
    if atol is None:
        atol = tol
    if desired_compiler is not None:
        check_fully_annotated(mod, desired_compiler)
    if ref_result is None:
        # Run the reference result
        relay.backend.te_compiler.get().clear()
        with tvm.transform.PassContext(opt_level=3):
            ref_lib = relay.build(ref_mod, target=target, params=params)
        ref_rt_mod = tvm.contrib.graph_executor.GraphModule(ref_lib["default"](device))
        for name, data in map_inputs.items():
            ref_rt_mod.set_input(name, data)
        ref_rt_mod.run()
        out = ref_rt_mod.get_output(0)
        ref_result = out.numpy()
    def check_vm_result():
        # Round-trip the executable through save/load to also cover serialization.
        relay.backend.te_compiler.get().clear()
        with tvm.transform.PassContext(opt_level=3):
            exe = relay.vm.compile(mod, target=target, params=params)
        code, lib = exe.save()
        exe = tvm.runtime.vm.Executable.load_exec(code, lib)
        vm = tvm.runtime.vm.VirtualMachine(exe, device)
        output = vm.run(**map_inputs)
        tvm.testing.assert_allclose(output.numpy(), ref_result, rtol=tol, atol=atol)
    def check_graph_executor_result():
        relay.backend.te_compiler.get().clear()
        with tvm.transform.PassContext(opt_level=3):
            lib = relay.build(mod, target=target, params=params)
        rt_mod = tvm.contrib.graph_executor.GraphModule(lib["default"](device))
        rt_mod.run(**map_inputs)
        output = rt_mod.get_output(0)
        tvm.testing.assert_allclose(output.numpy(), ref_result, rtol=tol, atol=atol)
    check_vm_result()
    check_graph_executor_result()
# Convolution test-case descriptor: input SHAPE, kernel size KER, strides STR,
# padding PAD, dilation DEL, output channels OC, groups GR and the data/kernel
# layout strings.
ConvProfile = collections.namedtuple(
    "ConvProfile",
    [
        "SHAPE",
        "KER",
        "STR",
        "PAD",
        "DEL",
        "OC",
        "GR",
        "D_LAYOUT",
        "K_LAYOUT",
    ],
)
base_conv = ConvProfile(
    SHAPE=[1, 8, 5, 5],
    KER=[3, 3],
    STR=[1, 1],
    PAD=[1, 1],
    DEL=[1, 1],
    OC=16,
    GR=1,
    D_LAYOUT="NCHW",
    K_LAYOUT="OIHW",
)
# Variants derived from base_conv: NHWC layout, dilated, pad-free,
# grouped (GR=2) and depthwise (GR == channels).
base_conv_nhwc = base_conv._replace(D_LAYOUT="NHWC", K_LAYOUT="HWIO")
base_conv_dilated = base_conv._replace(PAD=[2, 2], DEL=[2, 2])
base_conv_no_pad = base_conv._replace(PAD=[0, 0])
base_conv_no_pad_nhwc = base_conv_no_pad._replace(D_LAYOUT="NHWC", K_LAYOUT="HWIO")
base_conv_group_no_pad = base_conv_no_pad._replace(GR=2)
base_conv_dw_no_pad = base_conv_no_pad._replace(SHAPE=[1, 16, 5, 5], GR=16)
# Dense test-case descriptor: batch N, input channels IC, output channels OC.
DenseProfile = collections.namedtuple("DenseProfile", ["N", "IC", "OC"])
base_dense_profile = DenseProfile(N=2, IC=10, OC=16)
# Which graph arguments are relay constants (True), relay vars (False),
# or absent entirely (None) — consumed by QnnBuilder.arg via is_const.
ArgConstConfig = collections.namedtuple("ArgConstConfig", ["Data", "Weights", "Bias", "Sum"])
acp_regular = ArgConstConfig(Data=False, Weights=True, Bias=True, Sum=None)
acp_no_bias = ArgConstConfig(Data=False, Weights=True, Bias=None, Sum=None)
acp_with_sum = ArgConstConfig(Data=False, Weights=True, Bias=True, Sum=False)
acp_no_bias_with_sum = ArgConstConfig(Data=False, Weights=True, Bias=None, Sum=False)
# Quantization parameters: *_zp zero point, *_scl scale, *_pc per-channel flag,
# for data (d), kernel (k), requantize (rq), sum input (sum) and output (o).
QuantizationConfig = collections.namedtuple(
    "QuantizationConfig",
    [
        "d_zp",
        "d_scl",
        "d_pc",
        "k_zp",
        "k_scl",
        "k_pc",
        "rq_zp",
        "rq_scl",
        "rq_pc",
        "sum_zp",
        "sum_scl",
        "sum_pc",
        "o_zp",
        "o_scl",
        "o_pc",
    ],
)
qp_regular = QuantizationConfig(
    d_zp=0,
    d_scl=0.2,
    d_pc=False,
    k_zp=0,
    k_scl=0.1,
    k_pc=False,
    rq_zp=30,
    rq_scl=0.2,
    rq_pc=False,
    sum_zp=15,
    sum_scl=0.3,
    sum_pc=False,
    o_zp=5,
    o_scl=0.2,
    o_pc=False,
)
# Same as qp_regular but with a non-zero data zero-point (asymmetric input).
qp_asymmetric_data = qp_regular._replace(
    d_zp=3, rq_zp=10, rq_scl=0.1, sum_zp=15, sum_scl=0.3, o_zp=4
)
# Named (conv profile, arg-const config, quantization config) triples that
# drive test_qnn_conv2d below.
qnn_conv_profiles = tvm.testing.parameter(
    by_dict={
        # Pattern qnn.conv2d + qnn.requantize
        "Base": (base_conv, acp_regular, qp_regular),
        "NHWC": (base_conv_nhwc, acp_regular, qp_regular),
        # Asymmetric input. NOTE: No pad! Input ZP is not compatible with padding
        "Group": (base_conv_group_no_pad, acp_regular, qp_asymmetric_data),
        "DW": (base_conv_dw_no_pad, acp_regular, qp_asymmetric_data),
        "NoBias": (base_conv, acp_no_bias, qp_regular),
        "AsymmetricInput": (base_conv_no_pad, acp_regular, qp_asymmetric_data),
        "AsymmetricInput_NHWC": (base_conv_no_pad_nhwc, acp_regular, qp_asymmetric_data),
        # Pattern Conv2d + Requantize + Sum
        "WithSum": (base_conv_no_pad, acp_with_sum, qp_asymmetric_data),
        "WithSum_NHWC": (base_conv_no_pad_nhwc, acp_with_sum, qp_asymmetric_data),
        "WithSum_NoBias": (base_conv_no_pad, acp_no_bias_with_sum, qp_asymmetric_data),
    }
)
@has_dnnl_codegen
def test_qnn_conv2d(qnn_conv_profiles):
    """Quantized conv2d (+ bias, requantize, optional sum) compared against the
    unpartitioned reference, executed through the DNNL codegen."""
    def generate_model(p, c, q):
        np.random.seed(0)
        N, IC, IH, IW = p.SHAPE
        d_shape = p.SHAPE
        w_shape = [p.OC, IC, *p.KER]
        b_shape = [p.OC]
        # Output shape from the standard convolution size formula.
        s_shape = [
            p.SHAPE[0],
            p.OC,
            (IH + 2 * p.PAD[0] - (p.KER[0] - 1) * p.DEL[0] - 1) // p.STR[0] + 1,
            (IW + 2 * p.PAD[1] - (p.KER[1] - 1) * p.DEL[1] - 1) // p.STR[1] + 1,
        ]
        if p.GR != 1:
            w_shape[1] //= p.GR
        d_shape = permute_shape(d_shape, l_from="NCHW", l_to=p.D_LAYOUT)
        s_shape = permute_shape(s_shape, l_from="NCHW", l_to=p.D_LAYOUT)
        w_shape = permute_shape(w_shape, l_from="OIHW", l_to=p.K_LAYOUT)
        c_dim = p.D_LAYOUT.find("C")
        b_shape = expand_dim(b_shape, rank=len(p.D_LAYOUT) - c_dim)
        bld = QnnBuilder(qnn_profile=q)
        # Start build a test graph
        data = bld.arg(shape=d_shape, dtype="uint8", is_const=c.Data, filler=filler_uni(0, 20))
        d_zp, d_scl = bld.make_zp_and_scl("d", IC)
        # Convolution
        wgh = bld.arg(shape=w_shape, dtype="int8", is_const=c.Weights, filler=filler_uni(-20, 20))
        w_zp, w_scl = bld.make_zp_and_scl("k")
        op = tvm.relay.qnn.op.conv2d(
            data,
            wgh,
            d_zp,
            w_zp,
            d_scl,
            w_scl,
            kernel_size=p.KER,
            padding=p.PAD,
            strides=p.STR,
            dilation=p.DEL,
            groups=p.GR,
            channels=p.OC,
            out_dtype="int32",
            data_layout=p.D_LAYOUT,
            kernel_layout=p.K_LAYOUT,
        )
        # Optional bias
        if c.Bias is not None:
            bias = bld.arg(
                shape=b_shape, dtype="int32", is_const=c.Bias, filler=filler_uni(-50, 50)
            )
            op = tvm.relay.add(op, bias)
        # Re-quantization
        rq_in_zp = bld.make_zp(0)
        rq_in_scl = bld.make_scl(q.d_scl * q.k_scl)  # in real cases that should be a vector
        rq_out_zp, rq_out_scl = bld.make_zp_and_scl("rq")
        op = tvm.relay.qnn.op.requantize(
            op, rq_in_scl, rq_in_zp, rq_out_scl, rq_out_zp, out_dtype="int32"
        )
        op = tvm.relay.clip(
            op, a_min=0.0, a_max=255.0
        )  # pytorch frontend specific, I guess it's redundant
        op = tvm.relay.cast(op, dtype="uint8")
        # Optional sum (ResNet like)
        if c.Sum is not None:
            sum_in = bld.arg(dtype="uint8", shape=s_shape, filler=filler_uni(0, 10), is_const=c.Sum)
            lhs_zp, lhs_scl = bld.make_zp_and_scl("rq")
            rhs_zp, rhs_scl = bld.make_zp_and_scl("sum")
            out_zp, out_scl = bld.make_zp_and_scl("o")
            op = tvm.relay.qnn.op.add(op, sum_in, lhs_scl, lhs_zp, rhs_scl, rhs_zp, out_scl, out_zp)
            op = tvm.relay.clip(op, a_min=0.0, a_max=255.0)
        return bld.finalize(op)
    conv_p, arg_p, quant_p = qnn_conv_profiles
    ref_mod, args = generate_model(conv_p, arg_p, quant_p)
    mod = partition_for_dnnl(ref_mod)
    # atol=1 means int values should match with +-1 quantum value tolerance
    check_result(mod, ref_mod, args, tol=1e-10, atol=1, desired_compiler="dnnl")
# Named (conv profile, arg-const config) pairs that drive test_conv2d_plus.
conv_profiles = tvm.testing.parameter(
    by_dict={
        "Base": (base_conv, acp_regular),
        "NHWC": (base_conv_nhwc, acp_regular),
        "Group": (base_conv_group_no_pad, acp_regular),
        "DW": (base_conv_dw_no_pad, acp_regular),
        "Dilated": (base_conv_dilated, acp_regular),
    }
)
@has_dnnl_codegen
def test_conv2d_plus(conv_profiles):
    """Fp32 conv2d with optional bias/sum, partitioned for DNNL and compared
    against the unpartitioned reference."""
    def generate_model(p, c):
        np.random.seed(0)
        N, IC, IH, IW = p.SHAPE
        d_shape = p.SHAPE
        w_shape = [p.OC, IC, *p.KER]
        b_shape = [p.OC]
        # Output shape from the standard convolution size formula.
        s_shape = [
            p.SHAPE[0],
            p.OC,
            (IH + 2 * p.PAD[0] - (p.KER[0] - 1) * p.DEL[0] - 1) // p.STR[0] + 1,
            (IW + 2 * p.PAD[1] - (p.KER[1] - 1) * p.DEL[1] - 1) // p.STR[1] + 1,
        ]
        if p.GR != 1:
            w_shape[1] //= p.GR
        d_shape = permute_shape(d_shape, l_from="NCHW", l_to=p.D_LAYOUT)
        s_shape = permute_shape(s_shape, l_from="NCHW", l_to=p.D_LAYOUT)
        w_shape = permute_shape(w_shape, l_from="OIHW", l_to=p.K_LAYOUT)
        c_dim = p.D_LAYOUT.find("C")
        # b_shape = expand_dim(b_shape, rank=len(p.D_LAYOUT) - c_dim)
        bld = QnnBuilder()
        op = bld.arg(shape=d_shape, dtype="float32", is_const=c.Data)
        wgh = bld.arg(shape=w_shape, dtype="float32", is_const=c.Weights)
        op = tvm.relay.nn.conv2d(
            op,
            wgh,
            kernel_size=p.KER,
            padding=p.PAD,
            strides=p.STR,
            dilation=p.DEL,
            groups=p.GR,
            channels=p.OC,
            out_dtype="float32",
            data_layout=p.D_LAYOUT,
            kernel_layout=p.K_LAYOUT,
        )
        if c.Bias is not None:
            bias = bld.arg(shape=b_shape, dtype="float32", is_const=c.Bias)
            op = tvm.relay.nn.bias_add(op, bias, axis=c_dim)
        if c.Sum is not None:
            sum_in = bld.arg(shape=s_shape, dtype="float32", is_const=c.Sum)
            op = tvm.relay.op.add(op, sum_in)
        return bld.finalize(op)
    conv_p, arg_p = conv_profiles
    ref_mod, args = generate_model(conv_p, arg_p)
    mod = partition_for_dnnl(ref_mod, alter_layout=False)
    check_result(mod, ref_mod, args, tol=1e-5, desired_compiler="dnnl")
# Named (dense profile, arg-const config, quantization config) triples that
# drive test_qnn_dense below.
qnn_dense_profiles = tvm.testing.parameter(
    by_dict={
        # Pattern Dense + Requantize
        "Base": (base_dense_profile, acp_regular, qp_regular),
        "AsymmetricInput": (base_dense_profile, acp_regular, qp_asymmetric_data),
        # Pattern Dense + Requantize + Sum
        "AsymmetricInput_Sum": (base_dense_profile, acp_with_sum, qp_asymmetric_data),
    }
)
@has_dnnl_codegen
def test_qnn_dense(qnn_dense_profiles):
    """Quantized dense (+ bias, requantize, optional sum) compared against the
    unpartitioned reference, executed through the DNNL codegen."""
    def generate_model(p, c, q):
        np.random.seed(0)
        d_shape = [p.N, p.IC]
        w_shape = [p.OC, p.IC]
        b_shape = [p.OC]
        s_shape = [p.N, p.OC]
        bld = QnnBuilder(qnn_profile=q)
        # Start build a test graph
        data = bld.arg(shape=d_shape, dtype="uint8", is_const=c.Data, filler=filler_uni(0, 20))
        d_zp, d_scl = bld.make_zp_and_scl("d", p.IC)
        # Convolution
        wgh = bld.arg(shape=w_shape, dtype="int8", is_const=c.Weights, filler=filler_uni(-20, 20))
        w_zp, w_scl = bld.make_zp_and_scl("k")
        op = tvm.relay.qnn.op.dense(
            data, wgh, d_zp, w_zp, d_scl, w_scl, units=p.OC, out_dtype="int32"
        )
        # Optional bias
        if c.Bias is not None:
            bias = bld.arg(
                shape=b_shape, dtype="int32", is_const=c.Bias, filler=filler_uni(-50, 50)
            )
            op = tvm.relay.add(op, bias)
        # Re-quantization
        rq_in_zp = bld.make_zp(0)
        rq_in_scl = bld.make_scl(q.d_scl * q.k_scl)  # in real cases that should be a vector
        rq_out_zp, rq_out_scl = bld.make_zp_and_scl("rq")
        op = tvm.relay.qnn.op.requantize(
            op, rq_in_scl, rq_in_zp, rq_out_scl, rq_out_zp, out_dtype="int32"
        )
        op = tvm.relay.clip(
            op, a_min=0.0, a_max=255.0
        )  # pytorch frontend specific, I guess it's redundant
        op = tvm.relay.cast(op, dtype="uint8")
        # Optional sum (ResNet like)
        if c.Sum is not None:
            sum_in = bld.arg(dtype="uint8", shape=s_shape, filler=filler_uni(0, 10), is_const=c.Sum)
            lhs_zp, lhs_scl = bld.make_zp_and_scl("rq")
            rhs_zp, rhs_scl = bld.make_zp_and_scl("sum")
            out_zp, out_scl = bld.make_zp_and_scl("o")
            op = tvm.relay.qnn.op.add(op, sum_in, lhs_scl, lhs_zp, rhs_scl, rhs_zp, out_scl, out_zp)
            op = tvm.relay.clip(op, a_min=0.0, a_max=255.0)
        return bld.finalize(op)
    conv_p, arg_p, quant_p = qnn_dense_profiles
    ref_mod, args = generate_model(conv_p, arg_p, quant_p)
    mod = partition_for_dnnl(ref_mod)
    # atol=1 means int values should match with +-1 quantum value tolerance
    check_result(mod, ref_mod, args, tol=1e-10, atol=1, desired_compiler="dnnl")
# Named (dense profile, arg-const config) pairs that drive test_dense_plus.
dense_profiles = tvm.testing.parameter(
    by_dict={
        "Base": (base_dense_profile, acp_regular),
        "WithSum": (base_dense_profile, acp_with_sum),
    }
)
@has_dnnl_codegen
def test_dense_plus(dense_profiles):
    """Fp32 dense with optional bias/sum, partitioned for DNNL and checked."""

    def generate_model(p, c):
        np.random.seed(0)
        data_shape = [p.N, p.IC]
        weight_shape = [p.OC, p.IC]
        bias_shape = [p.OC]
        sum_shape = [p.N, p.OC]
        channel_axis = 1
        bld = QnnBuilder()
        op = bld.arg(shape=data_shape, dtype="float32", is_const=c.Data)
        wgh = bld.arg(shape=weight_shape, dtype="float32", is_const=c.Weights)
        op = tvm.relay.nn.dense(op, wgh, out_dtype="float32")
        if c.Bias is not None:
            bias = bld.arg(shape=bias_shape, dtype="float32", is_const=c.Bias)
            op = tvm.relay.nn.bias_add(op, bias, axis=channel_axis)
        if c.Sum is not None:
            sum_in = bld.arg(shape=sum_shape, dtype="float32", is_const=c.Sum)
            op = tvm.relay.op.add(op, sum_in)
        return bld.finalize(op)

    dense_p, arg_p = dense_profiles
    ref_mod, args = generate_model(dense_p, arg_p)
    mod = partition_for_dnnl(ref_mod)
    check_result(mod, ref_mod, args, tol=1e-5, desired_compiler="dnnl")
if __name__ == "__main__":
tvm.testing.main()
| 63,695 | 33.806557 | 133 | py |
tvm | tvm-main/tests/python/contrib/test_libtorch_ops.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import tvm.relay
from tvm.relay.op.contrib import torchop
from tvm.testing import requires_libtorch
import_torch_error = None
try:
import torch
except ImportError as e:
torch = None
import_torch_error = str(e)
@pytest.mark.skipif(torch is None, reason=f"PyTorch is not available: {import_torch_error}")
@requires_libtorch
def test_backend():
    """Embed a TorchScript function into a Relay graph via torchop and check the
    compiled result against PyTorch for several dtypes."""
    @torch.jit.script
    def script_fn(x, y):
        res = x * y
        return res
    for torch_dt, dt in (
        (torch.int32, "int32"),
        (torch.float32, "float32"),
        (torch.float64, "float64"),
    ):
        x2 = tvm.relay.var("x", shape=[1, 2], dtype=dt)
        y2 = tvm.relay.var("y", shape=[2, 2], dtype=dt)
        x3 = tvm.relay.var("x", shape=[1, 3], dtype=dt)
        y3 = tvm.relay.var("y", shape=[3, 3], dtype=dt)
        # Call the scripted op twice with different shapes and sum both results.
        test_body = tvm.relay.sum(torchop(script_fn, x2, y2)) + tvm.relay.sum(
            torchop(script_fn, x3, y3)
        )
        test_fn = tvm.relay.Function([x2, y2, x3, y3], test_body)
        mod = tvm.IRModule({"main": test_fn})
        tvm.relay.transform.InferType()(mod)
        # mod = tvm.relay.transform.AnnotateTarget("target.torch")(mod)
        mod = tvm.relay.transform.MergeCompilerRegions()(mod)
        mod = tvm.relay.transform.PartitionGraph()(mod)
        mod = tvm.relay.transform.InferType()(mod)
        target = "llvm"
        with tvm.transform.PassContext(opt_level=3):
            lib = tvm.relay.build(mod, target, params={})
        ctx = tvm.cpu(0)
        rt_mod = tvm.contrib.graph_executor.GraphModule(lib["default"](ctx))
        # int does not have randn, so we cast...
        x2t = torch.randn(1, 2).to(dtype=torch_dt)
        y2t = torch.randn(2, 2).to(dtype=torch_dt)
        x3t = torch.randn(1, 3).to(dtype=torch_dt)
        y3t = torch.randn(3, 3).to(dtype=torch_dt)
        # Set inputs
        rt_mod.set_input(0, x2t)
        rt_mod.set_input(1, y2t)
        rt_mod.set_input(2, x3t)
        rt_mod.set_input(3, y3t)
        # Execute
        rt_mod.run()
        # Get outputs
        tvm_output = rt_mod.get_output(0).numpy()
        expected = (script_fn(x2t, y2t).sum() + script_fn(x3t, y3t).sum()).numpy()
        print(tvm_output.dtype)
        print(expected.dtype)
        tvm.testing.assert_allclose(tvm_output, expected)
if __name__ == "__main__":
tvm.testing.main()
| 3,149 | 32.510638 | 92 | py |
tvm | tvm-main/tests/python/contrib/test_dlpack.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import tvm.testing
from tvm import te
import numpy as np
from tvm.contrib.dlpack import to_pytorch_func
def verify_torch_dlpack():
    """Round-trip arrays between TVM and PyTorch through the DLPack protocol,
    then run a TVM-compiled matmul directly on torch tensors."""
    a = np.random.randn(1337)
    tvm_a = tvm.nd.array(a)
    np.testing.assert_equal(tvm.nd.from_dlpack(tvm_a.to_dlpack()).numpy(), a)
    try:
        import torch
        import torch.utils.dlpack
        x = torch.rand(56, 56)
        tvm_x = tvm.nd.from_dlpack(torch.utils.dlpack.to_dlpack(x))
        np.testing.assert_equal(x.numpy(), tvm_x.numpy())
        y = tvm.nd.from_dlpack(tvm_x)
        np.testing.assert_equal(y.numpy(), tvm_x.numpy())
        np.testing.assert_equal(
            torch.utils.dlpack.from_dlpack(y.to_dlpack()).numpy(), tvm_x.numpy()
        )
        # Build a 137x137 matmul with TE and invoke it on torch tensors.
        n = tvm.runtime.convert(137)
        xx = torch.rand(137, 137)
        yy = torch.rand(137, 137)
        zz2 = torch.empty(137, 137)
        zz = xx.mm(yy)
        XX = te.placeholder((n, n), name="X")
        YY = te.placeholder((n, n), name="Y")
        k = te.reduce_axis((0, n), name="k")
        ZZ = te.compute((n, n), lambda i, j: te.sum(XX[i, k] * YY[k, j], axis=k))
        s = te.create_schedule(ZZ.op)
        # No need to specify target_host if it's llvm
        # Otherwise you will need to specify the target and target_host
        f = tvm.build(s, [XX, YY, ZZ], name="f")
        f_pytorch = to_pytorch_func(f)
        zz2 = torch.empty(137, 137)
        f_pytorch(xx, yy, zz2)
        tvm.testing.assert_allclose(zz.numpy(), zz2.numpy(), rtol=1e-4, atol=1e-4)
    except ImportError:
        # PyTorch is optional; silently skip the interop checks when absent.
        pass
def test_torch_dlpack():
    """Repeat the DLPack interoperability check several times for stability."""
    for _ in range(5):
        verify_torch_dlpack()
if __name__ == "__main__":
test_torch_dlpack()
| 2,544 | 33.391892 | 82 | py |
tvm | tvm-main/tests/python/contrib/test_tensorrt.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import itertools
import logging
from typing import Tuple
import numpy as np
import pytest
try:
# See issue #9362.
import torch
except:
pass
import tvm
import tvm.relay.testing
import tvm.testing
from tvm import relay
from tvm.contrib.download import download
from tvm.relay import Any, GlobalVar
from tvm.relay.expr_functor import ExprVisitor
from tvm.relay.op.contrib import tensorrt
# Data types the TensorRT tests are exercised with.
SUPPORTED_DTYPES = ["float16", "float32"]
# Skip marker: TVM must be built with the TensorRT codegen enabled.
has_tensorrt_codegen = pytest.mark.skipif(
    not tensorrt.is_tensorrt_compiler_enabled(), reason="TensorRT codegen not available"
)
# CAUTION: Currently always false in CI since adds tens of minutes to test time and depends
# on TensorRT installation. See https://github.com/apache/tvm/issues/11765
has_tensorrt_runtime = pytest.mark.skipif(
    not tensorrt.is_tensorrt_runtime_enabled(), reason="TensorRT runtime not available"
)
# Parametrize every test over "compile" (build only) and "run" (build + execute).
run_module = tvm.testing.parameter(
    pytest.param(False, marks=[has_tensorrt_codegen, *tvm.testing.requires_cuda.marks()]),
    pytest.param(
        True, marks=[has_tensorrt_runtime, has_tensorrt_codegen, *tvm.testing.requires_cuda.marks()]
    ),
    ids=["compile", "run"],
)
def vmobj_to_list(o):
    """Flatten a runtime result (NDArray, ADT or list) into nested numpy arrays."""
    if isinstance(o, tvm.nd.NDArray):
        return [o.numpy()]
    if isinstance(o, (tvm.runtime.container.ADT, list)):
        return [vmobj_to_list(item) for item in o]
    raise RuntimeError("Unknown object type: %s" % type(o))
def assert_result_dict_holds(result_dict, dtype="float16"):
    """Assert every pair of results in ``result_dict`` agrees within tolerance."""
    # fp16 gets a much looser tolerance than fp32.
    rtol, atol = (1e-1, 1e-1) if dtype == "float16" else (1e-3, 5e-3)
    for k1, k2 in itertools.combinations(result_dict, 2):
        lhs = vmobj_to_list(result_dict[k1])
        rhs = vmobj_to_list(result_dict[k2])
        for r1, r2 in zip(lhs, rhs):
            tvm.testing.assert_allclose(r1, r2, rtol=rtol, atol=atol)
def set_outer_func_attr(func, compile_name, symbol_name):
    """Mark ``func`` as an externally-compiled partition for ``compile_name``."""
    one = tvm.tir.IntImm("int32", 1)
    func = func.with_attr("Primitive", one)
    func = func.with_attr("Inline", one)
    func = func.with_attr("Compiler", compile_name)
    return func.with_attr("global_symbol", symbol_name)
def set_inner_func_attr(func, pattern_name, composite_name):
    """Tag an inner function with its source pattern and composite name."""
    return func.with_attr("PartitionedFromPattern", pattern_name).with_attr(
        "Composite", composite_name
    )
def run_and_verify_func(config, target="cuda", run_module=True, data_type="float32"):
    """Test a Relay func by compiling, running, and comparing TVM and TRT outputs.
    Parameters
    ----------
    config : Tuple[relay.Function, Dict[str, NDArray], List[str]]
        A tuple containing 1) The function to test, 2) A dictionary of var names to input shapes and
        3) A list of which vars should be considered params.
    run_module: bool
        If True, the built module will be run after being compiled.
    data_type: str
        Floating-point precision used for all inputs and params, e.g.
        "float32" or "float16".
    """
    np.random.seed(42)
    f, input_shapes, is_param = config
    # Vars listed in is_param become bound params; the rest are runtime inputs.
    params = {
        x: np.random.uniform(-1, 1, input_shapes[x]).astype(dtype=data_type) for x in is_param
    }
    input_dict = {
        k: np.random.uniform(-1, 1, v).astype(dtype=data_type)
        for k, v in input_shapes.items()
        if k not in is_param
    }
    dev = tvm.device(target)
    result_dict = dict()
    # Run every (executor mode, TRT on/off) combination and compare all results.
    for mode in ["vm", "graph"]:
        for use_trt in [True, False]:
            mod = tvm.IRModule()
            mod["main"] = f
            result_key = mode + ("_trt" if use_trt else "")
            if use_trt:
                use_fp16 = data_type == "float16"
                trt_target = tvm.target.Target(f"tensorrt -use_fp16={use_fp16}")
                mod = relay.transform.InferType()(mod)
                mod = tensorrt.partition_for_tensorrt(mod, params=params, target=trt_target)
                with tvm.transform.PassContext(opt_level=3):
                    func = relay.create_executor(
                        mode, mod=mod, device=dev, target=[target, trt_target]
                    ).evaluate()
            else:
                mod = relay.transform.InferType()(mod)
                with tvm.transform.PassContext(opt_level=3):
                    func = relay.create_executor(
                        mode, mod=mod, device=dev, target=target
                    ).evaluate()
            if run_module:
                result_dict[result_key] = func(**input_dict, **params)
    if run_module:
        assert_result_dict_holds(result_dict, data_type)
def test_tensorrt_simple(run_module):
    """Build relu(z * (x + y)) and compare TRT-partitioned vs plain TVM results
    for every supported dtype."""
    for dtype in SUPPORTED_DTYPES:
        xshape = (1, 3, 2, 2)
        yshape = (1, 3, 1, 1)
        zshape = (1, 1, 1, 1)
        x = relay.var("x", shape=(xshape), dtype=dtype)
        y = relay.var("y", shape=(yshape), dtype=dtype)
        z = relay.var("z", shape=(zshape), dtype=dtype)
        w = z * (x + y)
        out = relay.nn.relu(w)
        f = relay.Function([x, y, z], out)
        x_data = np.random.uniform(-1, 1, xshape).astype(dtype)
        y_data = np.random.uniform(-1, 1, yshape).astype(dtype)
        z_data = np.random.uniform(-1, 1, zshape).astype(dtype)
        result_dict = dict()
        # Collect results for every (executor mode, TRT on/off) combination.
        for mode in ["vm", "graph"]:
            for use_trt in [False, True]:
                mod = tvm.IRModule()
                mod["main"] = f
                result_key = mode + ("_trt" if use_trt else "")
                if use_trt:
                    mod = relay.transform.InferType()(mod)
                    mod = tensorrt.partition_for_tensorrt(mod)
                    with tvm.transform.PassContext(opt_level=3):
                        func = relay.create_executor(
                            mode, mod=mod, device=tvm.cuda(0), target="cuda"
                        ).evaluate()
                else:
                    mod = relay.transform.InferType()(mod)
                    with tvm.transform.PassContext(opt_level=3):
                        func = relay.create_executor(
                            mode, mod=mod, device=tvm.cuda(0), target="cuda"
                        ).evaluate()
                if run_module:
                    result_dict[result_key] = func(x_data, y_data, z_data)
        if run_module:
            assert_result_dict_holds(result_dict)
def test_tensorrt_simple_cpu_io(run_module):
    """Build relu(z * (x + y)) targeting llvm, with "y" treated as a param."""
    dtype = "float32"
    shapes = {"x": (1, 3, 2, 2), "y": (1, 3, 1, 1), "z": (1, 1, 1, 1)}
    x = relay.var("x", shape=shapes["x"], dtype=dtype)
    y = relay.var("y", shape=shapes["y"], dtype=dtype)
    z = relay.var("z", shape=shapes["z"], dtype=dtype)
    func = relay.Function([x, y, z], relay.nn.relu(z * (x + y)))
    run_and_verify_func((func, shapes, ["y"]), target="llvm", run_module=run_module)
def test_tensorrt_not_compatible(run_module):
    """A graph containing an int32 cast round-trip (not TRT-convertible) must
    still partition, compile and run end to end."""
    dtype = "float32"
    xshape = (1, 32, 14, 14)
    x_data = np.random.uniform(-1, 1, xshape).astype(dtype)
    x = relay.var("x", shape=(xshape), dtype=dtype)
    y = relay.add(x, x)
    # The float -> int32 -> float cast chain is the TRT-incompatible part.
    z = relay.cast(relay.cast(y, "int32"), "float32")
    out = relay.nn.relu(z)
    f = relay.Function([x], out)
    mod = tvm.IRModule()
    mod["main"] = f
    mod = tensorrt.partition_for_tensorrt(mod)
    for mode in ["graph", "vm"]:
        with tvm.transform.PassContext(opt_level=3):
            func = relay.create_executor(
                mode, mod=mod, device=tvm.cuda(0), target="cuda"
            ).evaluate()
        if run_module:
            results = func(x_data)
def test_conv1d(run_module):
    """Conv1d in fp16 through the TensorRT path."""
    def get_graph(
        x_shape=((1, 3, 224)),
        k_shape=(10, 3, 3),
        groups=1,
        padding=(1, 1),
        strides=(1),
        dilation=(1),
        channels=None,
        d_type="float16",
    ):
        x = relay.var("x", shape=(x_shape), dtype=d_type)
        kernel = relay.var("kernel", shape=(k_shape), dtype=d_type)
        out = relay.nn.conv1d(
            x,
            kernel,
            kernel_size=k_shape[2:3],
            groups=groups,
            padding=padding,
            strides=strides,
            dilation=dilation,
            channels=channels,
            out_dtype="float16",
        )
        f = relay.Function([x, kernel], out)
        return f, {"x": x_shape, "kernel": k_shape}, ["kernel"]
    # Currently only float16 is exercised here.
    for d_type in ["float16"]:
        run_and_verify_func(
            get_graph(channels=10, d_type=d_type), run_module=run_module, data_type=d_type
        )
def test_conv2d(run_module):
    """Sweep conv2d over groups/padding/strides/dilation combinations, plus a
    few hand-picked shapes, through the TensorRT path."""
    def get_graph(
        x_shape=(1, 32, 8, 8),
        k_shape=(16, 32, 3, 3),
        groups=1,
        padding=(0, 0),
        strides=(1, 1),
        dilation=(1, 1),
        channels=None,
        data_type="float16",
    ):
        x = relay.var("x", shape=(x_shape), dtype=data_type)
        kernel = relay.var("kernel", shape=(k_shape), dtype=data_type)
        out = relay.nn.conv2d(
            x,
            kernel,
            kernel_size=k_shape[2:4],
            groups=groups,
            padding=padding,
            strides=strides,
            dilation=dilation,
            channels=channels,
            out_dtype=data_type,
        )
        f = relay.Function([x, kernel], out)
        return f, {"x": x_shape, "kernel": k_shape}, ["kernel"]
    # Regular (groups=1) and depthwise (groups=32) cases across the sweep.
    for k_shape, groups in [((16, 32, 3, 3), 1), ((32, 1, 3, 3), 32)]:
        for padding in [(0, 0), (1, 1)]:
            for strides in [(1, 1), (2, 2)]:
                for dilation in [(1, 1), (2, 2)]:
                    run_and_verify_func(
                        get_graph(
                            k_shape=k_shape,
                            groups=groups,
                            padding=padding,
                            strides=strides,
                            dilation=dilation,
                        ),
                        run_module=run_module,
                        data_type="float16",
                    )
    # Grouped conv with asymmetric 4-value padding.
    run_and_verify_func(
        get_graph(
            (1, 3, 16, 16), (3, 8, 7, 7), 3, [2, 2, 3, 3], [2, 2], [1, 1], 24, data_type="float16"
        ),
        run_module=run_module,
        data_type="float16",
    )
    # 1x1 conv in float32.
    run_and_verify_func(
        get_graph((1, 3, 16, 16), (1, 3, 1, 1), channels=1, data_type="float32"),
        run_module=run_module,
        data_type="float32",
    )
def test_conv2d_nhwc(run_module):
    """Verify conv2d with NHWC data layout and HWIO kernel layout."""

    def get_graph(x_shape=(1, 8, 8, 32), k_shape=(3, 3, 32, 16)):
        data = relay.var("x", shape=(x_shape), dtype="float32")
        weight = relay.var("kernel", shape=(k_shape), dtype="float32")
        conv = relay.nn.conv2d(
            data,
            weight,
            channels=16,
            kernel_size=(3, 3),
            data_layout="NHWC",
            kernel_layout="HWIO",
        )
        func = relay.Function([data, weight], conv)
        return func, {"x": x_shape, "kernel": k_shape}, ["kernel"]

    run_and_verify_func(get_graph(), run_module=run_module)
def test_conv2d_weights_const(run_module):
    """Conv2d whose weights are a relay constant (TRT folds constant weights)."""

    def get_graph(
        x_shape=(1, 32, 8, 8),
        k_shape=(16, 32, 3, 3),
        groups=1,
        padding=(0, 0),
        strides=(1, 1),
        dilation=(1, 1),
        data_type="float16",
    ):
        data = relay.var("x", shape=(x_shape), dtype=data_type)
        weight = relay.const(np.ones(k_shape).astype(dtype=data_type))
        conv = relay.nn.conv2d(
            data,
            weight,
            channels=k_shape[0],
            kernel_size=k_shape[2:4],
            groups=groups,
            padding=padding,
            strides=strides,
            dilation=dilation,
        )
        # Only the activation is a free input; the kernel is baked in.
        func = relay.Function([data], conv)
        return func, {"x": x_shape}, []

    for tp in ["float16"]:
        run_and_verify_func(get_graph(data_type=tp), run_module=run_module, data_type=tp)
def test_conv2d_weights_transposed(run_module):
    """Conv2d fed by a transpose of its weights.

    TensorRT requires constant conv weights, so FoldConstant is expected to
    pre-apply the transpose before offload.
    """

    def get_graph(x_shape=(1, 32, 9, 9), k_shape=(3, 3, 32, 16), order=(3, 2, 0, 1)):
        data = relay.var("x", shape=(x_shape), dtype="float32")
        weight = relay.var("kernel", shape=(k_shape), dtype="float32")
        weight_t = relay.transpose(weight, order)
        conv = relay.nn.conv2d(data, weight_t, channels=k_shape[order[0]], kernel_size=(3, 3))
        func = relay.Function([data, weight], conv)
        return func, {"x": x_shape, "kernel": k_shape}, ["kernel"]

    run_and_verify_func(get_graph(), run_module=run_module)
def test_dense(run_module):
    """Dense (fully-connected) layer with kernel shape (units, in_features)."""

    def get_graph(x_shape=(1, 16), k_shape=(32, 16), dtp="float16"):
        data = relay.var("x", shape=(x_shape), dtype=dtp)
        weight = relay.var("kernel", shape=(k_shape), dtype=dtp)
        # Kernel is already in the (units, in) layout dense expects; TRT needs
        # constant dense weights, which run_and_verify_func binds via params.
        dense = relay.nn.dense(data, weight, units=k_shape[0])
        func = relay.Function([data, weight], dense)
        return func, {"x": x_shape, "kernel": k_shape}, ["kernel"]

    for tp in ["float32"]:
        run_and_verify_func(get_graph(dtp=tp), run_module=run_module, data_type=tp)
        run_and_verify_func(get_graph(k_shape=(1, 16), dtp=tp), run_module=run_module, data_type=tp)
def test_batch_matmul(run_module):
    """Batch matmul for all four transpose_a/transpose_b combinations."""

    def get_graph(x_shape=(12, 128, 64), y_shape=(12, 128, 64), transa=False, transb=True):
        lhs = relay.var("x", shape=(x_shape), dtype="float32")
        rhs = relay.var("y", shape=(y_shape), dtype="float32")
        prod = relay.nn.batch_matmul(lhs, rhs, transpose_a=transa, transpose_b=transb)
        func = relay.Function([lhs, rhs], prod)
        return func, {"x": x_shape, "y": y_shape}, []

    # (x_shape, y_shape, transpose_a, transpose_b)
    configs = [
        ((12, 64, 128), (12, 128, 64), True, True),
        ((12, 64, 128), (12, 64, 128), True, False),
        ((12, 128, 64), (12, 128, 64), False, True),
        ((12, 128, 64), (12, 64, 128), False, False),
    ]
    for x_shape, y_shape, transa, transb in configs:
        run_and_verify_func(
            get_graph(x_shape=x_shape, y_shape=y_shape, transa=transa, transb=transb),
            run_module=run_module,
        )
def test_bias_add(run_module):
    """bias_add over 2-D and 4-D inputs with positive and negative axes."""

    def get_graph(x_shape=(1, 16), channels=16, axis=1):
        data = relay.var("x", shape=(x_shape), dtype="float32")
        bias = relay.var("bias", shape=(channels,), dtype="float32")
        added = relay.nn.bias_add(data, bias, axis)
        func = relay.Function([data, bias], added)
        return func, {"x": x_shape, "bias": (channels,)}, ["bias"]

    run_and_verify_func(get_graph(), run_module=run_module)
    run_and_verify_func(get_graph((1, 6, 3, 4), 6), run_module=run_module)
    run_and_verify_func(get_graph((1, 6, 3, 4), 4, -1), run_module=run_module)
def test_pool2d(run_module):
    """Sweep avg/max pool2d over pool sizes, strides, paddings and ceil modes,
    skipping combinations TensorRT rejects."""
    def get_graph(
        op,
        x_shape=(1, 3, 32, 32),
        pool_size=(2, 2),
        strides=(2, 2),
        padding=(0, 0),
        ceil_mode=False,
        count_include_pad=None,
    ):
        # count_include_pad only exists on avg pooling; omit it when None so
        # max pooling does not receive an unsupported keyword.
        x = relay.var("x", shape=(x_shape), dtype="float32")
        if count_include_pad is not None:
            out = op(
                x,
                pool_size=pool_size,
                strides=strides,
                padding=padding,
                ceil_mode=ceil_mode,
                count_include_pad=count_include_pad,
            )
        else:
            out = op(x, pool_size=pool_size, strides=strides, padding=padding, ceil_mode=ceil_mode)
        f = relay.Function([x], out)
        return f, {"x": x_shape}, []
    for pool_size in [(2, 2), (3, 3)]:
        for strides in [(1, 1), (2, 2)]:
            for padding in [(0, 0), (1, 1), (0, 0, 1, 1)]:
                for ceil_mode in [False, True]:
                    # Skip "the padding size is larger than or equal to the filter size for exclusive-counting pooling"
                    if pool_size == (2, 2) and padding == (0, 0, 1, 1):
                        continue
                    for count_include_pad in [False, True]:
                        # Skip "inclusive-counted blended or average pooling is not supported in combination with asymmetric padding"
                        if count_include_pad and (padding == (0, 0, 1, 1) or strides == (2, 2)):
                            continue
                        run_and_verify_func(
                            get_graph(
                                relay.nn.avg_pool2d,
                                pool_size=pool_size,
                                strides=strides,
                                padding=padding,
                                ceil_mode=ceil_mode,
                                count_include_pad=count_include_pad,
                            ),
                            run_module=run_module,
                        )
                    run_and_verify_func(
                        get_graph(
                            relay.nn.max_pool2d,
                            pool_size=pool_size,
                            strides=strides,
                            padding=padding,
                            ceil_mode=ceil_mode,
                        ),
                        run_module=run_module,
                    )
def test_global_pool2d(run_module):
    """Global max and average pooling over the full spatial extent."""

    def get_graph(op, x_shape=(1, 3, 32, 32)):
        data = relay.var("x", shape=(x_shape), dtype="float32")
        func = relay.Function([data], op(data))
        return func, {"x": x_shape}, []

    for pool_op in (relay.nn.global_max_pool2d, relay.nn.global_avg_pool2d):
        run_and_verify_func(get_graph(pool_op), run_module=run_module)
def test_batch_flatten(run_module):
    """batch_flatten collapses all non-batch dims; checked in fp16 and fp32."""

    def get_graph(x_shape=(1, 3, 4, 6), data_type="float16"):
        data = relay.var("x", shape=(x_shape), dtype=data_type)
        flat = relay.nn.batch_flatten(data)
        func = relay.Function([data], flat)
        return func, {"x": x_shape}, []

    for dtp in ["float16", "float32"]:
        run_and_verify_func(get_graph(data_type=dtp), run_module=run_module, data_type=dtp)
def test_expand_dims(run_module):
    """expand_dims inserts new unit axes at the requested position."""

    def get_graph(x_shape=(1, 3), axis=1, num_newaxis=1):
        data = relay.var("x", shape=(x_shape), dtype="float32")
        expanded = relay.expand_dims(data, axis, num_newaxis)
        func = relay.Function([data], expanded)
        return func, {"x": x_shape}, []

    run_and_verify_func(get_graph(), run_module=run_module)
def test_squeeze(run_module):
    """squeeze removes size-1 axes, via explicit and negative axis indices."""

    def get_graph(x_shape, axis, dtype):
        data = relay.var("x", shape=(x_shape), dtype=dtype)
        squeezed = relay.squeeze(data, axis=axis)
        func = relay.Function([data], squeezed)
        return func, {"x": x_shape}, []

    for dtype in SUPPORTED_DTYPES:
        for shape, axis in (((1, 5, 1, 1), (2, 3)), ((1, 3, 1), (-1,))):
            run_and_verify_func(
                get_graph(shape, axis, dtype=dtype), run_module=run_module, data_type=dtype
            )
def test_concatenate(run_module):
    """Concatenate multiple inputs along a given axis.

    Builds one relay.var per input shape, concatenates them, and compares the
    TensorRT path with the native TVM result.
    """

    def get_graph(input_shapes, axis):
        concat_inputs = []
        shapes_dict = {}
        # enumerate instead of range(len(...)): idiomatic, avoids double indexing.
        for i, shape in enumerate(input_shapes):
            name = "input_{}".format(i)
            concat_inputs.append(relay.var(name, shape=(shape), dtype="float32"))
            shapes_dict[name] = shape
        out = relay.concatenate(concat_inputs, axis)
        f = relay.Function(concat_inputs, out)
        return f, shapes_dict, []

    run_and_verify_func(get_graph([(1, 2, 6, 6), (1, 3, 6, 6)], axis=1), run_module=run_module)
def test_split(run_module):
    """split both by section count and by explicit split indices."""

    def get_graph(x_shape, indices_or_sections, axis):
        data = relay.var("x", shape=(x_shape), dtype="float32")
        parts = relay.split(data, indices_or_sections=indices_or_sections, axis=axis)
        func = relay.Function([data], parts.astuple())
        return func, {"x": x_shape}, []

    for sections in (2, 4, [8], [2, 3, 6, 10, 14]):
        run_and_verify_func(
            get_graph((1, 16), indices_or_sections=sections, axis=1), run_module=run_module
        )
def test_conv2d_transpose(run_module):
    """Transposed conv2d across padding and stride combinations."""

    def get_graph(
        x_shape=(1, 32, 8, 8), k_shape=(32, 16, 3, 3), groups=1, padding=(0, 0), strides=(1, 1)
    ):
        data = relay.var("x", shape=(x_shape), dtype="float32")
        weight = relay.var("kernel", shape=(k_shape), dtype="float32")
        deconv = relay.nn.conv2d_transpose(
            data,
            weight,
            channels=k_shape[1],
            kernel_size=k_shape[2:4],
            groups=groups,
            padding=padding,
            strides=strides,
        )
        func = relay.Function([data, weight], deconv)
        return func, {"x": x_shape, "kernel": k_shape}, ["kernel"]

    for padding in [(0, 0), (1, 1)]:
        for strides in [(1, 1), (2, 2)]:
            run_and_verify_func(get_graph(padding=padding, strides=strides), run_module=run_module)
def test_reshape(run_module):
    """reshape with -1 wildcards in several positions, all in fp16."""

    def get_graph(x_shape, new_shape):
        data = relay.var("x", shape=(x_shape), dtype="float16")
        reshaped = relay.reshape(data, new_shape)
        func = relay.Function([data], reshaped)
        return func, {"x": x_shape}, []

    for shape, new_shape in (
        ((1, 1, 1, 10), (-1, 10)),
        ((1, 10, 2, 3), (1, -1)),
        ((1, 1, 2, 3), (1, 6)),
    ):
        run_and_verify_func(
            get_graph(shape, new_shape), run_module=run_module, data_type="float16"
        )
class AreOpsOnGraph(ExprVisitor):
    """
    Visits the graph recursively and checks if it contains ops in the op_list.
    """
    def __init__(self, op_list):
        ExprVisitor.__init__(self)
        # Op names (e.g. "reshape") to look for while visiting.
        self.op_list = op_list
        # Latched to True once any listed op is encountered.
        self.on_graph = False
    def visit_call(self, call):
        # Only primitive op calls carry an Op node; function calls are skipped.
        if isinstance(call.op, tvm.tir.op.Op):
            if str(call.op.name) in self.op_list:
                self.on_graph = True
        return super().visit_call(call)
    def are_ops_on_graph(self, subgraph) -> bool:
        """
        Recursively visit the expression and report whether any op from
        op_list appears in it.
        """
        self.visit(subgraph)
        return self.on_graph
def are_ops_on_trt(mod, op_list):
    """Return True iff every op in op_list appears only inside TensorRT subgraphs."""
    op_on_trt = False
    op_on_tvm = False
    for gvar in mod.get_global_vars():
        func = mod[gvar.name_hint]
        found = AreOpsOnGraph(op_list).are_ops_on_graph(func.body)
        # Functions tagged Compiler=tensorrt were offloaded; all others run on TVM.
        if func.attrs and func.attrs["Compiler"] == "tensorrt":
            op_on_trt = op_on_trt or found
        else:
            op_on_tvm = op_on_tvm or found
    return op_on_trt and not op_on_tvm
def test_dynamic_reshape(run_module):
    """Check reshape offload with a dynamic (relay.Any) dimension.

    Batch-only reshapes under a dynamic batch should be offloaded to TensorRT;
    reshapes that touch non-batch extents (or whose dynamic axis is not the
    batch) must stay on TVM. With and without TRT, VM results must agree for
    every tested batch size, including 0.
    """

    def test_run(x_data_list, x_shape, new_shape, should_offload_to_trt):
        # One result dict per input, keyed by use_trt, so both paths compare.
        result_arr = [{} for _ in range(len(x_data_list))]
        for use_trt in [True, False]:
            x = relay.var("x", shape=x_shape, dtype="float32")
            out = relay.reshape(x, new_shape)
            f = relay.Function([x], out)
            mod = tvm.IRModule()
            mod["main"] = f
            if use_trt:
                logging.info("Before partitioning:\n%s", mod)
                mod = tensorrt.partition_for_tensorrt(mod)
                logging.info("After partitioning:\n%s", mod)
                assert are_ops_on_trt(mod, op_list=["reshape"]) == should_offload_to_trt
            if run_module:
                # Use PassContext directly; relay.build_config is a deprecated
                # alias (matches the rest of this file).
                with tvm.transform.PassContext(opt_level=3):
                    func = relay.create_executor(
                        "vm", mod=mod, device=tvm.cpu(0), target="llvm"
                    ).evaluate()
                for i, x_data in enumerate(x_data_list):
                    result_arr[i][use_trt] = func(x_data)
        if run_module:
            for i in range(len(x_data_list)):
                assert_result_dict_holds(result_arr[i])

    dim_values = [1, 1, 0, 2, 3, 0, 1, 3, 2]

    # Dynamic batch axis, batch-only reshape: supported by TRT.
    x_shape = (relay.Any(), 3, 2, 3)
    x_data_list = [
        np.ones([dim_value] + list(x_shape)[1:]).astype("float32") for dim_value in dim_values
    ]
    test_run(x_data_list, x_shape, (-1, 3, 2, 3), should_offload_to_trt=True)

    # Dynamic batch axis but the reshape changes a non-batch extent: stays on TVM.
    test_run(x_data_list, x_shape, (-1, 1, 2, 3), should_offload_to_trt=False)

    # Dynamic non-batch axis: also stays on TVM.
    x_shape = (1, relay.Any(), 2, 3)
    x_data_list = [
        np.ones(list(x_shape[:1]) + [dim_value] + list(x_shape)[2:]).astype("float32")
        for dim_value in dim_values
    ]
    test_run(x_data_list, x_shape, (1, -1, 2, 3), should_offload_to_trt=False)
def test_transpose(run_module):
    """transpose between NCHW-like and NHWC-like axis orders."""

    def get_graph(x_shape, order):
        data = relay.var("x", shape=(x_shape), dtype="float32")
        permuted = relay.transpose(data, order)
        func = relay.Function([data], permuted)
        return func, {"x": x_shape}, []

    run_and_verify_func(get_graph((1, 16, 7, 7), [0, 2, 3, 1]), run_module=run_module)
    run_and_verify_func(get_graph((1, 7, 7, 16), [0, 3, 1, 2]), run_module=run_module)
def test_float_const(run_module):
    """Multiply an input by a float32 scalar constant."""

    def get_graph(x_shape=(1, 16)):
        data = relay.var("x", shape=(x_shape), dtype="float32")
        scale = relay.const(1, dtype="float32")
        product = relay.multiply(data, scale)
        func = relay.Function([data], product)
        return func, {"x": x_shape}, []

    run_and_verify_func(get_graph(), run_module=run_module, data_type="float32")
def test_float_const16(run_module):
    """Multiply an input by a float16 scalar constant."""

    def get_graph(x_shape=(1, 16)):
        data = relay.var("x", shape=(x_shape), dtype="float16")
        scale = relay.const(1, dtype="float16")
        product = relay.multiply(data, scale)
        func = relay.Function([data], product)
        return func, {"x": x_shape}, []

    run_and_verify_func(get_graph(), run_module=run_module, data_type="float16")
def test_pad(run_module):
    """nn.pad with zero, symmetric and asymmetric widths, plus a 5-D case."""

    def get_graph(x_shape, pad_width):
        data = relay.var("x", shape=(x_shape), dtype="float32")
        padded = relay.nn.pad(data, pad_width=pad_width)
        func = relay.Function([data], padded)
        return func, {"x": x_shape}, []

    cases = [
        ((1, 8, 16, 16), [[0, 0], [0, 0], [0, 0], [0, 0]]),
        ((1, 8, 16, 16), [[0, 0], [0, 0], [1, 1], [1, 1]]),
        ((1, 8, 16, 16), [[0, 0], [0, 0], [0, 1], [2, 0]]),
        ((1, 8, 3, 16, 16), [[0, 0], [0, 0], [0, 0], [0, 0], [0, 0]]),
    ]
    for shape, widths in cases:
        run_and_verify_func(get_graph(shape, widths), run_module=run_module)
def test_add(run_module):
    """Elementwise fp16 addition of two equal-shaped inputs."""

    def get_graph(x_shape):
        lhs = relay.var("x", shape=(x_shape), dtype="float16")
        rhs = relay.var("y", shape=(x_shape), dtype="float16")
        total = relay.add(lhs, rhs)
        func = relay.Function([lhs, rhs], total)
        return func, {"x": x_shape, "y": x_shape}, []

    run_and_verify_func(get_graph((1, 1000)), run_module=run_module, data_type="float16")
def test_softmax(run_module):
    """softmax along positive and negative axes, in fp32 and fp16."""

    def get_graph(x_shape, axis, data_type="float32"):
        data = relay.var("x", shape=(x_shape), dtype=data_type)
        probs = relay.nn.softmax(data, axis=axis)
        func = relay.Function([data], probs)
        return func, {"x": x_shape}, []

    # (shape, axis, dtype)
    for shape, axis, dtype in (
        ((1, 1000), 1, "float32"),
        ((1, 1000), -1, "float32"),
        ((1, 3, 4), -2, "float16"),
        ((1, 3, 4), 1, "float16"),
    ):
        run_and_verify_func(
            get_graph(shape, axis=axis, data_type=dtype),
            run_module=run_module,
            data_type=dtype,
        )
def test_batch_norm(run_module):
    """batch_norm over many ranks (2-D to 5-D) and normalization axes."""
    def get_graph(x_shape, param_shape, axis=1, epsilon=1e-5):
        # gamma/beta/moving stats all share param_shape (one value per channel).
        x = relay.var("x", shape=(x_shape), dtype="float32")
        beta = relay.var("beta", shape=(param_shape), dtype="float32")
        gamma = relay.var("gamma", shape=(param_shape), dtype="float32")
        moving_mean = relay.var("moving_mean", shape=(param_shape), dtype="float32")
        moving_var = relay.var("moving_var", shape=(param_shape), dtype="float32")
        # batch_norm returns (out, new_mean, new_var); only the output is used.
        out, _, _ = relay.nn.batch_norm(
            x,
            gamma=gamma,
            beta=beta,
            moving_mean=moving_mean,
            moving_var=moving_var,
            axis=axis,
            center=True,
            scale=True,
            epsilon=epsilon,
        )
        f = relay.Function([x, gamma, beta, moving_mean, moving_var], out)
        return (
            f,
            {
                "x": x_shape,
                "beta": param_shape,
                "gamma": param_shape,
                "moving_mean": param_shape,
                "moving_var": param_shape,
            },
            ["beta", "gamma", "moving_mean", "moving_var"],
        )
    run_and_verify_func(get_graph((1, 64, 56, 56), (64,)), run_module=run_module)
    run_and_verify_func(
        get_graph((1, 56, 56, 64), (64,), axis=3, epsilon=1.001e-05), run_module=run_module
    )
    run_and_verify_func(get_graph((1, 4, 8, 4), (8,), axis=2), run_module=run_module)
    run_and_verify_func(get_graph((1, 8, 4, 4, 4), (8,), axis=1), run_module=run_module)
    run_and_verify_func(get_graph((1, 4, 8, 4, 4), (8,), axis=2), run_module=run_module)
    run_and_verify_func(get_graph((1, 4, 4, 4, 8), (8,), axis=4), run_module=run_module)
    run_and_verify_func(get_graph((1, 8), (8,), axis=1), run_module=run_module)
    run_and_verify_func(get_graph((1, 3, 8), (8,), axis=2), run_module=run_module)
def test_layer_norm(run_module):
    """layer_norm with learnable gamma/beta over different axes."""

    def get_graph(x_shape, param_shape, axis=1, epsilon=1e-5):
        data = relay.var("x", shape=(x_shape), dtype="float32")
        gamma = relay.var("gamma", shape=(param_shape), dtype="float32")
        beta = relay.var("beta", shape=(param_shape), dtype="float32")
        normed = relay.nn.layer_norm(
            data, gamma=gamma, beta=beta, axis=axis, epsilon=epsilon, center=True, scale=True
        )
        func = relay.Function([data, gamma, beta], normed)
        return (func, {"x": x_shape, "beta": param_shape, "gamma": param_shape}, ["beta", "gamma"])

    run_and_verify_func(get_graph((1, 32, 8, 8), (32,)), run_module=run_module)
    run_and_verify_func(
        get_graph((1, 8, 8, 32), (32,), axis=3, epsilon=1.001e-05), run_module=run_module
    )
    run_and_verify_func(get_graph((1, 8), (8,), axis=1), run_module=run_module)
def test_unary(run_module):
    """A battery of elementwise unary ops on a fixed fp32 input shape."""

    def get_graph(op, x_shape=(1, 8, 3, 3)):
        data = relay.var("x", shape=(x_shape), dtype="float32")
        func = relay.Function([data], op(data))
        return func, {"x": x_shape}, []

    unary_ops = (
        relay.nn.relu,
        relay.sigmoid,
        relay.tanh,
        relay.exp,
        relay.log,
        relay.sqrt,
        relay.abs,
        relay.negative,
        relay.sin,
        relay.cos,
        relay.atan,
        relay.ceil,
        relay.floor,
        relay.erf,
    )
    for op in unary_ops:
        run_and_verify_func(get_graph(op), run_module=run_module)
def test_clip(run_module):
    """clip an fp16 input into the range [-0.2, 0.4]."""

    def get_graph(x_shape=(1, 8, 3, 3)):
        data = relay.var("x", shape=(x_shape), dtype="float16")
        clipped = relay.clip(data, a_min=-0.2, a_max=0.4)
        func = relay.Function([data], clipped)
        return func, {"x": x_shape}, []

    run_and_verify_func(get_graph(), run_module=run_module, data_type="float16")
def test_relu(run_module):
    """Plain ReLU on an fp16 input."""

    def get_graph(x_shape=(1, 8, 3, 4)):
        data = relay.var("x", shape=(x_shape), dtype="float16")
        activated = relay.nn.relu(data)
        func = relay.Function([data], activated)
        return func, {"x": x_shape}, []

    run_and_verify_func(get_graph(), run_module=run_module, data_type="float16")
def test_leaky_relu(run_module):
    """Leaky ReLU (alpha=0.1) on an fp16 input."""

    def get_graph(x_shape=(1, 8, 3, 4)):
        data = relay.var("x", shape=(x_shape), dtype="float16")
        activated = relay.nn.leaky_relu(data, alpha=0.1)
        func = relay.Function([data], activated)
        return func, {"x": x_shape}, []

    run_and_verify_func(get_graph(), run_module=run_module, data_type="float16")
def test_binary(run_module):
    """Elementwise binary ops with broadcasting; the RHS is either a second
    free input or a baked-in constant (exercising TRT constant handling)."""
    def get_graph(op, x_shape, y_shape, y_is_const=False, d_type="float16"):
        x = relay.var("x", shape=(x_shape), dtype=d_type)
        if y_is_const:
            # Constant RHS: the function takes only x as input.
            y = relay.const(np.ones(y_shape).astype(d_type))
            out = op(x, y)
            f = relay.Function([x], out)
            return f, {"x": x_shape}, []
        y = relay.var("y", shape=(y_shape), dtype=d_type)
        out = op(x, y)
        f = relay.Function([x, y], out)
        return f, {"x": x_shape, "y": y_shape}, []
    # Equal shapes, mutual broadcast, and rank-mismatched broadcast cases.
    for op in [relay.add, relay.subtract, relay.multiply, relay.divide, relay.power]:
        for d_type in SUPPORTED_DTYPES:
            for y_is_const in [True, False]:
                run_and_verify_func(
                    get_graph(op, (1, 8, 3, 3), (1, 8, 3, 3), y_is_const, d_type),
                    run_module=run_module,
                    data_type=d_type,
                )
                run_and_verify_func(
                    get_graph(op, (1, 8, 1, 3), (1, 8, 3, 1), y_is_const, d_type),
                    run_module=run_module,
                    data_type=d_type,
                )
                run_and_verify_func(
                    get_graph(op, (1, 10), (10,), y_is_const, d_type),
                    run_module=run_module,
                    data_type=d_type,
                )
                run_and_verify_func(
                    get_graph(op, (1, 1, 1, 10), (10,), y_is_const, d_type),
                    run_module=run_module,
                    data_type=d_type,
                )
                run_and_verify_func(
                    get_graph(op, (1, 1, 1), (3,), y_is_const, d_type),
                    run_module=run_module,
                    data_type=d_type,
                )
def test_reduce(run_module):
    """Reduction ops (sum/prod/max/min/mean) over several axis sets.

    Each reduction is checked with and without keepdims for every supported
    dtype. Fixes: the loop variable was renamed from ``type`` to ``dtype`` so
    the ``type`` builtin is no longer shadowed, and the four identical
    verification calls are collapsed into an axis loop (same order).
    """

    def get_graph(op, x_shape=(1, 2, 3, 4), axis=(2, 3), keepdims=False, d_type="float32"):
        x = relay.var("x", shape=(x_shape), dtype=d_type)
        out = op(x, axis=axis, keepdims=keepdims)
        f = relay.Function([x], out)
        return f, {"x": x_shape}, []

    for dtype in SUPPORTED_DTYPES:
        for op in [relay.sum, relay.prod, relay.max, relay.min, relay.mean]:
            for keepdims in [True, False]:
                # Note: (1) is an int, deliberately exercising the scalar-axis path.
                for axis in [(1), (2, 3), (1, 2), (1, 2, 3)]:
                    run_and_verify_func(
                        get_graph(op, axis=axis, keepdims=keepdims, d_type=dtype),
                        run_module=run_module,
                        data_type=dtype,
                    )
def test_strided_slice(run_module):
    """strided_slice in both "size" and "end" interpretation of the end field,
    including negative extents."""
    def get_graph(x_shape, begin, end, strides=None, slice_mode="size"):
        # strides=None means the op default (unit strides) is used.
        x = relay.var("x", shape=(x_shape), dtype="float32")
        if strides:
            out = relay.strided_slice(x, begin, end, strides, slice_mode=slice_mode)
        else:
            out = relay.strided_slice(x, begin, end, slice_mode=slice_mode)
        f = relay.Function([x], out)
        return f, {"x": x_shape}, []
    for slice_mode in ["size", "end"]:
        run_and_verify_func(
            get_graph((1, 3, 6, 7), (0, 0, 0, 0), (1, 1, 6, 7), slice_mode=slice_mode),
            run_module=run_module,
        )
        run_and_verify_func(
            get_graph((1, 3, 6, 7), [0, 1, 0, 0], [1, 2, 6, 6], slice_mode=slice_mode),
            run_module=run_module,
        )
        run_and_verify_func(
            get_graph((2, 3, 6, 7), [0, 0, 0, 0], [-1, -1, -1, -1], slice_mode=slice_mode),
            run_module=run_module,
        )
        run_and_verify_func(
            get_graph((2, 3, 6, 7), [0, 1, 0, 0], [-1, -1, -1, -1], slice_mode=slice_mode),
            run_module=run_module,
        )
        run_and_verify_func(
            get_graph((1, 6), [0, 1], [1, 3], slice_mode=slice_mode), run_module=run_module
        )
def test_adaptive_pool2d(run_module):
    """adaptive max/avg pool2d down to a 1x1 output for each supported dtype.

    Fix: loop variable renamed from ``type`` to ``dtype`` so the ``type``
    builtin is no longer shadowed; the two pool variants are looped over
    (max first, then avg — same order as before).
    """

    def get_graph(op, x_shape=(1, 3, 32, 32), out_size=(1, 1), data_type="float16"):
        x = relay.var("x", shape=(x_shape), dtype=data_type)
        out = op(x, out_size)
        f = relay.Function([x], out)
        return f, {"x": x_shape}, []

    for dtype in SUPPORTED_DTYPES:
        for pool_op in (relay.nn.adaptive_max_pool2d, relay.nn.adaptive_avg_pool2d):
            run_and_verify_func(
                get_graph(pool_op, data_type=dtype),
                run_module=run_module,
                data_type=dtype,
            )
def test_multiple_outputs(run_module):
    """Function returning a tuple (z, w) where w depends on z.

    Verifies that multi-output subgraphs survive TRT partitioning. Fix: loop
    variable renamed from ``type`` to ``dtype`` so the ``type`` builtin is no
    longer shadowed.
    """

    def get_graph(d_type="float16"):
        x = relay.var("x", shape=(1, 3), dtype=d_type)
        y = relay.var("y", shape=(1, 3), dtype=d_type)
        z = relay.add(x, y)
        w = relay.add(z, y)
        out = relay.Tuple((z, w))
        f = relay.Function([x, y], out)
        return f, {"x": (1, 3), "y": (1, 3)}, []

    for dtype in SUPPORTED_DTYPES:
        run_and_verify_func(get_graph(d_type=dtype), run_module=run_module, data_type=dtype)
@pytest.mark.skip(reason=("Fails assert_allclose. See https://github.com/apache/tvm/issues/11765"))
def test_conv3d(run_module):
    """3-D convolution, including an asymmetric (6-element) padding case."""

    def get_graph(
        x_shape=(1, 24, 8, 8, 8),
        k_shape=(16, 24, 3, 3, 3),
        groups=1,
        padding=(0, 0, 0),
        strides=(1, 1, 1),
        dilation=(1, 1, 1),
    ):
        data = relay.var("x", shape=(x_shape), dtype="float32")
        weight = relay.var("kernel", shape=(k_shape), dtype="float32")
        conv = relay.nn.conv3d(
            data,
            weight,
            channels=k_shape[0],
            kernel_size=k_shape[2:],
            groups=groups,
            padding=padding,
            strides=strides,
            dilation=dilation,
        )
        func = relay.Function([data, weight], conv)
        return func, {"x": x_shape, "kernel": k_shape}, ["kernel"]

    run_and_verify_func(get_graph(), run_module=run_module)
    run_and_verify_func(get_graph(padding=(0, 0, 0, 1, 1, 1)), run_module=run_module)
def test_pool3d(run_module):
    """3-D max/avg pooling, including asymmetric padding and unit strides."""

    def get_graph(
        op,
        x_shape=(1, 3, 8, 32, 32),
        pool_size=(2, 2, 2),
        strides=(2, 2, 2),
        padding=(0, 0, 0),
        ceil_mode=False,
        count_include_pad=None,
    ):
        data = relay.var("x", shape=(x_shape), dtype="float32")
        # count_include_pad only applies to avg pooling; pass it only when set.
        kwargs = dict(pool_size=pool_size, strides=strides, padding=padding, ceil_mode=ceil_mode)
        if count_include_pad is not None:
            kwargs["count_include_pad"] = count_include_pad
        pooled = op(data, **kwargs)
        func = relay.Function([data], pooled)
        return func, {"x": x_shape}, []

    run_and_verify_func(get_graph(relay.nn.avg_pool3d), run_module=run_module)
    run_and_verify_func(get_graph(relay.nn.max_pool3d), run_module=run_module)
    run_and_verify_func(
        get_graph(relay.nn.max_pool3d, padding=(0, 0, 0, 1, 1, 1)), run_module=run_module
    )
    run_and_verify_func(get_graph(relay.nn.max_pool3d, strides=(1, 1, 1)), run_module=run_module)
def test_conv3d_transpose(run_module):
    """Transposed 3-D convolution with strides and output padding."""

    def get_graph(
        x_shape=(1, 32, 8, 8, 8),
        k_shape=(32, 16, 3, 3, 3),
        groups=1,
        padding=(0, 0, 0),
        strides=(1, 1, 1),
        output_padding=(0, 0, 0),
    ):
        data = relay.var("x", shape=(x_shape), dtype="float32")
        weight = relay.var("kernel", shape=(k_shape), dtype="float32")
        deconv = relay.nn.conv3d_transpose(
            data,
            weight,
            channels=k_shape[1],
            kernel_size=k_shape[2:5],
            groups=groups,
            padding=padding,
            strides=strides,
            output_padding=output_padding,
        )
        func = relay.Function([data, weight], deconv)
        return func, {"x": x_shape, "kernel": k_shape}, ["kernel"]

    run_and_verify_func(get_graph(), run_module=run_module)
    run_and_verify_func(get_graph(strides=(2, 2, 2)), run_module=run_module)
    run_and_verify_func(
        get_graph(strides=(2, 2, 2), output_padding=(1, 1, 1)), run_module=run_module
    )
@has_tensorrt_codegen
def test_dynamic_offload():
    """
    This test checks for proper dynamic offloading of relay graphs. An addition between
    the outputs of two conv2d's is performed, one of them having all static args whereas
    the other has a arg with dynamic shape. It is expected for the TRT partitioner to
    offload the conv2d with dynamic arg to TVM while running the other in TRT.
    """
    data_shape = (1, 32, 8, 8)
    k_shape = (1, 32, 3, 3)
    # x has dynamic spatial dims (must stay on TVM); y is fully static (TRT-eligible).
    x = relay.var("x", shape=(data_shape[0], data_shape[1], Any(), Any()), dtype="float32")
    y = relay.var("y", shape=(data_shape), dtype="float32")
    kernel = relay.const(np.random.rand(*k_shape).astype("float32"))
    def get_expected():
        # Create a nested TRT function that matches the expected output
        mod = tvm.IRModule()
        outer_var = relay.var("tensorrt_0_i0", shape=(data_shape), dtype="float32")
        inner_var = relay.var("FunctionVar_0_0", shape=(data_shape), dtype="float32")
        inner_body = relay.nn.conv2d(
            inner_var, kernel, channels=k_shape[0], kernel_size=k_shape[2:4]
        )
        inner_func = relay.Function([inner_var], inner_body)
        inner_func = set_inner_func_attr(inner_func, "nn.conv2d_", "tensorrt.nn.conv2d")
        outer_body = inner_func(outer_var)
        outer_func = relay.Function([outer_var], outer_body)
        outer_func = set_outer_func_attr(outer_func, "tensorrt", "tvmgen_default_tensorrt_main_0")
        gv = GlobalVar("tvmgen_default_tensorrt_main_0")
        mod[gv] = outer_func
        mod = relay.transform.InferType()(mod)
        # Create the main function: dynamic conv stays inline, static conv is the TRT call.
        out1 = relay.nn.conv2d(x, kernel, channels=k_shape[0], kernel_size=k_shape[2:4])
        out = relay.add(out1, gv(y))
        f = relay.Function([x, y], out)
        mod["main"] = f
        mod = relay.transform.InferType()(mod)
        return mod
    # Create relay function that will be offloaded to TRT
    out1 = relay.nn.conv2d(x, kernel, channels=k_shape[0], kernel_size=k_shape[2:4])
    out2 = relay.nn.conv2d(y, kernel, channels=k_shape[0], kernel_size=k_shape[2:4])
    out = relay.add(out1, out2)
    f = relay.Function([x, y], out)
    # Pass the function to TRT compilation
    mod = tvm.IRModule()
    mod["main"] = f
    mod = relay.transform.InferType()(mod)
    mod_trt = tensorrt.partition_for_tensorrt(mod)
    # Get the expected relay graph and compare structurally (free vars may differ by name).
    mod_exp = get_expected()
    tvm.ir.assert_structural_equal(mod_trt, mod_exp, map_free_vars=True)
def test_tensorrt_dynamic_batch(run_module):
    """ReLU with a dynamic (relay.Any) batch dimension under the VM executor.

    The same module is compiled with and without TRT partitioning; results for
    every tested batch size (including 0) must agree between the two paths.
    Fix: use ``tvm.transform.PassContext`` directly instead of the deprecated
    alias ``relay.build_config``, consistent with the rest of this file.
    """
    batches_to_test = [1, 1, 0, 2, 3, 0, 1, 3, 2]
    x_shape = (relay.Any(), 1, 8, 8)
    # Allocate once at the largest batch; smaller batches are leading slices.
    x_data = np.ones([max(batches_to_test)] + list(x_shape)[1:]).astype("float32")
    result_arr = [{} for _ in range(len(batches_to_test))]
    for use_trt in [True, False]:
        x = relay.var("x", shape=x_shape, dtype="float32")
        out = relay.nn.relu(x)
        f = relay.Function([x], out)
        mod = tvm.IRModule()
        mod["main"] = f
        if use_trt:
            mod = tensorrt.partition_for_tensorrt(mod)
        if run_module:
            with tvm.transform.PassContext(opt_level=3):
                func = relay.create_executor(
                    "vm", mod=mod, device=tvm.cpu(0), target="llvm"
                ).evaluate()
            for i, batch_size in enumerate(batches_to_test):
                result_arr[i][use_trt] = func(x_data[:batch_size, ...])
    if run_module:
        for i in range(len(batches_to_test)):
            assert_result_dict_holds(result_arr[i])
def test_tensorrt_dynamic_batch_conv(run_module):
    """Conv2d with a dynamic batch dim, on llvm and cuda targets, with TRT's
    implicit-batch mode both on and off; all path results must agree."""
    batches_to_test = [1, 5, 1, 0, 2, 3, 0, 1, 3, 2]
    x_shape = (relay.Any(), 32, 8, 8)
    # Allocate once at the largest batch; smaller batches are leading slices.
    x_data = np.ones([max(batches_to_test)] + list(x_shape)[1:]).astype("float32")
    k_shape = (16, 32, 3, 3)
    params = {"kernel": np.random.uniform(-1, 1, k_shape).astype("float32")}
    for use_implicit_batch in [True, False]:
        # result_arr[i][target][use_trt] holds the output for batch i.
        result_arr = [{"cuda": {}, "llvm": {}} for _ in range(len(batches_to_test))]
        for use_trt in [True, False]:
            x = relay.var("x", shape=x_shape, dtype="float32")
            kernel = relay.var("kernel", shape=k_shape, dtype="float32")
            out = relay.nn.conv2d(x, kernel, channels=16, kernel_size=(3, 3), groups=1)
            f = relay.Function([x, kernel], out)
            mod = tvm.IRModule()
            mod["main"] = f
            trt_target = tvm.target.Target(f"tensorrt -use_implicit_batch={use_implicit_batch}")
            if use_trt:
                mod = tensorrt.partition_for_tensorrt(mod, params=params, target=trt_target)
            if run_module:
                for target in ["llvm", "cuda"]:
                    # With TRT the heterogeneous target list includes the TRT target.
                    targets = [target]
                    if use_trt:
                        targets.append(trt_target)
                    with tvm.transform.PassContext(opt_level=3):
                        func = relay.create_executor(
                            "vm", mod=mod, device=tvm.device(target), target=targets
                        ).evaluate()
                    for i, batch_size in enumerate(batches_to_test):
                        result_arr[i][target][use_trt] = func(x_data[:batch_size, ...], **params)
        if run_module:
            for i in range(len(batches_to_test)):
                for target in ["llvm", "cuda"]:
                    assert_result_dict_holds(result_arr[i][target])
def test_maskrcnn_resnet50(run_module) -> None:
"""
This function tests the working of pytorch maskrcnn with resnet50 as backbone with
VM and VM + TRT. Since the order of compiled model outputs is a bit different from
original pytorch model, it uses a custom logic for comparison check.
"""
import torch
import torchvision
def convert_traced_model_to_vm_trt(
traced_module: torch.jit.TopLevelTracedModule, np_sample_input: np.ndarray, target: str
) -> tvm.runtime.vm.Executable:
"""
This function converts a traced pytorch model to VM + TRT.
"""
input_shape = np_sample_input.shape
input_name = "input0"
shape_list = [(input_name, input_shape)]
mod, params = relay.frontend.from_pytorch(traced_module, shape_list)
trt_target = tvm.target.Target("tensorrt -remove_no_mac_subgraphs=True")
mod = tensorrt.partition_for_tensorrt(mod, params=params, target=trt_target)
targets = [target, trt_target]
with tvm.transform.PassContext(opt_level=3, disabled_pass=["FoldScaleAxis"]):
vm_trt_exec = relay.vm.compile(mod, target=targets, params=params)
return vm_trt_exec
class TraceWrapper(torch.nn.Module):
"""
This class is a wrapper over the torch module to convert the outputs into traceable form
"""
def __init__(self, model: torch.nn.Module) -> None:
super().__init__()
self.model = model
def forward(
self, inp: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
out = self.model(inp)
return out[0]["boxes"], out[0]["scores"], out[0]["labels"], out[0]["masks"]
def get_traced_maskrcnn_model(np_sample_input: np.ndarray) -> torch.jit.TopLevelTracedModule:
"""
This function takes a sample input and returns the traced maskrcnn model
"""
model_func = torchvision.models.detection.maskrcnn_resnet50_fpn
model = TraceWrapper(model_func(pretrained=True))
model.eval()
inp = torch.Tensor(np.random.uniform(0.0, 250.0, size=np_sample_input.shape))
with torch.no_grad():
out = model(inp)
traced_module = torch.jit.trace(model, inp)
traced_module.eval()
return traced_module
def get_maskrcnn_input(in_size: int) -> np.ndarray:
    """Download a sample street image and preprocess it for Mask R-CNN.

    The image is resized to ``in_size`` x ``in_size``, converted from BGR to
    RGB, scaled to [0, 1], and returned as float32 in NCHW layout with shape
    (1, 3, in_size, in_size).
    """
    # Fix: removed the unused local `input_shape` the original computed.
    img_path = "test_street_small.jpg"
    img_url = "https://raw.githubusercontent.com/dmlc/web-data/master/gluoncv/detection/street_small.jpg"
    download(img_url, img_path)
    # cv2 is imported lazily so the module can be imported without OpenCV installed.
    import cv2

    img = cv2.imread(img_path).astype("float32")
    img = cv2.resize(img, (in_size, in_size))
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img = np.transpose(img / 255.0, [2, 0, 1])  # HWC -> CHW
    img = np.expand_dims(img, axis=0)  # add batch dimension
    return img
# NOTE(review): this statement run references `run_module`, so it is the tail of an
# enclosing test function whose `def` lies above this chunk — confirm indentation
# against the full file.
in_size = 300
np_sample_input = get_maskrcnn_input(in_size)
traced_module = get_traced_maskrcnn_model(np_sample_input)
vm_trt_exec = convert_traced_model_to_vm_trt(traced_module, np_sample_input, target="llvm")
if run_module:
    dev = tvm.cpu()
    vm = tvm.runtime.vm.VirtualMachine(vm_trt_exec, dev)
    vm.set_input("main", **{"input0": np_sample_input})
    tvm_res = vm.run()
    # Outputs follow TraceWrapper's ordering: [0]=boxes, [1]=scores, [2]=labels, [3]=masks.
    # Descending sort by scores and get the high confidence indices. In this example 9 is chosen,
    # because this image has 9 boxes over 0.9 confidence
    num_high_confidence_boxes = 9
    tvm_indices = np.argsort(-1 * tvm_res[1].numpy())[:num_high_confidence_boxes]
    with torch.no_grad():
        out = traced_module(torch.Tensor(np_sample_input))
        # Descending sort by scores and get the high confidence indices
        pt_indices = np.argsort(-1 * out[1].numpy())[:num_high_confidence_boxes]
    # [Box Tol, Score Tol, Label Tol, Mask Tol]
    tol = [1e-1, 5e-3, 1e-5, 4e-1]
    # Because of certain ops, there are certain minor differences in TVM outputs and PT outputs,
    # This means that the tolerance can't be 1e-4 or 1e-5 throughout. The ideal way to get around
    # this is to test it on an entire dataset and compare mAP with the original model.
    # However, since that is not practically possible on CI, the following compromise is made.
    # These tolerances are chosen based on their impact or lack thereof to the mAP score, e.g:
    # 0.1 pixel difference of a box in a 300X300 image wont make any change.
    for i, tol_val in zip(range(4), tol):
        np.testing.assert_allclose(
            tvm_res[i].numpy()[tvm_indices],
            out[i].numpy()[pt_indices],
            rtol=tol_val,
            atol=tol_val,
        )
def test_empty_subgraph(run_module):
    """Compile (and optionally run) a module whose TensorRT subgraph is an identity."""
    x_shape = (1, 3, 5)
    mod = tvm.IRModule()
    # Empty tensorrt subgraph: an identity function marked for the "tensorrt" compiler.
    var1 = relay.var("tensorrt_0_i0", shape=(x_shape), dtype="float32")
    f1 = GlobalVar("tensorrt_0")
    func = relay.Function([var1], var1)
    func = set_outer_func_attr(func, "tensorrt", "tvmgen_default_tensorrt_0")
    mod[f1] = func
    mod = relay.transform.InferType()(mod)
    # Create the main function: relu feeding the (empty) TensorRT subgraph.
    x = relay.var("x", shape=x_shape, dtype="float32")
    out = f1(relay.nn.relu(x))
    f = relay.Function([x], out)
    mod["main"] = f
    x_data = np.random.uniform(-1, 1, x_shape).astype("float32")
    # Exercise both the graph executor and the VM; execution itself is gated on run_module.
    for mode in ["graph", "vm"]:
        with tvm.transform.PassContext(opt_level=3):
            func = relay.create_executor(
                mode, mod=mod, device=tvm.cuda(0), target="cuda"
            ).evaluate()
        if run_module:
            results = func(x_data)
if __name__ == "__main__":
    # Emit INFO-level logs when the tests are run standalone.
    logging.basicConfig(level=logging.INFO)
    tvm.testing.main()
| 53,474 | 36.898653 | 133 | py |
tvm | tvm-main/tests/python/contrib/test_mxnet_bridge.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
def mxnet_check():
    """This is a simple test function for MXNet bridge

    It is not included as pytests, because of its dependency on mxnet
    User can directly run this script to verify correctness.
    """
    # Imports are local so the module can be loaded without mxnet installed.
    import mxnet as mx
    from tvm import topi
    import tvm
    from tvm import te
    import numpy as np
    from tvm.contrib.mxnet import to_mxnet_func

    # build a TVM function through topi
    n = 20  # NOTE(review): unused local.
    shape = (20,)
    scale = te.var("scale", dtype="float32")
    x = te.placeholder(shape)
    y = te.placeholder(shape)
    z = topi.broadcast_add(x, y)
    zz = te.compute(shape, lambda *i: z(*i) * scale)
    target = tvm.target.cuda()
    # build the function
    with target:
        s = topi.generic.schedule_injective(zz)
        f = tvm.build(s, [x, y, zz, scale])
    # get a mxnet version; const_loc marks args 0 and 1 as constant locations.
    mxf = to_mxnet_func(f, const_loc=[0, 1])
    dev = mx.gpu(0)
    # NOTE(review): `zz` is rebound here from the TE tensor to the MXNet output
    # buffer; also confirm the mxnet version in use accepts `device=` (older
    # mxnet APIs take `ctx=`).
    xx = mx.nd.uniform(shape=shape, device=dev)
    yy = mx.nd.uniform(shape=shape, device=dev)
    zz = mx.nd.empty(shape=shape, device=dev)
    # invoke myf: this runs in mxnet engine
    mxf(xx, yy, zz, 10.0)
    mxf(xx, yy, zz, 10.0)
    # (x + y) * scale with scale == 10.0
    tvm.testing.assert_allclose(zz.numpy(), (xx.numpy() + yy.numpy()) * 10)
if __name__ == "__main__":
    # Manual check — intentionally not collected by pytest (see mxnet_check docstring).
    mxnet_check()
| 2,041 | 30.90625 | 75 | py |
tvm | tvm-main/tests/python/contrib/test_tensorrt_int8_exp.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import os
import numpy as np
try:
# See issue #9362.
import torch
except:
pass
import tvm
import tvm.testing
from tvm import relay
from tvm.contrib.download import download_testdata
from tvm.relay.op.contrib.tensorrt import partition_for_tensorrt
from tvm.relay.op.contrib import tensorrt
def skip_codegen_test():
    """Return True when the TensorRT/CUDA codegen prerequisites are missing."""
    cuda_available = tvm.runtime.enabled("cuda") and tvm.cuda(0).exist
    if not cuda_available:
        print("Skip because CUDA is not enabled.")
        return True
    if tensorrt.is_tensorrt_compiler_enabled():
        print("TensorRT compiler is available!")
        return False
    print("Skip because TensorRT compiler is not available.")
    return True
def skip_runtime_test():
    """Return True when CUDA or the TensorRT runtime is unavailable."""
    cuda_available = tvm.runtime.enabled("cuda") and tvm.cuda(0).exist
    if not cuda_available:
        print("Skip because CUDA is not enabled.")
        return True
    if tensorrt.is_tensorrt_runtime_enabled():
        print("TensorRT runtime is available!")
        return False
    print("Skip because TensorRT runtime is not available.")
    return True
def test_trt_int8():
    """
    This Function is used to use tensorrt int8 to compile a resnet34 model,
    and compare cosine distance between the output of the original model and trt int8 tvm output
    """
    if skip_codegen_test() or skip_runtime_test():
        return

    # NOTE(review): the bare `except` clauses below also swallow non-ImportError
    # failures from these imports.
    try:
        from PIL import Image
        from scipy.spatial import distance
    except:
        print("please install scipy and Image python packages")
        return

    try:
        import torch
        import torchvision
        from torchvision import transforms
    except:
        print("please install pytorch python package")
        return

    # Enable the int8 path and set the calibration round count; the latter is
    # read back below to drive the calibration loop.
    os.environ["TVM_TENSORRT_USE_INT8"] = "1"
    os.environ["TENSORRT_NUM_CALI_INT8"] = "10"
    model_name = "resnet34"
    model = getattr(torchvision.models, model_name)(pretrained=True)
    model = model.eval()

    # We grab the TorchScripted model via tracing
    input_shape = [1, 3, 224, 224]
    input_data = torch.randn(input_shape)
    scripted_model = torch.jit.trace(model, input_data).eval()

    img_url = "https://github.com/dmlc/mxnet.js/blob/main/data/cat.png?raw=true"
    img_path = download_testdata(img_url, "cat.png", module="data")
    img = Image.open(img_path).resize((224, 224))
    my_preprocess = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    img = my_preprocess(img)
    img = np.expand_dims(img, 0)

    input_name = "input0"
    shape_list = [(input_name, img.shape)]
    mod, params = relay.frontend.from_pytorch(scripted_model, shape_list)

    # compile the model
    target = "cuda"
    dev = tvm.cuda()
    mod = partition_for_tensorrt(mod, params)
    with tvm.transform.PassContext(opt_level=3):
        lib = relay.build(mod, target=target, params=params)

    gen_module = tvm.contrib.graph_executor.GraphModule(lib["default"](dev))

    # Feed the same image repeatedly to calibrate the int8 engine.
    num_cali_int8 = int(os.environ["TENSORRT_NUM_CALI_INT8"])
    if num_cali_int8 != 0:
        print("start calibrating data ... ")
        for i in range(num_cali_int8):
            tvm_data = tvm.nd.array(img)
            gen_module.set_input(input_name, tvm_data)
            gen_module.run(data=tvm_data)
        print("finished calibrating data ... ")

    # get output of tvm model
    print("rebuild engine and test to run ... ")
    tvm_data = tvm.nd.array(img)
    gen_module.set_input(input_name, tvm_data)
    gen_module.run(data=tvm_data)
    out = gen_module.get_output(0)

    # check output of tvm and output of pytorch model are equal
    torch_data = torch.from_numpy(img)
    model = scripted_model.eval()
    torch_output = model(torch_data)
    cosine_distance_res = distance.cosine(out.numpy(), torch_output.detach().cpu().numpy())
    assert cosine_distance_res <= 0.01

    # Evaluate
    print("Evaluate inference time cost...")
    ftimer = gen_module.module.time_evaluator("run", dev, repeat=10, min_repeat_ms=500)
    prof_res = np.array(ftimer().results) * 1e3  # convert to millisecond
    message = "Mean inference time (std dev): %.2f ms (%.2f ms)" % (
        np.mean(prof_res),
        np.std(prof_res),
    )
    print(message)
if __name__ == "__main__":
    # Delegate to TVM's pytest-based test entry point.
    tvm.testing.main()
| 5,224 | 32.280255 | 96 | py |
tvm | tvm-main/tests/python/contrib/test_clml/test_network.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""OpenCL ML network tests."""
import tvm
import numpy as np
from tvm import relay
from tvm.relay import testing
from tvm.contrib import utils
from test_clml.infrastructure import build_and_run, Device
import pytest
def _build_and_run_network(mod, params, inputs, data, device, atol, rtol, tvm_log=""):
    """Compile and execute ``mod`` twice — with and then without CLML offload.

    Returns a two-element list: the CLML result first, then the plain TVM
    result. ``inputs``, ``atol`` and ``rtol`` are accepted for signature
    symmetry with the callers but are not used here.
    """
    return [
        build_and_run(mod, data, 1, params, device, enable_clml=use_clml, tune_log=tvm_log)[0][0]
        for use_clml in (True, False)
    ]
def _get_keras_model(keras_model, inputs_dict, data):
    """Convert Keras graph to relay."""
    inputs = {}
    for name, (shape, _) in inputs_dict.items():
        # NOTE(review): every iteration writes to input_names[0], so only
        # single-input models are mapped correctly — confirm before reusing
        # this helper for multi-input networks.
        inputs[keras_model.input_names[0]] = shape

    # `Input` is imported but unused here.
    from tensorflow.keras.layers import Input
    from tensorflow.keras.models import Model

    def get_bottom_top_model(model, layer_name):
        # Build a sub-model ending at `layer_name`.
        layer = model.get_layer(layer_name)
        bottom_input = model.layers[0].input
        bottom_output = layer.output
        bottom_model = Model(bottom_input, bottom_output)
        return bottom_model

    keras_model = get_bottom_top_model(keras_model, "predictions")
    # The stored data is NCHW (see callers); Keras predicts on NHWC, hence the transpose.
    ref_output = keras_model.predict(data["input_1"].transpose(0, 2, 3, 1))
    mod, params = relay.frontend.from_keras(keras_model, inputs, layout="NCHW")
    return mod, params, ref_output
@pytest.mark.parametrize("dtype", ["float16"])
@tvm.testing.requires_openclml
def test_mobilenet(device, dtype):
    """Compare CLML and plain-OpenCL outputs for Keras MobileNet (top-10 ranking)."""

    def get_model():
        from tensorflow.keras.applications import MobileNet
        import tensorflow as tf

        tf.keras.backend.clear_session()
        mobilenet = MobileNet(
            include_top=True, weights=None, input_shape=(224, 224, 3), classes=1000
        )
        inputs = {mobilenet.input_names[0]: ((1, 3, 224, 224), "float32")}
        data = {}
        np.random.seed(0)
        # NOTE(review): the loop variable `dtype` shadows the test parameter.
        for name, (shape, dtype) in inputs.items():
            if dtype == "uint8":
                low, high = 0, 1
            else:
                low, high = -1, 1
            data[name] = np.random.uniform(low, high, shape).astype(dtype)
        mod, params, ref_outputs = _get_keras_model(mobilenet, inputs, data)
        return mod, params, inputs, data, ref_outputs

    mod, params, inputs, input_data, ref_outputs = get_model()
    outputs = _build_and_run_network(
        mod, params, inputs, input_data, device=device, atol=1e-5, rtol=1e-5
    )
    # outputs[0] is the CLML run, outputs[1] the non-CLML run; compare the
    # ten top-ranked class indices instead of raw scores.
    opencl_sort = np.argsort(outputs[1].asnumpy()).flatten()
    clml_sort = np.argsort(outputs[0].asnumpy()).flatten()
    tvm.testing.assert_allclose(opencl_sort[:10], clml_sort[:10], rtol=1e-5, atol=1e-5)
@pytest.mark.parametrize("dtype", ["float16"])
@tvm.testing.requires_openclml
def test_inception_v3(device, dtype):
    """Compare CLML and plain-OpenCL outputs for Keras InceptionV3 (top-5 ranking)."""

    def get_model():
        from tensorflow.keras.applications import InceptionV3
        import tensorflow as tf

        tf.keras.backend.clear_session()
        inceptionV3 = InceptionV3(
            include_top=True, weights=None, input_shape=(299, 299, 3), classes=1000
        )
        inputs = {inceptionV3.input_names[0]: ((1, 3, 299, 299), "float16")}
        data = {}
        np.random.seed(0)
        # NOTE(review): the loop variable `dtype` shadows the test parameter.
        for name, (shape, dtype) in inputs.items():
            if dtype == "uint8":
                low, high = 0, 1
            else:
                low, high = -2, 1
            data[name] = np.random.uniform(low, high, shape).astype(dtype)
        mod, params, ref_outputs = _get_keras_model(inceptionV3, inputs, data)
        return mod, params, inputs, data, ref_outputs

    mod, params, inputs, input_data, ref_outputs = get_model()
    outputs = _build_and_run_network(
        mod, params, inputs, input_data, device=device, atol=1e-5, rtol=1e-5
    )
    # outputs[0] is the CLML run, outputs[1] the non-CLML run; compare the
    # five top-ranked class indices instead of raw scores.
    opencl_sort = np.argsort(outputs[1].asnumpy()).flatten()
    clml_sort = np.argsort(outputs[0].asnumpy()).flatten()
    tvm.testing.assert_allclose(opencl_sort[:5], clml_sort[:5], rtol=1e-5, atol=1e-5)
@pytest.mark.parametrize("dtype", ["float16"])
@tvm.testing.requires_openclml
def test_resnet50v2(device, dtype):
    """Compare CLML and plain-OpenCL outputs for Keras ResNet50V2 (top-10 ranking)."""

    def get_model():
        from tensorflow.keras.applications import ResNet50V2
        import tensorflow as tf

        tf.keras.backend.clear_session()
        model = ResNet50V2(include_top=True, weights=None, input_shape=(224, 224, 3), classes=1000)
        inputs_dict = {model.input_names[0]: ((1, 3, 224, 224), "float32")}
        data = {}
        np.random.seed(0)
        # NOTE(review): the loop variable `dtype` shadows the test parameter.
        for name, (shape, dtype) in inputs_dict.items():
            if dtype == "uint8":
                low, high = 0, 1
            else:
                low, high = -1, 1
            data[name] = np.random.uniform(low, high, shape).astype(dtype)
        # NOTE(review): the bare string below is a stray expression statement,
        # not a docstring — the conversion is inlined here instead of calling
        # _get_keras_model.
        """Convert Keras graph to relay."""
        inputs = {}
        for name, (shape, _) in inputs_dict.items():
            inputs[model.input_names[0]] = shape
        # NCHW data is transposed to NHWC for Keras prediction.
        ref_outputs = model.predict(data["input_1"].transpose(0, 2, 3, 1))
        mod, params = relay.frontend.from_keras(model, inputs, layout="NCHW")
        return mod, params, inputs, data, ref_outputs

    mod, params, inputs, input_data, ref_outputs = get_model()
    outputs = _build_and_run_network(
        mod, params, inputs, input_data, device=device, atol=1e-5, rtol=1e-5
    )
    # outputs[0] is the CLML run, outputs[1] the non-CLML run; compare the
    # ten top-ranked class indices instead of raw scores.
    opencl_sort = np.argsort(outputs[1].asnumpy()).flatten()
    clml_sort = np.argsort(outputs[0].asnumpy()).flatten()
    tvm.testing.assert_allclose(opencl_sort[:10], clml_sort[:10], rtol=1e-5, atol=1e-5)
if __name__ == "__main__":
    # Delegate to TVM's pytest-based test entry point.
    tvm.testing.main()
| 6,326 | 34.15 | 99 | py |
tvm | tvm-main/tests/python/contrib/test_arm_compute_lib/test_network.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Arm Compute Library network tests."""
from distutils.version import LooseVersion
import numpy as np
import pytest
from tvm import relay
from test_arm_compute_lib.infrastructure import Device, skip_runtime_test, build_and_run, verify
def _build_and_run_network(mod, params, inputs, device, tvm_ops, acl_partitions, atol, rtol):
    """Build ``mod`` with and without ACL offload, run both, and compare outputs.

    Random input data is generated once (seeded for reproducibility) and fed
    to both builds; ``verify`` then checks that the two runs agree within the
    given tolerances.
    """
    np.random.seed(0)
    data = {}
    for name, (shape, dtype) in inputs.items():
        low, high = (0, 255) if dtype == "uint8" else (-127, 128)
        data[name] = np.random.uniform(low, high, shape).astype(dtype)

    outputs = [
        build_and_run(
            mod,
            data,
            1,
            params,
            device,
            enable_acl=use_acl,
            tvm_ops=tvm_ops,
            acl_partitions=acl_partitions,
        )[0]
        for use_acl in (False, True)
    ]
    verify(outputs, atol=atol, rtol=rtol, verify_saturation=False)
def _get_tflite_model(tflite_model_path, inputs_dict):
    """Load a serialized TFLite model file and import it into Relay.

    ``inputs_dict`` maps input names to ``(shape, dtype)`` pairs. The test is
    skipped when the ``tflite`` package is not installed.
    """
    try:
        import tflite.Model
    except ImportError:
        pytest.skip("Missing Tflite support")

    with open(tflite_model_path, "rb") as model_file:
        model_buffer = model_file.read()

    # The location of GetRootAsModel differs across tflite package versions.
    try:
        parsed_model = tflite.Model.Model.GetRootAsModel(model_buffer, 0)
    except AttributeError:
        parsed_model = tflite.Model.GetRootAsModel(model_buffer, 0)

    shape_dict = {name: shape for name, (shape, _) in inputs_dict.items()}
    dtype_dict = {name: dtype for name, (_, dtype) in inputs_dict.items()}
    return relay.frontend.from_tflite(parsed_model, shape_dict=shape_dict, dtype_dict=dtype_dict)
def _get_keras_model(keras_model, inputs_dict):
    """Convert a Keras model to Relay (NHWC layout).

    NOTE(review): each loop iteration keys the shape on ``input_names[0]``,
    so only single-input models are handled correctly.
    """
    shape_map = {}
    for _, (shape, _) in inputs_dict.items():
        shape_map[keras_model.input_names[0]] = shape
    return relay.frontend.from_keras(keras_model, shape_map, layout="NHWC")
def test_vgg16():
    """Compare ACL-offloaded and plain TVM execution of Keras VGG16."""
    Device.load("test_config.json")

    if skip_runtime_test():
        return

    device = Device()

    def get_model():
        try:
            from keras.applications import VGG16
        except ImportError:
            pytest.skip("Missing Keras Package")
        vgg16 = VGG16(include_top=True, weights="imagenet", input_shape=(224, 224, 3), classes=1000)
        inputs = {vgg16.input_names[0]: ((1, 224, 224, 3), "float32")}
        mod, params = _get_keras_model(vgg16, inputs)
        return mod, params, inputs

    # tvm_ops / acl_partitions are the expected operator counts left to TVM
    # and offloaded to ACL for this model.
    _build_and_run_network(
        *get_model(),
        device=device,
        tvm_ops=4,
        acl_partitions=21,
        atol=0.002,
        rtol=0.01,
    )
def test_mobilenet():
    """Compare ACL-offloaded and plain TVM execution of Keras MobileNet."""
    keras = pytest.importorskip("keras")
    Device.load("test_config.json")

    if skip_runtime_test():
        return

    device = Device()

    def get_model():
        try:
            from keras.applications import MobileNet
        except ImportError:
            pytest.skip("Missing keras module")
        mobilenet = MobileNet(
            include_top=True, weights="imagenet", input_shape=(224, 224, 3), classes=1000
        )
        inputs = {mobilenet.input_names[0]: ((1, 224, 224, 3), "float32")}
        mod, params = _get_keras_model(mobilenet, inputs)
        return mod, params, inputs

    # The expected operator/partition counts depend on the Keras version.
    if keras.__version__ < LooseVersion("2.9"):
        # This can be removed after we migrate to TF/Keras >= 2.9
        expected_tvm_ops = 56
        expected_acl_partitions = 31
    else:
        # In Keras >= 2.7, one reshape operator was removed
        # from the MobileNet model, so it impacted this test
        # which now needs to be reduced by 1.
        # The change in Keras is `b6abfaed1326e3c`
        expected_tvm_ops = 55
        expected_acl_partitions = 30

    _build_and_run_network(
        *get_model(),
        device=device,
        tvm_ops=expected_tvm_ops,
        acl_partitions=expected_acl_partitions,
        atol=0.002,
        rtol=0.01,
    )
def test_quantized_mobilenet():
    """Compare ACL-offloaded and plain TVM execution of the uint8 TFLite MobileNet."""
    Device.load("test_config.json")

    if skip_runtime_test():
        return

    try:
        import tvm.relay.testing.tf as tf_testing
    except ImportError:
        pytest.skip("Missing Tflite support")

    device = Device()

    def get_model():
        model_path = tf_testing.get_workload_official(
            "https://storage.googleapis.com/download.tensorflow.org/"
            "models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224_quant.tgz",
            "mobilenet_v1_1.0_224_quant.tflite",
        )
        inputs = {"input": ((1, 224, 224, 3), "uint8")}
        mod, params = _get_tflite_model(model_path, inputs_dict=inputs)
        return mod, params, inputs

    # Quantized outputs are compared with a wide absolute tolerance (uint8 values).
    _build_and_run_network(
        *get_model(),
        device=device,
        tvm_ops=3,
        acl_partitions=30,
        atol=10,
        rtol=0,
    )
def test_squeezenet():
    """Compare ACL-offloaded and plain TVM execution of the TFLite SqueezeNet."""
    Device.load("test_config.json")

    if skip_runtime_test():
        return

    try:
        import tvm.relay.testing.tf as tf_testing
    except ImportError:
        pytest.skip("Missing TF Support")

    device = Device()

    def get_model():
        model_path = tf_testing.get_workload_official(
            "https://storage.googleapis.com/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/squeezenet_2018_04_27.tgz",
            "squeezenet.tflite",
        )
        inputs = {"Placeholder": ((1, 224, 224, 3), "float32")}
        mod, params = _get_tflite_model(model_path, inputs_dict=inputs)
        return mod, params, inputs

    _build_and_run_network(
        *get_model(),
        device=device,
        tvm_ops=9,
        acl_partitions=31,
        atol=8,
        rtol=0,
    )
if __name__ == "__main__":
    # Run all network comparisons directly when executed as a script.
    test_vgg16()
    test_mobilenet()
    test_quantized_mobilenet()
    test_squeezenet()
| 6,771 | 28.189655 | 135 | py |
tvm | tvm-main/tests/python/contrib/test_hexagon/metaschedule_e2e/export_models.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Hexagon MetaSchedule test helper functions."""
import torch
from torchvision.models import resnet
from torchvision.models.quantization import resnet as qresnet
import tvm
from tvm import relay
def export_resnet50_fp16():
    """Export Resnet50 FP16.

    Traces a pretrained torchvision ResNet-50, imports it into Relay,
    converts the module to float16, and writes the serialized module and
    parameters to ``resnet50_fp16.json`` / ``resnet50_fp16.params``.
    """
    net = resnet.resnet50(pretrained=True).eval()
    sample = torch.randn(1, 3, 224, 224)
    scripted = torch.jit.trace(net, sample).eval()

    mod, params = relay.frontend.from_pytorch(scripted, [("image", sample.shape)])
    mod = relay.transform.ToMixedPrecision("float16")(mod)

    with open("resnet50_fp16.json", "w") as json_file:
        json_file.write(tvm.ir.save_json(mod))
    with open("resnet50_fp16.params", "wb") as params_file:
        params_file.write(relay.save_param_dict(params))
def export_resnet50_int8():
    """Export Resnet50 INT8.

    Quantizes a pretrained torchvision ResNet-50 (fbgemm backend, one
    calibration pass), traces it, imports it into Relay keeping the
    quantized weights, and writes the serialized module and parameters to
    ``resnet50_int8.json`` / ``resnet50_int8.params``.
    """

    def _quantize_inplace(net, calibration_input):
        # Fuse, prepare, calibrate with a single forward pass, then convert.
        net.fuse_model()
        net.qconfig = torch.quantization.get_default_qconfig("fbgemm")
        torch.quantization.prepare(net, inplace=True)
        net(calibration_input)
        torch.quantization.convert(net, inplace=True)

    net = qresnet.resnet50(pretrained=True).eval()
    sample = torch.randn(1, 3, 224, 224)
    _quantize_inplace(net, sample)
    scripted = torch.jit.trace(net, sample).eval()

    mod, params = relay.frontend.from_pytorch(
        scripted, [("image", sample.shape)], keep_quantized_weight=True
    )

    with open("resnet50_int8.json", "w") as json_file:
        json_file.write(tvm.ir.save_json(mod))
    with open("resnet50_int8.params", "wb") as params_file:
        params_file.write(relay.save_param_dict(params))
if __name__ == "__main__":
    # Export both variants when run as a script.
    export_resnet50_fp16()
    export_resnet50_int8()
| 2,593 | 31.425 | 74 | py |
tvm | tvm-main/tests/python/contrib/test_ethosu/test_legalize.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
import pytest
pytest.importorskip("ethosu.vela")
import math
import numpy as np
import tensorflow as tf
import tflite.Model
import tvm
from tvm import relay
from tvm.relay.backend.contrib.ethosu import legalize, preprocess
from tvm.relay import dataflow_pattern
from tvm.relay.op.contrib import ethosu
from tvm.relay.backend.contrib.ethosu import util, codegen
from tvm.relay.build_module import bind_params_by_name
from tvm.relay.frontend.tflite import get_pad_value
from tvm.relay.expr_functor import ExprVisitor
from . import infra
def partition_ethosu_by_table(mod, pattern_table):
    """Partition ``mod`` for the Ethos-U using an explicit pattern table.

    In case only the legalization part is supported for an operator, we don't
    want to add the operator's pattern to the pattern table so that the compiler
    wouldn't attempt to offload an operator without full stack support.
    """
    mod = relay.transform.InferType()(mod)
    # Fix: the original had a duplicated assignment (`mod = mod = ...`).
    mod = codegen.replicate_pads(mod)
    mod = relay.transform.InferType()(mod)
    mod = relay.transform.MergeComposite(pattern_table)(mod)
    mod = relay.transform.AnnotateTarget("ethos-u")(mod)
    mod = relay.transform.MergeCompilerRegions()(mod)
    mod = relay.transform.InferType()(mod)
    mod = relay.transform.PartitionGraph()(mod)
    mod = relay.transform.InferType()(mod)
    mod = preprocess.preprocess_ext_io()(mod)
    return mod
def relu_n1_to_1(x):
    """Clamp ``x`` element-wise into [-1.0, 1.0].

    The specific pattern will be replaced into RELU_N1_TO_1 by tflite.
    """
    clipped_above = tf.math.minimum(x, 1.0)
    return tf.math.maximum(-1.0, clipped_above)
def test_split_indices_legalize():
    """split-with-indices is legalized into a tuple of strided_slice ops."""

    def create_graph(axis):
        # relu -> split at indices [5, 20, 45] along `axis`.
        x = relay.var("x", shape=(1, 50, 50, 3))
        x_relu = relay.nn.relu(x)
        split_output = relay.split(x_relu, [5, 20, 45], axis).tuple_value
        return relay.Function([x], split_output)

    def expected_mod_axis1():
        # Expected IR after legalization when splitting along axis 1.
        expected_ir_string = """
#[version = "0.0.5"]
def @tvmgen_default_ethos_u_main_0(%x: Tensor[(1, 50, 50, 3), float32]) -> (Tensor[(1, 5, 50, 3), float32],\
Tensor[(1, 15, 50, 3), float32],\
Tensor[(1, 25, 50, 3), float32],\
Tensor[(1, 5, 50, 3), float32]) {
%0 = nn.relu(%x) /* ty=Tensor[(1, 50, 50, 3), float32] */;
%1 = strided_slice(%0, begin=[0, 0, 0, 0], end=[1, 5, 50, 3], strides=[1], axes=None)\
/* ty=Tensor[(1, 5, 50, 3), float32] */;
%2 = strided_slice(%0, begin=[0, 5, 0, 0], end=[1, 20, 50, 3], strides=[1], axes=None)\
/* ty=Tensor[(1, 15, 50, 3), float32] */;
%3 = strided_slice(%0, begin=[0, 20, 0, 0], end=[1, 45, 50, 3], strides=[1], axes=None)\
/* ty=Tensor[(1, 25, 50, 3), float32] */;
%4 = strided_slice(%0, begin=[0, 45, 0, 0], end=[1, 50, 50, 3], strides=[1], axes=None)\
/* ty=Tensor[(1, 5, 50, 3), float32] */;
(%1, %2, %3, %4)
}
"""
        return tvm.relay.fromtext(expected_ir_string)

    def expected_mod_axis2():
        # Expected IR after legalization when splitting along axis 2.
        expected_ir_string = """
#[version = "0.0.5"]
def @tvmgen_default_ethos_u_main_0(%x: Tensor[(1, 50, 50, 3), float32]) -> (Tensor[(1, 50, 5, 3), float32],\
Tensor[(1, 50, 15, 3), float32],\
Tensor[(1, 50, 25, 3), float32],\
Tensor[(1, 50, 5, 3), float32]) {
%0 = nn.relu(%x) /* ty=Tensor[(1, 50, 50, 3), float32] */;
%1 = strided_slice(%0, begin=[0, 0, 0, 0], end=[1, 50, 5, 3], strides=[1], axes=None)\
/* ty=Tensor[(1, 50, 5, 3), float32] */;
%2 = strided_slice(%0, begin=[0, 0, 5, 0], end=[1, 50, 20, 3], strides=[1], axes=None)\
/* ty=Tensor[(1, 50, 15, 3), float32] */;
%3 = strided_slice(%0, begin=[0, 0, 20, 0], end=[1, 50, 45, 3], strides=[1], axes=None)\
/* ty=Tensor[(1, 50, 25, 3), float32] */;
%4 = strided_slice(%0, begin=[0, 0, 45, 0], end=[1, 50, 50, 3], strides=[1], axes=None)\
/* ty=Tensor[(1, 50, 5, 3), float32] */;
(%1, %2, %3, %4)
}
"""
        return tvm.relay.fromtext(expected_ir_string)

    # The rewriters are applied to the function in list order.
    rewrite_split = [legalize.PartitionedSplitRewriter(), legalize.SplitRewriter()]

    mod_axis1 = tvm.IRModule()
    func = create_graph(1)
    for r in rewrite_split:
        func = dataflow_pattern.rewrite(r, func)
    mod_axis1["tvmgen_default_ethos_u_main_0"] = func
    expected_axis1 = expected_mod_axis1()
    tvm.ir.assert_structural_equal(mod_axis1, expected_axis1)

    mod_axis2 = tvm.IRModule()
    func = create_graph(2)
    for r in rewrite_split:
        func = dataflow_pattern.rewrite(r, func)
    mod_axis2["tvmgen_default_ethos_u_main_0"] = func
    expected_axis2 = expected_mod_axis2()
    tvm.ir.assert_structural_equal(mod_axis2, expected_axis2)
def test_split_sections_legalize():
    """split-into-sections is legalized into a tuple of strided_slice ops."""

    def create_graph(axis, sections):
        # abs -> split into `sections` equal parts along `axis` -> tanh per part.
        x = relay.var("x", shape=(1, 50, 50, 3))
        x_abs = relay.abs(x)
        split_output = relay.split(x_abs, sections, axis).tuple_value
        outputs = list()
        for section_idx in range(sections):
            split_single_out = relay.TupleGetItem(split_output, section_idx)
            tanh = relay.tanh(split_single_out)
            outputs.append(tanh)
        tuple_out = relay.Tuple(outputs)
        return relay.Function([x], tuple_out)

    def expected_mod_axis1():
        # Expected IR after legalization when splitting 5 ways along axis 1.
        expected_ir_string = """
#[version = "0.0.5"]
def @tvmgen_default_ethos_u_main_0(%x: Tensor[(1, 50, 50, 3), float32]) -> (Tensor[(1, 10, 50, 3), float32],\
Tensor[(1, 10, 50, 3), float32],\
Tensor[(1, 10, 50, 3), float32],\
Tensor[(1, 10, 50, 3), float32],\
Tensor[(1, 10, 50, 3), float32]) {
%0 = abs(%x) /* ty=Tensor[(1, 50, 50, 3), float32] */;
%1 = strided_slice(%0, begin=[0, 0, 0, 0], end=[1, 10, 50, 3], strides=[1], axes=None)\
/* ty=Tensor[(1, 10, 50, 3), float32] */;
%2 = strided_slice(%0, begin=[0, 10, 0, 0], end=[1, 20, 50, 3], strides=[1], axes=None)\
/* ty=Tensor[(1, 10, 50, 3), float32] */;
%3 = strided_slice(%0, begin=[0, 20, 0, 0], end=[1, 30, 50, 3], strides=[1], axes=None)\
/* ty=Tensor[(1, 10, 50, 3), float32] */;
%4 = strided_slice(%0, begin=[0, 30, 0, 0], end=[1, 40, 50, 3], strides=[1], axes=None)\
/* ty=Tensor[(1, 10, 50, 3), float32] */;
%5 = strided_slice(%0, begin=[0, 40, 0, 0], end=[1, 50, 50, 3], strides=[1], axes=None)\
/* ty=Tensor[(1, 10, 50, 3), float32] */;
%6 = (%1, %2, %3, %4, %5);
%7 = %6.0;
%8 = tanh(%7) /* ty=Tensor[(1, 10, 50, 3), float32] */;
%9 = %6.1;
%10 = tanh(%9) /* ty=Tensor[(1, 10, 50, 3), float32] */;
%11 = %6.2;
%12 = tanh(%11) /* ty=Tensor[(1, 10, 50, 3), float32] */;
%13 = %6.3;
%14 = tanh(%13) /* ty=Tensor[(1, 10, 50, 3), float32] */;
%15 = %6.4;
%16 = tanh(%15) /* ty=Tensor[(1, 10, 50, 3), float32] */;
(%8, %10, %12, %14, %16)
}
"""
        return tvm.relay.fromtext(expected_ir_string)

    def expected_mod_axis2():
        # Expected IR after legalization when splitting 5 ways along axis 2.
        expected_ir_string = """
#[version = "0.0.5"]
def @tvmgen_default_ethos_u_main_0(%x: Tensor[(1, 50, 50, 3), float32]) -> (Tensor[(1, 50, 10, 3), float32],\
Tensor[(1, 50, 10, 3), float32],\
Tensor[(1, 50, 10, 3), float32],\
Tensor[(1, 50, 10, 3), float32],\
Tensor[(1, 50, 10, 3), float32]) {
%0 = abs(%x) /* ty=Tensor[(1, 50, 50, 3), float32] */;
%1 = strided_slice(%0, begin=[0, 0, 0, 0], end=[1, 50, 10, 3], strides=[1], axes=None)\
/* ty=Tensor[(1, 50, 10, 3), float32] */;
%2 = strided_slice(%0, begin=[0, 0, 10, 0], end=[1, 50, 20, 3], strides=[1], axes=None)\
/* ty=Tensor[(1, 50, 10, 3), float32] */;
%3 = strided_slice(%0, begin=[0, 0, 20, 0], end=[1, 50, 30, 3], strides=[1], axes=None)\
/* ty=Tensor[(1, 50, 10, 3), float32] */;
%4 = strided_slice(%0, begin=[0, 0, 30, 0], end=[1, 50, 40, 3], strides=[1], axes=None)\
/* ty=Tensor[(1, 50, 10, 3), float32] */;
%5 = strided_slice(%0, begin=[0, 0, 40, 0], end=[1, 50, 50, 3], strides=[1], axes=None)\
/* ty=Tensor[(1, 50, 10, 3), float32] */;
%6 = (%1, %2, %3, %4, %5);
%7 = %6.0;
%8 = tanh(%7) /* ty=Tensor[(1, 50, 10, 3), float32] */;
%9 = %6.1;
%10 = tanh(%9) /* ty=Tensor[(1, 50, 10, 3), float32] */;
%11 = %6.2;
%12 = tanh(%11) /* ty=Tensor[(1, 50, 10, 3), float32] */;
%13 = %6.3;
%14 = tanh(%13) /* ty=Tensor[(1, 50, 10, 3), float32] */;
%15 = %6.4;
%16 = tanh(%15) /* ty=Tensor[(1, 50, 10, 3), float32] */;
(%8, %10, %12, %14, %16)
}
"""
        return tvm.relay.fromtext(expected_ir_string)

    # The rewriters are applied to the function in list order.
    rewrite_split = [legalize.PartitionedSplitRewriter(), legalize.SplitRewriter()]

    mod_axis1 = tvm.IRModule()
    func = create_graph(1, 5)
    for r in rewrite_split:
        func = dataflow_pattern.rewrite(r, func)
    mod_axis1["tvmgen_default_ethos_u_main_0"] = func
    expected_axis1 = expected_mod_axis1()
    tvm.ir.assert_structural_equal(mod_axis1, expected_axis1)

    mod_axis2 = tvm.IRModule()
    func = create_graph(2, 5)
    for r in rewrite_split:
        func = dataflow_pattern.rewrite(r, func)
    mod_axis2["tvmgen_default_ethos_u_main_0"] = func
    expected_axis2 = expected_mod_axis2()
    tvm.ir.assert_structural_equal(mod_axis2, expected_axis2)
# Axis permutations keyed by kernel-layout name; each entry maps the named
# layout back from/to OHWI ordering ("inverse" per the constant's name).
# NOTE(review): the "OWHI" key with the identity permutation [0, 1, 2, 3]
# looks like it may be a typo for "OHWI" — confirm against the code that
# consumes this table before relying on it.
INVERSE_LAYOUT_TRANSFORM_OHWI_MAP = {
    "HWIO": [1, 2, 3, 0],
    "HWOI": [1, 2, 0, 3],
    "OWHI": [0, 1, 2, 3],
}
@pytest.mark.parametrize("ifm_shape", [(1, 299, 299, 3), (1, 55, 55, 3)])
@pytest.mark.parametrize("kernel_shape", [(3, 2), (1, 3)])
@pytest.mark.parametrize("padding", ["SAME", "VALID"])
@pytest.mark.parametrize("strides, dilation", [((1, 1), (2, 1)), ((3, 2), (1, 1))])
@pytest.mark.parametrize("activation", [None, "RELU"])
def test_tflite_conv2d_legalize(ifm_shape, kernel_shape, padding, strides, dilation, activation):
    """Legalize a quantized TFLite conv2d (optionally followed by RELU) and
    verify the shapes and attributes of the resulting NPU conv2d operator."""
    dtype = "int8"

    def create_tflite_graph_single():
        # Build a single-conv2d TF model and quantize it to an int8 TFLite
        # flatbuffer via post-training quantization.
        class Model(tf.Module):
            @tf.function
            def tf_function(self, input_shape):
                op = tf.nn.conv2d(
                    input_shape,
                    filters=tf.constant(
                        np.random.uniform(size=(kernel_shape[0], kernel_shape[1], 3, 3)),
                        dtype=tf.float32,
                    ),
                    strides=strides,
                    padding=padding,
                    data_format="NHWC",
                    dilations=dilation,
                )
                if activation:
                    op = tf.nn.relu(op)
                return op

        model = Model()
        concrete_func = model.tf_function.get_concrete_function(
            tf.TensorSpec(ifm_shape, dtype=tf.float32)
        )

        # Convert the model
        def representative_dataset():
            # Random calibration data used by the converter to pick
            # quantization parameters.
            for _ in range(100):
                data = np.random.rand(*tuple(ifm_shape))
                yield [data.astype(np.float32)]

        converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        converter.representative_dataset = representative_dataset
        converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
        converter.inference_input_type = tf.int8
        converter.inference_output_type = tf.int8
        tflite_model = converter.convert()
        return tflite_model

    def verify(ext_func):
        # After legalization the partitioned function body is expected to
        # be a single NPU conv2d call.
        op = ext_func.body
        ofm_channels = op.attrs.ofm_channels
        # check IFM
        ifm = op.args[0].checked_type
        assert list(ifm.shape) == list(ifm_shape)
        assert str(ifm.dtype) == dtype
        assert ifm.shape[3] == ofm_channels
        # check OFM
        ofm = op.checked_type
        expected_ofm_shape = infra.compute_ofm_shape(
            ifm_shape, padding, kernel_shape, strides, dilation
        )
        assert list(ofm.shape) == list(expected_ofm_shape)
        assert str(ofm.dtype) == dtype
        assert ofm.shape[3] == ofm_channels
        # check weights (legalized weights are laid out OHWI)
        # NOTE(review): `asnumpy` is a deprecated alias of `numpy()` in
        # recent TVM versions.
        weights_ohwi = op.args[1].data.asnumpy()
        assert str(weights_ohwi.dtype) == dtype
        assert weights_ohwi.shape[0] == ofm_channels
        assert weights_ohwi.shape[1] == kernel_shape[0]
        assert weights_ohwi.shape[2] == kernel_shape[1]
        assert weights_ohwi.shape[3] == 3
        # Check that scale_bias matches weight tensor
        assert list(op.args[2].checked_type.shape)[0] == ofm_channels
        expected_padding = infra.compute_padding_shape(
            ifm_shape,
            expected_ofm_shape,
            padding,
            (kernel_shape[0], kernel_shape[1]),
            strides,
            dilation,
        )
        assert list(op.attrs.padding) == list(expected_padding)
        assert list(op.attrs.strides) == list(strides)
        assert list(op.attrs.dilation) == list(dilation)
        if activation == "RELU":
            # RELU is fused into the NPU op as a CLIP activation.
            assert str(op.attrs.activation) == "CLIP"

    conv2d_pattern_table = [
        (
            ethosu.QnnConv2DParams.composite_name,
            ethosu.qnn_conv2d_pattern(),
            lambda pat: ethosu.QnnConv2DParams(pat).is_valid(),
        )
    ]

    tflite_graph = create_tflite_graph_single()
    tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)

    mod, conv_params = relay.frontend.from_tflite(
        tflite_model,
        shape_dict={"input": ifm_shape},
        dtype_dict={"input": dtype},
    )

    mod["main"] = bind_params_by_name(mod["main"], conv_params)
    mod = partition_ethosu_by_table(mod, conv2d_pattern_table)

    mod["tvmgen_default_ethos_u_main_0"] = dataflow_pattern.rewrite(
        legalize.Conv2DRewriter(), mod["tvmgen_default_ethos_u_main_0"]
    )

    verify(mod["tvmgen_default_ethos_u_main_0"])
def test_tflite_conv2d_with_separate_padding_legalize():
    """Legalize a TFLite graph where spatial padding is a separate `pad` op
    in front of a VALID conv2d; the padding should be folded into the NPU
    conv2d's `padding` attribute."""
    dtype = "int8"
    ifm_shape = (1, 55, 34, 3)
    kernel_shape = (3, 2)
    strides = (1, 1)
    dilation = (2, 1)
    # (top, left, bottom, right) spatial padding expected on the NPU op.
    padding = (0, 0, 1, 1)

    def create_tflite_graph_single():
        # pad -> conv2d(VALID) model, quantized to int8 TFLite.
        class Model(tf.Module):
            @tf.function
            def tf_function(self, x):
                tf_strides = [1, strides[0], strides[1], 1]
                op = tf.pad(
                    x,
                    [[0, 0], [padding[0], padding[2]], [padding[1], padding[3]], [0, 0]],
                    "CONSTANT",
                )
                weight_shape = [kernel_shape[0], kernel_shape[1], ifm_shape[3], 3]
                weight = tf.constant(np.random.uniform(size=weight_shape), dtype=tf.float32)
                return tf.nn.conv2d(
                    op,
                    weight,
                    strides=tf_strides,
                    padding="VALID",
                    dilations=dilation,
                )

        model = Model()
        concrete_func = model.tf_function.get_concrete_function(
            tf.TensorSpec(ifm_shape, dtype=tf.float32)
        )

        # Convert the model
        def representative_dataset():
            # Random calibration data for post-training quantization.
            for _ in range(100):
                data = np.random.rand(*tuple(ifm_shape))
                yield [data.astype(np.float32)]

        converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        converter.representative_dataset = representative_dataset
        converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
        converter.inference_input_type = tf.int8
        converter.inference_output_type = tf.int8
        tflite_model = converter.convert()
        return tflite_model

    def verify(ext_func):
        op = ext_func.body
        ofm_channels = op.attrs.ofm_channels
        # check IFM: the NPU op consumes the *unpadded* input directly.
        ifm = op.args[0].checked_type
        assert list(ifm.shape) == list(ifm_shape)
        assert str(ifm.dtype) == dtype
        assert ifm.shape[3] == ofm_channels
        # check OFM
        ofm = op.checked_type
        expected_ofm_shape = infra.compute_ofm_shape(
            ifm_shape, padding, kernel_shape, strides, dilation
        )
        assert list(ofm.shape) == list(expected_ofm_shape)
        assert str(ofm.dtype) == dtype
        assert ofm.shape[3] == ofm_channels
        # check weights (OHWI layout after legalization)
        weights_ohwi = op.args[1].data.asnumpy()
        assert str(weights_ohwi.dtype) == dtype
        assert weights_ohwi.shape[0] == ofm_channels
        assert weights_ohwi.shape[1] == kernel_shape[0]
        assert weights_ohwi.shape[2] == kernel_shape[1]
        assert weights_ohwi.shape[3] == 3
        # Check that scale_bias matches weight tensor
        assert list(op.args[2].checked_type.shape)[0] == ofm_channels
        # The separate pad must have been absorbed into the padding attr.
        assert list(op.attrs.padding) == list(padding)
        assert list(op.attrs.strides) == list(strides)
        assert list(op.attrs.dilation) == list(dilation)

    conv2d_pattern_table = [
        (
            ethosu.QnnConv2DParams.composite_name,
            ethosu.qnn_conv2d_pattern(),
            lambda pat: ethosu.QnnConv2DParams(pat).is_valid(),
        )
    ]

    tflite_graph = create_tflite_graph_single()
    tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)

    mod, conv_params = relay.frontend.from_tflite(
        tflite_model,
        shape_dict={"input": ifm_shape},
        dtype_dict={"input": dtype},
    )

    mod["main"] = bind_params_by_name(mod["main"], conv_params)
    mod = partition_ethosu_by_table(mod, conv2d_pattern_table)

    mod["tvmgen_default_ethos_u_main_0"] = dataflow_pattern.rewrite(
        legalize.Conv2DRewriter(), mod["tvmgen_default_ethos_u_main_0"]
    )

    verify(mod["tvmgen_default_ethos_u_main_0"])
def test_tflite_conv2d_with_separate_channel_padding_legalize():
    """Conv2d preceded by padding on the *channel* axis: channel padding
    cannot be fused into the NPU conv2d, so after legalization an `nn.pad`
    op must still be present in the partitioned function."""
    dtype = "int8"
    ifm_shape = (1, 55, 34, 3)
    kernel_shape = (3, 2)
    strides = (1, 1)
    dilation = (2, 1)
    # (pad_before, pad_after) on the channel axis.
    padding_ch = (1, 1)

    class ArePadOnGraph(ExprVisitor):
        """
        Visits the Graph recursively and checks if it contains 'nn.pad' op
        """

        def __init__(self):
            ExprVisitor.__init__(self)
            self.on_graph = False

        def visit_call(self, call):
            if isinstance(call.op, tvm.ir.Op):
                if str(call.op.name) == "nn.pad":
                    self.on_graph = True
            return super().visit_call(call)

        def are_pad_on_graph(self, subgraph) -> bool:
            """
            This function recursively visits the graph and checks if 'nn.pad' op is on graph
            """
            self.visit(subgraph)
            return self.on_graph

    def create_tflite_graph():
        # pad(channel axis) -> conv2d(VALID) model, quantized to int8 TFLite.
        class Model(tf.Module):
            @tf.function
            def tf_function(self, x):
                tf_strides = [1, strides[0], strides[1], 1]
                op = tf.pad(
                    x,
                    [[0, 0], [0, 0], [0, 0], [padding_ch[0], padding_ch[1]]],
                    "CONSTANT",
                )
                # HWIO; the input-channel dim accounts for the channel padding.
                weight_shape = [
                    kernel_shape[0],
                    kernel_shape[1],
                    ifm_shape[3] + padding_ch[0] + padding_ch[1],
                    3,
                ]
                weight = tf.constant(np.random.uniform(size=weight_shape), dtype=tf.float32)
                return tf.nn.conv2d(
                    op,
                    weight,
                    strides=tf_strides,
                    padding="VALID",
                    dilations=dilation,
                )

        model = Model()
        concrete_func = model.tf_function.get_concrete_function(
            tf.TensorSpec(ifm_shape, dtype=tf.float32)
        )

        # Convert the model
        def representative_dataset():
            for _ in range(100):
                data = np.random.rand(*tuple(ifm_shape))
                yield [data.astype(np.float32)]

        converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        converter.representative_dataset = representative_dataset
        converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
        converter.inference_input_type = tf.int8
        converter.inference_output_type = tf.int8
        tflite_model = converter.convert()
        return tflite_model

    def verify(ext_func):
        # Fix: assert the boolean directly instead of comparing `== True`
        # (PEP 8 / flake8 E712).
        assert ArePadOnGraph().are_pad_on_graph(ext_func.body)

    conv2d_pattern_table = [
        (
            ethosu.ChannelPadParams.composite_name,
            ethosu.pad_pattern(),
            lambda pat: ethosu.ChannelPadParams(pat).is_valid(),
        ),
        (
            ethosu.QnnConv2DParams.composite_name,
            ethosu.qnn_conv2d_pattern(),
            lambda pat: ethosu.QnnConv2DParams(pat).is_valid(),
        ),
    ]

    tflite_graph = create_tflite_graph()
    tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)

    mod, conv_params = relay.frontend.from_tflite(
        tflite_model,
        shape_dict={"input": ifm_shape},
        dtype_dict={"input": dtype},
    )

    mod["main"] = bind_params_by_name(mod["main"], conv_params)
    mod = partition_ethosu_by_table(mod, conv2d_pattern_table)

    mod["tvmgen_default_ethos_u_main_0"] = dataflow_pattern.rewrite(
        legalize.Conv2DRewriter(), mod["tvmgen_default_ethos_u_main_0"]
    )

    verify(mod["tvmgen_default_ethos_u_main_0"])
@pytest.mark.parametrize("ifm_shape", [(1, 299, 299, 3), (1, 123, 17, 7)])
@pytest.mark.parametrize("kernel_shape", [(7, 3), (22, 5)])
@pytest.mark.parametrize("padding", ["SAME", "VALID"])
@pytest.mark.parametrize("strides, dilation", [((1, 1), (2, 1)), ((3, 2), (1, 1))])
@pytest.mark.parametrize("activation", ["RELU", None])
def test_tflite_depthwise_conv_2d_legalize(
    ifm_shape, kernel_shape, padding, strides, dilation, activation
):
    """Legalize a quantized TFLite depthwise conv2d (optionally followed by
    RELU) and verify the attributes of the resulting NPU operator."""
    dtype = "int8"

    def create_tflite_graph():
        # Single depthwise-conv2d TF model quantized to int8 TFLite.
        class Model(tf.Module):
            @tf.function
            def depthwise_conv2d(self, x):
                weight_shape = [kernel_shape[0], kernel_shape[1], ifm_shape[3], 1]
                weight = tf.constant(np.random.uniform(size=weight_shape), dtype=tf.float32)
                # The input strides to the TensorFlow API needs to be of shape 1x4
                tf_strides = [1, strides[0], strides[1], 1]
                op = tf.nn.depthwise_conv2d(
                    x, weight, strides=tf_strides, padding=padding, dilations=dilation
                )
                if activation:
                    op = tf.nn.relu(op)
                return op

        model = Model()
        concrete_func = model.depthwise_conv2d.get_concrete_function(
            tf.TensorSpec(ifm_shape, dtype=tf.float32)
        )

        # Convert the model
        def representative_dataset():
            # Random calibration data for post-training quantization.
            for _ in range(100):
                data = np.random.rand(*tuple(ifm_shape))
                yield [data.astype(np.float32)]

        converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        converter.representative_dataset = representative_dataset
        converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
        converter.inference_input_type = tf.int8
        converter.inference_output_type = tf.int8
        tflite_model = converter.convert()
        return tflite_model

    def verify(ext_func):
        # The partitioned function body should be a single NPU depthwise op.
        op = ext_func.body
        ofm_channels = op.attrs.ofm_channels
        # check IFM
        ifm = op.args[0].checked_type
        assert list(ifm.shape) == list(ifm_shape)
        assert str(ifm.dtype) == dtype
        assert ifm.shape[3] == ofm_channels
        # check OFM
        ofm = op.checked_type
        expected_ofm_shape = infra.compute_ofm_shape(
            ifm_shape, padding, kernel_shape, strides, dilation
        )
        assert list(ofm.shape) == list(expected_ofm_shape)
        assert str(ofm.dtype) == dtype
        assert ofm.shape[3] == ofm_channels
        # check weights (OHWI layout after legalization)
        weights_ohwi = op.args[1].data.asnumpy()
        assert str(weights_ohwi.dtype) == dtype
        assert weights_ohwi.shape[0] == ofm_channels
        assert weights_ohwi.shape[1] == kernel_shape[0]
        assert weights_ohwi.shape[2] == kernel_shape[1]
        assert weights_ohwi.shape[3] == 1  # only depth multiplier 1 is supported
        # Check that scale_bias matches weight tensor
        assert list(op.args[2].checked_type.shape)[0] == ofm_channels
        expected_padding = infra.compute_padding_shape(
            ifm_shape, expected_ofm_shape, padding, kernel_shape, strides, dilation
        )
        assert list(op.attrs.padding) == list(expected_padding)
        assert op.attrs.ofm_channels == ofm_channels
        assert list(op.attrs.strides) == list(strides)
        assert list(op.attrs.dilation) == list(dilation)
        if activation == "RELU":
            # RELU is fused into the NPU op as a CLIP activation.
            assert str(op.attrs.activation) == "CLIP"

    depthwise_pattern_table = [
        (
            ethosu.QnnDepthwiseConv2DParams.composite_name,
            ethosu.qnn_depthwise_conv2d_pattern(),
            lambda pat: ethosu.QnnDepthwiseConv2DParams(pat).is_valid(),
        )
    ]

    tflite_graph = create_tflite_graph()
    tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)

    mod, params = relay.frontend.from_tflite(
        tflite_model,
        shape_dict={"input": ifm_shape},
        dtype_dict={"input": dtype},
    )

    mod["main"] = bind_params_by_name(mod["main"], params)
    mod = partition_ethosu_by_table(mod, depthwise_pattern_table)

    mod["tvmgen_default_ethos_u_main_0"] = dataflow_pattern.rewrite(
        legalize.DepthwiseConv2DRewriter(), mod["tvmgen_default_ethos_u_main_0"]
    )

    verify(mod["tvmgen_default_ethos_u_main_0"])
def test_tflite_depthwise_conv2d_with_separate_padding_legalize():
    """Legalize a TFLite graph where spatial padding is a separate `pad` op
    before a VALID depthwise conv2d; the padding should be folded into the
    NPU op's `padding` attribute."""
    dtype = "int8"
    ifm_shape = (1, 23, 32, 7)
    kernel_shape = (1, 2)
    strides = (3, 2)
    dilation = (1, 1)
    # (top, left, bottom, right) spatial padding expected on the NPU op.
    padding = (0, 0, 1, 1)

    def create_tflite_graph():
        # pad -> depthwise_conv2d(VALID) model, quantized to int8 TFLite.
        class Model(tf.Module):
            @tf.function
            def tf_function(self, x):
                tf_strides = [1, strides[0], strides[1], 1]
                op = tf.pad(
                    x,
                    [[0, 0], [padding[0], padding[2]], [padding[1], padding[3]], [0, 0]],
                    "CONSTANT",
                )
                weight_shape = [kernel_shape[0], kernel_shape[1], ifm_shape[3], 1]
                weight = tf.constant(np.random.uniform(size=weight_shape), dtype=tf.float32)
                return tf.nn.depthwise_conv2d(
                    op,
                    weight,
                    strides=tf_strides,
                    padding="VALID",
                    dilations=dilation,
                )

        model = Model()
        concrete_func = model.tf_function.get_concrete_function(
            tf.TensorSpec(ifm_shape, dtype=tf.float32)
        )

        # Convert the model
        def representative_dataset():
            # Random calibration data for post-training quantization.
            for _ in range(100):
                data = np.random.rand(*tuple(ifm_shape))
                yield [data.astype(np.float32)]

        converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        converter.representative_dataset = representative_dataset
        converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
        converter.inference_input_type = tf.int8
        converter.inference_output_type = tf.int8
        tflite_model = converter.convert()
        return tflite_model

    def verify(ext_func):
        op = ext_func.body
        ofm_channels = op.attrs.ofm_channels
        # check IFM: the NPU op consumes the *unpadded* input directly.
        ifm = op.args[0].checked_type
        assert list(ifm.shape) == list(ifm_shape)
        assert str(ifm.dtype) == dtype
        assert ifm.shape[3] == ofm_channels
        # check OFM
        ofm = op.checked_type
        expected_ofm_shape = infra.compute_ofm_shape(
            ifm_shape, padding, kernel_shape, strides, dilation
        )
        assert list(ofm.shape) == list(expected_ofm_shape)
        assert str(ofm.dtype) == dtype
        assert ofm.shape[3] == ofm_channels
        # check weights (OHWI layout after legalization)
        weights_ohwi = op.args[1].data.asnumpy()
        assert str(weights_ohwi.dtype) == dtype
        assert weights_ohwi.shape[0] == ofm_channels
        assert weights_ohwi.shape[1] == kernel_shape[0]
        assert weights_ohwi.shape[2] == kernel_shape[1]
        assert weights_ohwi.shape[3] == 1  # only depth multiplier 1 is supported
        # Check that scale_bias matches weight tensor
        assert list(op.args[2].checked_type.shape)[0] == ofm_channels
        # The separate pad must have been absorbed into the padding attr.
        assert list(op.attrs.padding) == list(padding)
        assert op.attrs.ofm_channels == ofm_channels
        assert list(op.attrs.strides) == list(strides)
        assert list(op.attrs.dilation) == list(dilation)

    depthwise_pattern_table = [
        (
            ethosu.QnnDepthwiseConv2DParams.composite_name,
            ethosu.qnn_depthwise_conv2d_pattern(),
            lambda pat: ethosu.QnnDepthwiseConv2DParams(pat).is_valid(),
        )
    ]

    tflite_graph = create_tflite_graph()
    tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)

    mod, params = relay.frontend.from_tflite(
        tflite_model,
        shape_dict={"input": ifm_shape},
        dtype_dict={"input": dtype},
    )

    mod["main"] = bind_params_by_name(mod["main"], params)
    mod = partition_ethosu_by_table(mod, depthwise_pattern_table)

    mod["tvmgen_default_ethos_u_main_0"] = dataflow_pattern.rewrite(
        legalize.DepthwiseConv2DRewriter(), mod["tvmgen_default_ethos_u_main_0"]
    )

    verify(mod["tvmgen_default_ethos_u_main_0"])
@pytest.mark.parametrize("ifm_shape", [(1, 55, 55, 3), (1, 23, 32, 7)])
@pytest.mark.parametrize("padding", [(0, 1, 0, 0), (1, 1, 1, 1), (1, 1, 5, 5)])
@pytest.mark.parametrize("const_value", [0, 5, 125, -5])
def test_tflite_separate_padding_legalize(ifm_shape, padding, const_value):
    """Legalize a standalone TFLite `pad` op and verify the attributes of
    the NPU op it is rewritten into.

    NOTE(review): the verify() below checks conv-style attributes (weights,
    scale_bias), so the PadRewriter presumably lowers the pad to an NPU op
    with a 1x1 identity kernel — confirm against the legalization code.
    """
    dtype = "int8"
    kernel_shape = (1, 1)
    strides = (1, 1)
    dilation = (1, 1)

    def create_tflite_graph():
        # Single-pad TF model quantized to int8 TFLite.
        class Model(tf.Module):
            @tf.function
            def tf_function(self, x):
                return tf.pad(
                    x,
                    [[0, 0], [padding[0], padding[2]], [padding[1], padding[3]], [0, 0]],
                    "CONSTANT",
                    const_value,
                )

        model = Model()
        concrete_func = model.tf_function.get_concrete_function(
            tf.TensorSpec(ifm_shape, dtype=tf.float32)
        )

        # Convert the model
        def representative_dataset():
            # Random calibration data for post-training quantization.
            for _ in range(100):
                data = np.random.rand(*tuple(ifm_shape))
                yield [data.astype(np.float32)]

        converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        converter.representative_dataset = representative_dataset
        converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
        converter.inference_input_type = tf.int8
        converter.inference_output_type = tf.int8
        tflite_model = converter.convert()
        return tflite_model

    def verify(ext_func):
        op = ext_func.body
        ofm_channels = op.attrs.ofm_channels
        # check IFM
        ifm = op.args[0].checked_type
        assert list(ifm.shape) == list(ifm_shape)
        assert str(ifm.dtype) == dtype
        assert ifm.shape[3] == ofm_channels
        # check OFM
        ofm = op.checked_type
        expected_ofm_shape = infra.compute_ofm_shape(
            ifm_shape, padding, kernel_shape, strides, dilation
        )
        assert list(ofm.shape) == list(expected_ofm_shape)
        assert str(ofm.dtype) == dtype
        assert ofm.shape[3] == ofm_channels
        # check weights
        weights_ohwi = op.args[1].data.asnumpy()
        assert str(weights_ohwi.dtype) == dtype
        assert weights_ohwi.shape[0] == ofm_channels
        assert weights_ohwi.shape[1] == kernel_shape[0]
        assert weights_ohwi.shape[2] == kernel_shape[1]
        assert weights_ohwi.shape[3] == 1  # only depth multiplier 1 is supported
        # Check that scale_bias matches weight tensor
        assert list(op.args[2].checked_type.shape)[0] == ofm_channels
        assert list(op.attrs.padding) == list(padding)
        assert op.attrs.ofm_channels == ofm_channels
        assert list(op.attrs.strides) == list(strides)
        assert list(op.attrs.dilation) == list(dilation)

    pad_pattern_table = [
        (
            ethosu.PadParams.composite_name,
            ethosu.pad_pattern(),
            lambda pat: ethosu.PadParams(pat).is_valid(),
        ),
    ]

    tflite_graph = create_tflite_graph()
    tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)

    mod, params = relay.frontend.from_tflite(
        tflite_model,
        shape_dict={"input": ifm_shape},
        dtype_dict={"input": dtype},
    )

    mod["main"] = bind_params_by_name(mod["main"], params)
    mod = partition_ethosu_by_table(mod, pad_pattern_table)

    mod["tvmgen_default_ethos_u_main_0"] = dataflow_pattern.rewrite(
        legalize.PadRewriter(), mod["tvmgen_default_ethos_u_main_0"]
    )

    verify(mod["tvmgen_default_ethos_u_main_0"])
@pytest.mark.parametrize("ifm_shape", [(1, 55, 55, 3), (1, 23, 32, 7)])
@pytest.mark.parametrize("channel_padding", [(0, 1), (1, 1), (5, 2)])
@pytest.mark.parametrize("const_value", [0, 5, 125, -5])
def test_tflite_separate_channel_padding_legalize(ifm_shape, channel_padding, const_value):
    """Legalize a TFLite `pad` on the channel axis: the pad is rewritten to
    a `concatenate` of the input with constant pad tensors."""
    dtype = "int8"
    padding = (0, 0, 0, 0)

    class AreConcatenateOnGraph(ExprVisitor):
        """
        Visits the Graph recursively and checks if it contains 'concatenate' op
        """

        def __init__(self):
            ExprVisitor.__init__(self)
            self.on_graph = False

        def visit_call(self, call):
            if isinstance(call.op, tvm.ir.Op):
                if str(call.op.name) == "concatenate":
                    self.on_graph = True
            return super().visit_call(call)

        def are_concatenate_on_graph(self, subgraph) -> bool:
            """
            This function recursively visits the graph and checks if 'concatenate' op is on graph
            """
            self.visit(subgraph)
            return self.on_graph

    def create_tflite_graph():
        # Single-pad (channel axis) TF model quantized to int8 TFLite.
        class Model(tf.Module):
            @tf.function
            def tf_function(self, x):
                return tf.pad(
                    x,
                    [
                        [0, 0],
                        [padding[0], padding[2]],
                        [padding[1], padding[3]],
                        [channel_padding[0], channel_padding[1]],
                    ],
                    "CONSTANT",
                    const_value,
                )

        model = Model()
        concrete_func = model.tf_function.get_concrete_function(
            tf.TensorSpec(ifm_shape, dtype=tf.float32)
        )

        # Convert the model
        def representative_dataset():
            for _ in range(100):
                data = np.random.rand(*tuple(ifm_shape))
                yield [data.astype(np.float32)]

        converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        converter.representative_dataset = representative_dataset
        converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
        converter.inference_input_type = tf.int8
        converter.inference_output_type = tf.int8
        tflite_model = converter.convert()
        return tflite_model

    def verify(ext_func, channel_padding):
        op = ext_func.body
        pad_before = 0
        pad_after = 0
        # The concatenate's tuple argument holds [pad_before?, ifm, pad_after?]
        # depending on which sides are padded. NOTE(review): `ifm` is only
        # bound when at least one side of channel_padding is non-zero; that
        # is guaranteed by the parametrize values above.
        if channel_padding[0] == 0 and channel_padding[1] > 0:
            pad_after = ext_func.body.args[0][1].args[0].checked_type.shape[3]
            ifm = ext_func.body.args[0][0].args[0].checked_type
        if channel_padding[0] > 0 and channel_padding[1] == 0:
            pad_before = ext_func.body.args[0][0].args[0].checked_type.shape[3]
            ifm = ext_func.body.args[0][1].args[0].checked_type
        if channel_padding[0] > 0 and channel_padding[1] > 0:
            pad_before = ext_func.body.args[0][0].args[0].checked_type.shape[3]
            ifm = ext_func.body.args[0][1].args[0].checked_type
            pad_after = ext_func.body.args[0][2].args[0].checked_type.shape[3]
        # check IFM
        assert list(ifm.shape) == list(ifm_shape)
        assert str(ifm.dtype) == dtype
        assert ifm.shape[3] == ifm_shape[3]
        # check OFM
        ofm = op.checked_type
        expected_ofm_shape = list(ifm_shape)
        expected_ofm_shape[3] = channel_padding[0] + ifm_shape[3] + channel_padding[1]
        assert list(ofm.shape) == expected_ofm_shape
        assert str(ofm.dtype) == dtype
        # check padding
        assert [pad_before, pad_after] == list(channel_padding)
        # check if relay contains 'concatenate' op
        # Fix: assert the boolean directly instead of comparing `== True`
        # (PEP 8 / flake8 E712).
        assert AreConcatenateOnGraph().are_concatenate_on_graph(ext_func.body)

    pad_pattern_table = [
        (
            ethosu.ChannelPadParams.composite_name,
            ethosu.pad_pattern(),
            lambda pat: ethosu.ChannelPadParams(pat).is_valid(),
        ),
    ]

    tflite_graph = create_tflite_graph()
    tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)

    mod, params = relay.frontend.from_tflite(
        tflite_model,
        shape_dict={"input": ifm_shape},
        dtype_dict={"input": dtype},
    )

    mod["main"] = bind_params_by_name(mod["main"], params)
    mod = partition_ethosu_by_table(mod, pad_pattern_table)

    mod["tvmgen_default_ethos_u_main_0"] = dataflow_pattern.rewrite(
        legalize.ChannelPadRewriter(), mod["tvmgen_default_ethos_u_main_0"]
    )

    verify(mod["tvmgen_default_ethos_u_main_0"], channel_padding)
@pytest.mark.parametrize("pooling_type", ["MAX", "AVG"])
@pytest.mark.parametrize("ifm_shape", [[1, 3, 4, 3], [1, 4, 5, 2]])
@pytest.mark.parametrize(
    "pool_shape, strides, activation_function, padding",
    [([1, 2], [1, 2], "NONE", "SAME"), ([2, 3], [2, 3], "RELU", "VALID")],
)
def test_tflite_pool2d_legalize(
    ifm_shape, pooling_type, strides, pool_shape, activation_function, padding
):
    """Legalize a quantized TFLite max/avg pool2d (optionally followed by
    RELU) and check the attributes of the resulting NPU pooling operator."""
    dtype = "int8"

    def build_tflite_model():
        # One-operator TF model, quantized to an int8 TFLite flatbuffer.
        class Model(tf.Module):
            @tf.function
            def tf_function(self, x):
                if pooling_type == "MAX":
                    pooled = tf.nn.max_pool(x, pool_shape, strides, padding)
                elif pooling_type == "AVG":
                    pooled = tf.nn.avg_pool(x, pool_shape, strides, padding)
                if activation_function == "RELU":
                    pooled = tf.nn.relu(pooled)
                return pooled

        concrete_func = Model().tf_function.get_concrete_function(
            tf.TensorSpec(ifm_shape, dtype=tf.float32)
        )

        # Random calibration data for post-training quantization.
        def representative_dataset():
            for _ in range(100):
                yield [np.random.rand(*tuple(ifm_shape)).astype(np.float32)]

        converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        converter.representative_dataset = representative_dataset
        converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
        converter.inference_input_type = tf.int8
        converter.inference_output_type = tf.int8
        return converter.convert()

    def check_legalized_func(ext_func):
        # The partitioned function body should be a single NPU pooling call.
        expected_ofm = infra.compute_ofm_shape(ifm_shape, padding, pool_shape, strides)
        call = ext_func.body
        assert list(call.args[0].checked_type.shape) == ifm_shape
        assert call.args[0].checked_type.dtype == dtype
        assert list(call.checked_type.shape) == expected_ofm
        assert call.checked_type.dtype == dtype
        assert call.attrs.pooling_type == pooling_type
        assert list(call.attrs.strides) == strides
        assert list(call.attrs.padding) == infra.compute_padding_shape(
            ifm_shape, expected_ofm, padding, pool_shape, strides
        )
        assert list(call.attrs.pool_shape) == pool_shape
        assert call.attrs.ofm_channels == ifm_shape[3]
        if activation_function == "RELU":
            # RELU is fused into the NPU op as a CLIP activation.
            assert str(call.attrs.activation) == "CLIP"

    # Pick the rewriter / pattern-table entry for the pooling flavour.
    rewriter_cls, params_cls, pattern_fn = {
        "MAX": (legalize.MaxPoolingRewriter, ethosu.MaxPool2DParams, ethosu.qnn_maxpool2d_pattern),
        "AVG": (legalize.AvgPoolingRewriter, ethosu.AvgPool2DParams, ethosu.qnn_avgpool2d_pattern),
    }[pooling_type]
    rewriter = rewriter_cls()
    pattern_table = [
        (
            params_cls.composite_name,
            pattern_fn(),
            lambda pat: params_cls(pat).is_valid(),
        ),
    ]

    tflite_model = tflite.Model.Model.GetRootAsModel(build_tflite_model(), 0)
    mod, _ = relay.frontend.from_tflite(
        tflite_model,
        shape_dict={"x": ifm_shape},
        dtype_dict={"x": dtype},
    )
    mod = partition_ethosu_by_table(mod, pattern_table)
    mod["tvmgen_default_ethos_u_main_0"] = dataflow_pattern.rewrite(
        rewriter, mod["tvmgen_default_ethos_u_main_0"]
    )
    check_legalized_func(mod["tvmgen_default_ethos_u_main_0"])
@pytest.mark.parametrize("pooling_type", ["MAX", "AVG"])
@pytest.mark.parametrize(
    "ifm_shape, pool_shape, strides, activation_function, padding",
    [
        ([1, 4, 4, 3], [4, 4], [4, 4], "NONE", "SAME"),
        ([1, 4, 4, 3], [4, 4], [4, 4], "RELU", "VALID"),
        ([1, 25, 5, 64], [25, 5], [25, 5], "NONE", "VALID"),
        ([1, 25, 5, 64], [25, 5], [25, 5], "RELU", "SAME"),
    ],
)
def test_tflite_pool2d_same_ifm_and_kernel_shape_legalize(
    pooling_type, ifm_shape, pool_shape, strides, activation_function, padding
):
    """Pooling where the pool window covers the whole IFM (output is 1x1):
    legalization is expected to replace the strides with [1, 1], since with
    a full-size window only a single output element is produced."""
    dtype = "int8"
    # After legalization the strides are expected to collapse to [1, 1].
    strides_legalized = [1, 1]

    def create_tflite_graph():
        # One-operator TF model, quantized to int8 TFLite.
        class Model(tf.Module):
            @tf.function
            def tf_function(self, x):
                if pooling_type == "MAX":
                    op = tf.nn.max_pool(x, pool_shape, strides, padding)
                elif pooling_type == "AVG":
                    op = tf.nn.avg_pool(x, pool_shape, strides, padding)
                if activation_function == "RELU":
                    op = tf.nn.relu(op)
                return op

        model = Model()
        concrete_func = model.tf_function.get_concrete_function(
            tf.TensorSpec(ifm_shape, dtype=tf.float32)
        )

        # Convert the model
        def representative_dataset():
            # Random calibration data for post-training quantization.
            for _ in range(100):
                data = np.random.rand(*tuple(ifm_shape))
                yield [data.astype(np.float32)]

        converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        converter.representative_dataset = representative_dataset
        converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
        converter.inference_input_type = tf.int8
        converter.inference_output_type = tf.int8
        tflite_model = converter.convert()
        return tflite_model

    def expected_mod():
        """Build the expected partitioned module (before legalization) for
        the current (activation, pooling) combination.  The module is
        compared structurally, so the IR text must match exactly."""
        expected_ir_string = ""
        if activation_function == "NONE" and pooling_type == "AVG":
            expected_ir_string = f"""
#[version = "0.0.5"]
def @main(%x: Tensor[{str(tuple(ifm_shape))}, {dtype}], output_tensor_names=\
["Identity"]) -> Tensor[(1, 1, 1, {str(ifm_shape[3])}), {dtype}] {{
@tvmgen_default_ethos_u_main_0(%x)
}}
def @tvmgen_default_ethos_u_main_0(%y: Tensor[{str(tuple(ifm_shape))}, {dtype}], \
Compiler="ethos-u", Primitive=1, Inline=1, \
global_symbol="tvmgen_default_ethos_u_main_0") -> Tensor[(1, 1, 1, \
{str(ifm_shape[3])}), {dtype}] {{
%2 = fn (%z: Tensor[{str(tuple(ifm_shape))}, {dtype}], \
PartitionedFromPattern="cast_nn.avg_pool2d_cast_", \
Composite="ethos-u.avgpool2d") -> Tensor[(1, 1, 1, {str(ifm_shape[3])}), \
{dtype}] {{
%0 = cast(%z, dtype="int32") ;
%1 = nn.avg_pool2d(%0, pool_size={str(pool_shape)}, strides={str(strides)}, \
padding=[0, 0, 0, 0], layout="NHWC") ;
cast(%1, dtype="{dtype}")
}} ;
%2(%y)
}}
"""
        if activation_function == "RELU" and pooling_type == "AVG":
            expected_ir_string = f"""
#[version = "0.0.5"]
def @main(%x: Tensor[{str(tuple(ifm_shape))}, {dtype}], output_tensor_names=\
["Identity"]) -> Tensor[(1, 1, 1, {str(ifm_shape[3])}), {dtype}] {{
@tvmgen_default_ethos_u_main_0(%x)
}}
def @tvmgen_default_ethos_u_main_0(%y: Tensor[{str(tuple(ifm_shape))}, {dtype}], \
Compiler="ethos-u", Primitive=1, Inline=1, \
global_symbol="tvmgen_default_ethos_u_main_0") -> Tensor[(1, 1, 1, \
{str(ifm_shape[3])}), {dtype}] {{
%3 = fn (%z: Tensor[{str(tuple(ifm_shape))}, {dtype}], \
PartitionedFromPattern="cast_nn.avg_pool2d_cast_clip_", \
Composite="ethos-u.avgpool2d") -> Tensor[(1, 1, 1, {str(ifm_shape[3])}), \
{dtype}] {{
%0 = cast(%z, dtype="int32") ;
%1 = nn.avg_pool2d(%0, pool_size={str(pool_shape)}, strides={str(strides)}, \
padding=[0, 0, 0, 0], layout="NHWC") ;
%2 = cast(%1, dtype="{dtype}") ;
clip(%2, a_min=-128f, a_max=127f)
}} ;
%3(%y)
}}
"""
        if activation_function == "NONE" and pooling_type == "MAX":
            expected_ir_string = f"""
#[version = "0.0.5"]
def @main(%x: Tensor[{str(tuple(ifm_shape))}, {dtype}], output_tensor_names=\
["Identity"]) -> Tensor[(1, 1, 1, {str(ifm_shape[3])}), {dtype}] {{
@tvmgen_default_ethos_u_main_0(%x)
}}
def @tvmgen_default_ethos_u_main_0(%y: Tensor[{str(tuple(ifm_shape))}, {dtype}], \
Compiler="ethos-u", Primitive=1, Inline=1, \
global_symbol="tvmgen_default_ethos_u_main_0") -> Tensor[(1, 1, 1, \
{str(ifm_shape[3])}), {dtype}] {{
%0 = fn (%z: Tensor[{str(tuple(ifm_shape))}, {dtype}], \
PartitionedFromPattern="nn.max_pool2d_", \
Composite="ethos-u.maxpool2d") -> Tensor[(1, 1, 1, {str(ifm_shape[3])}), \
{dtype}] {{
nn.max_pool2d(%z, pool_size={str(pool_shape)}, strides={str(strides)}, \
padding=[0, 0, 0, 0], layout="NHWC")
}} ;
%0(%y)
}}
"""
        if activation_function == "RELU" and pooling_type == "MAX":
            expected_ir_string = f"""
#[version = "0.0.5"]
def @main(%x: Tensor[{str(tuple(ifm_shape))}, {dtype}] , output_tensor_names=\
["Identity"]) -> Tensor[(1, 1, 1, {str(ifm_shape[3])}), {dtype}] {{
@tvmgen_default_ethos_u_main_0(%x)
}}
def @tvmgen_default_ethos_u_main_0(%y: Tensor[{str(tuple(ifm_shape))}, {dtype}] , \
Compiler="ethos-u", Primitive=1, Inline=1, \
global_symbol="tvmgen_default_ethos_u_main_0") -> Tensor[(1, 1, 1, \
{str(ifm_shape[3])}), {dtype}] {{
%1 = fn (%z: Tensor[{str(tuple(ifm_shape))}, {dtype}] , \
PartitionedFromPattern="nn.max_pool2d_clip_", \
Composite="ethos-u.maxpool2d") -> Tensor[(1, 1, 1, {str(ifm_shape[3])}), \
{dtype}] {{
%0 = nn.max_pool2d(%z, pool_size={str(pool_shape)}, strides={str(strides)}, \
padding=[0, 0, 0, 0], layout="NHWC");
clip(%0, a_min=-128f, a_max=127f)
}};
%1(%y)
}}
"""
        return tvm.relay.fromtext(expected_ir_string)

    def verify(ext_func):
        ofm_shape = infra.compute_ofm_shape(ifm_shape, padding, pool_shape, strides)
        op = ext_func.body
        assert list(op.args[0].checked_type.shape) == ifm_shape
        assert op.args[0].checked_type.dtype == dtype
        assert list(op.checked_type.shape) == ofm_shape
        assert op.checked_type.dtype == dtype
        assert op.attrs.pooling_type == pooling_type
        # Strides must have been legalized to [1, 1].
        assert list(op.attrs.strides) == strides_legalized
        # Padding must be identical whether computed with the original or
        # the legalized strides (full-size window -> single output element).
        assert list(op.attrs.padding) == infra.compute_padding_shape(
            ifm_shape, ofm_shape, padding, pool_shape, strides
        )
        assert list(op.attrs.padding) == infra.compute_padding_shape(
            ifm_shape, ofm_shape, padding, pool_shape, strides_legalized
        )
        assert list(op.attrs.pool_shape) == pool_shape
        assert op.attrs.ofm_channels == ifm_shape[3]
        if activation_function == "RELU":
            # RELU is fused into the NPU op as a CLIP activation.
            assert str(op.attrs.activation) == "CLIP"

    if pooling_type == "MAX":
        rewriter = legalize.MaxPoolingRewriter()
        pattern_table = [
            (
                ethosu.MaxPool2DParams.composite_name,
                ethosu.qnn_maxpool2d_pattern(),
                lambda pat: ethosu.MaxPool2DParams(pat).is_valid(),
            ),
        ]
    if pooling_type == "AVG":
        rewriter = legalize.AvgPoolingRewriter()
        pattern_table = [
            (
                ethosu.AvgPool2DParams.composite_name,
                ethosu.qnn_avgpool2d_pattern(),
                lambda pat: ethosu.AvgPool2DParams(pat).is_valid(),
            ),
        ]

    tflite_graph = create_tflite_graph()
    tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)

    mod, _ = relay.frontend.from_tflite(
        tflite_model,
        shape_dict={"x": ifm_shape},
        dtype_dict={"x": dtype},
    )

    mod = partition_ethosu_by_table(mod, pattern_table)
    # First check the partitioned module matches the expectation, then
    # legalize and check the resulting NPU op attributes.
    expected = expected_mod()
    tvm.ir.assert_structural_equal(mod, expected)

    mod["tvmgen_default_ethos_u_main_0"] = dataflow_pattern.rewrite(
        rewriter, mod["tvmgen_default_ethos_u_main_0"]
    )

    verify(mod["tvmgen_default_ethos_u_main_0"])
@pytest.mark.parametrize("operator_type", ["ADD", "SUB", "MUL", "MIN", "MAX"])
@pytest.mark.parametrize(
    "ifm_shape, ifm2_shape, reversed_operands",
    [
        ([1, 2, 3, 4], [1, 2, 3, 4], False),
        ([1, 2, 3, 4], [1, 1, 3, 1], False),
        ([1, 1, 3, 1], [1, 2, 3, 4], True),
        ([1, 4, 4], [4, 1], False),
        ([4], [4], False),
        ([4], [1, 2, 3, 4], True),
        ([1, 4, 4], [4, 1], False),
    ],
)
@pytest.mark.parametrize("activation_function", [None, tf.nn.relu])
def test_tflite_binary_elemwise_legalize(
    operator_type,
    ifm_shape,
    ifm2_shape,
    reversed_operands,
    activation_function,
):
    """Check legalization of TFLite binary elementwise ops to ethos-u.

    Covers ADD/SUB/MUL/MIN/MAX with broadcasting, reversed operands,
    optional RELU activation, and non-4D inputs (which are padded to 4D
    with leading ones and reshaped back on the output).
    """
    np.random.seed(0)
    dtype = "int8"

    def create_tflite_graph():
        # Build a single binary-op TF model and quantize it to int8
        # through the TFLite converter.
        class Model(tf.Module):
            @tf.function
            def tf_function(self, x, y):
                if operator_type == "ADD":
                    op = tf.math.add(x, y)
                elif operator_type == "SUB":
                    op = tf.math.subtract(x, y)
                elif operator_type == "MUL":
                    op = tf.math.multiply(x, y)
                elif operator_type == "MIN":
                    op = tf.math.minimum(x, y)
                elif operator_type == "MAX":
                    op = tf.math.maximum(x, y)
                if activation_function:
                    op = activation_function(op)
                return op

        model = Model()
        concrete_func = model.tf_function.get_concrete_function(
            tf.TensorSpec(ifm_shape, dtype=tf.float32), tf.TensorSpec(ifm2_shape, dtype=tf.float32)
        )

        # Convert the model
        def representative_dataset():
            # Random calibration data for post-training quantization.
            for _ in range(100):
                data = np.random.rand(*tuple(ifm_shape))
                data2 = np.random.rand(*tuple(ifm2_shape)) * 2
                yield [data.astype(np.float32), data2.astype(np.float32)]

        converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        converter.representative_dataset = representative_dataset
        converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
        converter.inference_input_type = tf.int8
        converter.inference_output_type = tf.int8
        tflite_model = converter.convert()
        return tflite_model

    def verify(ext_func):
        out_shape = ifm2_shape if reversed_operands else ifm_shape
        shapes = [ifm_shape, ifm2_shape]
        ifm_index, ifm2_index = (1, 0) if reversed_operands else (0, 1)
        op = ext_func.body
        has_reshaped_output = False
        has_separate_requantize = False
        # Non-4D shapes are legalized by padding with leading ones to 4D.
        shapes_padded = [[1] * (4 - len(s)) + s for s in shapes]
        out_padded = [1] * (4 - len(out_shape)) + out_shape
        # Peel off an optional trailing identity (separate requantize) and/or
        # a reshape back to the original rank, to reach the elementwise op.
        if op.op.name == "contrib.ethosu.identity":
            op = op.args[0]
            has_separate_requantize = True
        if op.op.name == "reshape":
            has_reshaped_output = True
            op = op.args[0]
        assert list(op.args[0].checked_type.shape) == shapes_padded[ifm_index]
        assert list(op.args[1].checked_type.shape) == shapes_padded[ifm2_index]
        assert op.args[0].checked_type.dtype == dtype
        assert list(op.checked_type.shape) == out_padded
        assert op.checked_type.dtype == dtype
        assert op.attrs.operator_type == operator_type
        assert op.attrs.reversed_operands == reversed_operands
        if activation_function is not None:
            assert str(op.attrs.activation) == "CLIP"

            if operator_type in ["MIN", "MAX"]:
                if has_separate_requantize:
                    # In case when requantize cannot be fused with MIN/MAX + CLIP due to hardware constraints
                    # there should be default quantization values since requantize is separate operation.
                    assert float(op.attrs.ifm_scale) == 1.0
                    assert int(op.attrs.ifm_zero_point) == 0
                    assert float(op.attrs.ifm2_scale) == 1.0
                    assert int(op.attrs.ifm2_zero_point) == 0
                    assert float(op.attrs.ofm_scale) == 1.0
                    assert int(op.attrs.ofm_zero_point) == 0
                else:
                    # MIN and MAX with an activation must have a requantize operation
                    # baked into the output. To check the extra requantize node was
                    # picked up by the pattern, we can make sure the quantization
                    # information is not default.
                    assert float(op.attrs.ifm_scale) != 1.0
                    assert int(op.attrs.ifm_zero_point) != 0
                    assert float(op.attrs.ifm2_scale) != 1.0
                    assert int(op.attrs.ifm2_zero_point) != 0
                    assert float(op.attrs.ofm_scale) != 1.0
                    assert int(op.attrs.ofm_zero_point) != 0
        if has_reshaped_output:
            assert list(ext_func.body.checked_type.shape) == out_shape

    # Select the rewriter(s) and BYOC pattern table for the operator under test.
    if operator_type == "ADD":
        rewriter = legalize.AddRewriter()
        pattern_table = [
            (
                ethosu.AddParams.composite_name,
                ethosu.qnn_add_pattern(),
                lambda pat: ethosu.AddParams(pat).is_valid(),
            ),
        ]
    elif operator_type == "SUB":
        rewriter = legalize.SubRewriter()
        pattern_table = [
            (
                ethosu.SubParams.composite_name,
                ethosu.qnn_subtract_pattern(),
                lambda pat: ethosu.SubParams(pat).is_valid(),
            ),
        ]
    elif operator_type == "MUL":
        rewriter = legalize.MulRewriter()
        pattern_table = [
            (
                ethosu.MulParams.composite_name,
                ethosu.qnn_mul_pattern(),
                lambda pat: ethosu.MulParams(pat).is_valid(),
            ),
        ]
    elif operator_type == "MIN":
        rewriter = [legalize.MinRewriter(), legalize.RequantizeRewriter()]
        pattern_table = [
            (
                ethosu.MinParams.composite_name,
                ethosu.minimum_clip_requantize_pattern(),
                lambda pat: ethosu.MinParams(pat).is_valid(),
            ),
            (
                ethosu.MinParams.composite_name,
                ethosu.minimum_pattern(),
                lambda pat: ethosu.MinParams(pat).is_valid(),
            ),
            (
                ethosu.RequantizeParams.composite_name,
                ethosu.requantize_pattern(),
                lambda pat: ethosu.RequantizeParams(pat).is_valid(),
            ),
        ]
    elif operator_type == "MAX":
        rewriter = [legalize.MaxRewriter(), legalize.RequantizeRewriter()]
        pattern_table = [
            (
                ethosu.MaxParams.composite_name,
                ethosu.maximum_clip_requantize_pattern(),
                lambda pat: ethosu.MaxParams(pat).is_valid(),
            ),
            (
                ethosu.MaxParams.composite_name,
                ethosu.maximum_pattern(),
                lambda pat: ethosu.MaxParams(pat).is_valid(),
            ),
            (
                ethosu.RequantizeParams.composite_name,
                ethosu.requantize_pattern(),
                lambda pat: ethosu.RequantizeParams(pat).is_valid(),
            ),
        ]
    tflite_graph = create_tflite_graph()
    tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)
    mod, _ = relay.frontend.from_tflite(
        tflite_model,
        shape_dict={"x": ifm_shape, "y": ifm2_shape},
        dtype_dict={"x": dtype, "y": dtype},
    )
    mod = partition_ethosu_by_table(mod, pattern_table)
    mod["tvmgen_default_ethos_u_main_0"] = dataflow_pattern.rewrite(
        rewriter, mod["tvmgen_default_ethos_u_main_0"]
    )
    verify(mod["tvmgen_default_ethos_u_main_0"])
# Regression test: requantize cannot always be fused with MIN/MAX + CLIP due to
# hardware constraints, so it must be emitted as a separate identity op.
def test_tflite_max_relu_n1_to_1_legalize():
    """MAX with a relu_n1_to_1 activation keeps requantize as a separate op."""
    shape = [1, 4, 8, 16]
    test_tflite_binary_elemwise_legalize("MAX", shape, shape, False, relu_n1_to_1)
def test_binary_add_from_constant_scalar():
    """A qnn.add with a constant scalar operand legalizes to an ethos-u ADD."""
    dtype = "uint8"
    ifm_shape = (1, 4, 4, 8)

    def build_module():
        # qnn.add(input, scalar) with unit scales and zero offsets.
        data = relay.var("input", shape=ifm_shape, dtype=dtype)
        scalar_const = relay.const(np.ones((1, 1, 1, 1), dtype=dtype), dtype=dtype)
        qnn_add = relay.qnn.op.add(
            data,
            scalar_const,
            relay.const(1.0, dtype="float32"),
            relay.const(0, dtype="int32"),
            relay.const(1.0, dtype="float32"),
            relay.const(0, dtype="int32"),
            relay.const(1.0, dtype="float32"),
            relay.const(0, dtype="int32"),
        )
        func = relay.Function(relay.analysis.free_vars(qnn_add), qnn_add)
        return tvm.IRModule.from_expr(func)

    def check(ext_func):
        call = ext_func.body
        # IFM keeps its shape/dtype, the scalar stays (1, 1, 1, 1).
        assert list(call.args[0].checked_type.shape) == [1, 4, 4, 8]
        assert list(call.args[1].checked_type.shape) == [1, 1, 1, 1]
        assert call.args[0].checked_type.dtype == "uint8"
        assert list(call.checked_type.shape) == [1, 4, 4, 8]
        assert call.checked_type.dtype == "uint8"
        assert call.attrs.operator_type == "ADD"

    pattern_table = [
        (
            ethosu.AddParams.composite_name,
            ethosu.qnn_add_pattern(),
            lambda pat: ethosu.AddParams(pat).is_valid(),
        ),
    ]
    mod = partition_ethosu_by_table(build_module(), pattern_table)
    mod["tvmgen_default_ethos_u_main_0"] = dataflow_pattern.rewrite(
        legalize.AddRewriter(), mod["tvmgen_default_ethos_u_main_0"]
    )
    check(mod["tvmgen_default_ethos_u_main_0"])
@pytest.mark.parametrize(
    "ifm_shape, ifm2_shape, reversed_operands",
    [
        ([1, 2, 3, 4], [1, 2, 3, 4], False),
        ([1, 2, 3, 4], [1, 1, 3, 1], False),
        ([1, 1, 3, 1], [1, 2, 3, 4], True),
    ],
)
def test_ethosu_left_shift_binary_elemwise_legalize(ifm_shape, ifm2_shape, reversed_operands):
    """relay.left_shift legalizes to an ethos-u SHL with no activation."""
    dtype = "int32"

    def build_module():
        lhs = relay.var("x1", shape=ifm_shape, dtype=dtype)
        rhs = relay.var("x2", shape=ifm2_shape, dtype=dtype)
        shifted = relay.left_shift(lhs, rhs)
        mod = tvm.IRModule()
        mod["main"] = relay.Function([lhs, rhs], shifted)
        return mod

    def check(ext_func):
        expected_out_shape = ifm2_shape if reversed_operands else ifm_shape
        in_shapes = [ifm_shape, ifm2_shape]
        # When operands are reversed, argument order is swapped on the call.
        first_idx, second_idx = (1, 0) if reversed_operands else (0, 1)
        call = ext_func.body
        assert list(call.args[0].checked_type.shape) == in_shapes[first_idx]
        assert list(call.args[1].checked_type.shape) == in_shapes[second_idx]
        assert call.args[0].checked_type.dtype == dtype
        assert list(call.checked_type.shape) == expected_out_shape
        assert call.checked_type.dtype == dtype
        assert call.attrs.operator_type == "SHL"
        assert call.attrs.reversed_operands == reversed_operands
        assert str(call.attrs.activation) == "NONE"

    pattern_table = [
        (
            ethosu.ShlParams.composite_name,
            ethosu.shl_pattern(),
            lambda pat: ethosu.ShlParams(pat).is_valid(),
        ),
    ]
    mod = partition_ethosu_by_table(build_module(), pattern_table)
    mod["tvmgen_default_ethos_u_main_0"] = dataflow_pattern.rewrite(
        legalize.ShlRewriter(), mod["tvmgen_default_ethos_u_main_0"]
    )
    check(mod["tvmgen_default_ethos_u_main_0"])
@pytest.mark.parametrize(
    "ifm_shape, new_shape",
    [
        ((1, 4, 1, 2), (4, 2)),
        ((1, 5, 1, 20), (100,)),
        ((12, 20), (1, 6, 4, 10)),
        ((30,), (10, 1, 3)),
    ],
)
def test_relay_reshape_legalize(ifm_shape, new_shape):
    """Reshape legalization keeps the reshape and appends an ethos-u identity."""
    ifm = relay.var("ifm", shape=ifm_shape, dtype="int8")
    func = relay.Function([ifm], relay.op.reshape(ifm, new_shape))
    mod = tvm.IRModule()
    mod["main"] = func
    mod = relay.transform.InferType()(mod)

    pattern_table = [
        (
            ethosu.ReshapeParams.composite_name,
            ethosu.reshape_pattern(),
            lambda pat: ethosu.ReshapeParams(pat).is_valid(),
        ),
    ]
    mod = partition_ethosu_by_table(mod, pattern_table)
    # Legalize the reshape, then run the no-op rewriter over the result.
    for rewriter in (legalize.ReshapeRewriter(), legalize.NoOpRewriter()):
        mod["tvmgen_default_ethos_u_main_0"] = dataflow_pattern.rewrite(
            rewriter, mod["tvmgen_default_ethos_u_main_0"]
        )
    mod = relay.transform.InferType()(mod)

    identity = mod["tvmgen_default_ethos_u_main_0"].body
    assert identity.op.name == "contrib.ethosu.identity"
    # The reshape itself must survive legalization...
    assert identity.args[0].op.name == "reshape"
    # ...and the identity must carry the reshaped output shape.
    assert tuple(identity.checked_type.shape) == new_shape
@pytest.mark.parametrize(
    "ifm_shape, begin, size",
    [
        ([1, 10, 50, 4], [0, 5, 11, 2], [1, 5, 11, 1]),
        ([15, 17, 3], [3, 0, 1], [8, 17, 2]),
        ([7, 6043], [0, 704], [1, 2860]),
        ([5000], [123], [2151]),
    ],
)
def test_tflite_slice(ifm_shape, begin, size):
    """Legalize a TFLite SLICE: the strided_slice is kept and wrapped in a
    contrib.ethosu.identity whose output shape equals the requested size."""
    dtype = "int8"
    def create_tflite_graph():
        # Build a single tf.slice model and quantize it to int8 via the
        # TFLite converter.
        class Model(tf.Module):
            @tf.function
            def slice_func(self, x):
                return tf.slice(x, begin, size)
        model = Model()
        # Save the model
        concrete_func = model.slice_func.get_concrete_function(
            tf.TensorSpec(ifm_shape, dtype=tf.float32)
        )
        # Convert the model
        def representative_dataset():
            # Random calibration data for post-training quantization.
            for _ in range(100):
                data = np.random.rand(*tuple(ifm_shape))
                yield [data.astype(np.float32)]
        converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        converter.representative_dataset = representative_dataset
        converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
        converter.inference_input_type = tf.int8
        converter.inference_output_type = tf.int8
        tflite_model = converter.convert()
        return tflite_model
    def verify(ext_func):
        identity = ext_func.body
        assert identity.op.name == "contrib.ethosu.identity"
        # check that the strided_slice is still there
        strided_slice = identity.args[0]
        assert strided_slice.op.name == "strided_slice"
        # check that identity's output shape matches strided slice's output shape
        assert list(identity.checked_type.shape) == size
    tflite_graph = create_tflite_graph()
    tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)
    mod, _ = relay.frontend.from_tflite(
        tflite_model,
        shape_dict={"input": ifm_shape},
        dtype_dict={"input": dtype},
    )
    strided_slice_pattern_table = [
        (
            ethosu.StridedSliceParams.composite_name,
            ethosu.strided_slice_pattern(),
            lambda pat: ethosu.StridedSliceParams(pat).is_valid(),
        ),
    ]
    mod = partition_ethosu_by_table(mod, strided_slice_pattern_table)
    # Legalize the slice, then clean up with the no-op rewriter.
    mod["tvmgen_default_ethos_u_main_0"] = dataflow_pattern.rewrite(
        legalize.StridedSliceRewriter(), mod["tvmgen_default_ethos_u_main_0"]
    )
    mod["tvmgen_default_ethos_u_main_0"] = dataflow_pattern.rewrite(
        legalize.NoOpRewriter(), mod["tvmgen_default_ethos_u_main_0"]
    )
    mod = relay.transform.InferType()(mod)
    verify(mod["tvmgen_default_ethos_u_main_0"])
@pytest.mark.parametrize(
    "ifm_shape, begin, end",
    [([1, 1, 5, 8], [0, 0, 0, 0], [1, 1, 2, 3]), ([1, 3, 3], [0, 1, 2], [1, 2, 3])],
)
def test_tflite_strided_slice(ifm_shape, begin, end):
    """Legalize a TFLite STRIDED_SLICE: the strided_slice is kept and wrapped
    in a contrib.ethosu.identity with output shape end - begin."""
    dtype = "int8"
    def create_tflite_graph():
        # Build a single tf.strided_slice model and quantize it to int8 via
        # the TFLite converter.
        class Model(tf.Module):
            @tf.function
            def strided_slice_func(self, x):
                return tf.strided_slice(x, begin, end)
        model = Model()
        # Save the model
        concrete_func = model.strided_slice_func.get_concrete_function(
            tf.TensorSpec(ifm_shape, dtype=tf.float32)
        )
        # Convert the model
        def representative_dataset():
            # Random calibration data for post-training quantization.
            for _ in range(100):
                data = np.random.rand(*tuple(ifm_shape))
                yield [data.astype(np.float32)]
        converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        converter.representative_dataset = representative_dataset
        converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
        converter.inference_input_type = tf.int8
        converter.inference_output_type = tf.int8
        tflite_model = converter.convert()
        return tflite_model
    def verify(ext_func):
        identity = ext_func.body
        assert identity.op.name == "contrib.ethosu.identity"
        # check that the strided_slice is still there
        strided_slice = identity.args[0]
        assert strided_slice.op.name == "strided_slice"
        # check that identity's output shape matches strided slice's output shape
        size = list(np.array(end) - np.array(begin))
        assert list(identity.checked_type.shape) == size
    tflite_graph = create_tflite_graph()
    tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)
    mod, _ = relay.frontend.from_tflite(
        tflite_model,
        shape_dict={"input": ifm_shape},
        dtype_dict={"input": dtype},
    )
    strided_slice_pattern_table = [
        (
            ethosu.StridedSliceParams.composite_name,
            ethosu.strided_slice_pattern(),
            lambda pat: ethosu.StridedSliceParams(pat).is_valid(),
        ),
    ]
    mod = partition_ethosu_by_table(mod, strided_slice_pattern_table)
    # Legalize the strided slice, then clean up with the no-op rewriter.
    mod["tvmgen_default_ethos_u_main_0"] = dataflow_pattern.rewrite(
        legalize.StridedSliceRewriter(), mod["tvmgen_default_ethos_u_main_0"]
    )
    mod["tvmgen_default_ethos_u_main_0"] = dataflow_pattern.rewrite(
        legalize.NoOpRewriter(), mod["tvmgen_default_ethos_u_main_0"]
    )
    mod = relay.transform.InferType()(mod)
    verify(mod["tvmgen_default_ethos_u_main_0"])
@pytest.mark.parametrize("operator_type", ["ABS"])
@pytest.mark.parametrize(
    "ifm_shape",
    [[1, 2, 3, 4], [1, 7, 3], [8, 3, 1], [11, 22], [300]],
)
def test_tflite_unary_elemwise_legalize(
    operator_type,
    ifm_shape,
):
    """Legalize a TFLite unary elementwise op (currently only ABS) into the
    ethos-u unary elementwise operator; non-4D inputs get reshaped to 4D
    around the operator and back to the original shape on the output."""
    dtype = "int8"
    def create_tflite_graph():
        # Build a single-op TF model and quantize it to int8 via the
        # TFLite converter.
        class Model(tf.Module):
            @tf.function
            def abs_func(self, x):
                if operator_type == "ABS":
                    op = tf.math.abs(x)
                return op
        model = Model()
        # Save the model
        concrete_func = model.abs_func.get_concrete_function(
            tf.TensorSpec(ifm_shape, dtype=tf.float32)
        )
        # Convert the model
        def representative_dataset():
            # Random calibration data for post-training quantization.
            for _ in range(100):
                data = np.random.rand(*tuple(ifm_shape))
                yield [data.astype(np.float32)]
        converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        converter.representative_dataset = representative_dataset
        converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
        converter.inference_input_type = tf.int8
        converter.inference_output_type = tf.int8
        tflite_model = converter.convert()
        return tflite_model
    def verify(ext_func):
        out_shape = ifm_shape
        func_body = ext_func.body
        # If we legalized the unary elementwise op into 4D
        if func_body.op.name == "reshape":
            reshape = func_body
            unary = func_body.args[0]
            reshape2 = unary.args[0]
            # Check the input to the reshape
            reshape2_in_shape = [i for i in reshape2.args[0].checked_type.shape]
            assert reshape2_in_shape == ifm_shape
            # Check that the unary elementwise operator is 4D after reshape
            assert len(unary.checked_type.shape) == 4
            assert unary.args[0].checked_type.dtype == dtype
            # Check that the output of the graph has the same shape as input
            reshape_out_shape = [i for i in reshape.checked_type.shape]
            assert reshape_out_shape == ifm_shape
            assert unary.attrs.operator_type == operator_type
        else:
            unary = func_body
            # Check the IFM
            assert list(unary.args[0].checked_type.shape) == ifm_shape
            assert unary.args[0].checked_type.dtype == dtype
            # Check the OFM
            assert list(unary.checked_type.shape) == out_shape
            assert unary.checked_type.dtype == dtype
            # operator type check
            assert unary.attrs.operator_type == operator_type
    if operator_type == "ABS":
        rewriter = legalize.AbsRewriter()
        pattern_table = [
            (
                ethosu.AbsParams.composite_name,
                ethosu.abs_pattern(),
                lambda pat: ethosu.AbsParams(pat).is_valid(),
            ),
        ]
    tflite_graph = create_tflite_graph()
    tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)
    mod, _ = relay.frontend.from_tflite(
        tflite_model,
        shape_dict={"input": ifm_shape},
        dtype_dict={"input": dtype},
    )
    mod = partition_ethosu_by_table(mod, pattern_table)
    mod["tvmgen_default_ethos_u_main_0"] = dataflow_pattern.rewrite(
        rewriter, mod["tvmgen_default_ethos_u_main_0"]
    )
    verify(mod["tvmgen_default_ethos_u_main_0"])
def test_tflite_tanh_legalize():
    """TANH legalizes to a contrib.ethosu.identity call with activation
    "TANH" whose second argument is a 256-entry lookup table."""
    dtype = "int8"
    ifm_shape = (1, 241, 132, 7)
    def create_tflite_graph():
        # Build a single tf.math.tanh model and quantize it to int8 via
        # the TFLite converter.
        class Model(tf.Module):
            @tf.function
            def tanh_func(self, x):
                op = tf.math.tanh(x)
                return op
        model = Model()
        concrete_func = model.tanh_func.get_concrete_function(
            tf.TensorSpec(ifm_shape, dtype=tf.float32)
        )
        # Convert the model
        def representative_dataset():
            # Random calibration data for post-training quantization.
            for _ in range(100):
                data = np.random.rand(*tuple(ifm_shape))
                yield [data.astype(np.float32)]
        converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        converter.representative_dataset = representative_dataset
        converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
        converter.inference_input_type = tf.int8
        converter.inference_output_type = tf.int8
        tflite_model = converter.convert()
        return tflite_model
    tflite_graph = create_tflite_graph()
    tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)
    mod, params = relay.frontend.from_tflite(
        tflite_model,
        shape_dict={"input": ifm_shape},
        dtype_dict={"input": dtype},
    )
    mod = ethosu.partition_for_ethosu(mod, params)
    mod["tvmgen_default_ethos_u_main_0"] = dataflow_pattern.rewrite(
        legalize.TanhRewriter(), mod["tvmgen_default_ethos_u_main_0"]
    )
    mod = relay.transform.InferType()(mod)
    func_body = mod["tvmgen_default_ethos_u_main_0"].body
    assert func_body.op.name == "contrib.ethosu.identity"
    assert func_body.attrs.activation == "TANH"
    assert tuple(func_body.args[0].checked_type.shape) == (ifm_shape)
    # Second argument is the activation lookup table (256 entries).
    assert tuple(func_body.args[1].checked_type.shape) == (256,)
@pytest.mark.parametrize("dtype", ["int8", "uint8"])
@pytest.mark.parametrize(
    "ifm_shape, axis, keep_dims, use_same_quantization",
    [
        # mean to average pool
        [(1, 8, 16, 16), (1,), True, True],
        [(1, 8, 16, 16), (2,), False, True],
        [(1, 8, 16, 16), (1, 2), False, True],
        [(3, 3, 4), (0,), True, True],
        [(3, 3, 4), (1,), False, True],
        [(8, 5), (0,), False, True],
        [(8, 5), (1,), True, True],
        # mean to depthwise
        [(1, 8, 16, 16), (1,), True, False],
        [(1, 8, 16, 16), (2,), True, False],
        [(1, 8, 16, 16), (1, 2), False, False],
        [(8, 4), (0,), False, False],
        [(1, 65, 2, 1), (1, 2), True, False], # special case when h > 64
    ],
)
def test_mean(ifm_shape, axis, keep_dims, use_same_quantization, dtype):
    """Legalize MEAN to ethos-u: with matching input/output quantization it
    becomes an ethos-u pooling op, otherwise a depthwise_conv2d."""
    def create_tflite_graph():
        # Build a reduce_mean model via the TFLite int8 converter; the
        # converter inserts requantize, so input/output quantization differ.
        class Model(tf.Module):
            @tf.function
            def tf_function(self, x):
                op = tf.math.reduce_mean(x, axis=axis, keepdims=keep_dims)
                return op
        model = Model()
        concrete_func = model.tf_function.get_concrete_function(
            tf.TensorSpec(ifm_shape, dtype=tf.float32)
        )
        # Convert the model
        def representative_dataset():
            # Random calibration data for post-training quantization.
            for _ in range(100):
                data = np.random.rand(*tuple(ifm_shape))
                yield [data.astype(np.float32)]
        converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        converter.representative_dataset = representative_dataset
        converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
        converter.inference_input_type = tf.int8
        converter.inference_output_type = tf.int8
        tflite_model = converter.convert()
        tflite_model = tflite.Model.Model.GetRootAsModel(tflite_model, 0)
        mod, _ = relay.frontend.from_tflite(
            tflite_model,
            shape_dict={"input": ifm_shape},
            dtype_dict={"input": dtype},
        )
        return mod
    def create_relay_graph_with_same_quantization():
        # Hand-built Relay mean where input and output quantization are
        # identical (scale 1.0, zero point 0 on both sides).
        ifm = relay.var("input", shape=ifm_shape, dtype=dtype)
        cast = relay.cast(ifm, dtype="int32")
        mean = relay.mean(cast, axis=axis, keepdims=keep_dims)
        requantize = relay.qnn.op.requantize(
            mean,
            input_scale=relay.const(1.0, dtype="float32"),
            input_zero_point=relay.const(0, dtype="int32"),
            output_scale=relay.const(1.0, dtype="float32"),
            output_zero_point=relay.const(0, dtype="int32"),
            out_dtype=dtype,
        )
        func = relay.Function(relay.analysis.free_vars(requantize), requantize)
        mod = tvm.IRModule.from_expr(func)
        return mod
    def verify(ext_func):
        # Walk from the output past optional reshapes to find the
        # legalized pooling or depthwise op, then down to the input var.
        out_var = ext_func.body
        next_op = out_var
        pooling_op = None
        depthwise_op = None
        if (
            isinstance(next_op, relay.expr.Call)
            and isinstance(next_op.op, tvm.ir.op.Op)
            and next_op.op.name == "reshape"
        ):
            next_op = next_op.args[0]
        if util.is_named_ethosu_op(next_op, "pooling"):
            pooling_op = next_op
            next_op = next_op.args[0]
        if util.is_named_ethosu_op(next_op, "depthwise_conv2d"):
            depthwise_op = next_op
            next_op = next_op.args[0]
        while (
            isinstance(next_op, relay.expr.Call)
            and isinstance(next_op.op, tvm.ir.op.Op)
            and next_op.op.name == "reshape"
        ):
            next_op = next_op.args[0]
        in_var = next_op
        def calculate_expected_output_shape():
            # Reduced axes become 1 (kept) or are dropped (not kept).
            for i in range(len(ifm_shape)):
                if i in axis:
                    if keep_dims:
                        yield 1
                else:
                    yield ifm_shape[i]
        out_shape = tuple(calculate_expected_output_shape())
        # check IFM
        assert tuple(in_var.checked_type.shape) == ifm_shape
        if use_same_quantization:
            assert in_var.checked_type.dtype == dtype
        else:
            # in_var's dtype is equal to int8 due to TFLite's requantize
            assert in_var.checked_type.dtype == "int8"
        # check OFM
        assert tuple(out_var.checked_type.shape) == out_shape
        if use_same_quantization:
            assert out_var.checked_type.dtype == dtype
        else:
            # out_var's dtype is equal to int8 due to TFLite's requantize
            assert out_var.checked_type.dtype == "int8"
        # check expected legalization case
        if pooling_op:
            # Average-pool path: quantization is unchanged across the op.
            attrs = pooling_op.attrs
            assert (
                attrs.ifm_scale == attrs.ofm_scale and attrs.ifm_zero_point == attrs.ofm_zero_point
            )
        else:
            # Depthwise path: quantization differs between input and output.
            assert depthwise_op
            attrs = depthwise_op.attrs
            assert (
                attrs.ifm_scale != attrs.ofm_scale or attrs.ifm_zero_point != attrs.ofm_zero_point
            )
    rewriter = legalize.MeanRewriter()
    pattern_table = [
        (
            ethosu.MeanParams.composite_name,
            ethosu.mean_pattern(),
            lambda pat: ethosu.MeanParams(pat).is_valid(),
        ),
    ]
    mod = (
        create_relay_graph_with_same_quantization()
        if use_same_quantization
        else create_tflite_graph()
    )
    mod = partition_ethosu_by_table(mod, pattern_table)
    mod["tvmgen_default_ethos_u_main_0"] = dataflow_pattern.rewrite(
        rewriter, mod["tvmgen_default_ethos_u_main_0"]
    )
    verify(mod["tvmgen_default_ethos_u_main_0"])
@pytest.mark.parametrize(
    "ifm_shape, axis, keepdims, relu",
    [
        [(1, 4, 2, 8), 3, False, False],
        [(1, 4, 4, 1), 3, False, True],
        [(3, 5, 7), 2, False, True],
        [(1, 4, 2, 8), 3, True, False],
        [(3, 5, 7), 2, True, False],
    ],
)
def test_ethosu_sum(ifm_shape, axis, keepdims, relu):
    """Legalize REDUCE_SUM to ethos-u: a SUM pooling op followed by a MUL
    binary elementwise op (with optional CLIP when relu is requested)."""
    dtype = "int8"
    def create_tflite_graph():
        # Build a reduce_sum (optionally + relu) model and quantize to int8
        # via the TFLite converter.
        class Model(tf.Module):
            @tf.function
            def tf_function(self, x):
                op = tf.math.reduce_sum(x, axis=axis, keepdims=keepdims)
                return tf.nn.relu(op) if relu else op
        model = Model()
        concrete_func = model.tf_function.get_concrete_function(
            tf.TensorSpec(ifm_shape, dtype=tf.float32)
        )
        # Convert the model
        def representative_dataset():
            # Random calibration data for post-training quantization.
            for _ in range(100):
                data = np.random.rand(*tuple(ifm_shape))
                yield [data.astype(np.float32)]
        converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        converter.representative_dataset = representative_dataset
        converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
        converter.inference_input_type = tf.int8
        converter.inference_output_type = tf.int8
        tflite_model = converter.convert()
        tflite_model = tflite.Model.Model.GetRootAsModel(tflite_model, 0)
        mod, _ = relay.frontend.from_tflite(
            tflite_model,
            shape_dict={"input": ifm_shape},
            dtype_dict={"input": dtype},
        )
        return mod
    def verify(ext_func):
        # Walk from the output past optional reshapes: the legalized graph
        # is reshape? -> binary_elementwise(MUL) -> pooling(SUM) -> reshape? -> input.
        out_var = ext_func.body
        binary_elementwise_op = None
        pooling_op = None
        next_op = out_var
        if (
            isinstance(next_op, relay.expr.Call)
            and isinstance(next_op.op, tvm.ir.op.Op)
            and next_op.op.name == "reshape"
        ):
            next_op = next_op.args[0]
        binary_elementwise_op = next_op
        pooling_op = binary_elementwise_op.args[0]
        next_op = pooling_op.args[0]
        if (
            isinstance(next_op, relay.expr.Call)
            and isinstance(next_op.op, tvm.ir.op.Op)
            and next_op.op.name == "reshape"
        ):
            next_op = next_op.args[0]
        in_var = next_op
        def calculate_expected_output_shape():
            # The reduced axis becomes 1 (keepdims) or is dropped.
            for i in range(len(ifm_shape)):
                if i != axis:
                    yield ifm_shape[i]
                elif keepdims:
                    yield 1
        out_shape = tuple(calculate_expected_output_shape())
        # check IFM
        assert tuple(in_var.checked_type.shape) == ifm_shape
        assert in_var.checked_type.dtype == dtype
        # check OFM
        assert tuple(out_var.checked_type.shape) == out_shape
        assert out_var.checked_type.dtype == dtype
        # check expected legalization case
        assert pooling_op
        attrs = pooling_op.attrs
        assert attrs.pooling_type == "SUM"
        if relu:
            assert attrs.activation == "CLIP"
        assert binary_elementwise_op
        attrs = binary_elementwise_op.attrs
        assert attrs.operator_type == "MUL"
        assert attrs.ifm_channels == attrs.ifm2_channels == 1
        assert attrs.ofm_dtype == "int8"
    rewriter = legalize.SumRewriter()
    pattern_table = [
        (
            ethosu.SumParams.composite_name,
            ethosu.sum_pattern(),
            lambda pat: ethosu.SumParams(pat).is_valid(),
        ),
    ]
    mod = create_tflite_graph()
    mod = partition_ethosu_by_table(mod, pattern_table)
    mod["tvmgen_default_ethos_u_main_0"] = dataflow_pattern.rewrite(
        rewriter, mod["tvmgen_default_ethos_u_main_0"]
    )
    verify(mod["tvmgen_default_ethos_u_main_0"])
@pytest.mark.parametrize(
    "shapes, axis",
    [
        ([(2, 3), (4, 3)], 0),
        ([(10, 2, 1), (10, 14, 1)], 1),
        ([(10,), (13,), (14,)], 0),
        ([(1, 5, 2, 1), (1, 5, 7, 1), (1, 5, 3, 1)], 2),
    ],
)
def test_tflite_concat_legalize(shapes, axis):
    """Legalize TFLite CONCATENATION: the legalized op takes all the inputs
    and its output extent along `axis` is the sum of the input extents.

    Fix: compute that sum with the builtin ``sum`` — calling ``np.sum`` on
    a generator expression is deprecated in NumPy and merely delegates to
    the builtin anyway.
    """
    def create_tflite_graph():
        # Build a single tf.concat model over `shapes` and quantize it to
        # int8 via the TFLite converter.
        class Model(tf.Module):
            @tf.function
            def tf_function(self, shapes, axis):
                op = tf.concat(shapes, axis)
                return op

        model = Model()
        concrete_func = model.tf_function.get_concrete_function(
            [tf.TensorSpec(shape, tf.float32) for shape in shapes], axis
        )

        def representative_dataset():
            # Random calibration data for post-training quantization.
            for _ in range(100):
                datas = [np.random.rand(*shape) for shape in shapes]
                yield [data.astype(np.float32) for data in datas]

        converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        converter.representative_dataset = representative_dataset
        converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
        converter.inference_input_type = tf.int8
        converter.inference_output_type = tf.int8
        tflite_model = converter.convert()
        return tflite_model

    def verify(ext_func):
        # Expected output: same as the first input except along the concat
        # axis, whose extent is the sum of all the inputs' extents.
        new_concat_axis = sum(shape[axis] for shape in shapes)
        out_shape = list(shapes[0])
        out_shape[axis] = new_concat_axis
        op = ext_func.body
        for i, _ in enumerate(shapes):
            assert list(op.args[0][i].checked_type.shape) == list(shapes[i])
        assert list(op.checked_type.shape) == out_shape
        assert op.checked_type.dtype == "int8"

    concat_pattern_table = [
        (
            ethosu.ConcatParams.composite_name,
            ethosu.concat_pattern(),
            lambda pat: ethosu.ConcatParams(pat).is_valid(),
        )
    ]
    tflite_graph = create_tflite_graph()
    tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)
    relay_module, _ = relay.frontend.from_tflite(
        tflite_model,
        shape_dict={("ifm" + str(i)): shape for i, shape in enumerate(shapes)},
        dtype_dict={("ifm" + str(i)): "int8" for i, _ in enumerate(shapes)},
    )
    mod = partition_ethosu_by_table(relay_module, concat_pattern_table)
    mod["tvmgen_default_ethos_u_main_0"] = dataflow_pattern.rewrite(
        legalize.ConcatRewriter(), mod["tvmgen_default_ethos_u_main_0"]
    )
    mod["tvmgen_default_ethos_u_main_0"] = dataflow_pattern.rewrite(
        legalize.NoOpRewriter(), mod["tvmgen_default_ethos_u_main_0"]
    )
    mod["tvmgen_default_ethos_u_main_0"] = relay.transform.InferType()(mod)[
        "tvmgen_default_ethos_u_main_0"
    ]
    verify(mod["tvmgen_default_ethos_u_main_0"])
def test_tflite_sigmoid_legalize():
    """SIGMOID legalizes to a contrib.ethosu.identity call with activation
    "SIGMOID" whose second argument is a 256-entry lookup table."""
    dtype = "int8"
    ifm_shape = (1, 237, 91, 7)
    def create_tflite_graph():
        # Build a single tf.math.sigmoid model and quantize it to int8 via
        # the TFLite converter.
        class Model(tf.Module):
            @tf.function
            def sigmoid_func(self, x):
                op = tf.math.sigmoid(x)
                return op
        model = Model()
        concrete_func = model.sigmoid_func.get_concrete_function(
            tf.TensorSpec(ifm_shape, dtype=tf.float32)
        )
        # Convert the model
        def representative_dataset():
            # Random calibration data for post-training quantization.
            for _ in range(100):
                data = np.random.rand(*tuple(ifm_shape))
                yield [data.astype(np.float32)]
        converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        converter.representative_dataset = representative_dataset
        converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
        converter.inference_output_type = tf.int8
        converter.inference_input_type = tf.int8
        tflite_model = converter.convert()
        return tflite_model
    tflite_graph = create_tflite_graph()
    tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)
    mod, params = relay.frontend.from_tflite(
        tflite_model,
        shape_dict={"input": ifm_shape},
        dtype_dict={"input": dtype},
    )
    mod = ethosu.partition_for_ethosu(mod, params)
    mod["tvmgen_default_ethos_u_main_0"] = dataflow_pattern.rewrite(
        legalize.SigmoidRewriter(), mod["tvmgen_default_ethos_u_main_0"]
    )
    mod = relay.transform.InferType()(mod)
    func_body = mod["tvmgen_default_ethos_u_main_0"].body
    assert func_body.op.name == "contrib.ethosu.identity"
    assert func_body.attrs.activation == "SIGMOID"
    assert tuple(func_body.args[0].checked_type.shape) == (ifm_shape)
    # Second argument is the activation lookup table (256 entries).
    assert tuple(func_body.args[1].checked_type.shape) == (256,)
@pytest.mark.parametrize(
    "ifm_shape, num_or_size_splits, axis",
    [
        ((1, 4, 6, 8), 3, 2),
        ((4, 6, 8), 2, 0),
        ((5, 15), 3, 1),
        ((3, 7), 1, 1),
        ((100,), 25, 0),
    ],
)
def test_tflite_split_legalize(ifm_shape, num_or_size_splits, axis):
    """Legalize a TFLite SPLIT (equal chunks): after the partitioned split
    rewriter runs, a relay split with the same chunk count and axis
    remains in the external function."""
    dtype = "int8"
    def create_tflite_graph():
        # Build a single tf.split model and quantize it to int8 via the
        # TFLite converter.
        class Model(tf.Module):
            @tf.function
            def tf_function(self, x, num_or_size_splits, axis):
                op = tf.split(x, num_or_size_splits, axis=axis)
                return op
        model = Model()
        concrete_func = model.tf_function.get_concrete_function(
            tf.TensorSpec(ifm_shape, tf.float32), num_or_size_splits, axis
        )
        def representative_dataset():
            # Random calibration data for post-training quantization.
            for _ in range(100):
                data = np.random.rand(*tuple(ifm_shape))
                yield [data.astype(np.float32)]
        converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        converter.representative_dataset = representative_dataset
        converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
        converter.inference_input_type = tf.int8
        converter.inference_output_type = tf.int8
        tflite_model = converter.convert()
        return tflite_model
    def verify(ext_func):
        # dig out the split
        single_output_split = num_or_size_splits == 1
        split = (
            ext_func.body.tuple_value
            if single_output_split
            else ext_func.body.args[0][0].args[0].tuple_value
        )
        assert split.op.name == "split"
        # Split is specified by number of equal chunks
        assert split.attrs.indices_or_sections == num_or_size_splits
        assert split.attrs.axis == axis
    tflite_graph = create_tflite_graph()
    tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)
    mod, _ = relay.frontend.from_tflite(
        tflite_model,
        shape_dict={"input": ifm_shape},
        dtype_dict={"input": dtype},
    )
    mod = ethosu.partition_for_ethosu(mod)
    mod["tvmgen_default_ethos_u_main_0"] = dataflow_pattern.rewrite(
        legalize.PartitionedSplitRewriter(), mod["tvmgen_default_ethos_u_main_0"]
    )
    mod["tvmgen_default_ethos_u_main_0"] = relay.transform.InferType()(mod)[
        "tvmgen_default_ethos_u_main_0"
    ]
    verify(mod["tvmgen_default_ethos_u_main_0"])
@pytest.mark.parametrize(
    "ifm_shape, num_or_size_splits, axis",
    [
        ((1, 4, 6, 8), (1, 3, 4), 3),
        ((10, 18, 4), (1, 4, 3, 2), 0),
        ((22, 7), (4, -1), 1),
        ((25,), (25,), 0),
    ],
)
def test_tflite_split_v_legalize(ifm_shape, num_or_size_splits, axis):
    """Legalize a TFLite SPLIT_V (explicit section sizes) and check that the
    Relay ``split`` op carries the cumulative split indices and axis."""
    dtype = "int8"
    def create_tflite_graph():
        # Build an int8-quantized TFLite flatbuffer with a split-by-sizes op.
        class Model(tf.Module):
            @tf.function
            def tf_function(self, x, num_or_size_splits, axis):
                # TF split gets converted into TFLite's split_v
                op = tf.split(x, num_or_size_splits, axis=axis)
                return op
        model = Model()
        concrete_func = model.tf_function.get_concrete_function(
            tf.TensorSpec(ifm_shape, tf.float32), num_or_size_splits, axis
        )
        def representative_dataset():
            # Random calibration data for post-training quantization.
            for _ in range(100):
                data = np.random.rand(*tuple(ifm_shape))
                yield [data.astype(np.float32)]
        converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        converter.representative_dataset = representative_dataset
        converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
        converter.inference_input_type = tf.int8
        converter.inference_output_type = tf.int8
        tflite_model = converter.convert()
        return tflite_model
    def verify(ext_func):
        # dig out the split
        single_output_split = len(num_or_size_splits) == 1
        split = (
            ext_func.body.tuple_value
            if single_output_split
            else ext_func.body.args[0][0].args[0].tuple_value
        )
        assert split.op.name == "split"
        # Split is specified by the size of sections, so converting num_or_size_splits
        # into the indices where the tensor is split at since this is how split is represented
        # in Relay
        split_sections = [] if single_output_split else [num_or_size_splits[0]]
        for split_size in num_or_size_splits[1:-1]:
            sec = split_sections[-1] + split_size
            split_sections.append(sec)
        assert list(split.attrs.indices_or_sections) == split_sections
        assert split.attrs.axis == axis
    tflite_graph = create_tflite_graph()
    tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)
    mod, _ = relay.frontend.from_tflite(
        tflite_model,
        shape_dict={"input": ifm_shape},
        dtype_dict={"input": dtype},
    )
    mod = ethosu.partition_for_ethosu(mod)
    mod["tvmgen_default_ethos_u_main_0"] = dataflow_pattern.rewrite(
        legalize.PartitionedSplitRewriter(), mod["tvmgen_default_ethos_u_main_0"]
    )
    mod["tvmgen_default_ethos_u_main_0"] = relay.transform.InferType()(mod)[
        "tvmgen_default_ethos_u_main_0"
    ]
    verify(mod["tvmgen_default_ethos_u_main_0"])
@pytest.mark.parametrize(
    "ifm_shape,ifm_scale,ifm_zp,ofm_scale,ofm_zp",
    [[(1, 8, 8, 3), 1.0, 0, 1.0, 0], [(1, 20, 30, 3), 1.345, 34, 0.32, -23]],
)
def test_ethosu_requantize(ifm_shape, ifm_scale, ifm_zp, ofm_scale, ofm_zp):
    """Legalize a standalone qnn.requantize and check that the resulting NPU op
    preserves the input/output shapes, dtype and quantization parameters."""
    dtype = "int8"

    def build_module():
        # Single qnn.requantize wrapped in a function.
        inp = relay.var("ifm", shape=ifm_shape, dtype="int8")
        out = relay.qnn.op.requantize(
            inp,
            relay.const(ifm_scale, dtype="float32"),
            relay.const(ifm_zp, dtype="int32"),
            relay.const(ofm_scale, dtype="float32"),
            relay.const(ofm_zp, dtype="int32"),
        )
        return tvm.IRModule.from_expr(relay.Function([inp], out))

    def check(ext_func):
        call = ext_func.body
        # IFM and OFM both keep the original shape and dtype.
        for tensor_type in (call.args[0].checked_type, call.checked_type):
            assert list(tensor_type.shape) == list(ifm_shape)
            assert str(tensor_type.dtype) == dtype
        # Quantization parameters must be carried through to the legalized op.
        assert math.isclose(call.attrs.ifm_scale, ifm_scale, abs_tol=1e-7)
        assert call.attrs.ifm_zero_point == ifm_zp
        assert math.isclose(call.attrs.ofm_scale, ofm_scale, abs_tol=1e-7)
        assert call.attrs.ofm_zero_point == ofm_zp

    requantize_pattern_table = [
        (
            ethosu.RequantizeParams.composite_name,
            ethosu.requantize_pattern(),
            lambda pat: ethosu.RequantizeParams(pat).is_valid(),
        ),
    ]
    mod = partition_ethosu_by_table(build_module(), requantize_pattern_table)
    mod["tvmgen_default_ethos_u_main_0"] = dataflow_pattern.rewrite(
        legalize.RequantizeRewriter(), mod["tvmgen_default_ethos_u_main_0"]
    )
    check(mod["tvmgen_default_ethos_u_main_0"])
def test_multiple_requantize_offload():
    """
    Testing requantize offload in the case one requantize operation is part of
    an existing pattern (in this case Mean: cast->mean->requantize) and the
    other is a stand-alone requantize.
    """
    def create_model():
        # cast -> mean -> requantize (fused into Mean) -> requantize (standalone)
        ifm = relay.var("input", shape=(1, 3, 3, 4), dtype="int8")
        cast = relay.cast(ifm, dtype="int32")
        mean = relay.mean(cast, axis=1, keepdims=True)
        requantize = relay.qnn.op.requantize(
            mean,
            input_scale=relay.const(1.0, dtype="float32"),
            input_zero_point=relay.const(0, dtype="int32"),
            output_scale=relay.const(1.0, dtype="float32"),
            output_zero_point=relay.const(0, dtype="int32"),
        )
        requantize = relay.qnn.op.requantize(
            requantize,
            input_scale=relay.const(1.0, dtype="float32"),
            input_zero_point=relay.const(0, dtype="int32"),
            output_scale=relay.const(1.0, dtype="float32"),
            output_zero_point=relay.const(0, dtype="int32"),
        )
        return tvm.IRModule.from_expr(relay.Function([ifm], requantize))
    def verify(ext_func):
        # If mean operation and separate requantize were offloaded correctly,
        # there should only be a pooling operation followed by an identity
        # operation legalized.
        # Walk from the output towards the input, rebinding `op` at each step.
        # (Previously the second assert read `ext_func.body.args[0]` directly,
        # which is the same node as `op` here but hid the traversal.)
        op = ext_func.body
        assert op.op.name == "contrib.ethosu.identity"
        op = op.args[0]
        assert op.op.name == "contrib.ethosu.pooling"
        op = op.args[0]
        assert isinstance(op, relay.Var)
    mod = create_model()
    mod = ethosu.partition_for_ethosu(mod)
    mod = legalize.LegalizeEthosU()(mod)
    verify(mod["tvmgen_default_ethos_u_main_0"])
@pytest.mark.parametrize("ifm_shape,axis", [((2,), 0), ((1, 3, 3), 2)])
def test_tflite_expand_dims(ifm_shape, axis):
    """Legalize a TFLite EXPAND_DIMS; it should be lowered to a plain reshape
    that inserts a size-1 dimension at ``axis``."""
    dtype = "int8"
    def create_tflite_graph():
        # Build an int8-quantized TFLite flatbuffer with a single expand_dims.
        class Model(tf.Module):
            @tf.function
            def tf_function(self, x):
                return tf.expand_dims(x, axis=axis)
        model = Model()
        concrete_func = model.tf_function.get_concrete_function(
            tf.TensorSpec(ifm_shape, tf.float32)
        )
        def representative_dataset():
            # Random calibration data for post-training quantization.
            for _ in range(100):
                data = np.random.rand(*tuple(ifm_shape))
                yield [data.astype(np.float32)]
        converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        converter.representative_dataset = representative_dataset
        converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
        converter.inference_input_type = tf.int8
        converter.inference_output_type = tf.int8
        tflite_model = converter.convert()
        return tflite_model
    def verify(ext_func):
        op = ext_func.body
        expected_shape = list(ifm_shape)
        expected_shape.insert(axis, 1)
        # Check IFM
        assert list(op.args[0].checked_type.shape) == list(ifm_shape)
        assert op.args[0].checked_type.dtype == dtype
        # Check OFM
        assert list(op.checked_type.shape) == expected_shape
        assert op.checked_type.dtype == dtype
        # Check op
        assert op.op.name == "reshape"
    tflite_graph = create_tflite_graph()
    tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)
    mod, _ = relay.frontend.from_tflite(
        tflite_model,
        shape_dict={"input": ifm_shape},
        dtype_dict={"input": dtype},
    )
    mod = ethosu.partition_for_ethosu(mod)
    mod["tvmgen_default_ethos_u_main_0"] = dataflow_pattern.rewrite(
        legalize.ExpandDimsRewriter(), mod["tvmgen_default_ethos_u_main_0"]
    )
    mod["tvmgen_default_ethos_u_main_0"] = dataflow_pattern.rewrite(
        legalize.ReshapeRewriter(), mod["tvmgen_default_ethos_u_main_0"]
    )
    mod["tvmgen_default_ethos_u_main_0"] = relay.transform.InferType()(mod)[
        "tvmgen_default_ethos_u_main_0"
    ]
    verify(mod["tvmgen_default_ethos_u_main_0"])
@pytest.mark.parametrize(
    "ifm_shape,axis", [((1, 1, 2, 1), 0), ((1, 3, 3, 1), 3), ((1, 1, 2, 1), None)]
)
def test_tflite_squeeze(ifm_shape, axis):
    """Legalize a TFLite SQUEEZE; it should be lowered to a plain reshape that
    drops the requested axis (or all size-1 axes when ``axis`` is None)."""
    dtype = "int8"
    def create_tflite_graph():
        # Build an int8-quantized TFLite flatbuffer with a single squeeze.
        class Model(tf.Module):
            @tf.function
            def tf_function(self, x):
                return tf.squeeze(x, axis=axis)
        model = Model()
        concrete_func = model.tf_function.get_concrete_function(
            tf.TensorSpec(ifm_shape, tf.float32)
        )
        def representative_dataset():
            # Random calibration data for post-training quantization.
            for _ in range(100):
                data = np.random.rand(*tuple(ifm_shape))
                yield [data.astype(np.float32)]
        converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        converter.representative_dataset = representative_dataset
        converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
        converter.inference_input_type = tf.int8
        converter.inference_output_type = tf.int8
        tflite_model = converter.convert()
        return tflite_model
    def verify(ext_func):
        op = ext_func.body
        expected_shape = list(ifm_shape)
        if isinstance(axis, int):
            # Single axis: drop exactly that dimension.
            expected_shape = ifm_shape[:axis] + ifm_shape[axis + 1 :]
        else:
            # axis=None: squeeze removes every size-1 dimension.
            expected_shape = list(filter(lambda a: a != 1, expected_shape))
        # Check IFM
        assert list(op.args[0].checked_type.shape) == list(ifm_shape)
        assert op.args[0].checked_type.dtype == dtype
        # Check OFM
        assert list(op.checked_type.shape) == list(expected_shape)
        assert op.checked_type.dtype == dtype
        # Check op
        assert op.op.name == "reshape"
    tflite_graph = create_tflite_graph()
    tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)
    mod, _ = relay.frontend.from_tflite(
        tflite_model,
        shape_dict={"input": ifm_shape},
        dtype_dict={"input": dtype},
    )
    mod = ethosu.partition_for_ethosu(mod)
    mod["tvmgen_default_ethos_u_main_0"] = dataflow_pattern.rewrite(
        legalize.SqueezeRewriter(), mod["tvmgen_default_ethos_u_main_0"]
    )
    mod["tvmgen_default_ethos_u_main_0"] = dataflow_pattern.rewrite(
        legalize.ReshapeRewriter(), mod["tvmgen_default_ethos_u_main_0"]
    )
    mod["tvmgen_default_ethos_u_main_0"] = relay.transform.InferType()(mod)[
        "tvmgen_default_ethos_u_main_0"
    ]
    verify(mod["tvmgen_default_ethos_u_main_0"])
@pytest.mark.parametrize(
    "ifm_shape,size,half_pixel",
    [
        [(1, 2, 2, 1), (4, 4), False],
        [(1, 2, 2, 1), (4, 4), True],
        [(1, 4, 7, 3), (8, 14), False],
        [(1, 3, 5, 3), (3, 5), False],
        [(1, 6, 6, 96), (12, 12), False],
        [(1, 6, 6, 96), (12, 12), True],
    ],
)
def test_tflite_resize2d_nearest_neighbor(ifm_shape, size, half_pixel):
    """Legalize TFLite RESIZE_NEAREST_NEIGHBOR: a no-op resize becomes identity,
    otherwise an AVG pooling with NEAREST upscale."""
    align_corners = False
    dtype = "int8"
    def create_tflite_graph():
        # Build an int8-quantized TFLite model with one nearest-neighbor resize
        # and import it into Relay.
        @tf.function
        def resize_model(x):
            return tf.compat.v1.image.resize_nearest_neighbor(
                x,
                size,
                align_corners=align_corners,
                half_pixel_centers=half_pixel,
            )
        concrete_func = resize_model.get_concrete_function(
            tf.TensorSpec(ifm_shape, dtype=tf.float32)
        )
        def representative_dataset():
            # Random calibration data for post-training quantization.
            for _ in range(100):
                data = np.random.rand(*tuple(ifm_shape))
                yield [data.astype(np.float32)]
        converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        converter.representative_dataset = representative_dataset
        converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
        converter.inference_input_type = tf.int8
        converter.inference_output_type = tf.int8
        tflite_model = converter.convert()
        tflite_model = tflite.Model.Model.GetRootAsModel(tflite_model, 0)
        mod, _ = relay.frontend.from_tflite(
            tflite_model,
            shape_dict={"input": ifm_shape},
            dtype_dict={"input": dtype},
        )
        return mod
    def verify(ext_func):
        op = ext_func.body
        in_var = op.args[0]
        # check IFM
        assert tuple(in_var.checked_type.shape) == ifm_shape
        assert in_var.checked_type.dtype == dtype
        # check OFM
        attrs = dict(op.attrs)
        out_shape = (ifm_shape[0], size[0], size[1], ifm_shape[3])
        assert tuple(op.checked_type.shape) == out_shape
        assert op.checked_type.dtype == dtype
        # Check Op attributes
        if size[0] == ifm_shape[1] and size[1] == ifm_shape[2]:
            # Same-size resize is a no-op and is legalized to identity.
            assert op.op.name == "contrib.ethosu.identity"
        else:
            assert attrs["pooling_type"] == "AVG"
            assert attrs["upscale"] == "NEAREST"
    rewriter = legalize.Resize2dRewriter()
    pattern_table = [
        (
            ethosu.Resize2dParams.composite_name,
            ethosu.resize2d_pattern(),
            lambda pat: ethosu.Resize2dParams(pat).is_valid(),
        ),
    ]
    mod = create_tflite_graph()
    mod = partition_ethosu_by_table(mod, pattern_table)
    mod["tvmgen_default_ethos_u_main_0"] = dataflow_pattern.rewrite(
        rewriter, mod["tvmgen_default_ethos_u_main_0"]
    )
    verify(mod["tvmgen_default_ethos_u_main_0"])
@pytest.mark.parametrize(
    "ifm_shape,size,align_corners",
    [
        [(1, 2, 2, 1), (4, 4), False],
        [(1, 4, 7, 3), (8, 14), False],
        [(1, 2, 2, 1), (3, 3), True],
        [(1, 4, 7, 3), (7, 13), True],
        [(1, 3, 5, 3), (3, 5), False],
    ],
)
def test_tflite_resize2d_bilinear(ifm_shape, size, align_corners):
    """Legalize TFLite RESIZE_BILINEAR: a no-op resize becomes identity, otherwise
    an AVG pooling with NEAREST upscale whose padding encodes align_corners."""
    dtype = "int8"
    def create_tflite_graph():
        # Build an int8-quantized TFLite model with one bilinear resize and
        # import it into Relay.
        @tf.function
        def resize_model(x):
            return tf.compat.v1.image.resize_bilinear(
                x, size, align_corners=align_corners, half_pixel_centers=False
            )
        concrete_func = resize_model.get_concrete_function(
            tf.TensorSpec(ifm_shape, dtype=tf.float32)
        )
        def representative_dataset():
            # Random calibration data for post-training quantization.
            for _ in range(100):
                data = np.random.rand(*tuple(ifm_shape))
                yield [data.astype(np.float32)]
        converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        converter.representative_dataset = representative_dataset
        converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
        converter.inference_input_type = tf.int8
        converter.inference_output_type = tf.int8
        tflite_model = converter.convert()
        tflite_model = tflite.Model.Model.GetRootAsModel(tflite_model, 0)
        mod, _ = relay.frontend.from_tflite(
            tflite_model,
            shape_dict={"input": ifm_shape},
            dtype_dict={"input": dtype},
        )
        return mod
    def verify(ext_func):
        op = ext_func.body
        in_var = op.args[0]
        # check IFM
        assert tuple(in_var.checked_type.shape) == ifm_shape
        assert in_var.checked_type.dtype == dtype
        # check OFM
        attrs = dict(op.attrs)
        out_shape = (ifm_shape[0], size[0], size[1], ifm_shape[3])
        assert tuple(op.checked_type.shape) == out_shape
        assert op.checked_type.dtype == dtype
        # Check Op attributes
        if size[0] == ifm_shape[1] and size[1] == ifm_shape[2]:
            # Same-size resize is a no-op and is legalized to identity.
            assert op.op.name == "contrib.ethosu.identity"
        else:
            assert attrs["pooling_type"] == "AVG"
            assert attrs["upscale"] == "NEAREST"
        # Check padding
        # Without align_corners the legalization adds bottom/right padding of 1.
        if align_corners:
            assert list(attrs["padding"]) == [0, 0, 0, 0]
        else:
            assert list(attrs["padding"]) == [0, 0, 1, 1]
    rewriter = legalize.Resize2dRewriter()
    pattern_table = [
        (
            ethosu.Resize2dParams.composite_name,
            ethosu.resize2d_pattern(),
            lambda pat: ethosu.Resize2dParams(pat).is_valid(),
        ),
    ]
    mod = create_tflite_graph()
    mod = partition_ethosu_by_table(mod, pattern_table)
    mod["tvmgen_default_ethos_u_main_0"] = dataflow_pattern.rewrite(
        rewriter, mod["tvmgen_default_ethos_u_main_0"]
    )
    verify(mod["tvmgen_default_ethos_u_main_0"])
@pytest.mark.parametrize(
    "ifm_shape,ofm_shape,kernel_shape,padding",
    [
        [(1, 2, 2, 1), (1, 4, 4, 1), (3, 3), "SAME"],
        [(1, 2, 2, 1), (1, 9, 9, 1), (7, 7), "VALID"],
        [(1, 2, 4, 3), (1, 4, 8, 3), (3, 3), "SAME"],
        [(1, 10, 5, 3), (1, 21, 13, 3), (3, 5), "VALID"],
    ],
)
@pytest.mark.parametrize("has_bias", [False, True])
def test_tflite_transpose_convolution(ifm_shape, ofm_shape, kernel_shape, padding, has_bias):
    """Legalize a TFLite TRANSPOSE_CONV (optionally with bias) and check the
    resulting conv2d's shapes, OHWI weights, padding and unit strides."""
    dtype = "int8"
    dilations = (1, 1)
    strides = (2, 2)
    def create_tflite_graph():
        # Build an int8-quantized TFLite model with a conv2d_transpose (plus
        # optional bias_add) and import it into Relay.
        @tf.function
        def conv2d_transpose(x):
            bias_shape = ofm_shape[3]
            bias = tf.constant(np.random.uniform(size=bias_shape), dtype=tf.float32)
            weight_shape = [kernel_shape[0], kernel_shape[1], ifm_shape[3], ofm_shape[3]]
            weight = tf.constant(np.random.uniform(size=weight_shape), dtype=tf.float32)
            tf_strides = [1, strides[0], strides[1], 1]
            op = tf.nn.conv2d_transpose(
                x,
                weight,
                output_shape=ofm_shape,
                strides=tf_strides,
                padding=padding,
                dilations=dilations,
            )
            if has_bias:
                op = tf.nn.bias_add(op, bias)
            return op
        concrete_func = conv2d_transpose.get_concrete_function(
            tf.TensorSpec(ifm_shape, dtype=tf.float32)
        )
        def representative_dataset():
            # Random calibration data for post-training quantization.
            for _ in range(100):
                data = np.random.rand(*tuple(ifm_shape))
                yield [data.astype(np.float32)]
        converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        converter.representative_dataset = representative_dataset
        converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
        converter.inference_input_type = tf.int8
        converter.inference_output_type = tf.int8
        tflite_model = converter.convert()
        tflite_model = tflite.Model.Model.GetRootAsModel(tflite_model, 0)
        mod, params = relay.frontend.from_tflite(
            tflite_model,
            shape_dict={"input": ifm_shape},
            dtype_dict={"input": dtype},
        )
        return mod, params
    def verify(ext_func):
        # The transpose conv is legalized to a conv2d followed by a strided_slice.
        strided_slice = ext_func.body
        conv = strided_slice.args[0]
        ofm_channels = conv.attrs.ofm_channels
        # Check IFM
        ifm = conv.args[0].checked_type
        assert list(ifm.shape) == list(ifm_shape)
        assert str(ifm.dtype) == dtype
        # NOTE(review): all parametrized cases have ifm channels == ofm channels,
        # so this holds, but it is not a general transpose-conv property.
        assert ifm.shape[3] == ofm_channels
        # Check OFM
        ofm = strided_slice.checked_type
        assert list(ofm.shape) == list(ofm_shape)
        assert str(ofm.dtype) == dtype
        assert ofm.shape[3] == ofm_channels
        # Check weights
        weights_ohwi = conv.args[1].data.asnumpy()
        assert str(weights_ohwi.dtype) == dtype
        assert list(weights_ohwi.shape) == [
            ofm_channels,
            kernel_shape[0],
            kernel_shape[1],
            ifm_shape[3],
        ]
        # Check that scale_bias matches weight tensor
        assert list(conv.args[2].checked_type.shape)[0] == ofm_channels
        # Calculate expected padding for conv2d op
        if padding == "VALID":
            expected_padding = [0, 0, 0, 0]
        elif padding == "SAME":
            pad_top, pad_bottom = get_pad_value(ofm_shape[1], kernel_shape[0], strides[0])
            pad_left, pad_right = get_pad_value(ofm_shape[2], kernel_shape[1], strides[1])
            expected_padding = [pad_top, pad_left, pad_bottom, pad_right]
        pad_top = kernel_shape[0] - 1 - expected_padding[0]
        pad_left = kernel_shape[1] - 1 - expected_padding[1]
        pad_bottom = kernel_shape[0] - 1 - expected_padding[2]
        pad_right = kernel_shape[1] - 1 - expected_padding[3]
        # NOTE(review): `strides` is the tuple (2, 2), so this tuple-vs-list
        # comparison is always False and the branch never runs — confirm whether
        # the -1 adjustment was intended to apply (if so, compare to (2, 2)).
        if strides == [2, 2]:
            pad_bottom -= 1
            pad_right -= 1
        expected_padding = [pad_top, pad_left, pad_bottom, pad_right]
        assert list(conv.attrs.padding) == list(expected_padding)
        # Striding is absorbed by the upscale + strided_slice; conv runs at 1x1.
        assert list(conv.attrs.strides) == [1, 1]
    rewriter = legalize.Conv2DTransposeRewriter()
    pattern_table = [
        (
            ethosu.QnnConv2DTransposeParams.composite_name,
            ethosu.qnn_conv2d_transpose_pattern(),
            lambda pat: ethosu.QnnConv2DTransposeParams(pat).is_valid(),
        ),
    ]
    mod, params = create_tflite_graph()
    mod["main"] = bind_params_by_name(mod["main"], params)
    mod = partition_ethosu_by_table(mod, pattern_table)
    mod["tvmgen_default_ethos_u_main_0"] = dataflow_pattern.rewrite(
        rewriter, mod["tvmgen_default_ethos_u_main_0"]
    )
    verify(mod["tvmgen_default_ethos_u_main_0"])
@pytest.mark.parametrize(
    "ifm_shapes,axis",
    [
        ([(1, 2, 2), (1, 2, 2), (1, 2, 2)], 2),
        ([(5, 4), (5, 4)], 1),
        ([(1,), (1,)], 0),
        ([(3, 1), (3, 1), (3, 1), (3, 1)], 0),
    ],
)
def test_tflite_pack(ifm_shapes, axis):
    """Legalize a TFLite PACK (tf.stack); it should decompose into per-input
    reshapes (expand_dims) followed by a concatenate along ``axis``."""
    dtype = "int8"
    def create_tflite_graph():
        # Build an int8-quantized TFLite flatbuffer containing a single stack.
        class Model(tf.Module):
            @tf.function
            def tf_function(self, inputs, axis):
                return tf.stack(inputs, axis=axis)
        model = Model()
        concrete_func = model.tf_function.get_concrete_function(
            [tf.TensorSpec(shape, tf.float32) for shape in ifm_shapes], axis
        )
        def representative_dataset():
            # Random calibration data, one sample per input tensor.
            for _ in range(100):
                datas = [np.random.rand(*shape) for shape in ifm_shapes]
                yield [data.astype(np.float32) for data in datas]
        converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        converter.representative_dataset = representative_dataset
        converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
        converter.inference_input_type = tf.int8
        converter.inference_output_type = tf.int8
        tflite_model = converter.convert()
        return tflite_model
    def verify(ext_func):
        new_pack_axis = len(ifm_shapes)
        ifm_shape = list(ifm_shapes[0])
        op = ext_func.body
        after_reshape = ifm_shape[:axis] + [1] + ifm_shape[axis:]
        out_shape = ifm_shape[:axis] + [new_pack_axis] + ifm_shape[axis:]
        assert op.op.name == "concatenate"
        # Check shapes after expand_dims (legalized as reshape)
        for i in range(len(ifm_shapes)):
            assert list(op.args[0][i].checked_type.shape) == after_reshape
            assert op.args[0][i].checked_type.dtype == dtype
        # Check output
        assert list(op.checked_type.shape) == out_shape
        assert op.checked_type.dtype == dtype
    pack_pattern_table = [
        (
            ethosu.ConcatParams.composite_name,
            ethosu.concat_pattern(),
            lambda pat: ethosu.ConcatParams(pat).is_valid(),
        ),
        (
            ethosu.ExpandDimsParams.composite_name,
            ethosu.expand_dims_pattern(),
            lambda pat: ethosu.ExpandDimsParams(pat).is_valid(),
        ),
        (
            ethosu.ReshapeParams.composite_name,
            ethosu.reshape_pattern(),
            lambda pat: ethosu.ReshapeParams(pat).is_valid(),
        ),
    ]
    tflite_graph = create_tflite_graph()
    tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)
    relay_module, _ = relay.frontend.from_tflite(
        tflite_model,
        shape_dict={("ifm" + str(i)): shape for i, shape in enumerate(ifm_shapes)},
        dtype_dict={("ifm" + str(i)): dtype for i, _ in enumerate(ifm_shapes)},
    )
    mod = partition_ethosu_by_table(relay_module, pack_pattern_table)
    # Rewriter order matters: concat first, then expand_dims/reshape, then no-ops.
    seq = [
        legalize.ConcatRewriter(),
        legalize.ExpandDimsRewriter(),
        legalize.ReshapeRewriter(),
        legalize.NoOpRewriter(),
    ]
    for legalizer in seq:
        mod["tvmgen_default_ethos_u_main_0"] = dataflow_pattern.rewrite(
            legalizer, mod["tvmgen_default_ethos_u_main_0"]
        )
    mod["tvmgen_default_ethos_u_main_0"] = relay.transform.InferType()(mod)[
        "tvmgen_default_ethos_u_main_0"
    ]
    verify(mod["tvmgen_default_ethos_u_main_0"])
@pytest.mark.parametrize(
    "ifm_shape,axis",
    [[(1, 2, 3, 4), 1], [(2, 3), 1], [(5, 6, 7), 2]],
)
def test_tflite_unpack(ifm_shape, axis):
    """Legalize a TFLite UNPACK (tf.unstack); it should decompose into
    strided_slices along ``axis`` followed by reshapes dropping that axis."""
    dtype = "int8"
    def create_tflite_graph():
        # Build an int8-quantized TFLite flatbuffer containing a single unstack.
        class Model(tf.Module):
            @tf.function
            def tf_function(self, x, axis):
                return tf.unstack(x, axis=axis)
        model = Model()
        concrete_func = model.tf_function.get_concrete_function(
            tf.TensorSpec(ifm_shape, tf.float32), axis
        )
        def representative_dataset():
            # Random calibration data for post-training quantization.
            for _ in range(100):
                data = np.random.rand(*tuple(ifm_shape))
                yield [data.astype(np.float32)]
        converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        converter.representative_dataset = representative_dataset
        converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
        converter.inference_input_type = tf.int8
        converter.inference_output_type = tf.int8
        tflite_model = converter.convert()
        return tflite_model
    def verify(ext_func):
        # One output per slice along the unpacked axis.
        outputs = ext_func.body.args[0].fields
        shape = list(ifm_shape)
        unpacked_shape = shape[:axis] + shape[axis + 1 :]
        split_shape = shape[:axis] + [1] + shape[axis + 1 :]
        assert len(outputs) == shape[axis]
        for i, output in enumerate(outputs):
            # NOTE(review): the args[0]/tuple_value walk below mirrors the exact
            # wrapper structure emitted by the rewriters — confirm against the
            # SplitRewriter/SqueezeRewriter output if it changes.
            expr = output.args[0].args[0]
            expr = expr.tuple_value[expr.index]
            expr = expr.args[0]
            # Checking expected unpacked output shape.
            # Squeeze is legalized to a reshape.
            assert expr.op.name == "reshape"
            assert list(expr.checked_type.shape) == unpacked_shape
            assert output.checked_type.dtype == dtype
            expr = expr.args[0]
            expr = expr.tuple_value[expr.index]
            expr = expr.args[0]
            # Check input is split correctly
            assert list(expr.args[0].checked_type.shape) == shape
            assert list(expr.checked_type.shape) == split_shape
            assert expr.checked_type.dtype == dtype
            # Check split attrs
            # Each slice selects a size-1 window [i, i+1) along `axis`.
            begin_shape = [0] * len(ifm_shape)
            begin_shape[axis] = i
            assert list(expr.attrs.begin) == begin_shape
            end_shape = shape[:axis] + [i + 1] + shape[axis + 1 :]
            assert list(expr.attrs.end) == end_shape
            assert list(expr.attrs.strides) == [1]
    pack_pattern_table = [
        (
            ethosu.SplitParams.composite_name,
            ethosu.split_pattern(),
            lambda pat: ethosu.SplitParams(pat).is_valid(),
        ),
        (
            ethosu.SqueezeParams.composite_name,
            ethosu.squeeze_pattern(),
            lambda pat: ethosu.SqueezeParams(pat).is_valid(),
        ),
        (
            ethosu.ReshapeParams.composite_name,
            ethosu.reshape_pattern(),
            lambda pat: ethosu.ReshapeParams(pat).is_valid(),
        ),
    ]
    tflite_graph = create_tflite_graph()
    tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)
    mod, _ = relay.frontend.from_tflite(
        tflite_model,
        shape_dict={"input": ifm_shape},
        dtype_dict={"input": dtype},
    )
    mod = partition_ethosu_by_table(mod, pack_pattern_table)
    # Rewriter order matters: split first, then squeeze/reshape, then no-ops.
    seq = [
        legalize.PartitionedSplitRewriter(),
        legalize.SplitRewriter(),
        legalize.SqueezeRewriter(),
        legalize.ReshapeRewriter(),
        legalize.NoOpRewriter(),
    ]
    for legalizer in seq:
        mod["tvmgen_default_ethos_u_main_0"] = dataflow_pattern.rewrite(
            legalizer, mod["tvmgen_default_ethos_u_main_0"]
        )
    mod["tvmgen_default_ethos_u_main_0"] = relay.transform.InferType()(mod)[
        "tvmgen_default_ethos_u_main_0"
    ]
    verify(mod["tvmgen_default_ethos_u_main_0"])
@pytest.mark.parametrize("ifm_shape", [(1, 15, 15, 3), (1, 8, 9, 1)])
@pytest.mark.parametrize("alpha", [0.2, 0.634])
def test_tflite_leaky_relu(ifm_shape, alpha):
    """Legalize a TFLite LEAKY_RELU; it should become an NPU identity op whose
    activation is a 256-entry lookup table."""
    dtype = "int8"

    def build_tflite_model():
        # Single leaky_relu traced into an int8-quantized TFLite flatbuffer.
        class LeakyReluModule(tf.Module):
            @tf.function
            def act(self, x):
                return tf.nn.leaky_relu(x, alpha=alpha)

        traced = LeakyReluModule().act.get_concrete_function(
            tf.TensorSpec(ifm_shape, tf.float32),
        )

        def calibration_data():
            for _ in range(100):
                yield [np.random.rand(*tuple(ifm_shape)).astype(np.float32)]

        converter = tf.lite.TFLiteConverter.from_concrete_functions([traced])
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        converter.representative_dataset = calibration_data
        converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
        converter.inference_input_type = tf.int8
        converter.inference_output_type = tf.int8
        return converter.convert()

    def check(ext_func):
        body = ext_func.body
        # The op must be an identity with LUT activation; args[1] is the table.
        assert body.op.name == "contrib.ethosu.identity"
        assert body.attrs.activation == "LUT"
        assert tuple(body.args[0].checked_type.shape) == ifm_shape
        assert tuple(body.args[1].checked_type.shape) == (256,)

    flatbuffer = tflite.Model.Model.GetRootAsModel(build_tflite_model(), 0)
    mod, _ = relay.frontend.from_tflite(
        flatbuffer,
        shape_dict={"input": ifm_shape},
        dtype_dict={"input": dtype},
    )
    mod = ethosu.partition_for_ethosu(mod)
    mod["tvmgen_default_ethos_u_main_0"] = dataflow_pattern.rewrite(
        legalize.LeakyReLURewriter(), mod["tvmgen_default_ethos_u_main_0"]
    )
    mod["tvmgen_default_ethos_u_main_0"] = relay.transform.InferType()(mod)[
        "tvmgen_default_ethos_u_main_0"
    ]
    check(mod["tvmgen_default_ethos_u_main_0"])
@pytest.mark.parametrize("ifm_shape", [(1, 14), (1, 151)])
@pytest.mark.parametrize("ofm_channels", [32, 64])
@pytest.mark.parametrize("use_bias", [True, False])
@pytest.mark.parametrize("activation_function", ["RELU", "NONE"])
def test_tflite_fully_connected(
    ifm_shape,
    ofm_channels,
    use_bias,
    activation_function,
):
    """Legalize a TFLite FULLY_CONNECTED; the matmul should be rewritten as a
    1x1 NPU conv2d over a [1, 1, *ifm_shape] feature map with OHWI weights."""
    dtype = "int8"
    def create_tflite_graph():
        # Build an int8-quantized TFLite flatbuffer: matmul (+ optional bias,
        # + optional relu).
        class Model(tf.Module):
            @tf.function
            def fully_connected(self, x):
                bias_shape = ofm_channels
                bias = tf.constant(np.random.uniform(size=bias_shape), dtype=tf.float32)
                w = tf.constant(
                    np.random.uniform(size=[ifm_shape[1], ofm_channels]),
                    dtype=tf.float32,
                )
                x = tf.matmul(x, w)
                if use_bias:
                    x = tf.nn.bias_add(x, bias)
                # Bug fix: `if activation_function:` was always truthy because the
                # parameter is a non-empty string ("RELU" or "NONE"), so the "NONE"
                # parametrization still applied a relu. Compare explicitly.
                if activation_function == "RELU":
                    x = tf.nn.relu(x)
                return x
        model = Model()
        concrete_func = model.fully_connected.get_concrete_function(
            tf.TensorSpec(ifm_shape, dtype=tf.float32)
        )
        # Convert the model
        def representative_dataset():
            # Random calibration data for post-training quantization.
            for _ in range(100):
                data = np.random.rand(*tuple(ifm_shape))
                yield [data.astype(np.float32)]
        converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        converter.representative_dataset = representative_dataset
        converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
        converter.inference_input_type = tf.int8
        converter.inference_output_type = tf.int8
        tflite_model = converter.convert()
        return tflite_model
    def verify(ext_func):
        # ext_func.body is a reshape back to 2D; args[0] is the legalized conv.
        op = ext_func.body.args[0]
        ofm_channels = op.attrs.ofm_channels
        # check IFM: the 2D input is padded to NHWC [1, 1, n, c]
        ifm = op.args[0].checked_type
        assert list(ifm.shape) == [1, 1] + list(ifm_shape)
        assert str(ifm.dtype) == dtype
        # check OFM
        ofm = op.checked_type
        assert list(ofm.shape) == [1, 1, 1, ofm_channels]
        assert str(ofm.dtype) == dtype
        # check weights: transposed to OHWI with 1x1 spatial dims
        weights_ohwi = op.args[1].data.asnumpy()
        assert str(weights_ohwi.dtype) == dtype
        assert list(weights_ohwi.shape) == [ofm_channels, 1, 1, ifm_shape[1]]
        # Check that scale_bias matches weight tensor
        assert list(op.args[2].checked_type.shape)[0] == ofm_channels
        assert list(op.attrs.padding) == [0, 0, 0, 0]
        assert list(op.attrs.strides) == [1, 1]
        assert list(op.attrs.dilation) == [1, 1]
        if activation_function == "RELU":
            assert str(op.attrs.activation) == "CLIP"
    fc_pattern_table = [
        (
            ethosu.FullyConnectedParams.composite_name,
            ethosu.qnn_fc_pattern(),
            lambda pat: ethosu.FullyConnectedParams(pat).is_valid(),
        )
    ]
    tflite_graph = create_tflite_graph()
    tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)
    mod, fc_params = relay.frontend.from_tflite(
        tflite_model,
        shape_dict={"input": ifm_shape},
        dtype_dict={"input": dtype},
    )
    mod["main"] = bind_params_by_name(mod["main"], fc_params)
    mod = partition_ethosu_by_table(mod, fc_pattern_table)
    mod["tvmgen_default_ethos_u_main_0"] = dataflow_pattern.rewrite(
        legalize.FullyConnectedRewriter(), mod["tvmgen_default_ethos_u_main_0"]
    )
    verify(mod["tvmgen_default_ethos_u_main_0"])
@pytest.mark.parametrize("ifm_shape", [(1, 5, 5, 3), (1, 12, 9, 1)])
def test_tflite_hard_swish(ifm_shape):
    """Legalize a TFLite HARD_SWISH (x * relu6(x + 3) / 6); it should become an
    NPU identity op with a 256-entry LUT activation."""
    dtype = "int8"
    def create_tflite_graph():
        # Build an int8-quantized TFLite flatbuffer; the Lambda expresses the
        # hard-swish formula so the converter emits a HARD_SWISH op.
        class Model(tf.Module):
            @tf.function
            def tf_function(self, x):
                op = tf.keras.layers.Lambda(
                    lambda x: x * tf.keras.activations.relu(x + 3.0, max_value=6.0) / 6.0
                )(x)
                return op
        model = Model()
        concrete_func = model.tf_function.get_concrete_function(
            tf.TensorSpec(ifm_shape, tf.float32)
        )
        def representative_dataset():
            # Random calibration data for post-training quantization.
            for _ in range(100):
                data = np.random.rand(*tuple(ifm_shape))
                yield [data.astype(np.float32)]
        converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        converter.representative_dataset = representative_dataset
        converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
        converter.inference_input_type = tf.int8
        converter.inference_output_type = tf.int8
        tflite_model = converter.convert()
        return tflite_model
    tflite_graph = create_tflite_graph()
    tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)
    mod, params = relay.frontend.from_tflite(
        tflite_model,
        shape_dict={"input": ifm_shape},
        dtype_dict={"input": dtype},
    )
    mod = ethosu.partition_for_ethosu(mod, params)
    mod["tvmgen_default_ethos_u_main_0"] = dataflow_pattern.rewrite(
        legalize.HardSwishRewriter(), mod["tvmgen_default_ethos_u_main_0"]
    )
    mod = relay.transform.InferType()(mod)
    # Identity op with LUT activation; args[1] is the 256-entry lookup table.
    func_body = mod["tvmgen_default_ethos_u_main_0"].body
    assert func_body.op.name == "contrib.ethosu.identity"
    assert func_body.attrs.activation == "LUT"
    assert tuple(func_body.args[0].checked_type.shape) == (ifm_shape)
    assert tuple(func_body.args[1].checked_type.shape) == (256,)
def test_tflite_softmax():
    """Legalize a quantized TFLite softmax and verify the resulting chain of
    NPU primitive operations together with their quantization parameters."""
    np.random.seed(0)
    dtype = "int8"
    ifm_shape = (1, 12)

    def create_tflite_graph():
        # Build a single-op softmax model and post-training quantize it to int8.
        @tf.function
        def softmax(x):
            return tf.nn.softmax(x)

        concrete_func = softmax.get_concrete_function(tf.TensorSpec(ifm_shape, dtype=tf.float32))

        # Convert the model
        def representative_dataset():
            # Calibration samples for the quantizer.
            for _ in range(100):
                data = np.random.uniform(low=-1, high=2, size=tuple(ifm_shape))
                yield [data.astype(np.float32)]

        converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        converter.representative_dataset = representative_dataset
        converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
        converter.inference_input_type = tf.int8
        converter.inference_output_type = tf.int8
        tflite_model = converter.convert()
        return tflite_model

    def verify(ext_func):
        # Collect every relay Call in post order and compare each against the
        # expected (op name, op type, quantization params) triples below.
        out_op = ext_func.body
        ops = []
        # List of expected operations, their type and activation parameters if it exists
        expected_ops_params = [
            ("reshape", None, [None, None, None, None, None, None]),
            ("reshape", None, [None, None, None, None, None, None]),
            ("contrib.ethosu.pooling", "MAX", [0.011756093241274357, -43, None, None, 0.0, -43]),
            (
                "contrib.ethosu.binary_elementwise",
                "SUB",
                [0.011756093241274357, -43, 0.0, -43, 1.0, 127],
            ),
            ("contrib.ethosu.binary_elementwise", "SHR", [1.0, 0, 0.0, 0, 0.0, -43]),
            ("contrib.ethosu.pooling", "SUM", [0.0, 0, None, None, 0.0, -43]),
            ("contrib.ethosu.unary_elementwise", "CLZ", [0.0, 0, None, None, 0.0, -43]),
            ("contrib.ethosu.binary_elementwise", "SUB", [0.0, 0, 0.0, 0, 0.0, -43]),
            ("contrib.ethosu.binary_elementwise", "SHL", [0.0, 0, 0.0, 0, 0.0, -43]),
            ("contrib.ethosu.binary_elementwise", "SUB", [0.0, 0, 0.0, 0, 0.0, -43]),
            ("contrib.ethosu.binary_elementwise", "SHL", [0.0, 0, 0.0, 0, 0.0, -43]),
            ("contrib.ethosu.binary_elementwise", "ADD", [0.0, 0, 0.0, 0, 1.0, 0]),
            ("contrib.ethosu.binary_elementwise", "MUL", [1.0, 0, 1.0, 0, 2.0, 0]),
            ("contrib.ethosu.binary_elementwise", "ADD", [2.0, 0, 0.0, 0, 1.0, 0]),
            ("contrib.ethosu.binary_elementwise", "MUL", [1.0, 0, 1.0, 0, 2.0, 0]),
            ("contrib.ethosu.binary_elementwise", "SUB", [2.0, 0, 0.0, 0, 1.0, 0]),
            ("contrib.ethosu.binary_elementwise", "MUL", [1.0, 0, 1.0, 0, 2.0, 0]),
            ("contrib.ethosu.binary_elementwise", "MUL", [2.0, 0, 0.0, 0, 0.0, -43]),
            ("contrib.ethosu.binary_elementwise", "ADD", [1.0, 0, 0.0, 0, 1.0, 0]),
            ("contrib.ethosu.binary_elementwise", "MUL", [1.0, 0, 1.0, 0, 2.0, 0]),
            ("contrib.ethosu.binary_elementwise", "SUB", [2.0, 0, 0.0, 0, 1.0, 0]),
            ("contrib.ethosu.binary_elementwise", "MUL", [1.0, 0, 1.0, 0, 2.0, 0]),
            ("contrib.ethosu.binary_elementwise", "MUL", [2.0, 0, 0.0, 0, 0.0, -43]),
            ("contrib.ethosu.binary_elementwise", "ADD", [1.0, 0, 0.0, 0, 1.0, 0]),
            ("contrib.ethosu.binary_elementwise", "MUL", [1.0, 0, 1.0, 0, 2.0, 0]),
            ("contrib.ethosu.binary_elementwise", "SUB", [2.0, 0, 0.0, 0, 1.0, 0]),
            ("contrib.ethosu.binary_elementwise", "MUL", [1.0, 0, 1.0, 0, 2.0, 0]),
            ("contrib.ethosu.binary_elementwise", "MUL", [2.0, 0, 0.0, 0, 0.0, -43]),
            ("contrib.ethosu.binary_elementwise", "ADD", [1.0, 0, 0.0, 0, 1.0, 0]),
            ("contrib.ethosu.binary_elementwise", "MUL", [1.0, 0, 0.0, 0, 1.0, 0]),
            ("contrib.ethosu.binary_elementwise", "MUL", [1.0, 0, 1.0, 0, 2.0, 0]),
            ("contrib.ethosu.binary_elementwise", "SUB", [0.0, 0, 0.0, 0, 0.0, -43]),
            ("contrib.ethosu.binary_elementwise", "SHR", [2.0, 0, 0.0, 0, 0.00390625, -128]),
            ("reshape", None, [None, None, None, None, None, None]),
        ]

        def get_attr_value(op, attr_name):
            # Return the named attribute, or None when the op lacks it.
            if hasattr(op.attrs, attr_name):
                return op.attrs[attr_name]
            else:
                return None

        def get_op_type(op):
            # Pooling ops expose "pooling_type"; elementwise ops "operator_type".
            if hasattr(op.attrs, "pooling_type"):
                return op.attrs.pooling_type
            elif hasattr(op.attrs, "operator_type"):
                return op.attrs.operator_type
            return None

        def get_activation_params(op):
            # Quantization parameters in a fixed order matching
            # expected_ops_params: ifm, ifm2, then ofm (scale, zero point).
            activation_params = []
            activation_params.append(get_attr_value(op, "ifm_scale"))
            activation_params.append(get_attr_value(op, "ifm_zero_point"))
            activation_params.append(get_attr_value(op, "ifm2_scale"))
            activation_params.append(get_attr_value(op, "ifm2_zero_point"))
            activation_params.append(get_attr_value(op, "ofm_scale"))
            activation_params.append(get_attr_value(op, "ofm_zero_point"))
            return activation_params

        def _visit(stmt):
            if isinstance(stmt, relay.expr.Call):
                ops.append(stmt)

        relay.analysis.post_order_visit(out_op, _visit)

        # check IFM
        ifm = ops[0].args[0].checked_type
        assert list(ifm.shape) == list(ifm_shape)
        assert str(ifm.dtype) == dtype

        # check OFM
        ofm = out_op.checked_type
        assert list(ofm.shape) == list(ifm_shape)
        assert ofm.dtype == dtype

        # check operations
        # NOTE(review): zip() truncates silently if the number of collected ops
        # differs from expected_ops_params — consider asserting equal lengths.
        for op, expected_op_params in zip(ops, expected_ops_params):
            activation_params = get_activation_params(op)
            expected_op_name, expected_op_type, expected_activation_params = expected_op_params
            assert op.op.name == expected_op_name
            assert expected_op_type == get_op_type(op)
            for activation_param, expected_activation_param in zip(
                activation_params, expected_activation_params
            ):
                if isinstance(activation_param, float):
                    # Scales are floats; compare with a small absolute tolerance.
                    assert math.isclose(expected_activation_param, activation_param, abs_tol=1e-7)
                else:
                    assert expected_activation_param == activation_param

    softmax_pattern_table = [
        (
            ethosu.SoftMaxParams.composite_name,
            ethosu.softmax_pattern(),
            lambda pat: ethosu.SoftMaxParams(pat).is_valid(),
        )
    ]

    tflite_graph = create_tflite_graph()
    tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)

    mod, params = relay.frontend.from_tflite(
        tflite_model,
        shape_dict={"input": ifm_shape},
        dtype_dict={"input": dtype},
    )
    mod["main"] = bind_params_by_name(mod["main"], params)
    mod = partition_ethosu_by_table(mod, softmax_pattern_table)

    mod["tvmgen_default_ethos_u_main_0"] = dataflow_pattern.rewrite(
        legalize.SoftmaxRewriter(), mod["tvmgen_default_ethos_u_main_0"]
    )
    mod = relay.transform.InferType()(mod)

    verify(mod["tvmgen_default_ethos_u_main_0"])
@pytest.mark.parametrize("ifm_shape", [(1, 55, 55, 3)])
@pytest.mark.parametrize("kernel_shape", [(3, 3)])
@pytest.mark.parametrize("strides, dilation", [((1, 1), (1, 1))])
@pytest.mark.parametrize("op_padding", ["SAME", "VALID"])
@pytest.mark.parametrize("sep_padding", [(0, 0, 1, 1), (7, 5, 4, 5)])
@pytest.mark.parametrize(
    "op_pairs", [("conv2d", "conv2d"), ("depthwise", "depthwise"), ("conv2d", "depthwise")]
)
def test_tflite_shared_pad_legalize(
    ifm_shape,
    kernel_shape,
    strides,
    dilation,
    op_padding,
    sep_padding,
    op_pairs,
):
    """Legalize two convolution-style ops that share one explicit tf.pad and
    check that each partitioned function becomes the expected NPU op."""
    dtype = "int8"

    def create_tflite_graph():
        class Model(tf.Module):
            @tf.function
            def tf_function(self, x):
                def make_depthwise_or_conv2d(pair_idx):
                    # Builds either a depthwise or a regular conv2d, per
                    # op_pairs; both consume the padded x from the outer scope.
                    if op_pairs[pair_idx] == "depthwise":
                        weight_shape = [kernel_shape[0], kernel_shape[1], ifm_shape[3], 1]
                        weight = tf.constant(np.random.uniform(size=weight_shape), dtype=tf.float32)
                        return tf.nn.depthwise_conv2d(
                            x, weight, strides=tf_strides, padding=op_padding, dilations=dilation
                        )

                    weight_shape = [kernel_shape[0], kernel_shape[1], ifm_shape[3], 3]
                    weight = tf.constant(np.random.uniform(size=weight_shape), dtype=tf.float32)
                    return tf.nn.conv2d(
                        x,
                        weight,
                        strides=tf_strides,
                        padding=op_padding,
                        dilations=dilation,
                    )

                # Single pad shared by both branches below.
                x = tf.pad(
                    x,
                    [
                        [0, 0],
                        [sep_padding[0], sep_padding[2]],
                        [sep_padding[1], sep_padding[3]],
                        [0, 0],
                    ],
                    "CONSTANT",
                )
                # The input strides to the TensorFlow API needs to be of shape 1x4
                tf_strides = [1, strides[0], strides[1], 1]
                x1 = make_depthwise_or_conv2d(0)
                x2 = make_depthwise_or_conv2d(1)
                x3 = tf.math.add(x1, x2)
                return x3

        model = Model()
        concrete_func = model.tf_function.get_concrete_function(
            tf.TensorSpec(ifm_shape, dtype=tf.float32)
        )

        # Convert the model
        def representative_dataset():
            for _ in range(100):
                data = np.random.rand(*tuple(ifm_shape))
                yield [data.astype(np.float32)]

        converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        converter.representative_dataset = representative_dataset
        converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
        converter.inference_input_type = tf.int8
        converter.inference_output_type = tf.int8
        tflite_model = converter.convert()
        return tflite_model

    conv2d_pattern_table = [
        (
            ethosu.QnnConv2DParams.composite_name,
            ethosu.qnn_conv2d_pattern(),
            lambda pat: ethosu.QnnConv2DParams(pat).is_valid(),
        ),
        (
            ethosu.QnnDepthwiseConv2DParams.composite_name,
            ethosu.qnn_depthwise_conv2d_pattern(),
            lambda pat: ethosu.QnnDepthwiseConv2DParams(pat).is_valid(),
        ),
    ]

    tflite_graph = create_tflite_graph()
    tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)

    mod, params = relay.frontend.from_tflite(
        tflite_model,
        shape_dict={"input": ifm_shape},
        dtype_dict={"input": dtype},
    )
    mod["main"] = bind_params_by_name(mod["main"], params)
    mod = partition_ethosu_by_table(mod, conv2d_pattern_table)

    # Each op of the pair ends up in its own partitioned function; rewrite both.
    mod["tvmgen_default_ethos_u_main_0"] = dataflow_pattern.rewrite(
        [legalize.Conv2DRewriter(), legalize.DepthwiseConv2DRewriter()],
        mod["tvmgen_default_ethos_u_main_0"],
    )
    mod["tvmgen_default_ethos_u_main_1"] = dataflow_pattern.rewrite(
        [legalize.Conv2DRewriter(), legalize.DepthwiseConv2DRewriter()],
        mod["tvmgen_default_ethos_u_main_1"],
    )

    if op_pairs[0] == "depthwise":
        assert (
            mod["tvmgen_default_ethos_u_main_0"].body.op.name == "contrib.ethosu.depthwise_conv2d"
        )
    else:
        assert mod["tvmgen_default_ethos_u_main_0"].body.op.name == "contrib.ethosu.conv2d"
    if op_pairs[1] == "depthwise":
        assert (
            mod["tvmgen_default_ethos_u_main_1"].body.op.name == "contrib.ethosu.depthwise_conv2d"
        )
    else:
        assert mod["tvmgen_default_ethos_u_main_1"].body.op.name == "contrib.ethosu.conv2d"
# Allow running this test file directly (outside of a pytest invocation).
if __name__ == "__main__":
    tvm.testing.main()
| 142,954 | 36.511152 | 117 | py |
tvm | tvm-main/tests/python/contrib/test_ethosu/test_codegen.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
import pytest
pytest.importorskip("ethosu.vela")
import numpy as np
import tflite.Model
import tvm
import tensorflow as tf
from tvm import relay
from tvm.relay.backend.contrib.ethosu import util
from tvm.relay.op.contrib.ethosu import partition_for_ethosu
from tvm.testing.aot import generate_ref_data
from . import infra
# Accelerator configurations the codegen tests are parametrized over.
ACCEL_TYPES = ["ethos-u55-256", "ethos-u55-128", "ethos-u55-64", "ethos-u55-32", "ethos-u65-256"]
def is_u55_accel_type(accel_type):
    """Return True when ``accel_type`` names an Ethos-U55 configuration."""
    return accel_type.find("u55") != -1
@pytest.mark.parametrize("accel_type", ACCEL_TYPES + ["ethos-u65-512"])
@pytest.mark.parametrize("ifm_shape", [(1, 299, 299, 2), (1, 55, 55, 3)])
@pytest.mark.parametrize("kernel_shape", [(3, 2), (1, 3)])
@pytest.mark.parametrize("strides, dilation", [((1, 1), (2, 1)), ((3, 2), (1, 1))])
@pytest.mark.parametrize("padding", ["SAME", "VALID"])
@pytest.mark.parametrize("activation", ["NONE", "RELU"])
def test_ethosu_conv2d_single(
    ifm_shape,
    kernel_shape,
    strides,
    dilation,
    padding,
    accel_type,
    activation,
):
    """Compare TVM against TFLite for a single conv2d (optionally + relu)."""
    np.random.seed(0)

    @tf.function
    def conv2d(x):
        # Use tf.nn API to create the model
        tf_strides = [1, strides[0], strides[1], 1]
        op = tf.nn.conv2d(
            x,
            filters=tf.constant(
                np.random.uniform(size=[kernel_shape[0], kernel_shape[1], ifm_shape[3], 3]),
                dtype=tf.float32,
            ),
            strides=tf_strides,
            padding=padding,
            dilations=dilation,
        )
        if activation == "RELU":
            op = tf.nn.relu(op)
        return op

    infra.compare_tvm_with_tflite(conv2d, [ifm_shape], accel_type)
def test_tflite_conv2d_with_separate_pad():
    """Compare TVM against TFLite for a conv2d preceded by an explicit pad."""
    np.random.seed(0)

    ifm_shape = (1, 55, 34, 3)
    kernel_shape = (3, 2)
    strides = (1, 1)
    dilation = (2, 1)
    padding = (0, 0, 1, 1)  # (top, left, bottom, right)

    @tf.function
    def conv2d(x):
        tf_strides = [1, strides[0], strides[1], 1]
        op = tf.pad(
            x,
            [[0, 0], [padding[0], padding[2]], [padding[1], padding[3]], [0, 0]],
            "CONSTANT",
        )
        weight_shape = [kernel_shape[0], kernel_shape[1], ifm_shape[3], 3]
        weight = tf.constant(np.random.uniform(size=weight_shape), dtype=tf.float32)
        # Padding is done by the explicit tf.pad above, hence "VALID" here.
        return tf.nn.conv2d(
            op,
            weight,
            strides=tf_strides,
            padding="VALID",
            dilations=dilation,
        )

    infra.compare_tvm_with_tflite(conv2d, [ifm_shape], "ethos-u55-256")
@pytest.mark.parametrize("ifm_shape", [(1, 214, 227, 2), (1, 27, 42, 3)])
@pytest.mark.parametrize("kernel_shape", [(3, 2), (1, 3)])
@pytest.mark.parametrize("strides, dilation", [((1, 1), (2, 1)), ((3, 2), (1, 1))])
@pytest.mark.parametrize("padding", ["SAME", "VALID"])
@pytest.mark.parametrize("accel_type", ACCEL_TYPES + ["ethos-u65-512"])
@pytest.mark.parametrize("activation", ["NONE", "RELU"])
def test_ethosu_conv2d_double(
    ifm_shape,
    kernel_shape,
    strides,
    dilation,
    padding,
    accel_type,
    activation,
):
    """Compare TVM against TFLite for two chained conv2d ops (optionally + relu)."""
    np.random.seed(0)

    @tf.function
    def conv2d_double(x):
        # Use tf.nn API to create the model with two convolutions
        op = tf.nn.conv2d(
            x,
            filters=tf.constant(
                np.random.uniform(size=[kernel_shape[0], kernel_shape[1], ifm_shape[3], 5]),
                dtype=tf.float32,
            ),
            strides=strides,
            padding=padding,
            dilations=dilation,
        )
        # Second convolution
        op2 = tf.nn.conv2d(
            op,
            filters=tf.constant(
                np.random.uniform(size=(kernel_shape[0], kernel_shape[1], 5, 3)),
                dtype=tf.float32,
            ),
            strides=strides,
            padding=padding,
            dilations=dilation,
        )
        if activation == "RELU":
            op2 = tf.nn.relu(op2)
        return op2

    infra.compare_tvm_with_tflite(conv2d_double, [ifm_shape], accel_type)
@pytest.mark.parametrize("accel_type", ACCEL_TYPES)
@pytest.mark.parametrize(
    "op_pairs", [("conv2d", "conv2d"), ("depthwise", "depthwise"), ("conv2d", "depthwise")]
)
def test_tflite_shared_pad(
    accel_type,
    op_pairs,
):
    """Compare TVM against TFLite when two conv-style ops share one tf.pad."""
    np.random.seed(0)

    ifm_shape = (1, 55, 32, 3)
    kernel_shape = (3, 3)
    strides = (3, 2)
    dilation = (1, 1)
    activation_function = "RELU"
    op_padding = "SAME"
    sep_padding = (0, 0, 1, 1)  # (top, left, bottom, right)

    @tf.function
    def tf_function(x):
        def make_depthwise_or_conv2d(pair_idx, x):
            # The input strides to the TensorFlow API needs to be of shape 1x4
            tf_strides = [1, strides[0], strides[1], 1]
            if op_pairs[pair_idx] == "depthwise":
                weight_shape = [kernel_shape[0], kernel_shape[1], ifm_shape[3], 1]
                weight = tf.constant(np.random.uniform(size=weight_shape), dtype=tf.float32)
                op = tf.nn.depthwise_conv2d(
                    x, weight, strides=tf_strides, padding=op_padding, dilations=dilation
                )
            else:
                weight_shape = [kernel_shape[0], kernel_shape[1], ifm_shape[3], 3]
                weight = tf.constant(np.random.uniform(size=weight_shape), dtype=tf.float32)
                op = tf.nn.conv2d(
                    x,
                    weight,
                    strides=tf_strides,
                    padding=op_padding,
                    dilations=dilation,
                )
            if activation_function == "RELU":
                op = tf.nn.relu(op)
            return op

        # Single pad consumed by both branches below.
        x = tf.pad(
            x,
            [
                [0, 0],
                [sep_padding[0], sep_padding[2]],
                [sep_padding[1], sep_padding[3]],
                [0, 0],
            ],
            "CONSTANT",
        )

        x1 = make_depthwise_or_conv2d(0, x)
        x2 = make_depthwise_or_conv2d(1, x)

        x3 = tf.math.add(x1, x2)
        return x3

    infra.compare_tvm_with_tflite(tf_function, [ifm_shape], accel_type)
@pytest.mark.parametrize("weight_min, weight_max", [(0.0, 1e-11), (-1e10, 1e10)])
def test_out_of_range_scaling(weight_min, weight_max):
    """Check codegen survives weights whose quantization produces out-of-range
    shift values (extremely small or extremely large weight magnitudes)."""
    np.random.seed(0)
    ifm_shape = (1, 6, 6, 2)
    strides = (1, 1)
    kernel_shape = (1, 1)
    dilation = (1, 1)
    padding = "SAME"
    activation = "RELU"
    accel_type = "ethos-u55-128"

    @tf.function
    def conv_invalid_scale(x):
        # Use tf.nn API to create the model
        tf_strides = [1, strides[0], strides[1], 1]
        weights = np.random.uniform(size=[kernel_shape[0], kernel_shape[1], 2, 2])
        # Overwrite to force quantization that produces out of range shift values
        weights[0][0][0][0] = weight_min
        weights[0][0][1][0] = weight_max
        op = tf.nn.conv2d(
            x,
            filters=tf.constant(
                weights,
                dtype=tf.float32,
            ),
            strides=tf_strides,
            padding=padding,
            dilations=dilation,
        )
        if activation == "RELU":
            op = tf.nn.relu(op)
        return op

    infra.compare_tvm_with_tflite(conv_invalid_scale, [ifm_shape], accel_type)
@pytest.mark.parametrize("accel_type", ACCEL_TYPES)
@pytest.mark.parametrize("ifm_shape", [(1, 55, 55, 3), (1, 23, 32, 7)])
@pytest.mark.parametrize(
    "kernel_shape, activation_function",
    [((3, 3), "RELU"), ((1, 2), "NONE")],
)
@pytest.mark.parametrize("padding", ["SAME", "VALID"])
@pytest.mark.parametrize("strides, dilation", [((1, 1), (2, 2)), ((3, 2), (1, 1))])
def test_tflite_depthwise_conv2d(
    accel_type,
    ifm_shape,
    kernel_shape,
    padding,
    strides,
    dilation,
    activation_function,
):
    """Compare TVM against TFLite for a depthwise conv2d (optionally + relu)."""
    np.random.seed(0)

    @tf.function
    def depthwise_conv2d(x):
        weight_shape = [kernel_shape[0], kernel_shape[1], ifm_shape[3], 1]
        weight = tf.constant(np.random.uniform(size=weight_shape), dtype=tf.float32)
        # The input strides to the TensorFlow API needs to be of shape 1x4
        tf_strides = [1, strides[0], strides[1], 1]
        op = tf.nn.depthwise_conv2d(
            x, weight, strides=tf_strides, padding=padding, dilations=dilation
        )
        if activation_function == "RELU":
            op = tf.nn.relu(op)
        return op

    infra.compare_tvm_with_tflite(depthwise_conv2d, [ifm_shape], accel_type)
def test_tflite_depthwise_conv2d_with_separate_pad():
    """Compare TVM against TFLite for a depthwise conv2d after an explicit pad."""
    np.random.seed(0)

    ifm_shape = (1, 23, 32, 7)
    kernel_shape = (1, 2)
    strides = (3, 2)
    dilation = (1, 1)
    padding = (0, 0, 1, 1)  # (top, left, bottom, right)

    @tf.function
    def depthwise_conv2d(x):
        tf_strides = [1, strides[0], strides[1], 1]
        op = tf.pad(
            x,
            [[0, 0], [padding[0], padding[2]], [padding[1], padding[3]], [0, 0]],
            "CONSTANT",
        )
        weight_shape = [kernel_shape[0], kernel_shape[1], ifm_shape[3], 1]
        weight = tf.constant(np.random.uniform(size=weight_shape), dtype=tf.float32)
        # Padding is done by the explicit tf.pad above, hence "VALID" here.
        return tf.nn.depthwise_conv2d(
            op,
            weight,
            strides=tf_strides,
            padding="VALID",
            dilations=dilation,
        )

    infra.compare_tvm_with_tflite(depthwise_conv2d, [ifm_shape], "ethos-u55-256")
@pytest.mark.parametrize("ifm_shape", [(1, 55, 55, 3), (1, 23, 32, 7)])
@pytest.mark.parametrize("padding", [(0, 1, 0, 0), (1, 1, 1, 1), (1, 1, 5, 5)])
@pytest.mark.parametrize("const_value", [0, 5, 125, -5])
def test_tflite_separate_pad(
    ifm_shape,
    padding,
    const_value,
):
    """Compare TVM against TFLite for a standalone spatial constant pad."""
    np.random.seed(0)

    @tf.function
    def pad2d(x):
        # Pad only the height/width dimensions with const_value.
        return tf.pad(
            x,
            [[0, 0], [padding[0], padding[2]], [padding[1], padding[3]], [0, 0]],
            "CONSTANT",
            const_value,
        )

    infra.compare_tvm_with_tflite(pad2d, [ifm_shape], "ethos-u55-256")
@pytest.mark.parametrize("ifm_shape", [(1, 55, 55, 3), (1, 23, 32, 7)])
@pytest.mark.parametrize("channel_padding", [(0, 1), (1, 1), (5, 2)])
@pytest.mark.parametrize("const_value", [0, 5, 125, -5])
def test_tflite_separate_channel_pad(
    ifm_shape,
    channel_padding,
    const_value,
):
    """Compare TVM against TFLite for a standalone channel-axis constant pad."""
    np.random.seed(0)

    @tf.function
    def concat_func(x):
        # Pad only the channel dimension (NHWC axis 3) with const_value.
        paddings = [[0, 0], [0, 0], [0, 0], [channel_padding[0], channel_padding[1]]]
        return tf.pad(x, paddings, "CONSTANT", const_value)

    infra.compare_tvm_with_tflite(concat_func, [ifm_shape], "ethos-u55-256", enable_cascader=False)
@pytest.mark.parametrize(
    "accel_type",
    ACCEL_TYPES,
)
@pytest.mark.parametrize("pooling_type", ["MAX", "AVG"])
@pytest.mark.parametrize("ifm_shape", [[1, 3, 4, 3], [1, 4, 5, 2]])
@pytest.mark.parametrize(
    "pool_shape, strides, activation_function, padding",
    [([1, 2], [1, 2], "NONE", "SAME"), ([2, 3], [2, 3], "RELU", "VALID")],
)
def test_ethosu_pooling(
    accel_type,
    ifm_shape,
    pooling_type,
    strides,
    pool_shape,
    activation_function,
    padding,
):
    """Compare TVM against TFLite for max/avg pooling (optionally + relu)."""
    np.random.seed(0)

    @tf.function
    def pooling(x):
        if pooling_type == "MAX":
            op = tf.nn.max_pool(x, pool_shape, strides, padding)
        elif pooling_type == "AVG":
            op = tf.nn.avg_pool(x, pool_shape, strides, padding)
        if activation_function == "RELU":
            op = tf.nn.relu(op)
        return op

    infra.compare_tvm_with_tflite(pooling, [ifm_shape], accel_type)
@pytest.mark.parametrize(
    "accel_type",
    ACCEL_TYPES,
)
@pytest.mark.parametrize("pooling_type", ["MAX", "AVG"])
@pytest.mark.parametrize(
    "ifm_shape, pool_shape, strides, activation_function, padding",
    [
        ([1, 4, 4, 3], [4, 4], [4, 4], "NONE", "SAME"),
        ([1, 4, 4, 3], [4, 4], [4, 4], "RELU", "VALID"),
        ([1, 25, 5, 64], [25, 5], [25, 5], "NONE", "VALID"),
        ([1, 25, 5, 64], [25, 5], [25, 5], "RELU", "SAME"),
    ],
)
def test_ethosu_pooling_same_ifm_and_kernel_shape(
    accel_type, pooling_type, ifm_shape, pool_shape, strides, activation_function, padding
):
    """Pooling where the window covers the whole spatial extent of the input
    (pool shape equals the H/W of the IFM), i.e. a global pooling case."""
    np.random.seed(0)

    @tf.function
    def pooling(x):
        if pooling_type == "MAX":
            op = tf.nn.max_pool(x, pool_shape, strides, padding)
        elif pooling_type == "AVG":
            op = tf.nn.avg_pool(x, pool_shape, strides, padding)
        if activation_function == "RELU":
            op = tf.nn.relu(op)
        return op

    infra.compare_tvm_with_tflite(pooling, [ifm_shape], accel_type)
@pytest.mark.parametrize(
    "accel_type",
    ["ethos-u55-256", "ethos-u65-256"],
)
@pytest.mark.parametrize("ifm_shape", [[1, 148, 29], [4, 148, 29], [1, 12], [8, 12]])
def test_ethosu_softmax(
    accel_type,
    ifm_shape,
):
    """Compare TVM against TFLite for softmax, with inputs drawn from (-1, 1)."""
    np.random.seed(0)

    @tf.function
    def softmax(x):
        return tf.nn.softmax(x)

    infra.compare_tvm_with_tflite(softmax, [ifm_shape], accel_type, ranges=[(-1, 1)])
@pytest.mark.parametrize("accel_type", ACCEL_TYPES)
@pytest.mark.parametrize("operator_type", ["ADD", "SUB", "MUL", "MIN", "MAX"])
@pytest.mark.parametrize(
    "ifm_shape, ifm2_shape",
    [
        ([1, 2, 3, 4], [1, 2, 3, 4]),
        ([1, 2, 3, 4], [1, 1, 1, 1]),
        ([1, 1, 1, 1], [1, 2, 3, 4]),
        ([1, 4, 4], [4, 1]),
    ],
)
@pytest.mark.parametrize("activation_function", ["NONE", "RELU"])
def test_ethosu_binary_elementwise(
    accel_type,
    operator_type,
    ifm_shape,
    ifm2_shape,
    activation_function,
):
    """Compare TVM against TFLite for binary elementwise ops, including
    broadcast shapes (optionally + relu)."""
    np.random.seed(0)

    @tf.function
    def binary_elementwise(lhs, rhs):
        if operator_type == "ADD":
            op = tf.math.add(lhs, rhs)
        elif operator_type == "SUB":
            op = tf.math.subtract(lhs, rhs)
        elif operator_type == "MUL":
            op = tf.math.multiply(lhs, rhs)
        elif operator_type == "MIN":
            op = tf.math.minimum(lhs, rhs)
        elif operator_type == "MAX":
            op = tf.math.maximum(lhs, rhs)
        if activation_function == "RELU":
            op = tf.nn.relu(op)
        return op

    infra.compare_tvm_with_tflite(
        binary_elementwise,
        shapes=[ifm_shape, ifm2_shape],
        ranges=[(0, 1), (0, 2)],
        accel_type=accel_type,
        enable_cascader=is_u55_accel_type(accel_type),
    )
@pytest.mark.parametrize("accel_type", ACCEL_TYPES)
@pytest.mark.parametrize(
    "ifm_shape, ifm2_shape",
    [
        ([4], [4]),
        ([4], [1, 2, 3, 4]),
        ([1, 4, 4], [4, 1]),
    ],
)
def test_binary_add_with_non_4d_shapes(
    request,
    accel_type,
    ifm_shape,
    ifm2_shape,
):
    """Binary add with 1D/3D inputs, i.e. shapes that are not already NHWC."""
    np.random.seed(0)

    @tf.function
    def binary_elementwise(lhs, rhs):
        return tf.math.add(lhs, rhs)

    infra.compare_tvm_with_tflite(
        binary_elementwise,
        shapes=[ifm_shape, ifm2_shape],
        ranges=[(0, 1), (0, 2)],
        accel_type=accel_type,
        enable_cascader=is_u55_accel_type(accel_type),
    )
@pytest.mark.parametrize(
    "accel_type",
    ACCEL_TYPES,
)
@pytest.mark.parametrize(
    "ifm_shape, axis, keep_dims, use_same_quantization, dtype",
    [
        # mean to average pool
        [(1, 8, 16, 16), (2,), False, True, "int8"],
        [(1, 8, 16, 16), (2,), False, True, "uint8"],
        [(3, 3, 4), (0,), True, True, "int8"],
        [(8, 5), (0,), False, True, "int8"],
        # mean to depthwise
        [(1, 8, 16, 16), (2,), True, False, "int8"],
        [(1, 8, 16, 16), (2,), True, False, "uint8"],
        [(1, 8, 16, 16), (2, 1), False, False, "int8"],
        [(8, 4), (0,), False, False, "int8"],
        [(1, 65, 2, 1), (1, 2), True, False, "int8"],  # special case when h > 64
        [(1, 65, 2, 1), (1, 2), True, False, "uint8"],  # special case when h > 64
    ],
)
def test_mean(accel_type, ifm_shape, axis, keep_dims, use_same_quantization, dtype):
    """Run mean reductions on the NPU and verify against a reference.

    Two model sources are used: a quantized TFLite graph (different in/out
    quantization), or a hand-built relay graph where input and output share
    the same quantization (use_same_quantization=True).
    """
    np.random.seed(0)

    def create_mod_from_tflite():
        # Build a reduce_mean model via the TFLite converter and return the
        # relay module plus TFLite-generated reference input/output data.
        class Model(tf.Module):
            @tf.function
            def tf_function(self, x):
                op = tf.math.reduce_mean(x, axis=axis, keepdims=keep_dims)
                return op

        model = Model()
        concrete_func = model.tf_function.get_concrete_function(
            tf.TensorSpec(ifm_shape, dtype=tf.float32)
        )

        # Convert the model
        def representative_dataset():
            for _ in range(100):
                data = np.random.rand(*tuple(ifm_shape))
                yield [data.astype(np.float32)]

        converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])
        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        converter.representative_dataset = representative_dataset
        converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
        converter.inference_input_type = tf.int8
        converter.inference_output_type = tf.int8
        tflite_graph = converter.convert()
        tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)

        mod, _ = relay.frontend.from_tflite(
            tflite_model,
            shape_dict={"ifm": ifm_shape},
            dtype_dict={"ifm": dtype},
        )
        input_data, output_data = infra.generate_ref_data_tflite(tflite_graph)
        return mod, input_data, output_data

    def create_mod_from_relay():
        # Hand-built relay mean with identity requantize (scale=1, zp=0) so
        # input and output quantization parameters match.
        ifm = relay.var("input", shape=ifm_shape, dtype=dtype)
        cast = relay.cast(ifm, dtype="int32")
        mean = relay.mean(cast, axis=axis, keepdims=keep_dims)
        requantize = relay.qnn.op.requantize(
            mean,
            input_scale=relay.const(1.0, dtype="float32"),
            input_zero_point=relay.const(0, dtype="int32"),
            output_scale=relay.const(1.0, dtype="float32"),
            output_zero_point=relay.const(0, dtype="int32"),
            out_dtype=dtype,
        )

        func = relay.Function(relay.analysis.free_vars(requantize), requantize)
        mod = tvm.IRModule.from_expr(func)

        low, high = (0, 256) if dtype == "uint8" else (-127, 128)
        input_data = {"input": np.random.randint(low=low, high=high, size=ifm_shape, dtype=dtype)}
        output_data = generate_ref_data(mod, input_data)
        return mod, input_data, output_data

    mod, input_data, output_data = (
        create_mod_from_relay() if use_same_quantization else create_mod_from_tflite()
    )
    mod = partition_for_ethosu(mod)

    test_runner = infra.create_test_runner(accel_type)
    compiled_models = infra.build_source(mod, input_data, output_data, test_runner)

    # Assumes only two runtime.Modules are created -- i.e. single offload module
    ethosu_module = compiled_models[0].executor_factory.lib.imported_modules[0].imported_modules[0]

    # Verify generated C source
    get_artifacts = tvm._ffi.get_global_func("runtime.module.ethos-u.get_artifacts")
    compilation_artifacts = get_artifacts(ethosu_module)
    cmms = bytes.fromhex(compilation_artifacts[0].command_stream)
    infra.print_payload(cmms)
    infra.verify_source(compiled_models, test_runner)
@pytest.mark.parametrize(
    "accel_type",
    ACCEL_TYPES,
)
@pytest.mark.parametrize(
    "ifm_shape, axis, keepdims, relu",
    [
        [(1, 4, 2, 8), 3, False, False],
        [(1, 4, 4, 1), 3, False, True],
        [(3, 5, 7), 2, False, True],
        [(1, 4, 2, 8), 3, True, False],
        [(3, 5, 7), 2, True, False],
    ],
)
def test_ethosu_sum(accel_type, ifm_shape, axis, keepdims, relu):
    """Compare TVM against TFLite for reduce_sum (optionally + relu)."""
    np.random.seed(0)

    @tf.function
    def sum_func(x):
        op = tf.math.reduce_sum(x, axis=axis, keepdims=keepdims)
        return tf.nn.relu(op) if relu else op

    infra.compare_tvm_with_tflite(
        sum_func,
        [ifm_shape],
        accel_type,
        enable_cascader=is_u55_accel_type(accel_type),
    )
# Case to check reduce_sum operation with different input types.
@pytest.mark.parametrize("dtype", ["int8", "int32"])
def test_add_reduce_sum(dtype):
    """Run an NPU binary ADD feeding a SUM pooling and verify against a
    numpy-computed reference (output of reduce_sum is always int32)."""
    ifm_shape = (1, 2, 2, 4)
    accel_type = "ethos-u55-256"
    np.random.seed(0)

    def create_model():
        ifm = relay.var("ifm", shape=ifm_shape, dtype=dtype)
        ifm2 = relay.var("ifm2", shape=ifm_shape, dtype=dtype)
        # int32 inputs use a zero scale; int8 inputs use unit scale.
        ifm_scale = 0.0 if dtype == "int32" else 1.0
        op = infra.make_ethosu_binary_elementwise(
            ifm,
            ifm2,
            ifm_shape[3],
            ifm_shape[3],
            "ADD",
            dtype,
            ifm_scale=ifm_scale,
            ifm2_scale=ifm_scale,
        )
        op = infra.make_ethosu_pooling(
            ifm=op,
            pooling_type="SUM",
            pool_shape=(1, 1),
            ofm_channels=1,
            ofm_dtype="int32",
            strides=(1, 1),
            padding=(0, 0, 0, 0),
            rounding_mode="NATURAL",
        )
        return tvm.IRModule.from_expr(relay.Function([ifm, ifm2], op))

    def generate_output_data(input_data):
        # Numpy reference: elementwise add, then sum over the channel axis.
        lhs = input_data["ifm"]
        rhs = input_data["ifm2"]
        # reduce_sum output type is int32.
        output_dtype = "int32"
        add = lhs + rhs
        return [np.sum(add, axis=3).astype(output_dtype)]

    cpu_mod = create_model()

    # Generate reference data
    in_min, in_max = -10, 19
    lhs = np.random.randint(in_min, in_max, size=ifm_shape, dtype=dtype)
    rhs = np.random.randint(in_min, in_max, size=ifm_shape, dtype=dtype)
    input_data = {
        "ifm": lhs,
        "ifm2": rhs,
    }
    output_data = {"output": generate_output_data(input_data)[0]}
    ethosu_mod = infra.create_ethosu_partition(cpu_mod)

    infra.compare_ethosu_with_reference(ethosu_mod, input_data, output_data, accel_type)
@pytest.mark.parametrize("accel_type", ACCEL_TYPES)
@pytest.mark.parametrize("dtype", ["int8", "uint8"])
@pytest.mark.parametrize("constant", [np.ones((1, 1, 1, 1)), np.array(1)])
def test_elementwise_add_from_constant_scalar(accel_type, dtype, constant):
    """qnn.add of a tensor with a constant scalar (0-d or 1x1x1x1)."""
    np.random.seed(0)
    ifm_shape = (1, 4, 4, 8)

    def create_relay_graph():
        inp = relay.var("input", shape=ifm_shape, dtype=dtype)
        scalar = relay.const(constant, dtype=dtype)
        # Unit scale / zero zero-point for lhs, rhs and output.
        add = relay.qnn.op.add(
            inp,
            scalar,
            relay.const(1.0, dtype="float32"),
            relay.const(0, dtype="int32"),
            relay.const(1.0, dtype="float32"),
            relay.const(0, dtype="int32"),
            relay.const(1.0, dtype="float32"),
            relay.const(0, dtype="int32"),
        )
        return tvm.IRModule.from_expr(relay.Function(relay.analysis.free_vars(add), add))

    cpu_mod = create_relay_graph()
    ethosu_mod = partition_for_ethosu(cpu_mod)

    # Generate reference data
    input_data = {
        "input": np.random.randint(
            low=np.iinfo(dtype).min, high=np.iinfo(dtype).max, size=ifm_shape, dtype=dtype
        ),
    }
    output_data = generate_ref_data(cpu_mod, input_data)

    # Scalar constants are not supported by the cascader
    infra.compare_ethosu_with_reference(
        ethosu_mod, input_data, output_data, accel_type, enable_cascader=False
    )
@pytest.mark.parametrize("accel_type", ACCEL_TYPES)
@pytest.mark.parametrize(
    "ifm_shape, ifm2_shape",
    [
        ([1, 2, 3, 4], [1, 2, 3, 4]),
        ([1, 2, 3, 4], [1, 1, 3, 1]),
        ([1, 1, 3, 1], [1, 2, 3, 4]),
    ],
)
def test_ethosu_left_shift_binary_elemwise(
    accel_type,
    ifm_shape,
    ifm2_shape,
):
    """int32 left_shift on the NPU versus a CPU-generated reference."""
    np.random.seed(0)
    dtype = "int32"

    def create_model():
        ifm = relay.var("ifm", shape=ifm_shape, dtype=dtype)
        ifm2 = relay.var("ifm2", shape=ifm2_shape, dtype=dtype)
        c1 = relay.left_shift(ifm, ifm2)
        return tvm.IRModule.from_expr(relay.Function([ifm, ifm2], c1))

    cpu_mod = create_model()

    # Generate reference data
    in_min, in_max = util.get_range_for_dtype_str(dtype)
    input_data = {
        "ifm": np.random.randint(in_min, high=in_max, size=ifm_shape, dtype=dtype),
        # Shift amounts restricted to [0, 32) for a 32-bit operand.
        "ifm2": np.random.randint(0, high=32, size=ifm2_shape, dtype=dtype),
    }
    output_data = generate_ref_data(cpu_mod, input_data)
    ethosu_mod = partition_for_ethosu(cpu_mod)

    infra.compare_ethosu_with_reference(ethosu_mod, input_data, output_data, accel_type)
@pytest.mark.parametrize("accel_type", ACCEL_TYPES)
@pytest.mark.parametrize(
    "ifm_shape, ifm2_shape, reversed_operands, ofm_dtype",
    [
        ([1, 2, 3, 4], [1, 2, 3, 4], False, "int8"),
        ([1, 2, 3, 1], [1, 1, 3, 1], False, "int32"),
        ([1, 1, 3, 1], [1, 2, 3, 1], True, "int32"),
    ],
)
def test_ethosu_right_shift_binary_elemwise(
    ifm_shape, ifm2_shape, reversed_operands, accel_type, ofm_dtype
):
    """NPU SHR (rounding right shift) versus a numpy-computed reference."""
    np.random.seed(0)
    dtype = "int32"

    def create_model():
        ifm = relay.var("ifm", shape=ifm_shape, dtype=dtype)
        ifm2 = relay.var("ifm2", shape=ifm2_shape, dtype=dtype)
        shr_op = infra.make_ethosu_binary_elementwise(
            ifm, ifm2, ifm_shape[3], ifm2_shape[3], "SHR", ofm_dtype, reversed_operands
        )
        return tvm.IRModule.from_expr(relay.Function([ifm, ifm2], shr_op))

    def generate_output_data(input_data):
        # Numpy reference implementing a rounding right shift:
        # (lhs + (1 << (rhs - 1))) >> rhs, with operands swapped when
        # reversed_operands is set.
        lhs = input_data["ifm"]
        rhs = input_data["ifm2"]
        if reversed_operands:
            lhs = np.broadcast_to(lhs, ifm2_shape)
            lhs, rhs = rhs, lhs
        else:
            rhs = np.broadcast_to(rhs, ifm_shape)

        def rounding_right_shift(lhs, rhs):
            r = 1 << (rhs - 1)
            return (lhs + r) >> rhs

        return [
            np.array([rounding_right_shift(x[0], x[1]) for x in zip(lhs.flat, rhs.flat)]).astype(
                ofm_dtype
            )
        ]

    cpu_mod = create_model()

    # Generate reference data. The narrow input ranges keep the shifted
    # results representable in the int8 OFM case.
    # (Removed a dead `util.get_range_for_dtype_str(dtype)` call whose result
    # was immediately overwritten by the constants below.)
    in_min, in_max = 18, 19
    lhs = np.random.randint(in_min, high=in_max, size=ifm_shape, dtype=dtype)
    rhs = np.random.randint(1, high=2, size=ifm2_shape, dtype=dtype)
    input_data = {
        "ifm": lhs,
        "ifm2": rhs,
    }
    output_data = {"output": generate_output_data(input_data)[0]}
    ethosu_mod = infra.create_ethosu_partition(cpu_mod)

    infra.compare_ethosu_with_reference(ethosu_mod, input_data, output_data, accel_type)
@pytest.mark.parametrize("accel_type", ["ethos-u55-256", "ethos-u65-256"])
@pytest.mark.parametrize(
    "ifm_shape, ifm2_shape, scale, shift, dtype",
    [
        ([1, 1, 1, 16], [1, 1, 1, 16], 5, 2, "int8"),
        ([1, 2, 3, 1], [1, 1, 3, 1], 2, 1, "int8"),
        ([1, 5, 1, 8], [1, 1, 1, 8], 1, 2, "int32"),
    ],
)
def test_ethosu_rescale_mul_binary_elemwise(ifm_shape, ifm2_shape, scale, shift, accel_type, dtype):
    """NPU MUL with explicit rescale (scale + shift) versus a numpy reference."""
    np.random.seed(0)

    def create_model():
        ifm = relay.var("ifm", shape=ifm_shape, dtype=dtype)
        ifm2 = relay.var("ifm2", shape=ifm2_shape, dtype=dtype)
        rescale_mul_op = infra.make_ethosu_binary_elementwise(
            ifm,
            ifm2,
            ifm_shape[3],
            ifm2_shape[3],
            "MUL",
            dtype,
            use_rescale=True,
            rescale_scale=scale,
            rescale_shift=shift,
        )
        return tvm.IRModule.from_expr(relay.Function([ifm, ifm2], rescale_mul_op))

    def generate_output_data(input_data):
        # Numpy reference: widen to int32, multiply, apply scale (except for
        # int32 operands), then rounding right shift back to dtype.
        lhs = input_data["ifm"]
        rhs = input_data["ifm2"]
        rhs = np.broadcast_to(rhs, ifm_shape)

        def rounding_right_shift(lhs, shift):
            r = 1 << (shift - 1)
            return (lhs + r) >> shift

        def apply_scale(lhs, scale):
            if dtype == "int32":
                # For 32-bit operations scale is not applied but shift is
                return lhs
            else:
                return lhs * scale

        return [
            rounding_right_shift(
                apply_scale(np.multiply(lhs.astype("int32"), rhs.astype("int32")), scale), shift
            ).astype(dtype)
        ]

    cpu_mod = create_model()

    # Generate reference data
    lhs = np.random.randint(low=-10, high=15, size=ifm_shape, dtype=dtype)
    rhs = np.random.randint(low=1, high=5, size=ifm2_shape, dtype=dtype)
    input_data = {
        "ifm": lhs,
        "ifm2": rhs,
    }
    output_data = {"output": generate_output_data(input_data)[0]}
    ethosu_mod = infra.create_ethosu_partition(cpu_mod)

    infra.compare_ethosu_with_reference(ethosu_mod, input_data, output_data, accel_type)
@pytest.mark.parametrize("accel_type", ACCEL_TYPES)
@pytest.mark.parametrize("ifm_shape", [(3, 2), (1, 15, 11, 7), (3, 1, 12), (400,)])
@pytest.mark.parametrize("ifm_scale, ifm_zp, ofm_scale, ofm_zp", [(1, 0, 1, 0), (0.015, 3, 0.2, 5)])
def test_ethosu_identity_codegen(
    request, ifm_shape, ifm_scale, ifm_zp, ofm_scale, ofm_zp, accel_type
):
    """Codegen test: Ethos-U identity operation (a requantizing pass-through),
    compared against a NumPy requantization reference with tolerance 1."""
    np.random.seed(0)
    def create_model():
        ifm = relay.var("ifm", shape=ifm_shape, dtype="int8")
        identity = infra.make_ethosu_identity(
            ifm,
            ifm_scale=ifm_scale,
            ifm_zero_point=ifm_zp,
            ofm_scale=ofm_scale,
            ofm_zero_point=ofm_zp,
        )
        return tvm.IRModule.from_expr(relay.Function([ifm], identity))
    def generate_output_data(input_data):
        # Reference requantization: dequantize with the input params, requantize
        # with the output params, then round and saturate to int8.
        requant_data = (ifm_scale * (input_data["ifm"] - ifm_zp)) / ofm_scale + ofm_zp
        return [np.round(np.clip(requant_data, -128, 127)).astype("int8")]
    cpu_mod = create_model()
    input_data = {"ifm": np.random.randint(-120, high=120, size=ifm_shape, dtype="int8")}
    output_data = {"output": generate_output_data(input_data)[0]}
    ethosu_mod = infra.create_ethosu_partition(cpu_mod)
    infra.compare_ethosu_with_reference(
        ethosu_mod,
        input_data,
        output_data,
        accel_type,
        output_tolerance=1,
        enable_cascader=is_u55_accel_type(accel_type),
    )
@pytest.mark.parametrize("accel_type", ACCEL_TYPES)
@pytest.mark.parametrize(
    "ifm_shape, new_shape",
    [
        ((1, 4, 1, 2), (1, 1, 1, 8)),
        ((12, 20), (1, 6, 4, 10)),
        ((12, 20), (6, 4, 10)),
        ((20,), (4, 5)),
        ((12, 2, 10), (0, -3)),
        ((11, 3, 25), (-1,)),
        ((8, 7, 3), (-4, 1, 8, -2)),
    ],
)
def test_relay_reshape_codegen(ifm_shape, new_shape, accel_type):
    """Codegen test: a Relay reshape offloaded to the Ethos-U, compared against
    the CPU reference produced by generate_ref_data."""
    np.random.seed(0)
    # Build the reference module: a single int8 reshape.
    input_var = relay.var("ifm", shape=ifm_shape, dtype="int8")
    reshaped = relay.op.reshape(input_var, newshape=new_shape)
    cpu_mod = tvm.IRModule.from_expr(relay.Function([input_var], reshaped))
    feed = {"ifm": np.random.randint(-128, high=127, size=ifm_shape, dtype="int8")}
    expected = generate_ref_data(cpu_mod, feed)
    ethosu_mod = infra.create_ethosu_partition(cpu_mod)
    infra.compare_ethosu_with_reference(
        ethosu_mod,
        feed,
        expected,
        accel_type,
        enable_cascader=is_u55_accel_type(accel_type),
    )
@pytest.mark.parametrize("accel_type", ACCEL_TYPES)
@pytest.mark.parametrize(
    "ifm_shape, begin, size",
    [
        ([1, 10, 50, 4], [0, 5, 11, 2], [1, 5, 11, 1]),
        ([15, 17, 3], [3, 0, 1], [8, 17, 2]),
        ([7, 6043], [0, 704], [1, 2860]),
        ([5000], [123], [2151]),
    ],
)
def test_tflite_slice(request, accel_type, ifm_shape, begin, size):
    """Codegen test: TFLite SLICE on the Ethos-U, compared with the TFLite
    interpreter output."""
    np.random.seed(0)
    @tf.function
    def slice_func(x):
        return tf.slice(x, begin, size)
    infra.compare_tvm_with_tflite(
        slice_func, [ifm_shape], accel_type, enable_cascader=is_u55_accel_type(accel_type)
    )
@pytest.mark.parametrize("accel_type", ACCEL_TYPES)
@pytest.mark.parametrize(
    "ifm_shape, begin, end",
    [([1, 1, 5, 8], [0, 0, 0, 0], [1, 1, 2, 3]), ([1, 3, 3], [0, 1, 2], [1, 2, 3])],
)
def test_tflite_strided_slice(accel_type, ifm_shape, begin, end):
    """Codegen test: TFLite STRIDED_SLICE on the Ethos-U, compared with the
    TFLite interpreter output."""
    np.random.seed(0)
    @tf.function
    def strided_slice_func(x):
        return tf.strided_slice(x, begin, end)
    infra.compare_tvm_with_tflite(
        strided_slice_func, [ifm_shape], accel_type, enable_cascader=is_u55_accel_type(accel_type)
    )
@pytest.mark.parametrize("accel_type", ACCEL_TYPES)
@pytest.mark.parametrize("operator_type", ["ABS"])
@pytest.mark.parametrize(
    "ifm_shape",
    [[1, 5, 12, 4], [1, 1, 2], [4, 3, 2], [10, 20], [345]],
)
def test_ethosu_unary_elementwise(
    request,
    accel_type,
    operator_type,
    ifm_shape,
):
    """Codegen test: a unary elementwise operation (currently only ABS is
    parametrized) on the Ethos-U, compared with the TFLite interpreter."""
    np.random.seed(0)
    @tf.function
    def abs_func(x):
        # Only "ABS" is exercised; other operator types would fall through
        # with `op` unbound.
        if operator_type == "ABS":
            op = tf.math.abs(x)
        return op
    infra.compare_tvm_with_tflite(
        abs_func,
        [ifm_shape],
        accel_type,
        enable_cascader=is_u55_accel_type(accel_type),
    )
def test_ethosu_section_name():
    """Check that the generated C source places the Ethos-U command stream and
    weight buffers in the `.rodata.tvm` section with 16-byte alignment."""
    np.random.seed(0)
    @tf.function
    def depthwise_conv2d(x):
        weight_shape = [3, 3, 3, 1]
        weight = tf.constant(np.random.uniform(size=weight_shape), dtype=tf.float32)
        tf_strides = [1, 1, 1, 1]
        op = tf.nn.depthwise_conv2d(x, weight, strides=tf_strides, padding="SAME", dilations=(2, 2))
        return op
    mod, tflite_graph = infra.get_tflite_graph(depthwise_conv2d, [(1, 55, 55, 3)])
    # Generate reference data
    input_data, output_data = infra.generate_ref_data_tflite(tflite_graph)
    test_runner = infra.create_test_runner()
    compiled_models = infra.build_source(mod, input_data, output_data, test_runner)
    # Assumes only two runtime.Modules are created -- i.e. single offload module
    ethosu_module = compiled_models[0].executor_factory.lib.imported_modules[0].imported_modules[0]
    # Verify generated C source
    source = ethosu_module.get_source()
    assert (
        '__attribute__((section(".rodata.tvm"), aligned(16))) static int8_t tvmgen_default_ethos_u_main_0_cms_data_data'
        in source
    )
    assert (
        '__attribute__((section(".rodata.tvm"), aligned(16))) static int8_t tvmgen_default_ethos_u_main_0_weights'
        in source
    )
@pytest.mark.parametrize("accel_type", ACCEL_TYPES)
def test_ethosu_clz(accel_type):
    """Codegen test: Ethos-U CLZ (count leading zeros) unary elementwise
    operation on int32 data, compared with a NumPy reference."""
    np.random.seed(0)
    ifm_shape = (1, 42, 5, 4)
    def create_model():
        ifm = relay.var("ifm", shape=ifm_shape, dtype="int32")
        clz = infra.make_ethosu_unary_elementwise(ifm, 4, "CLZ")
        return tvm.IRModule.from_expr(relay.Function([ifm], clz))
    def generate_output_data(input_data):
        def clz_comp(n):
            # Leading zeros in the 32-bit representation: negatives have the
            # sign bit set, so their count is 0.
            n_bin = np.binary_repr(n)
            if n_bin[0] == "-":
                return 0
            else:
                return 32 - len(n_bin)
        return [
            np.array([clz_comp(i) for i in input_data["ifm"].ravel()])
            .reshape(ifm_shape)
            .astype("int32")
        ]
    cpu_mod = create_model()
    input_data = {"ifm": np.random.randint(-500000, high=500000, size=ifm_shape, dtype="int32")}
    output_data = {"output": generate_output_data(input_data)[0]}
    ethosu_mod = infra.create_ethosu_partition(cpu_mod)
    infra.compare_ethosu_with_reference(ethosu_mod, input_data, output_data, accel_type)
@pytest.mark.parametrize("accel_type", ACCEL_TYPES)
def test_tflite_tanh(accel_type):
    """Codegen test: TFLite TANH on the Ethos-U."""
    np.random.seed(0)
    ifm_shape = [1, 115, 32, 7]
    @tf.function
    def tanh_func(x):
        op = tf.nn.tanh(x)
        return op
    infra.compare_tvm_with_tflite(
        tanh_func, [ifm_shape], accel_type, enable_cascader=is_u55_accel_type(accel_type)
    )
@pytest.mark.parametrize("accel_type", ACCEL_TYPES)
@pytest.mark.parametrize("ifm_shape", [(1, 5, 5, 3), (1, 12, 9, 1)])
def test_tflite_hard_swish(accel_type, ifm_shape):
    """Codegen test: hard-swish (x * relu6(x + 3) / 6) on the Ethos-U."""
    np.random.seed(0)
    @tf.function
    def hard_swish_func(x):
        # This composition is recognized by the TFLite converter as HARD_SWISH.
        op = tf.keras.layers.Lambda(
            lambda x: x * tf.keras.activations.relu(x + 3.0, max_value=6.0) / 6.0
        )(x)
        return op
    infra.compare_tvm_with_tflite(hard_swish_func, [ifm_shape], accel_type, ranges=[(-1, 1)])
@pytest.mark.parametrize("accel_type", ACCEL_TYPES)
@pytest.mark.parametrize(
    "shapes, axis",
    [
        ([(2, 3), (4, 3)], 0),
        ([(3, 2, 1), (3, 1, 1)], 1),
        ([(10,), (13,), (14,)], 0),
        ([(1, 5, 2, 1), (1, 5, 7, 1), (1, 5, 3, 1)], 2),
    ],
)
def test_tflite_concat(shapes, axis, accel_type):
    """Codegen test: TFLite CONCATENATION over a variable number of inputs."""
    np.random.seed(0)
    @tf.function
    def concat_func(*inputs):
        op = tf.concat(list(inputs), axis)
        return op
    infra.compare_tvm_with_tflite(concat_func, shapes, accel_type, enable_cascader=False)
@pytest.mark.parametrize("accel_type", ACCEL_TYPES)
def test_tflite_sigmoid(accel_type):
    """Codegen test: TFLite LOGISTIC (sigmoid) on the Ethos-U."""
    np.random.seed(0)
    ifm_shape = [1, 135, 41, 6]
    @tf.function
    def sigmoid_function(x):
        op = tf.nn.sigmoid(x)
        return op
    infra.compare_tvm_with_tflite(
        sigmoid_function, [ifm_shape], accel_type, enable_cascader=is_u55_accel_type(accel_type)
    )
# This codegen test checks both, split and split_v
@pytest.mark.parametrize("accel_type", ACCEL_TYPES)
@pytest.mark.parametrize(
    "ifm_shape, num_or_size_splits, axis",
    [
        ((1, 4, 6, 8), (1, 3, 4), 3),
        ((4, 6, 8), 2, 0),
        ((50,), 25, 0),
        ((5, 11), 1, 1),
        ((13,), (13,), 0),
        ((22, 7), (4, -1), 1),
    ],
)
def test_tflite_split(accel_type, ifm_shape, num_or_size_splits, axis):
    """Codegen test: TFLite SPLIT/SPLIT_V on the Ethos-U. An int
    `num_or_size_splits` maps to SPLIT, a tuple of sizes to SPLIT_V."""
    np.random.seed(0)
    @tf.function
    def split_func(x):
        op = tf.split(x, num_or_size_splits, axis=axis)
        return op
    infra.compare_tvm_with_tflite(split_func, [ifm_shape], accel_type, enable_cascader=False)
@pytest.mark.parametrize("accel_type", ACCEL_TYPES)
@pytest.mark.parametrize(
    "ifm_shape,ifm_scale,ifm_zp,ofm_scale,ofm_zp",
    [
        [(1, 8, 8, 3), 1.0, 0, 1.0, 0],
        [(1, 20, 30, 3), 1.345, 34, 0.32, -23],
    ],
)
def test_ethosu_requantize(accel_type, ifm_shape, ifm_scale, ifm_zp, ofm_scale, ofm_zp):
    """Codegen test: qnn.requantize offloaded to the Ethos-U, compared against
    the CPU reference produced by generate_ref_data."""
    np.random.seed(0)
    dtype = "int8"
    # Build the reference module: a single qnn.requantize with constant
    # quantization parameters.
    input_var = relay.var("ifm", shape=ifm_shape, dtype="int8")
    requantized = relay.qnn.op.requantize(
        input_var,
        relay.const(ifm_scale, dtype="float32"),
        relay.const(ifm_zp, dtype="int32"),
        relay.const(ofm_scale, dtype="float32"),
        relay.const(ofm_zp, dtype="int32"),
    )
    cpu_mod = tvm.IRModule.from_expr(relay.Function([input_var], requantized))
    feed = {"ifm": np.random.randint(-128, high=127, size=ifm_shape, dtype=dtype)}
    expected = generate_ref_data(cpu_mod, feed)
    ethosu_mod = partition_for_ethosu(cpu_mod)
    infra.compare_ethosu_with_reference(
        ethosu_mod,
        feed,
        expected,
        accel_type,
        enable_cascader=is_u55_accel_type(accel_type),
    )
@pytest.mark.parametrize("accel_type", ACCEL_TYPES)
@pytest.mark.parametrize("ifm_shape,axis", [((2,), 0), ((1, 3, 3), 2)])
def test_tflite_expand_dims(accel_type, ifm_shape, axis):
    """Codegen test: TFLite EXPAND_DIMS on the Ethos-U."""
    np.random.seed(0)
    @tf.function
    def expand_dims_func(x):
        return tf.expand_dims(x, axis=axis)
    infra.compare_tvm_with_tflite(
        expand_dims_func, [ifm_shape], accel_type, enable_cascader=is_u55_accel_type(accel_type)
    )
@pytest.mark.parametrize("accel_type", ACCEL_TYPES)
@pytest.mark.parametrize(
    "ifm_shape,axis", [((1, 1, 2, 1), 0), ((1, 3, 3, 1), 3), ((1, 1, 2, 1), None)]
)
def test_tflite_squeeze(accel_type, ifm_shape, axis):
    """Codegen test: TFLite SQUEEZE on the Ethos-U. axis=None squeezes all
    unit dimensions."""
    np.random.seed(0)
    @tf.function
    def squeeze_func(x):
        return tf.squeeze(x, axis=axis)
    infra.compare_tvm_with_tflite(
        squeeze_func, [ifm_shape], accel_type, enable_cascader=is_u55_accel_type(accel_type)
    )
@pytest.mark.parametrize("accel_type", ACCEL_TYPES)
@pytest.mark.parametrize(
    "ifm_shape,size,half_pixel",
    [
        [(1, 2, 2, 1), (4, 4), False],
        [(1, 2, 2, 1), (4, 4), True],
        [(1, 4, 7, 3), (8, 14), False],
        [(1, 3, 5, 3), (3, 5), False],
        [(1, 6, 6, 96), (12, 12), False],
        [(1, 6, 6, 96), (12, 12), True],
    ],
)
def test_tflite_resize2d_nearest_neighbor(accel_type, ifm_shape, size, half_pixel):
    """Codegen test: TFLite RESIZE_NEAREST_NEIGHBOR on the Ethos-U, with and
    without half-pixel centers."""
    np.random.seed(0)
    align_corners = False
    @tf.function
    def resize_model(x):
        return tf.compat.v1.image.resize_nearest_neighbor(
            x,
            size,
            align_corners=align_corners,
            half_pixel_centers=half_pixel,
        )
    infra.compare_tvm_with_tflite(
        resize_model, [ifm_shape], accel_type, enable_cascader=is_u55_accel_type(accel_type)
    )
@pytest.mark.parametrize("accel_type", ACCEL_TYPES)
@pytest.mark.parametrize(
    "ifm_shape,size,align_corners",
    [
        [(1, 2, 2, 1), (4, 4), False],
        [(1, 4, 7, 3), (8, 14), False],
        [(1, 2, 2, 1), (3, 3), True],
        [(1, 4, 7, 3), (7, 13), True],
        [(1, 3, 5, 3), (3, 5), False],
    ],
)
def test_tflite_resize2d_bilinear(accel_type, ifm_shape, size, align_corners):
    """Codegen test: TFLite RESIZE_BILINEAR on the Ethos-U, with and without
    align_corners."""
    np.random.seed(0)
    @tf.function
    def resize_model(x):
        return tf.compat.v1.image.resize_bilinear(
            x, size, align_corners=align_corners, half_pixel_centers=False
        )
    infra.compare_tvm_with_tflite(
        resize_model, [ifm_shape], accel_type, enable_cascader=is_u55_accel_type(accel_type)
    )
@pytest.mark.parametrize("accel_type", ACCEL_TYPES)
@pytest.mark.parametrize(
    "ifm_shape,ofm_shape,kernel_shape,padding",
    [
        [(1, 2, 2, 1), (1, 4, 4, 1), (3, 3), "SAME"],
        [(1, 2, 2, 1), (1, 9, 9, 1), (7, 7), "VALID"],
        [(1, 2, 4, 3), (1, 4, 8, 3), (5, 3), "SAME"],
        [(1, 10, 5, 3), (1, 21, 13, 3), (3, 5), "VALID"],
    ],
)
@pytest.mark.parametrize("has_bias", [False, True])
def test_tflite_transpose_convolution(
    accel_type, ifm_shape, ofm_shape, kernel_shape, padding, has_bias
):
    """Codegen test: TFLite TRANSPOSE_CONV (optionally with bias) on the
    Ethos-U, with stride 2x2."""
    np.random.seed(0)
    dilations = (1, 1)
    strides = (2, 2)
    @tf.function
    def conv2d_transpose(x):
        # conv2d_transpose weights are laid out [kh, kw, out_channels, in_channels].
        weight_shape = [kernel_shape[0], kernel_shape[1], ifm_shape[3], ofm_shape[3]]
        weight = tf.constant(np.random.uniform(size=weight_shape), dtype=tf.float32)
        bias_shape = ofm_shape[3]
        bias = tf.constant(np.random.uniform(size=bias_shape), dtype=tf.float32)
        tf_strides = [1, strides[0], strides[1], 1]
        op = tf.nn.conv2d_transpose(
            x,
            weight,
            output_shape=ofm_shape,
            strides=tf_strides,
            padding=padding,
            dilations=dilations,
        )
        if has_bias:
            op = tf.nn.bias_add(op, bias)
        return op
    infra.compare_tvm_with_tflite(
        conv2d_transpose,
        [ifm_shape],
        accel_type=accel_type,
        enable_cascader=is_u55_accel_type(accel_type),
    )
@pytest.mark.parametrize("accel_type", ACCEL_TYPES)
@pytest.mark.parametrize(
    "ifm_shapes,axis",
    [
        ([(1, 2, 2), (1, 2, 2), (1, 2, 2)], 2),
        ([(5, 4), (5, 4)], 1),
        ([(1,), (1,)], 0),
        ([(3, 1), (3, 1), (3, 1), (3, 1)], 0),
    ],
)
def test_tflite_pack(accel_type, ifm_shapes, axis):
    """Codegen test: TFLite PACK (tf.stack) over a variable number of inputs."""
    np.random.seed(0)
    @tf.function
    def pack_func(*inputs):
        return tf.stack(inputs, axis=axis)
    infra.compare_tvm_with_tflite(pack_func, ifm_shapes, accel_type, enable_cascader=False)
@pytest.mark.parametrize("accel_type", ACCEL_TYPES)
@pytest.mark.parametrize(
    "ifm_shape,axis",
    [[(1, 2, 3, 4), 1], [(2, 3), 1], [(5, 6, 7), 2]],
)
def test_tflite_unpack(accel_type, ifm_shape, axis):
    """Codegen test: TFLite UNPACK (tf.unstack) on the Ethos-U."""
    np.random.seed(0)
    @tf.function
    def unpack_func(x):
        return tf.unstack(x, axis=axis)
    infra.compare_tvm_with_tflite(unpack_func, [ifm_shape], accel_type, enable_cascader=False)
@pytest.mark.parametrize("accel_type", ACCEL_TYPES)
@pytest.mark.parametrize("ifm_shape", [(1, 15, 15, 3), (1, 8, 9, 1)])
@pytest.mark.parametrize("alpha", [0.2, 0.634])
def test_tflite_leaky_relu(accel_type, ifm_shape, alpha):
    """Codegen test: TFLite LEAKY_RELU with varying alpha on the Ethos-U."""
    np.random.seed(0)
    @tf.function
    def leaky_relu_func(x):
        return tf.nn.leaky_relu(x, alpha=alpha)
    infra.compare_tvm_with_tflite(
        leaky_relu_func,
        [ifm_shape],
        accel_type,
        enable_cascader=is_u55_accel_type(accel_type),
        ranges=[(-1, 1)],
    )
# conv2d + relu_n1_to_1 is used because separate activation is not offloaded to NPU.
def test_tflite_relu_n1_to_1():
    """Codegen test: conv2d fused with a RELU_N1_TO_1 activation on the
    Ethos-U55-256."""
    np.random.seed(0)
    accel_type = "ethos-u55-256"
    ifm_shape = (1, 55, 34, 3)
    kernel_shape = (3, 2)
    strides = (1, 1)
    @tf.function
    def conv2d_relu_n1_to_1(x):
        tf_strides = [1, strides[0], strides[1], 1]
        weight_shape = [kernel_shape[0], kernel_shape[1], ifm_shape[3], 3]
        weight = tf.constant(np.random.uniform(size=weight_shape), dtype=tf.float32)
        op = tf.nn.conv2d(
            x,
            weight,
            strides=tf_strides,
            padding="VALID",
        )
        # The specific pattern will be replaced into RELU_N1_TO_1 by tflite.
        return tf.math.maximum(-1.0, tf.math.minimum(op, 1.0))
    infra.compare_tvm_with_tflite(
        conv2d_relu_n1_to_1,
        [ifm_shape],
        accel_type,
        enable_cascader=True,
    )
# conv2d + relu6 is used because separate activation is not offloaded to NPU.
def test_tflite_relu6():
    """Codegen test: conv2d fused with a RELU6 activation on the
    Ethos-U55-256."""
    np.random.seed(0)
    accel_type = "ethos-u55-256"
    ifm_shape = (1, 55, 34, 3)
    kernel_shape = (3, 2)
    strides = (1, 1)
    @tf.function
    def conv2d_relu6(x):
        tf_strides = [1, strides[0], strides[1], 1]
        weight_shape = [kernel_shape[0], kernel_shape[1], ifm_shape[3], 3]
        weight = tf.constant(np.random.uniform(size=weight_shape), dtype=tf.float32)
        op = tf.nn.conv2d(
            x,
            weight,
            strides=tf_strides,
            padding="VALID",
        )
        return tf.nn.relu6(op)
    infra.compare_tvm_with_tflite(
        conv2d_relu6,
        [ifm_shape],
        accel_type,
        enable_cascader=True,
    )
# Specific case when operation cannot be offloaded to NPU by single binary elementwise operation because
# min and max operations cannot be fused with requantize if there are different scales as it's not supported on NPU.
@pytest.mark.parametrize("operation", [tf.math.minimum, tf.math.maximum])
def test_tflite_min_max_relu_n1_to_1(operation):
    """Codegen test: min/max binary op followed by RELU_N1_TO_1, with inputs
    quantized to different scales (ranges (-1, 1) and (0, 2))."""
    np.random.seed(0)
    accel_type = "ethos-u55-128"
    ifm_shape = (1, 12, 16, 8)
    @tf.function
    def min_max_relu_n1_to_1(lhs, rhs):
        op = operation(lhs, rhs)
        # The specific pattern will be replaced into RELU_N1_TO_1 by tflite.
        return tf.math.maximum(-1.0, tf.math.minimum(op, 1.0))
    infra.compare_tvm_with_tflite(
        min_max_relu_n1_to_1,
        [ifm_shape, ifm_shape],
        accel_type,
        enable_cascader=True,
        ranges=[(-1, 1), (0, 2)],
    )
@pytest.mark.parametrize("accel_type", ACCEL_TYPES)
@pytest.mark.parametrize("ifm_shape", [(1, 14), (1, 151)])
@pytest.mark.parametrize("ofm_channels", [32, 64])
@pytest.mark.parametrize("use_bias", [True, False])
@pytest.mark.parametrize("activation_function", ["RELU", "NONE"])
def test_tflite_fully_connected(
    accel_type,
    ifm_shape,
    ofm_channels,
    use_bias,
    activation_function,
):
    """Codegen test: TFLite FULLY_CONNECTED (matmul, optional bias, optional
    fused ReLU) on the Ethos-U."""
    np.random.seed(0)
    @tf.function
    def fully_connected(x):
        bias_shape = ofm_channels
        bias = tf.constant(np.random.uniform(size=bias_shape), dtype=tf.float32)
        w = tf.constant(
            np.random.uniform(size=[ifm_shape[1], ofm_channels]),
            dtype=tf.float32,
        )
        x = tf.matmul(x, w)
        if use_bias:
            x = tf.nn.bias_add(x, bias)
        # Bug fix: the original condition was `if activation_function:`, which is
        # truthy for both "RELU" and "NONE", so ReLU was applied even for the
        # "NONE" parametrization and that case never tested the un-activated path.
        if activation_function == "RELU":
            x = tf.nn.relu(x)
        return x
    infra.compare_tvm_with_tflite(
        fully_connected, [ifm_shape], accel_type, enable_cascader=is_u55_accel_type(accel_type)
    )
@pytest.mark.parametrize("accel_type", ["ethos-u55-256", "ethos-u65-256"])
def test_tflite_subtract_sigmoid(accel_type):
    """Codegen test: subtract followed by sigmoid on the Ethos-U."""
    np.random.seed(0)
    ifm_shape = [1, 6, 8, 4]
    @tf.function
    def subtract_sigmoid_function(lhs, rhs):
        op = tf.math.subtract(lhs, rhs)
        op = tf.nn.sigmoid(op)
        return op
    infra.compare_tvm_with_tflite(
        subtract_sigmoid_function,
        [ifm_shape, ifm_shape],
        accel_type,
        enable_cascader=is_u55_accel_type(accel_type),
    )
# Allow running this test file directly as a script.
if __name__ == "__main__":
    tvm.testing.main()
| 48,728 | 30.03758 | 120 | py |
tvm | tvm-main/tests/python/contrib/test_uma/test_uma_pipeline.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
pytest.importorskip("tflite")
pytest.importorskip("tensorflow")
import os
import tensorflow as tf
from tvm.micro.testing.aot_test_utils import AOT_DEFAULT_RUNNER
from tvm.relay import transform, testing
from tvm.testing.aot import (
AOTTestModel,
AOTTestRunner,
generate_ref_data,
compile_and_run,
create_relay_module_and_inputs_from_tflite_file,
)
import tvm
from test_uma_vanilla_accelerator import VanillaAcceleratorBackend
from tvm import relay
import numpy as np
from collections import OrderedDict
from tvm.relay.backend.contrib.uma.api.utils import uma_available
pytestmark = pytest.mark.skipif(not uma_available(), reason="UMA not available")
@pytest.mark.parametrize(
    "interface_api,use_unpacked_api,test_runner,groups,weight_shape",
    [("c", True, AOT_DEFAULT_RUNNER, 1, 32)],
)
def test_conv2d(interface_api, use_unpacked_api, test_runner, groups, weight_shape):
    """Test a subgraph with a single conv2d operator."""
    mod, inputs, output_list, test_runner = create_conv2d(groups, test_runner, weight_shape)
    # Partition the module for the UMA "vanilla_accelerator" backend, then
    # compile and run it against the pre-partitioning reference outputs.
    uma_backend = VanillaAcceleratorBackend()
    uma_backend.register()
    mod = uma_backend.partition(mod)
    target = tvm.target.Target("vanilla_accelerator", host=tvm.target.Target("c"))
    compile_and_run(
        AOTTestModel(module=mod, inputs=inputs, outputs=output_list),
        test_runner,
        interface_api,
        use_unpacked_api,
        target=target,
    )
def create_conv2d(groups=1, test_runner=AOT_DEFAULT_RUNNER, weight_shape=32):
    """Build a single-conv2d Relay module, random inputs, reference outputs and
    a USMP-enabled AOT test runner."""
    dtype = "float32"
    ishape = (1, 32, 14, 14)
    wshape = (32, weight_shape, 3, 3)
    # Re-create the runner, carrying over all fields but enabling USMP.
    test_runner = AOTTestRunner(
        makefile=test_runner.makefile,
        prologue=test_runner.prologue,
        epilogue=test_runner.epilogue,
        includes=test_runner.includes,
        parameters=test_runner.parameters,
        pass_config={"tir.usmp.enable": True},
    )
    data = relay.var("data", shape=ishape, dtype=dtype)
    weight = relay.var("weight", shape=wshape, dtype=dtype)
    conv = relay.nn.conv2d(data, weight, kernel_size=(3, 3), padding=(1, 1), groups=groups)
    mod = tvm.IRModule()
    mod["main"] = relay.Function([data, weight], conv)
    mod = transform.InferType()(mod)
    data_np = np.random.uniform(0, 1, ishape).astype(dtype)
    weight_np = np.random.uniform(0, 1, wshape).astype(dtype)
    inputs = OrderedDict([("data", data_np), ("weight", weight_np)])
    output_list = generate_ref_data(mod, inputs)
    return mod, inputs, output_list, test_runner
def _generate_runtime_data(input_shapes: dict, output_shapes: dict) -> [OrderedDict, OrderedDict]:
assert len(input_shapes) == 1
assert len(output_shapes) == 1
iname = list(input_shapes.keys())[0]
oname = list(output_shapes.keys())[0]
ishape = input_shapes[iname]
oshape = output_shapes[oname]
i_data = np.random.uniform(0, 1, ishape).astype("float32")
o_data = np.random.uniform(0, 1, oshape).astype("float32")
oname = "output" # name set by relay.build in executor_codegen_metadata.outputs
inputs = OrderedDict([(iname, i_data)])
outputs = OrderedDict([(oname, o_data)])
return inputs, outputs
def test_mobilenet():
    """Full network test with Mobilenet"""
    use_unpacked_api = True
    interface_api = "c"
    test_runner = AOT_DEFAULT_RUNNER
    mod, params = testing.mobilenet.get_workload(batch_size=1)
    uma_backend = VanillaAcceleratorBackend()
    uma_backend.register()
    target = tvm.target.Target("vanilla_accelerator", host=tvm.target.Target("c"))
    target_c = tvm.target.Target("c")
    data_shape = [int(x) for x in mod["main"].checked_type.arg_types[0].shape]
    data = np.random.uniform(size=data_shape).astype("float32")
    input_list = {"data": data}
    # Reference outputs are computed on the un-partitioned module; the
    # partitioned module must then reproduce them.
    output_list = generate_ref_data(mod, input_list, params)
    mod = uma_backend.partition(mod)
    aot_test_model = AOTTestModel(module=mod, inputs=input_list, outputs=output_list, params=params)
    compile_and_run(
        aot_test_model,
        test_runner,
        interface_api,
        use_unpacked_api,
        workspace_byte_alignment=1,
        debug_calculated_workspaces=False,
        target=[target_c, target],
    )
def test_tflite_model():
    """
    End-to-end test of TF-Lite file using UMA
    """
    # NOTE(review): hardcoded temp path; not parallel-safe if two runs share
    # /tmp — consider tempfile if this ever flakes.
    tflite_file = "/tmp/model.tflite"
    if os.path.exists(tflite_file):
        os.remove(tflite_file)
    generate_tflite_file(tflite_file)
    pytest.importorskip("tflite")
    interpreter = tf.lite.Interpreter(model_path=tflite_file)
    tf_model_details = interpreter.get_input_details()
    mod, _, params = create_relay_module_and_inputs_from_tflite_file(
        tflite_file, bind_params_by_name=False
    )
    uma_backend = VanillaAcceleratorBackend()
    uma_backend.register()
    target = tvm.target.Target("vanilla_accelerator", host=tvm.target.Target("c"))
    target_c = tvm.target.Target("c")
    # Generation of test input and output
    data_shape = [int(x) for x in mod["main"].params[0].type_annotation.shape]
    data = np.random.uniform(size=data_shape).astype("float32")
    input_list = {str(tf_model_details[0]["name"]): data}
    output_list = generate_ref_data(mod, input_list, params)
    # UMA partitioning (needs to be done after generate_ref_data)
    mod = uma_backend.partition(mod)
    aot_test_model = AOTTestModel(module=mod, inputs=input_list, outputs=output_list, params=params)
    test_runner = AOTTestRunner(
        pass_config={"tir.usmp.enable": True, "tir.usmp.algorithm": "greedy_by_size"}
    )
    compile_and_run(
        aot_test_model,
        test_runner,
        interface_api="c",
        use_unpacked_api=True,
        workspace_byte_alignment=1,
        debug_calculated_workspaces=False,
        target=[target_c, target],
    )
def generate_tflite_file(tflite_filename):
    """Train a tiny MNIST Keras classifier for one epoch and write it to
    ``tflite_filename`` as a TFLite flatbuffer."""
    (x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
    x_train, x_test = x_train / 255.0, x_test / 255.0
    x_train, x_test = x_train.reshape(-1, 28, 28, 1), x_test.reshape(-1, 28, 28, 1)
    model = tf.keras.models.Sequential(
        [
            tf.keras.Input(shape=(28, 28, 1)),
            tf.keras.layers.Conv2D(4, (3, 3), padding="same", activation="relu"),
            tf.keras.layers.Flatten(input_shape=(28, 28)),
            tf.keras.layers.Dense(32, activation="relu"),
            tf.keras.layers.Dropout(0.2),
            tf.keras.layers.Dense(10),
        ]
    )
    # Run one sample through the untrained model to build it and sanity-check
    # the loss on raw logits.
    logits = model(x_train[:1]).numpy()
    loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
    loss(y_train[:1], logits).numpy()
    model.compile(metrics=["accuracy"], optimizer="adam", loss=loss)
    model.fit(x_train, y_train, epochs=1)
    converter = tf.lite.TFLiteConverter.from_keras_model(model)
    with open(tflite_filename, "wb") as f:
        f.write(converter.convert())
# Allow running this test file directly as a script.
if __name__ == "__main__":
    tvm.testing.main()
| 7,831 | 34.762557 | 100 | py |
tvm | tvm-main/tests/python/topi/python/test_topi_image.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test code for bilinear scale """
import numpy as np
import tvm
from tvm import te
from tvm import topi
import tvm.testing
import tvm.topi.testing
from tvm.contrib.pickle_memoize import memoize
def verify_resize2d(
    batch,
    in_channel,
    in_height,
    in_width,
    out_height,
    out_width,
    layout="NCHW",
    coord_trans="align_corners",
    method="linear",
):
    """Compile topi.image.resize2d for every enabled target and compare its
    output against the Python reference (resize2d_python) with 1e-3 tolerance.

    `layout` selects NCHW or NHWC tensor layouts; `coord_trans` is the
    coordinate transformation mode and `method` the interpolation method.
    """
    if layout == "NCHW":
        A = te.placeholder((batch, in_channel, in_height, in_width), name="A", dtype="float32")
        dtype = A.dtype
        out_shape = (batch, in_channel, out_height, out_width)
        a_np = np.random.uniform(size=(batch, in_channel, in_height, in_width)).astype(dtype)
    elif layout == "NHWC":
        A = te.placeholder((batch, in_height, in_width, in_channel), name="A", dtype="float32")
        dtype = A.dtype
        out_shape = (batch, out_height, out_width, in_channel)
        a_np = np.random.uniform(size=(batch, in_height, in_width, in_channel)).astype(dtype)
    else:
        raise NotImplementedError("Layout not supported {} ".format(layout))
    B = topi.image.resize2d(
        A,
        [0.0] * 4,
        (out_height, out_width),
        layout=layout,
        coordinate_transformation_mode=coord_trans,
        method=method,
    )
    # Reference result computed in pure Python/NumPy.
    scale_h = out_height / in_height
    scale_w = out_width / in_width
    b_np = tvm.topi.testing.resize2d_python(a_np, (scale_h, scale_w), layout, method, coord_trans)
    def check_target(target, dev):
        print("Running on target: %s" % target)
        with tvm.target.Target(target):
            s = tvm.topi.testing.get_injective_schedule(target)(B)
        a = tvm.nd.array(a_np, dev)
        b = tvm.nd.array(np.zeros(out_shape, dtype=dtype), dev)
        f = tvm.build(s, [A, B], target)
        f(a, b)
        tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-3, atol=1e-3)
    for target, dev in tvm.testing.enabled_targets():
        check_target(target, dev)
@tvm.testing.uses_gpu
def test_resize2d():
    """Exercise resize2d across layouts, coordinate transforms and methods."""
    # Default (linear/align_corners) resize: upscale and downscale in both layouts.
    for layout in ("NCHW", "NHWC"):
        verify_resize2d(4, 16, 32, 32, 50, 50, layout)
        verify_resize2d(6, 32, 64, 64, 20, 20, layout)
    # (out_height, out_width, coordinate transform) cases for nearest-neighbor.
    nearest_cases = [
        (50, 50, "asymmetric"),
        (64, 50, "asymmetric"),
        (50, 96, "asymmetric"),
        (96, 96, "asymmetric"),
        (50, 50, "align_corners"),
        (50, 50, "half_pixel"),
    ]
    for layout in ["NCHW", "NHWC"]:
        for out_h, out_w, coord_trans in nearest_cases:
            verify_resize2d(
                4, 16, 32, 32, out_h, out_w, layout, coord_trans, method="nearest_neighbor"
            )
        for coord_trans in ("asymmetric", "half_pixel"):
            verify_resize2d(4, 16, 32, 32, 50, 50, layout, coord_trans, method="linear")
def verify_resize3d(
    batch,
    in_channel,
    in_depth,
    in_height,
    in_width,
    out_depth,
    out_height,
    out_width,
    layout="NCDHW",
    coordinate_transformation_mode="asymmetric",
    method="linear",
):
    """Compile topi.image.resize3d for every enabled target and compare its
    output against the Python reference (resize3d_python) with 1e-3 tolerance.

    `layout` selects NCDHW or NDHWC tensor layouts.
    """
    if layout == "NCDHW":
        A = te.placeholder(
            (batch, in_channel, in_depth, in_height, in_width), name="A", dtype="float32"
        )
        dtype = A.dtype
        out_shape = (batch, in_channel, out_depth, out_height, out_width)
        a_np = np.random.uniform(size=(batch, in_channel, in_depth, in_height, in_width)).astype(
            dtype
        )
    elif layout == "NDHWC":
        A = te.placeholder(
            (batch, in_depth, in_height, in_width, in_channel), name="A", dtype="float32"
        )
        dtype = A.dtype
        out_shape = (batch, out_depth, out_height, out_width, in_channel)
        a_np = np.random.uniform(size=(batch, in_depth, in_height, in_width, in_channel)).astype(
            dtype
        )
    else:
        raise NotImplementedError("Layout not supported {} ".format(layout))
    B = topi.image.resize3d(
        A,
        [0.0] * 6,
        (out_depth, out_height, out_width),
        layout=layout,
        coordinate_transformation_mode=coordinate_transformation_mode,
        method=method,
    )
    # Reference result computed in pure Python/NumPy.
    scale_d = out_depth / in_depth
    scale_h = out_height / in_height
    scale_w = out_width / in_width
    b_np = tvm.topi.testing.resize3d_python(
        a_np, (scale_d, scale_h, scale_w), layout, method, coordinate_transformation_mode
    )
    def check_target(target, dev):
        with tvm.target.Target(target):
            s = tvm.topi.testing.get_injective_schedule(target)(B)
        a = tvm.nd.array(a_np, dev)
        b = tvm.nd.array(np.zeros(out_shape, dtype=dtype), dev)
        f = tvm.build(s, [A, B], target)
        f(a, b)
        tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-3, atol=1e-3)
    for target, dev in tvm.testing.enabled_targets():
        check_target(target, dev)
@tvm.testing.uses_gpu
def test_resize3d():
    """Exercise resize3d for every method/coordinate-transform/layout combination."""
    methods = ("nearest_neighbor", "linear")
    coord_transforms = ("asymmetric", "align_corners", "half_pixel")
    layouts = ("NCDHW", "NDHWC")
    for method in methods:
        for coord_trans in coord_transforms:
            for layout in layouts:
                verify_resize3d(3, 16, 32, 32, 32, 10, 10, 10, layout, coord_trans, method)
@tvm.testing.uses_gpu
def test_crop_and_resize():
    """Compare topi.image.crop_and_resize against the Python reference for
    several box/size/layout/method combinations."""
    def verify_crop_and_resize(
        image_shape,
        np_boxes,
        np_box_indices,
        np_crop_size,
        layout="NHWC",
        method="bilinear",
        extrapolation_value=0.0,
    ):
        # Build placeholders for images, normalized boxes and batch indices.
        images = te.placeholder(image_shape, name="images", dtype="float32")
        np_images = np.random.uniform(size=image_shape).astype("float32")
        boxes = te.placeholder(np_boxes.shape, name="boxes", dtype="float32")
        box_ind = te.placeholder(np_box_indices.shape, name="box_ind", dtype="int32")
        batch = len(np_box_indices)
        target_height, target_width = np_crop_size[0], np_crop_size[1]
        if layout == "NHWC":
            channel = image_shape[3]
            out_shape = (batch, target_height, target_width, channel)
        elif layout == "NCHW":
            channel = image_shape[1]
            out_shape = (batch, channel, target_height, target_width)
        else:
            raise NotImplementedError("Layout {} is not supported.".format(layout))
        out = topi.image.crop_and_resize(
            images,
            boxes,
            box_ind,
            np_crop_size,
            layout=layout,
            method=method,
            extrapolation_value=extrapolation_value,
        )
        baseline_np = tvm.topi.testing.crop_and_resize_python(
            np_images, np_boxes, np_box_indices, np_crop_size, layout, method, extrapolation_value
        )
        def check_target(target, dev):
            print("Running on target: %s" % target)
            with tvm.target.Target(target):
                s = tvm.topi.testing.get_injective_schedule(target)(out)
            tvm_images = tvm.nd.array(np_images, dev)
            tvm_boxes = tvm.nd.array(np_boxes, dev)
            tvm_indices = tvm.nd.array(np_box_indices, dev)
            tvm_out = tvm.nd.array(np.zeros(out_shape, dtype="float32"), dev)
            f = tvm.build(s, [images, boxes, box_ind, out], target, name="crop_and_resize")
            f(tvm_images, tvm_boxes, tvm_indices, tvm_out)
            tvm.testing.assert_allclose(tvm_out.numpy(), baseline_np, rtol=1e-3, atol=1e-3)
        for target, dev in tvm.testing.enabled_targets():
            check_target(target, dev)
    boxes_1 = np.array([[0.2, 0.3, 0.7, 0.9]], dtype="float32")
    boxes_2 = np.array([[0.2, 0.3, 0.7, 0.9], [0, 0.1, 0.8, 1]], dtype="float32")
    indices_1 = np.array([0], dtype="int32")
    indices_2 = np.array([1, 0], dtype="int32")
    size_1 = (7, 11)
    size_2 = (90, 60)
    verify_crop_and_resize((1, 255, 255, 3), boxes_1, indices_1, size_1, layout="NHWC")
    verify_crop_and_resize(
        (10, 224, 224, 5), boxes_2, indices_2, size_2, extrapolation_value=0.3, layout="NHWC"
    )
    verify_crop_and_resize((1, 100, 100, 3), boxes_1, indices_1, size_1, method="nearest_neighbor")
    verify_crop_and_resize((1, 3, 224, 224), boxes_1, indices_1, size_1, layout="NCHW")
@tvm.testing.uses_gpu
def test_affine_grid():
    """Compare topi.image.affine_grid against the numpy reference on every enabled target."""

    def verify_affine_grid(num_batch, target_shape):
        dtype = "float32"
        theta_shape = (num_batch, 2, 3)
        theta = te.placeholder(theta_shape, dtype=dtype)
        result = topi.image.affine_grid(theta, target_shape)

        # Cache the random input / reference output pair across test runs.
        @memoize("topi.tests.test_affine_grid.verify_affine_grid")
        def get_ref_data():
            theta_np = np.random.uniform(size=theta_shape).astype(dtype)
            ref_np = tvm.topi.testing.affine_grid_python(theta_np, target_shape)
            return theta_np, ref_np

        theta_np, ref_np = get_ref_data()

        def run_single_target(target, dev):
            print("Running on target: %s" % target)
            with tvm.target.Target(target):
                sched = tvm.topi.testing.get_injective_schedule(target)(result)
            theta_dev = tvm.nd.array(theta_np, dev)
            out_dev = tvm.nd.empty(ref_np.shape, dtype, dev)
            built = tvm.build(sched, [theta, result], target)
            built(theta_dev, out_dev)
            tvm.testing.assert_allclose(out_dev.numpy(), ref_np, rtol=1e-5, atol=1e-5)

        for target, dev in tvm.testing.enabled_targets():
            run_single_target(target, dev)

    verify_affine_grid(1, (16, 32))
    verify_affine_grid(4, (16, 32))
@tvm.testing.uses_gpu
def test_grid_sample():
    """Validate topi.image.grid_sample (2D and 3D) against the numpy reference."""

    def verify_grid_sample(
        data_shape,
        grid_shape,
        method="bilinear",
        layout="NCHW",
        padding_mode="zeros",
        align_corners=True,
    ):
        dtype = "float32"
        data_te = te.placeholder(data_shape, dtype=dtype)
        grid_te = te.placeholder(grid_shape, dtype=dtype)
        result = topi.image.grid_sample(
            data_te, grid_te, method, layout, padding_mode, align_corners
        )

        @memoize("topi.tests.test_grid_sample.verify_grid_sample")
        def get_ref_data():
            data_np = np.random.uniform(size=data_shape).astype(dtype)
            # allow grid values to be out-of-bound
            grid_np = np.random.uniform(size=grid_shape, low=-1.5, high=1.5).astype(dtype)
            out_np = tvm.topi.testing.grid_sample_python(
                data_np, grid_np, method, layout, padding_mode, align_corners
            )
            return data_np, grid_np, out_np

        data_np, grid_np, out_np = get_ref_data()

        def run_single_target(target, dev):
            print("Running on target: %s" % target)
            with tvm.target.Target(target):
                sched = tvm.topi.testing.get_injective_schedule(target)(result)
            data_dev = tvm.nd.array(data_np, dev)
            grid_dev = tvm.nd.array(grid_np, dev)
            out_dev = tvm.nd.empty(out_np.shape, dtype, dev)
            built = tvm.build(sched, [data_te, grid_te, result], target)
            built(data_dev, grid_dev, out_dev)
            tvm.testing.assert_allclose(out_dev.numpy(), out_np, rtol=1e-5, atol=1e-5)

        for target, dev in tvm.testing.enabled_targets():
            run_single_target(target, dev)

    methods = ["nearest", "bilinear", "bicubic"]
    padding_modes = ["zeros", "border", "reflection"]
    align_corners = [True, False]

    data_2D_shape = (4, 4, 8, 8)
    grid_2D_shape = (4, 2, 16, 16)
    layout_2D = "NCHW"
    # choosing smaller sizes to be testable on weaker GPUs
    data_3D_shape = (4, 4, 4, 4, 4)
    grid_3D_shape = (4, 3, 8, 8, 8)
    layout_3D = "NCDHW"

    for sample_method in methods:
        for pad_mode in padding_modes:
            for corners in align_corners:
                verify_grid_sample(
                    data_2D_shape, grid_2D_shape, sample_method, layout_2D, pad_mode, corners
                )
                # 3D "bicubic"(tricubic) is not supported in pytorch
                if sample_method != "bicubic":
                    verify_grid_sample(
                        data_3D_shape, grid_3D_shape, sample_method, layout_3D, pad_mode, corners
                    )
if __name__ == "__main__":
test_resize2d()
test_resize3d()
test_crop_and_resize()
test_affine_grid()
test_grid_sample()
| 13,145 | 36.452991 | 99 | py |
tvm | tvm-main/tests/scripts/request_hook/request_hook.py | #!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import urllib.request
import logging
from urllib.parse import quote
LOGGER = None
# To update this list, run https://github.com/apache/tvm/actions/workflows/upload_ci_resource.yml
# with the URL to download and the SHA-256 hash of the file.
BASE = "https://tvm-ci-resources.s3.us-west-2.amazonaws.com"
URL_MAP = {
"http://data.mxnet.io.s3-website-us-west-1.amazonaws.com/data/val_256_q90.rec": f"{BASE}/mxnet-val_256_q90.rec",
"http://dl.caffe.berkeleyvision.org/bvlc_alexnet.caffemodel": f"{BASE}/bvlc_alexnet.caffemodel",
"http://dl.caffe.berkeleyvision.org/bvlc_googlenet.caffemodel": f"{BASE}/bvlc_googlenet.caffemodel",
"http://download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224.tgz": f"{BASE}/tf-mobilenet_v1_1.0_224.tgz",
"http://download.tensorflow.org/models/object_detection/ssd_mobilenet_v2_quantized_300x300_coco_2019_01_03.tar.gz": f"{BASE}/models/object_detection/ssd_mobilenet_v2_quantized_300x300_coco_2019_01_03.tar.gz",
"http://download.tensorflow.org/models/tflite_11_05_08/mobilenet_v2_1.0_224.tgz": f"{BASE}/models/tflite_11_05_08/mobilenet_v2_1.0_224.tgz",
"http://images.cocodataset.org/zips/val2017.zip": f"{BASE}/cocodataset-val2017.zip",
"http://pjreddie.com/media/files/alexnet.weights?raw=true": f"{BASE}/media/files/alexnet.weights"
+ quote("?raw=true"),
"http://pjreddie.com/media/files/alexnet.weights?raw=true": f"{BASE}/media/files/alexnet.weights"
+ quote("?raw=true"),
"http://pjreddie.com/media/files/extraction.weights?raw=true": f"{BASE}/media/files/extraction.weights"
+ quote("?raw=true"),
"http://pjreddie.com/media/files/extraction.weights?raw=true": f"{BASE}/media/files/extraction.weights"
+ quote("?raw=true"),
"http://pjreddie.com/media/files/resnet50.weights?raw=true": f"{BASE}/media/files/resnet50.weights"
+ quote("?raw=true"),
"http://pjreddie.com/media/files/resnext50.weights?raw=true": f"{BASE}/media/files/resnext50.weights"
+ quote("?raw=true"),
"http://pjreddie.com/media/files/yolov2.weights?raw=true": f"{BASE}/media/files/yolov2.weights"
+ quote("?raw=true"),
"http://pjreddie.com/media/files/yolov3.weights?raw=true": f"{BASE}/media/files/yolov3.weights"
+ quote("?raw=true"),
"http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz": f"{BASE}/imikolov/rnnlm/simple-examples.tgz",
"https://bj.bcebos.com/x2paddle/models/paddle_resnet50.tar": f"{BASE}/bcebos-paddle_resnet50.tar",
"https://data.deepai.org/stanfordcars.zip": f"{BASE}/deepai-stanfordcars.zip",
"https://download.pytorch.org/models/quantized/mobilenet_v2_qnnpack_37f702c5.pth": f"{BASE}/models/quantized/mobilenet_v2_qnnpack_37f702c5.pth",
"https://github.com/ARM-software/ML-zoo/blob/48f458af1e9065d9aad2ad94d24b58d6e7c00817/models/keyword_spotting/ds_cnn_small/tflite_int16/ds_cnn_quantized.tflite?raw=true": f"{BASE}/ARM-software/ML-zoo/blob/48f458af1e9065d9aad2ad94d24b58d6e7c00817/models/keyword_spotting/ds_cnn_small/tflite_int16/ds_cnn_quantized.tflite"
+ quote("?raw=true"),
"https://raw.githubusercontent.com/tlc-pack/tophub/main/tophub/adreno_v0.01.log": f"{BASE}/tlc-pack/tophub/main/tophub/adreno_v0.01.log",
"https://docs-assets.developer.apple.com/coreml/models/MobileNet.mlmodel": f"{BASE}/2022-10-05/MobileNet.mlmodel",
"https://docs-assets.developer.apple.com/coreml/models/Resnet50.mlmodel": f"{BASE}/coreml/models/Resnet50.mlmodel",
"https://download.pytorch.org/models/deeplabv3_mobilenet_v3_large-fc3c493d.pth": f"{BASE}/models/deeplabv3_mobilenet_v3_large-fc3c493d.pth",
"https://download.pytorch.org/models/deeplabv3_resnet101_coco-586e9e4e.pth": f"{BASE}/models/deeplabv3_resnet101_coco-586e9e4e.pth",
"https://download.pytorch.org/models/densenet121-a639ec97.pth": f"{BASE}/models/densenet121-a639ec97.pth",
"https://download.pytorch.org/models/efficientnet_b4_rwightman-7eb33cd5.pth": f"{BASE}/models/efficientnet_b4_rwightman-7eb33cd5.pth",
"https://download.pytorch.org/models/fcn_resnet101_coco-7ecb50ca.pth": f"{BASE}/models/fcn_resnet101_coco-7ecb50ca.pth",
"https://download.pytorch.org/models/googlenet-1378be20.pth": f"{BASE}/models/googlenet-1378be20.pth",
"https://download.pytorch.org/models/inception_v3_google-0cc3c7bd.pth": f"{BASE}/models/inception_v3_google-0cc3c7bd.pth",
"https://download.pytorch.org/models/maskrcnn_resnet50_fpn_coco-bf2d0c1e.pth": f"{BASE}/2022-10-05/maskrcnn_resnet50_fpn_coco-bf2d0c1e.pth",
"https://download.pytorch.org/models/mnasnet0.5_top1_67.823-3ffadce67e.pth": f"{BASE}/models/mnasnet0.5_top1_67.823-3ffadce67e.pth",
"https://download.pytorch.org/models/mobilenet_v2-b0353104.pth": f"{BASE}/2022-10-05/mobilenet_v2-b0353104.pth",
"https://download.pytorch.org/models/r3d_18-b3b3357e.pth": f"{BASE}/models/r3d_18-b3b3357e.pth",
"https://download.pytorch.org/models/resnet18-f37072fd.pth": f"{BASE}/2022-10-05/resnet18-f37072fd.pth",
"https://download.pytorch.org/models/resnet50-0676ba61.pth": f"{BASE}/models/resnet50-0676ba61.pth",
"https://download.pytorch.org/models/squeezenet1_0-b66bff10.pth": f"{BASE}/models/squeezenet1_0-b66bff10.pth",
"https://download.pytorch.org/models/squeezenet1_1-b8a52dc0.pth": f"{BASE}/models/squeezenet1_1-b8a52dc0.pth",
"https://download.pytorch.org/models/vgg16_features-amdegroot-88682ab5.pth": f"{BASE}/models/vgg16_features-amdegroot-88682ab5.pth",
"https://gist.github.com/zhreshold/bcda4716699ac97ea44f791c24310193/raw/93672b029103648953c4e5ad3ac3aadf346a4cdc/super_resolution_0.2.onnx": f"{BASE}/2022-10-05/super_resolution_0.2.onnx",
"https://gist.githubusercontent.com/zhreshold/4d0b62f3d01426887599d4f7ede23ee5/raw/596b27d23537e5a1b5751d2b0481ef172f58b539/imagenet1000_clsid_to_human.txt": f"{BASE}/2022-10-05/imagenet1000_clsid_to_human.txt",
"https://gist.githubusercontent.com/zhreshold/bcda4716699ac97ea44f791c24310193/raw/fa7ef0e9c9a5daea686d6473a62aacd1a5885849/cat.png": f"{BASE}/zhreshold/bcda4716699ac97ea44f791c24310193/raw/fa7ef0e9c9a5daea686d6473a62aacd1a5885849/cat.png",
"https://github.com/ARM-software/ML-zoo/raw/48a22ee22325d15d2371a6df24eb7d67e21dcc97/models/keyword_spotting/cnn_small/tflite_int8/cnn_s_quantized.tflite": f"{BASE}/ARM-software/ML-zoo/raw/48a22ee22325d15d2371a6df24eb7d67e21dcc97/models/keyword_spotting/cnn_small/tflite_int8/cnn_s_quantized.tflite",
"https://github.com/czh978/models_for_tvm_test/raw/main/tflite_graph_with_postprocess.pb": f"{BASE}/czh978/models_for_tvm_test/raw/main/tflite_graph_with_postprocess.pb",
"https://github.com/dmlc/mxnet.js/blob/main/data/cat.png?raw=true": f"{BASE}/dmlc/mxnet.js/blob/main/data/cat.png"
+ quote("?raw=true"),
"https://github.com/dmlc/mxnet.js/raw/main/data/cat.png": f"{BASE}/dmlc/mxnet.js/raw/main/data/cat.png",
"https://github.com/dmlc/web-data/blob/main/darknet/cfg/yolov3.cfg?raw=true": f"{BASE}/dmlc/web-data/blob/main/darknet/cfg/yolov3.cfg"
+ quote("?raw=true"),
"https://github.com/dmlc/web-data/blob/main/darknet/data/arial.ttf?raw=true": f"{BASE}/dmlc/web-data/blob/main/darknet/data/arial.ttf"
+ quote("?raw=true"),
"https://github.com/dmlc/web-data/blob/main/darknet/data/coco.names?raw=true": f"{BASE}/dmlc/web-data/blob/main/darknet/data/coco.names"
+ quote("?raw=true"),
"https://github.com/dmlc/web-data/blob/main/darknet/data/dog.jpg?raw=true": f"{BASE}/dmlc/web-data/blob/main/darknet/data/dog.jpg"
+ quote("?raw=true"),
"https://github.com/dmlc/web-data/blob/main/darknet/data/dog.jpg": f"{BASE}/dog.jpg",
"https://github.com/dmlc/web-data/blob/main/darknet/data/person.jpg?raw=true": f"{BASE}/dmlc/web-data/blob/main/darknet/data/person.jpg"
+ quote("?raw=true"),
"https://github.com/dmlc/web-data/blob/main/darknet/lib/libdarknet2.0.so?raw=true": f"{BASE}/dmlc/web-data/blob/main/darknet/lib/libdarknet2.0.so"
+ quote("?raw=true"),
"https://github.com/dmlc/web-data/blob/main/gluoncv/detection/street_small.jpg?raw=true": f"{BASE}/2022-10-05/small_street_raw.jpg",
"https://github.com/dmlc/web-data/raw/main/darknet/cfg/yolov3.cfg": f"{BASE}/dmlc/web-data/raw/main/darknet/cfg/yolov3.cfg",
"https://github.com/dmlc/web-data/raw/main/darknet/data/arial.ttf": f"{BASE}/dmlc/web-data/raw/main/darknet/data/arial.ttf",
"https://github.com/dmlc/web-data/raw/main/darknet/data/coco.names": f"{BASE}/dmlc/web-data/raw/main/darknet/data/coco.names",
"https://github.com/dmlc/web-data/raw/main/darknet/data/dog.jpg": f"{BASE}/dmlc/web-data/raw/main/darknet/data/dog.jpg",
"https://github.com/dmlc/web-data/raw/main/darknet/data/person.jpg": f"{BASE}/dmlc/web-data/raw/main/darknet/data/person.jpg",
"https://github.com/dmlc/web-data/raw/main/darknet/lib/libdarknet2.0.so": f"{BASE}/dmlc/web-data/raw/main/darknet/lib/libdarknet2.0.so",
"https://github.com/dmlc/web-data/raw/main/gluoncv/detection/street_small.jpg": f"{BASE}/2022-10-05/gluon-small-stree.jpg",
"https://github.com/dmlc/web-data/raw/main/tensorflow/models/Custom/placeholder.pb": f"{BASE}/dmlc/web-data/raw/main/tensorflow/models/Custom/placeholder.pb",
"https://github.com/dmlc/web-data/raw/main/tensorflow/models/InceptionV1/classify_image_graph_def-with_shapes.pb": f"{BASE}/dmlc/web-data/raw/main/tensorflow/models/InceptionV1/classify_image_graph_def-with_shapes.pb",
"https://github.com/dmlc/web-data/raw/main/tensorflow/models/ResnetV2/resnet-20180601_resnet_v2_imagenet-shapes.pb": f"{BASE}/dmlc/web-data/raw/main/tensorflow/models/ResnetV2/resnet-20180601_resnet_v2_imagenet-shapes.pb",
"https://github.com/dmlc/web-data/raw/main/tensorflow/models/InceptionV1/elephant-299.jpg": f"{BASE}/dmlc/web-data/raw/main/tensorflow/models/InceptionV1/elephant-299.jpg",
"https://github.com/dmlc/web-data/raw/main/tensorflow/models/InceptionV1/imagenet_2012_challenge_label_map_proto.pbtxt": f"{BASE}/dmlc/web-data/raw/main/tensorflow/models/InceptionV1/imagenet_2012_challenge_label_map_proto.pbtxt",
"https://github.com/dmlc/web-data/raw/main/tensorflow/models/InceptionV1/imagenet_synset_to_human_label_map.txt": f"{BASE}/dmlc/web-data/raw/main/tensorflow/models/InceptionV1/imagenet_synset_to_human_label_map.txt",
"https://github.com/dmlc/web-data/raw/main/tensorflow/models/RNN/ptb/ptb_model_with_lstmblockcell.pb": f"{BASE}/dmlc/web-data/raw/main/tensorflow/models/RNN/ptb/ptb_model_with_lstmblockcell.pb",
"https://github.com/dmlc/web-data/raw/master/tensorflow/models/InceptionV1/elephant-299.jpg": f"{BASE}/dmlc/web-data/raw/master/tensorflow/models/InceptionV1/elephant-299.jpg",
"https://github.com/fernchen/CaffeModels/raw/master/resnet/ResNet-50-deploy.prototxt": f"{BASE}/fernchen/CaffeModels/raw/master/resnet/ResNet-50-deploy.prototxt",
"https://github.com/fernchen/CaffeModels/raw/master/resnet/ResNet-50-deploy.prototxt": f"{BASE}/fernchen/CaffeModels/raw/master/resnet/ResNet-50-deploy.prototxt",
"https://github.com/fernchen/CaffeModels/raw/master/resnet/ResNet-50-model.caffemodel": f"{BASE}/fernchen/CaffeModels/raw/master/resnet/ResNet-50-model.caffemodel",
"https://github.com/google/mediapipe/raw/v0.7.4/mediapipe/models/hand_landmark.tflite": f"{BASE}/google/mediapipe/raw/v0.7.4/mediapipe/models/hand_landmark.tflite",
"https://github.com/JonathanCMitchell/mobilenet_v2_keras/releases/download/v1.1/mobilenet_v2_weights_tf_dim_ordering_tf_kernels_0.5_224.h5": f"{BASE}/2022-10-05/mobilenet_v2_weights_tf_dim_ordering_tf_kernels_0.5_224.h5",
"https://github.com/onnx/models/raw/bd206494e8b6a27b25e5cf7199dbcdbfe9d05d1c/vision/classification/mnist/model/mnist-1.onnx": f"{BASE}/onnx/mnist-1.onnx",
"https://github.com/onnx/models/raw/bd206494e8b6a27b25e5cf7199dbcdbfe9d05d1c/vision/classification/resnet/model/resnet50-v2-7.onnx": f"{BASE}/onnx/models/raw/bd206494e8b6a27b25e5cf7199dbcdbfe9d05d1c/vision/classification/resnet/model/resnet50-v2-7.onnx",
"https://github.com/onnx/models/raw/main/vision/classification/mobilenet/model/mobilenetv2-7.onnx": f"{BASE}/onnx/models/raw/main/vision/classification/mobilenet/model/mobilenetv2-7.onnx",
"https://github.com/onnx/models/raw/main/vision/classification/resnet/model/resnet50-v2-7.onnx": f"{BASE}/2022-10-05/resnet50-v2-7.onnx",
"https://github.com/pjreddie/darknet/blob/master/cfg/alexnet.cfg?raw=true": f"{BASE}/pjreddie/darknet/blob/master/cfg/alexnet.cfg"
+ quote("?raw=true"),
"https://github.com/pjreddie/darknet/blob/master/cfg/extraction.cfg?raw=true": f"{BASE}/pjreddie/darknet/blob/master/cfg/extraction.cfg"
+ quote("?raw=true"),
"https://github.com/pjreddie/darknet/blob/master/cfg/resnet50.cfg?raw=true": f"{BASE}/pjreddie/darknet/blob/master/cfg/resnet50.cfg"
+ quote("?raw=true"),
"https://github.com/pjreddie/darknet/blob/master/cfg/resnext50.cfg?raw=true": f"{BASE}/pjreddie/darknet/blob/master/cfg/resnext50.cfg"
+ quote("?raw=true"),
"https://github.com/pjreddie/darknet/blob/master/cfg/yolov2.cfg?raw=true": f"{BASE}/pjreddie/darknet/blob/master/cfg/yolov2.cfg"
+ quote("?raw=true"),
"https://github.com/pjreddie/darknet/blob/master/cfg/yolov3-tiny.cfg?raw=true": f"{BASE}/2022-10-05/yolov3-tiny-raw.cfg",
"https://github.com/pjreddie/darknet/blob/master/cfg/yolov3.cfg?raw=true": f"{BASE}/pjreddie/darknet/blob/master/cfg/yolov3.cfg"
+ quote("?raw=true"),
"https://github.com/SebastianBoblestETAS/nn_models/blob/ce49c5de64889493161ca4194a20e0fd5eb707e6/lstm_1_in_3_out_2_ts_4.tflite?raw=true": f"{BASE}/SebastianBoblestETAS/nn_models/blob/ce49c5de64889493161ca4194a20e0fd5eb707e6/lstm_1_in_3_out_2_ts_4.tflite"
+ quote("?raw=true"),
"https://github.com/shicai/MobileNet-Caffe/blob/master/mobilenet_v2.caffemodel?raw=true": f"{BASE}/shicai/MobileNet-Caffe/blob/master/mobilenet_v2.caffemodel"
+ quote("?raw=true"),
"https://github.com/shicai/MobileNet-Caffe/raw/master/mobilenet_v2_deploy.prototxt": f"{BASE}/shicai/MobileNet-Caffe/raw/master/mobilenet_v2_deploy.prototxt",
"https://github.com/tensorflow/tflite-micro/raw/a56087ffa2703b4d5632f024a8a4c899815c31bb/tensorflow/lite/micro/examples/micro_speech/micro_speech.tflite": f"{BASE}/tensorflow/tflite-micro/raw/a56087ffa2703b4d5632f024a8a4c899815c31bb/tensorflow/lite/micro/examples/micro_speech/micro_speech.tflite",
"https://github.com/mlcommons/tiny/raw/v0.7/benchmark/training/visual_wake_words/trained_models/vww_96_int8.tflite": f"{BASE}/mlcommons/tiny/raw/v0.7/benchmark/training/visual_wake_words/trained_models/vww_96_int8.tflite",
"https://github.com/uwsampl/web-data/raw/main/vta/models/synset.txt": f"{BASE}/2022-10-05/synset.txt",
"https://homes.cs.washington.edu/~cyulin/media/gnn_model/gcn_cora.torch": f"{BASE}/gcn_cora.torch",
"https://homes.cs.washington.edu/~moreau/media/vta/cat.jpg": f"{BASE}/vta_cat.jpg",
"https://objects.githubusercontent.com/github-production-release-asset-2e65be/130932608/4b196a8a-4e2d-11e8-9a11-be3c41846711?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAIWNJYAX4CSVEH53A%2F20221004%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20221004T170456Z&X-Amz-Expires=300&X-Amz-Signature=0602b68e8864b9b01c9142eee22aed3543fe98a5482686eec33d98e2617a2295&X-Amz-SignedHeaders=host&actor_id=0&key_id=0&repo_id=130932608&response-content-disposition=attachment%3B%20filename%3Dmobilenet_v2_weights_tf_dim_ordering_tf_kernels_0.5_224.h5&response-content-type=application%2Foctet-stream": f"{BASE}/2022-10-05/aws-mobilenet_v2_weights_tf_dim_ordering_tf_kernels_0.5_224.h5",
"https://oneflow-public.oss-cn-beijing.aliyuncs.com/model_zoo/flowvision/classification/ResNet/resnet18.zip": f"{BASE}/oneflow/resnet18.zip",
"https://github.com/tlc-pack/web-data/raw/main/testdata/microTVM/model/sine_model.tflite": f"{BASE}/tlc-pack/web-data/testdata/microTVM/model/sine_model.tflite",
"https://pjreddie.com/media/files/yolov3-tiny.weights?raw=true": f"{BASE}/yolov3-tiny.weights",
"https://pjreddie.com/media/files/yolov3.weights": f"{BASE}/yolov3.weights",
"https://raw.githubusercontent.com/Cadene/pretrained-models.pytorch/master/data/imagenet_classes.txt": f"{BASE}/2022-10-05/imagenet_classes.txt",
"https://raw.githubusercontent.com/Cadene/pretrained-models.pytorch/master/data/imagenet_synsets.txt": f"{BASE}/2022-10-05/imagenet_synsets.txt",
"https://raw.githubusercontent.com/dmlc/mxnet.js/main/data/cat.png": f"{BASE}/dmlc/mxnet.js/main/data/cat.png",
"https://raw.githubusercontent.com/dmlc/web-data/main/darknet/cfg/yolov3.cfg": f"{BASE}/dmlc/web-data/main/darknet/cfg/yolov3.cfg",
"https://raw.githubusercontent.com/dmlc/web-data/main/darknet/data/arial.ttf": f"{BASE}/dmlc/web-data/main/darknet/data/arial.ttf",
"https://raw.githubusercontent.com/dmlc/web-data/main/darknet/data/coco.names": f"{BASE}/dmlc/web-data/main/darknet/data/coco.names",
"https://raw.githubusercontent.com/dmlc/web-data/main/darknet/data/dog.jpg": f"{BASE}/dmlc/web-data/main/darknet/data/dog.jpg",
"https://raw.githubusercontent.com/dmlc/web-data/main/darknet/data/person.jpg": f"{BASE}/dmlc/web-data/main/darknet/data/person.jpg",
"https://raw.githubusercontent.com/dmlc/web-data/main/darknet/lib/libdarknet2.0.so": f"{BASE}/dmlc/web-data/main/darknet/lib/libdarknet2.0.so",
"https://raw.githubusercontent.com/dmlc/web-data/main/gluoncv/detection/street_small.jpg": f"{BASE}/2022-10-05/small_street.jpg",
"https://raw.githubusercontent.com/dmlc/web-data/main/tensorflow/models/InceptionV1/classify_image_graph_def-with_shapes.pb": f"{BASE}/dmlc/web-data/main/tensorflow/models/InceptionV1/classify_image_graph_def-with_shapes.pb",
"https://raw.githubusercontent.com/dmlc/web-data/main/tensorflow/models/InceptionV1/elephant-299.jpg": f"{BASE}/dmlc/web-data/main/tensorflow/models/InceptionV1/elephant-299.jpg",
"https://raw.githubusercontent.com/dmlc/web-data/main/tensorflow/models/InceptionV1/imagenet_2012_challenge_label_map_proto.pbtxt": f"{BASE}/dmlc/web-data/main/tensorflow/models/InceptionV1/imagenet_2012_challenge_label_map_proto.pbtxt",
"https://raw.githubusercontent.com/dmlc/web-data/main/tensorflow/models/InceptionV1/imagenet_synset_to_human_label_map.txt": f"{BASE}/dmlc/web-data/main/tensorflow/models/InceptionV1/imagenet_synset_to_human_label_map.txt",
"https://raw.githubusercontent.com/dmlc/web-data/main/tensorflow/models/object_detection/ssd_mobilenet_v1_coco_2018_01_28.tgz": f"{BASE}/dmlc/web-data/main/tensorflow/models/object_detection/ssd_mobilenet_v1_coco_2018_01_28.tgz",
"https://raw.githubusercontent.com/dmlc/web-data/main/tensorflow/models/Quantized/inception_v1_quantized.tflite": f"{BASE}/dmlc/web-data/main/tensorflow/models/Quantized/inception_v1_quantized.tflite",
"https://raw.githubusercontent.com/dmlc/web-data/main/tensorflow/models/Quantized/mobilenet_v2_quantized.tflite": f"{BASE}/dmlc/web-data/main/tensorflow/models/Quantized/mobilenet_v2_quantized.tflite",
"https://raw.githubusercontent.com/dmlc/web-data/main/tensorflow/models/Quantized/resnet_50_quantized.tflite": f"{BASE}/dmlc/web-data/main/tensorflow/models/Quantized/resnet_50_quantized.tflite",
"https://raw.githubusercontent.com/dmlc/web-data/master/gluoncv/detection/street_small.jpg": f"{BASE}/2022-10-05/street_small.jpg",
"https://raw.githubusercontent.com/tensorflow/tensorflow/master/tensorflow/lite/java/demo/app/src/main/assets/labels_mobilenet_quant_v1_224.txt": f"{BASE}/2022-10-05/labels_mobilenet_quant_v1_224.txt",
"https://raw.githubusercontent.com/tlc-pack/tophub/main/tophub/arm_cpu_v0.08.log": f"{BASE}/tlc-pack/tophub/main/tophub/arm_cpu_v0.08.log",
"https://raw.githubusercontent.com/tlc-pack/tophub/main/tophub/cuda_v0.10.log": f"{BASE}/tlc-pack/tophub/main/tophub/cuda_v0.10.log",
"https://raw.githubusercontent.com/tlc-pack/tophub/main/tophub/llvm_v0.04.log": f"{BASE}/tlc-pack/tophub/main/tophub/llvm_v0.04.log",
"https://raw.githubusercontent.com/tlc-pack/tophub/main/tophub/mali_v0.06.log": f"{BASE}/2022-10-05/mali_v0.06.log",
"https://raw.githubusercontent.com/tlc-pack/tophub/main/tophub/opencl_v0.04.log": f"{BASE}/tlc-pack/tophub/main/tophub/opencl_v0.04.log",
"https://raw.githubusercontent.com/tlc-pack/tophub/main/tophub/vta_v0.10.log": f"{BASE}/tlc-pack/tophub/main/tophub/vta_v0.10.log",
"https://s3.amazonaws.com/model-server/inputs/kitten.jpg": f"{BASE}/2022-10-05/kitten.jpg",
"https://s3.amazonaws.com/onnx-model-zoo/synset.txt": f"{BASE}/2022-10-05/synset-s3.txt",
"https://storage.googleapis.com/download.tensorflow.org/models/inception_v1_224_quant_20181026.tgz": f"{BASE}/download.tensorflow.org/models/inception_v1_224_quant_20181026.tgz",
"https://storage.googleapis.com/download.tensorflow.org/models/inception_v4_299_quant_20181026.tgz": f"{BASE}/download.tensorflow.org/models/inception_v4_299_quant_20181026.tgz",
"https://storage.googleapis.com/download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_0.25_128.tgz": f"{BASE}/download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_0.25_128.tgz",
"https://storage.googleapis.com/download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224_quant.tgz": f"{BASE}/download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224_quant.tgz",
"https://storage.googleapis.com/download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224.tgz": f"{BASE}/download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224.tgz",
"https://storage.googleapis.com/download.tensorflow.org/models/tflite_11_05_08/inception_v3_quant.tgz": f"{BASE}/download.tensorflow.org/models/tflite_11_05_08/inception_v3_quant.tgz",
"https://storage.googleapis.com/download.tensorflow.org/models/tflite_11_05_08/mobilenet_v2_1.0_224_quant.tgz": f"{BASE}/2022-10-05/mobilenet_v2_1.0_224_quant.tgz",
"https://storage.googleapis.com/download.tensorflow.org/models/tflite/coco_ssd_mobilenet_v1_1.0_quant_2018_06_29.zip": f"{BASE}/download.tensorflow.org/models/tflite/coco_ssd_mobilenet_v1_1.0_quant_2018_06_29.zip",
"https://storage.googleapis.com/download.tensorflow.org/models/tflite/digit_classifier/mnist.tflite": f"{BASE}/download.tensorflow.org/models/tflite/digit_classifier/mnist.tflite",
"https://storage.googleapis.com/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/inception_v3_2018_04_27.tgz": f"{BASE}/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/inception_v3_2018_04_27.tgz",
"https://storage.googleapis.com/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/inception_v4_2018_04_27.tgz": f"{BASE}/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/inception_v4_2018_04_27.tgz",
"https://storage.googleapis.com/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/squeezenet_2018_04_27.tgz": f"{BASE}/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/squeezenet_2018_04_27.tgz",
"https://storage.googleapis.com/fast-convnets/tflite-models/mbv1_140_90_12b4_720.tflite": f"{BASE}/fast-convnets/tflite-models/mbv1_140_90_12b4_720.tflite",
"https://storage.googleapis.com/fast-convnets/tflite-models/mbv2_200_85_11-16b2_744.tflite": f"{BASE}/fast-convnets/tflite-models/mbv2_200_85_11-16b2_744.tflite",
"https://storage.googleapis.com/mobilenet_v2/checkpoints/mobilenet_v2_1.4_224.tgz": f"{BASE}/mobilenet_v2/checkpoints/mobilenet_v2_1.4_224.tgz",
"https://storage.googleapis.com/mobilenet_v3/checkpoints/v3-large_224_1.0_float.tgz": f"{BASE}/mobilenet_v3/checkpoints/v3-large_224_1.0_float.tgz",
"https://storage.googleapis.com/tensorflow/keras-applications/mobilenet/mobilenet_1_0_224_tf_no_top.h5": f"{BASE}/tensorflow/keras-applications/mobilenet/mobilenet_1_0_224_tf_no_top.h5",
"https://storage.googleapis.com/tensorflow/keras-applications/mobilenet/mobilenet_1_0_224_tf.h5": f"{BASE}/tensorflow/keras-applications/mobilenet/mobilenet_1_0_224_tf.h5",
"https://storage.googleapis.com/tensorflow/keras-applications/mobilenet/mobilenet_2_5_128_tf.h5": f"{BASE}/2022-10-05/mobilenet_2_5_128_tf.h5",
"https://storage.googleapis.com/tensorflow/keras-applications/resnet/resnet50_weights_tf_dim_ordering_tf_kernels.h5": f"{BASE}/tensorflow/keras-applications/resnet/resnet50_weights_tf_dim_ordering_tf_kernels.h5",
"https://storage.googleapis.com/tensorflow/keras-applications/vgg16/vgg16_weights_tf_dim_ordering_tf_kernels.h5": f"{BASE}/tensorflow/keras-applications/vgg16/vgg16_weights_tf_dim_ordering_tf_kernels.h5",
"https://storage.googleapis.com/tensorflow/keras-applications/xception/xception_weights_tf_dim_ordering_tf_kernels.h5": f"{BASE}/tensorflow/keras-applications/xception/xception_weights_tf_dim_ordering_tf_kernels.h5",
"https://storage.googleapis.com/tensorflow/tf-keras-datasets/mnist.npz": f"{BASE}/tensorflow/tf-keras-datasets/mnist.npz",
"https://github.com/mlcommons/tiny/raw/bceb91c5ad2e2deb295547d81505721d3a87d578/benchmark/training/visual_wake_words/trained_models/vww_96_int8.tflite": f"{BASE}/mlcommons/tiny/benchmark/training/visual_wake_words/trained_models/vww_96_int8.tflite",
"https://github.com/mlcommons/tiny/raw/bceb91c5ad2e2deb295547d81505721d3a87d578/benchmark/training/keyword_spotting/trained_models/kws_ref_model.tflite": f"{BASE}/mlcommons/tiny/benchmark/training/keyword_spotting/trained_models/kws_ref_model.tflite",
"https://github.com/mlcommons/tiny/raw/bceb91c5ad2e2deb295547d81505721d3a87d578/benchmark/training/anomaly_detection/trained_models/ToyCar/baseline_tf23/model/model_ToyCar_quant_fullint_micro.tflite": f"{BASE}/mlcommons/tiny/benchmark/training/anomaly_detection/trained_models/ToyCar/baseline_tf23/model/model_ToyCar_quant_fullint_micro.tflite",
"https://github.com/mlcommons/tiny/raw/bceb91c5ad2e2deb295547d81505721d3a87d578/benchmark/training/image_classification/trained_models/pretrainedResnet_quant.tflite": f"{BASE}/mlcommons/tiny/benchmark/training/image_classification/trained_models/pretrainedResnet_quant.tflite",
"https://github.com/tlc-pack/web-data/raw/main/testdata/microTVM/data/keyword_spotting_int8_6.pyc.npy": f"{BASE}/tlc-pack/web-data/raw/main/testdata/microTVM/data/keyword_spotting_int8_6.pyc.npy",
"https://github.com/tlc-pack/web-data/raw/main/testdata/microTVM/data/visual_wake_word_int8_1.npy": f"{BASE}/tlc-pack/web-data/raw/main/testdata/microTVM/data/visual_wake_word_int8_1.npy",
"https://github.com/tlc-pack/web-data/raw/main/testdata/microTVM/data/anomaly_detection_normal_id_01.npy": f"{BASE}/tlc-pack/web-data/raw/main/testdata/microTVM/data/anomaly_detection_normal_id_01.npy",
"https://github.com/tlc-pack/web-data/raw/main/testdata/microTVM/data/image_classification_int8_0.npy": f"{BASE}/tlc-pack/web-data/raw/main/testdata/microTVM/data/image_classification_int8_0.npy",
"https://github.com/tlc-pack/web-data/raw/main/testdata/microTVM/data/vww_sample_person.jpg": f"{BASE}/tlc-pack/web-data/testdata/microTVM/data/vww_sample_person.jpg",
"https://github.com/tlc-pack/web-data/raw/main/testdata/microTVM/data/vww_sample_not_person.jpg": f"{BASE}/tlc-pack/web-data/testdata/microTVM/data/vww_sample_not_person.jpg",
"https://github.com/tensorflow/tflite-micro/raw/de8f61a074460e1fa5227d875c95aa303be01240/tensorflow/lite/micro/models/keyword_scrambled.tflite": f"{BASE}/models/tflite/keyword_scrambled_8bit.tflite",
}
class TvmRequestHook(urllib.request.Request):
    """Drop-in replacement for ``urllib.request.Request`` used in CI.

    Rewrites known external URLs to their mirrored copies in the TVM CI S3
    bucket (``URL_MAP``) and rejects any URL that is neither mapped nor
    already served from the bucket, so CI never depends on flaky
    third-party hosts.
    """

    def __init__(self, url, *args, **kwargs):
        # NOTE(review): LOGGER is configured by init(); this class assumes
        # init() has been called before any request is constructed.
        LOGGER.info(f"Caught access to {url}")
        url = url.strip()
        if url.startswith(BASE):
            # Already points at the CI mirror; pass it through unchanged.
            # (The original code fell through to URL_MAP[url] here, which
            # raised KeyError for mirror URLs that have no mapping entry.)
            new_url = url
        elif url in URL_MAP:
            new_url = URL_MAP[url]
        else:
            # Dis-allow any accesses that aren't going through S3
            msg = (
                f"Uncaught URL found in CI: {url}. "
                "A committer must upload the relevant file to S3 via "
                "https://github.com/apache/tvm/actions/workflows/upload_ci_resource.yml "
                "and add it to the mapping in tests/scripts/request_hook/request_hook.py"
            )
            raise RuntimeError(msg)
        LOGGER.info(f"Mapped URL {url} to {new_url}")
        super().__init__(new_url, *args, **kwargs)
def init():
    """Install the request hook and a file logger for redirected URLs.

    Monkey-patches ``urllib.request.Request`` so every request constructed
    afterwards goes through :class:`TvmRequestHook`, and records each
    redirect in ``redirected_urls.log``.
    """
    global LOGGER

    urllib.request.Request = TvmRequestHook

    hook_logger = logging.getLogger("tvm_request_hook")
    hook_logger.setLevel(logging.DEBUG)
    file_handler = logging.FileHandler("redirected_urls.log")
    file_handler.setLevel(logging.DEBUG)
    hook_logger.addHandler(file_handler)
    LOGGER = hook_logger
| 29,189 | 117.658537 | 683 | py |
tvm | tvm-main/tests/scripts/release/make_notes.py | #!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import argparse
import pickle
from pathlib import Path
import csv
import sys
import re
from collections import defaultdict
REPO_ROOT = Path(__file__).resolve().parent.parent.parent.parent
sys.path.append(str(REPO_ROOT / "tests" / "scripts"))
sys.path.append(str(REPO_ROOT / "tests" / "scripts" / "github"))
sys.path.append(str(REPO_ROOT / "tests" / "scripts" / "jenkins"))
# Tag dictionary used to create a mapping relation to categorize PRs owning same tag.
TAG_DICT = {
    "metaschedule": "MetaSchedule",
    "cuda": "cuda & cutlass & tensorrt",
    "cutlass": "cuda & cutlass & tensorrt",
    "tensorrt": "cuda & cutlass & tensorrt",
    "ethosn": "Ethosn",
    "hexagon": "Hexagon",
    "metal": "Metal",
    "vulkan": "Vulkan",
    "cmsis-nn": "CMSIS-NN",
    "clml": "OpenCL & CLML",
    "opencl": "OpenCL & CLML",
    "openclml": "OpenCL & CLML",
    "adreno": "Adreno",
    "acl": "ArmComputeLibrary",
    "rocm": "ROCm",
    "crt": "CRT",
    # Typo fix: category heading was "micoNPU" in the generated notes.
    "micronpu": "microNPU",
    "microtvm": "microTVM",
    "web": "web",
    "wasm": "web",
    "runtime": "Runtime",
    "aot": "AOT",
    "arith": "Arith",
    "byoc": "BYOC",
    "community": "Community",
    "tensorir": "TIR",
    "tir": "TIR",
    "tensorflow": "Frontend",
    "tflite": "Frontend",
    "paddle": "Frontend",
    "oneflow": "Frontend",
    "pytorch": "Frontend",
    "torch": "Frontend",
    "keras": "Frontend",
    "frontend": "Frontend",
    "onnx": "Frontend",
    "roofline": "Misc",
    "rpc": "Misc",
    "transform": "Misc",
    "tophub": "Misc",
    "vta": "Misc",
    "ux": "Misc",
    "APP": "Misc",
    "docker": "Docker",
    "doc": "Docs",
    "docs": "Docs",
    "llvm": "LLVM",
    "sve": "LLVM",
    "ci": "CI",
    "test": "CI",
    "tests": "CI",
    "testing": "CI",
    "unittest": "CI",
    "bugfix": "BugFix",
    "fix": "BugFix",
    "bug": "BugFix",
    "hotfix": "BugFix",
    "relay": "Relay",
    "qnn": "Relay",
    "quantization": "Relay",
    "tvmscript": "TVMScript",
    "tvmscripts": "TVMScript",
    "tvmc": "TVMC",
    "topi": "TOPI",
}
def strip_header(title: str, header: str) -> str:
    """Remove the first case-insensitive occurrence of *header* from *title*.

    The text following the removed header is left-stripped (via ``strip`` on
    the tail); the title is returned unchanged when the header is absent.
    """
    idx = title.lower().find(header.lower())
    if idx < 0:
        return title
    tail = title[idx + len(header) :].strip()
    return title[:idx] + tail
def sprint(*args):
    """Print to stderr (stdout is reserved for the generated release notes)."""
    sys.stderr.write(" ".join(str(arg) for arg in args) + "\n")
def create_pr_dict(cache: Path):
    """Load the pickled GitHub query output and index PRs by number.

    Only entries associated with exactly one pull request are kept.
    """
    with open(cache, "rb") as fh:
        entries = pickle.load(fh)
    # Debug aid: show one sample entry on stderr.
    sprint(entries[1])
    pr_by_number = {}
    for entry in entries:
        nodes = entry["associatedPullRequests"]["nodes"]
        if len(nodes) == 1:
            (pr,) = nodes
            pr_by_number[pr["number"]] = pr
    return pr_by_number
def categorize_csv_file(csv_path: str):
    """Bucket triaged PRs as ``{category: {subject: [pr_number, ...]}}``.

    Each row's slash-separated title tags are mapped through TAG_DICT; the
    first category other than "Misc" wins, falling back to "Misc".
    """
    headings = defaultdict(lambda: defaultdict(list))
    sprint("Opening CSV")
    fallback = {"Misc"}
    with open(csv_path) as fh:
        reader = csv.DictReader(fh)
        accepted = 0
        for row in reader:
            tags = row["pr_title_tags"].split("/")
            if not tags:
                tags = ["misc"]
            # Map every tag to a category, then drop the generic bucket.
            mapped = {TAG_DICT.get(tag.lower(), "Misc") for tag in tags} - fallback
            category = list(mapped)[0] if mapped else "Misc"
            subject = row["subject"].strip()
            pr_number = row["url"].split("/")[-1]
            if category == "" or subject == "":
                sprint(f"Skipping {accepted}th pr with number: {pr_number}, row: {row}")
                continue
            headings[category][subject].append(pr_number)
            accepted += 1
    return headings
if __name__ == "__main__":
    description = "List out commits with attached PRs since a certain commit"
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument(
        "--notes", required=True, help="csv or markdown file of categorized PRs in order"
    )
    parser.add_argument(
        "--is-pr-with-link",
        required=False,
        help="exported pr number with hyper-link for forum format",
    )
    parser.add_argument(
        "--convert-with-link",
        required=False,
        help="make PR number in markdown file owning hyper-link",
    )
    args = parser.parse_args()
    if args.convert_with_link:
        # NOTE(review): the input file name is hard-coded; presumably it should
        # come from the command line — confirm before changing.
        with open("release_note_0.13.0.md", "r") as f:
            lines = f.readlines()
        formated = []
        for line in lines:
            # Turn the first bare "#1234" on each line into a markdown link.
            match = re.search(r"#\d+", line)
            if match:
                pr_num_str = match.group()
                pr_num_int = pr_num_str.replace("#", "")
                pr_number_str = f"[#{pr_num_int}](https://github.com/apache/tvm/pull/{pr_num_int})"
                line = line.replace(pr_num_str, pr_number_str)
            formated.append(line)
        result = "".join(formated)
        print(result)
        exit(0)
    # 1. Create PR dict from the cache file produced by gather_prs.py
    cache = Path("out.pkl")
    if not cache.exists():
        sprint("run gather_prs.py first to generate out.pkl")
        exit(1)
    pr_dict = create_pr_dict(cache)
    # 2. Categorize csv file as dict by category and subject (sub-category).
    # Bug fix: argparse stores the "--notes" flag as args.notes; the previous
    # code accessed the non-existent args.notes_csv and raised AttributeError.
    headings = categorize_csv_file(args.notes)
    # 3. Summarize and sort all categories, forcing "Misc" to sort last.
    def sorter(x):
        return 10 if x == "Misc" else 0

    keys = sorted(sorted(headings.keys()), key=sorter)
    # 4. Generate markdown by looping over the categorized PR dict
    def pr_title(number, heading):
        """Look up the PR title for *number* and strip its "[heading]" tag."""
        try:
            title = pr_dict[int(number)]["title"]
            title = strip_header(title, heading)
        except (KeyError, ValueError):
            # Narrowed from a bare `except`: a missing/garbled PR number means
            # the pickle cache and the csv file are out of sync.
            sprint("The out.pkl file does not match the csv file.")
            exit(1)
        return title

    output = ""
    for key in keys:
        if key == "DO NOT INCLUDE":
            continue
        value = dict(headings[key])
        output += f"### {key}\n"
        # PRs without a meaningful subject come first, one bullet per PR.
        misc = value.get("n/a", []) + value.get("Misc", [])
        for pr_number in misc:
            if args.is_pr_with_link:
                pr_number_str = f"[#{pr_number}](https://github.com/apache/tvm/pull/{pr_number})"
            else:
                pr_number_str = f"#{pr_number}"
            output += f" * {pr_number_str} - {pr_title(pr_number, '[' + key + ']')}\n"
        # Remaining subjects become one bullet each listing all their PRs.
        for subheading, pr_numbers in value.items():
            if subheading in ("DO NOT INCLUDE", "n/a", "Misc"):
                continue
            output += f" * {subheading} - " + ", ".join(f"#{n}" for n in pr_numbers) + "\n"
        output += "\n"
    # 5. Print markdown-format output to stdout
    print(output)
| 7,805 | 29.138996 | 99 | py |
tvm | tvm-main/vta/tutorials/frontend/deploy_classification.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Deploy Pretrained Vision Model from MxNet on VTA
================================================
**Author**: `Thierry Moreau <https://homes.cs.washington.edu/~moreau/>`_
This tutorial provides an end-to-end demo on how to run ImageNet classification
inference onto the VTA accelerator design to perform ImageNet classification tasks.
It showcases Relay as a front end compiler that can perform quantization (VTA
only supports int8/32 inference) as well as graph packing (in order to enable
tensorization in the core) to massage the compute graph for the hardware target.
"""
######################################################################
# Install dependencies
# --------------------
# To use the autotvm package in tvm, we need to install some extra dependencies.
# (change "3" to "2" if you use python2):
#
# .. code-block:: bash
#
# pip3 install --user mxnet requests "Pillow<7"
#
# Now return to the python code. Import packages.
from __future__ import absolute_import, print_function
import argparse, json, os, requests, sys, time
from io import BytesIO
from os.path import join, isfile
from PIL import Image
from mxnet.gluon.model_zoo import vision
import numpy as np
from matplotlib import pyplot as plt
import tvm
from tvm import te
from tvm import rpc, autotvm, relay
from tvm.contrib import graph_executor, utils, download
from tvm.contrib.debugger import debug_executor
from tvm.relay import transform
import vta
from vta.testing import simulator
from vta.top import graph_pack
# Make sure that TVM was compiled with RPC=1
assert tvm.runtime.enabled("rpc")
######################################################################
# Define the platform and model targets
# -------------------------------------
# Execute on CPU vs. VTA, and define the model.
# Load VTA parameters from the 3rdparty/vta-hw/config/vta_config.json file
env = vta.get_env()
# Set ``device=arm_cpu`` to run inference on the CPU
# or ``device=vta`` to run inference on the FPGA.
device = "vta"
# env.target is the VTA accelerator target; env.target_vta_cpu is the
# host-CPU target used when the whole model runs off the accelerator.
target = env.target if device == "vta" else env.target_vta_cpu
# Dictionary lookup for when to start/end bit packing
pack_dict = {
    "resnet18_v1": ["nn.max_pool2d", "nn.global_avg_pool2d"],
    "resnet34_v1": ["nn.max_pool2d", "nn.global_avg_pool2d"],
    "resnet18_v2": ["nn.max_pool2d", "nn.global_avg_pool2d"],
    "resnet34_v2": ["nn.max_pool2d", "nn.global_avg_pool2d"],
    "resnet50_v2": ["nn.max_pool2d", "nn.global_avg_pool2d"],
    "resnet101_v2": ["nn.max_pool2d", "nn.global_avg_pool2d"],
}
# Name of Gluon model to compile
# The ``start_pack`` and ``stop_pack`` labels indicate where
# to start and end the graph packing relay pass: in other words
# where to start and finish offloading to VTA.
model = "resnet18_v1"
# Guard against model names we have no packing annotations for.
assert model in pack_dict
######################################################################
# Obtain an execution remote
# --------------------------
# When target is 'pynq', reconfigure FPGA and runtime.
# Otherwise, if target is 'sim', execute locally.
if env.TARGET not in ["sim", "tsim", "intelfocl"]:
    # Get remote from tracker node if environment variable is set.
    # To set up the tracker, you'll need to follow the "Auto-tuning
    # a convolutional network for VTA" tutorial.
    tracker_host = os.environ.get("TVM_TRACKER_HOST", None)
    tracker_port = os.environ.get("TVM_TRACKER_PORT", None)
    # Otherwise if you have a device you want to program directly from
    # the host, make sure you've set the variables below to the IP of
    # your board.
    device_host = os.environ.get("VTA_RPC_HOST", "192.168.2.99")
    device_port = os.environ.get("VTA_RPC_PORT", "9091")
    if not tracker_host or not tracker_port:
        # No tracker configured: connect straight to the board's RPC server.
        remote = rpc.connect(device_host, int(device_port))
    else:
        # Ask the tracker for a free device of kind env.TARGET.
        remote = autotvm.measure.request_remote(
            env.TARGET, tracker_host, int(tracker_port), timeout=10000
        )
    # Reconfigure the JIT runtime and FPGA.
    # You can program the FPGA with your own custom bitstream
    # by passing the path to the bitstream file instead of None.
    reconfig_start = time.time()
    vta.reconfig_runtime(remote)
    vta.program_fpga(remote, bitstream=None)
    reconfig_time = time.time() - reconfig_start
    print("Reconfigured FPGA and RPC runtime in {0:.2f}s!".format(reconfig_time))
# In simulation mode, host the RPC server locally.
else:
    remote = rpc.LocalSession()
    if env.TARGET in ["intelfocl"]:
        # program intelfocl aocx
        vta.program_fpga(remote, bitstream="vta.bitstream")
# Get execution context from remote
ctx = remote.ext_dev(0) if device == "vta" else remote.cpu(0)
######################################################################
# Build the inference graph executor
# ----------------------------------
# Grab vision model from Gluon model zoo and compile with Relay.
# The compilation steps are:
#
# 1. Front end translation from MxNet into Relay module.
# 2. Apply 8-bit quantization: here we skip the first conv layer,
# and dense layer which will both be executed in fp32 on the CPU.
# 3. Perform graph packing to alter the data layout for tensorization.
# 4. Perform constant folding to reduce number of operators (e.g. eliminate batch norm multiply).
# 5. Perform relay build to object file.
# 6. Load the object file onto remote (FPGA device).
# 7. Generate graph executor, `m`.
#
# Load pre-configured AutoTVM schedules
with autotvm.tophub.context(target):
    # Populate the shape and data type dictionary for ImageNet classifier input
    dtype_dict = {"data": "float32"}
    shape_dict = {"data": (env.BATCH, 3, 224, 224)}
    # Get off the shelf gluon model, and convert to relay
    gluon_model = vision.get_model(model, pretrained=True)
    # Measure build start time
    build_start = time.time()
    # Start front end compilation
    mod, params = relay.frontend.from_mxnet(gluon_model, shape_dict)
    # Update shape and type dictionary
    shape_dict.update({k: v.shape for k, v in params.items()})
    dtype_dict.update({k: str(v.dtype) for k, v in params.items()})
    if target.device_name == "vta":
        # Perform quantization in Relay
        # Note: We set opt_level to 3 in order to fold batch norm
        with tvm.transform.PassContext(opt_level=3):
            with relay.quantize.qconfig(global_scale=8.0, skip_conv_layers=[0]):
                mod = relay.quantize.quantize(mod, params=params)
        # Perform graph packing and constant folding for VTA target
        assert env.BLOCK_IN == env.BLOCK_OUT
        # do device annotation if target is intelfocl or sim
        relay_prog = graph_pack(
            mod["main"],
            env.BATCH,
            env.BLOCK_OUT,
            env.WGT_WIDTH,
            start_name=pack_dict[model][0],
            stop_name=pack_dict[model][1],
            device_annot=(env.TARGET == "intelfocl"),
        )
    else:
        # Running entirely on CPU: keep the unpacked fp32 module.
        relay_prog = mod["main"]
    # Compile Relay program with AlterOpLayout disabled
    if target.device_name != "vta":
        with tvm.transform.PassContext(opt_level=3, disabled_pass={"AlterOpLayout"}):
            graph, lib, params = relay.build(
                relay_prog, target=tvm.target.Target(target, host=env.target_host), params=params
            )
    else:
        if env.TARGET == "intelfocl":
            # multiple targets to run both on cpu and vta
            target = {"cpu": env.target_vta_cpu, "ext_dev": target}
        with vta.build_config(
            opt_level=3, disabled_pass={"AlterOpLayout", "tir.CommonSubexprElimTIR"}
        ):
            graph, lib, params = relay.build(
                relay_prog, target=tvm.target.Target(target, host=env.target_host), params=params
            )
    # Measure Relay build time
    build_time = time.time() - build_start
    print(model + " inference graph built in {0:.2f}s!".format(build_time))
    # Send the inference library over to the remote RPC server
    temp = utils.tempdir()
    lib.export_library(temp.relpath("graphlib.tar"))
    remote.upload(temp.relpath("graphlib.tar"))
    lib = remote.load_module("graphlib.tar")
    if env.TARGET == "intelfocl":
        # On intelfocl the graph spans both the CPU and the accelerator.
        ctxes = [remote.ext_dev(0), remote.cpu(0)]
        m = graph_executor.create(graph, lib, ctxes)
    else:
        # Graph runtime
        m = graph_executor.create(graph, lib, ctx)
######################################################################
# Perform image classification inference
# --------------------------------------
# We run classification on an image sample from ImageNet
# We just need to download the categories files, `synset.txt`
# and an input test image.
# Download ImageNet categories
categ_url = "https://github.com/uwsampl/web-data/raw/main/vta/models/"
categ_fn = "synset.txt"
download.download(join(categ_url, categ_fn), categ_fn)
# SECURITY NOTE(review): eval() executes arbitrary code from the downloaded
# file; ast.literal_eval would parse the category dict literal safely.
synset = eval(open(categ_fn).read())
# Download test image
image_url = "https://homes.cs.washington.edu/~moreau/media/vta/cat.jpg"
# NOTE: the JPEG is saved under a .png name; PIL detects the format from the
# file contents, so this still loads correctly.
image_fn = "cat.png"
download.download(image_url, image_fn)
# Prepare test image for inference
image = Image.open(image_fn).resize((224, 224))
plt.imshow(image)
plt.show()
# Normalize with per-channel mean and std-dev constants.
image = np.array(image) - np.array([123.0, 117.0, 104.0])
image /= np.array([58.395, 57.12, 57.375])
# HWC -> CHW, add a batch axis, and tile to the VTA batch size.
image = image.transpose((2, 0, 1))
image = image[np.newaxis, :]
image = np.repeat(image, env.BATCH, axis=0)
# Set the network parameters and inputs
m.set_input(**params)
m.set_input("data", image)
# Perform inference and gather execution statistics
# More on: :py:method:`tvm.runtime.Module.time_evaluator`
num = 4 # number of times we run module for a single measurement
rep = 3 # number of measurements (we derive std dev from this)
timer = m.module.time_evaluator("run", ctx, number=num, repeat=rep)
if env.TARGET in ["sim", "tsim"]:
    # Simulator path: report cycle-level statistics instead of wall time.
    simulator.clear_stats()
    timer()
    sim_stats = simulator.stats()
    print("\nExecution statistics:")
    for k, v in sim_stats.items():
        # Since we execute the workload many times, we need to normalize stats
        # Note that there is always one warm up run
        # Therefore we divide the overall stats by (num * rep + 1)
        print("\t{:<16}: {:>16}".format(k, v // (num * rep + 1)))
else:
    # Hardware path: report wall-clock mean and std-dev in milliseconds.
    tcost = timer()
    std = np.std(tcost.results) * 1000
    mean = tcost.mean * 1000
    print("\nPerformed inference in %.2fms (std = %.2f) for %d samples" % (mean, std, env.BATCH))
    print("Average per sample inference time: %.2fms" % (mean / env.BATCH))
# Get classification results
tvm_output = m.get_output(0, tvm.nd.empty((env.BATCH, 1000), "float32", remote.cpu(0)))
for b in range(env.BATCH):
    top_categories = np.argsort(tvm_output.numpy()[b])
    # Report top-5 classification results
    print("\n{} prediction for sample {}".format(model, b))
    print("\t#1:", synset[top_categories[-1]])
    print("\t#2:", synset[top_categories[-2]])
    print("\t#3:", synset[top_categories[-3]])
    print("\t#4:", synset[top_categories[-4]])
    print("\t#5:", synset[top_categories[-5]])
    # This just checks that one of the 5 top categories
    # is one variety of cat; this is by no means an accurate
    # assessment of how quantization affects classification
    # accuracy but is meant to catch changes to the
    # quantization pass that would affect accuracy in the CI.
    cat_detected = False
    for k in top_categories[-5:]:
        if "cat" in synset[k]:
            cat_detected = True
    assert cat_detected
| 12,219 | 38.546926 | 97 | py |
tvm | tvm-main/vta/tutorials/autotvm/tune_alu_vta.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Auto-tuning a ALU fused op on VTA
---------------------------------
"""
import os
from mxnet.gluon.model_zoo import vision
import numpy as np
from PIL import Image
from tvm import topi
import tvm
from tvm import te
from tvm import rpc, autotvm, relay
from tvm.contrib import download
from tvm.autotvm.measure.measure_methods import request_remote
from tvm.autotvm.tuner import XGBTuner, GATuner, RandomTuner, GridSearchTuner
from tvm.autotvm import record
import vta
from vta.testing import simulator
from vta.top import graph_pack
import copy
#################################################################
# Compile network
# ---------------
# Perform vta-specific compilation with Relay from a Gluon model
def compile_network(env, target, model, start_pack, stop_pack):
    """Quantize, (optionally) graph-pack, and return a Relay program for VTA.

    Parameters
    ----------
    env : vta.Environment
        VTA hardware configuration (batch/block sizes).
    target : tvm.target.Target
        Compilation target; packing is applied only when device_name == "vta".
    model : str
        Gluon model-zoo name, e.g. "resnet50_v2".
    start_pack, stop_pack : str
        Relay op names delimiting the region offloaded to VTA.

    Returns
    -------
    (relay.Function, dict)
        The (possibly packed) main function and the model parameters.
    """
    # Populate the shape and data type dictionary
    dtype_dict = {"data": "float32"}
    shape_dict = {"data": (env.BATCH, 3, 224, 224)}
    # Get off the shelf gluon model, and convert to relay
    gluon_model = vision.get_model(model, pretrained=True)
    mod, params = relay.frontend.from_mxnet(gluon_model, shape_dict)
    # Update shape and type dictionary
    shape_dict.update({k: v.shape for k, v in params.items()})
    dtype_dict.update({k: str(v.dtype) for k, v in params.items()})
    # Perform quantization in Relay (opt_level=3 folds batch norm).
    # Consistency fix: use tvm.transform.PassContext like the sibling VTA
    # tutorials; relay.build_config is the deprecated spelling.
    with tvm.transform.PassContext(opt_level=3):
        with relay.quantize.qconfig(global_scale=8.0, skip_conv_layers=[0]):
            mod = relay.quantize.quantize(mod, params=params)
    # Perform graph packing and constant folding for VTA target
    if target.device_name == "vta":
        assert env.BLOCK_IN == env.BLOCK_OUT
        relay_prog = graph_pack(
            mod["main"],
            env.BATCH,
            env.BLOCK_OUT,
            env.WGT_WIDTH,
            start_name=start_pack,
            stop_name=stop_pack,
        )
    else:
        # Bug fix: relay_prog was left unbound on the non-VTA path, which
        # made the return below raise UnboundLocalError.
        relay_prog = mod["main"]
    return relay_prog, params
###########################################
# Set Tuning Options
# ------------------
# Before tuning, we should apply some configurations.
# Here we use a Pynq-Z1 board as an example.
# Tracker host and port can be set by your environment
# Tracker address falls back to a local default when the env vars are unset.
tracker_host = os.environ.get("TVM_TRACKER_HOST", "0.0.0.0")
tracker_port = int(os.environ.get("TVM_TRACKER_PORT", 9190))
# Load VTA parameters from the vta/config/vta_config.json file
env = vta.get_env()
# This target is used for cross compilation. You can query it by :code:`gcc -v` on your device.
# Set ``device=arm_cpu`` to run inference on the CPU
# or ``device=vta`` to run inference on the FPGA.
device = "vta"
target = env.target if device == "vta" else env.target_vta_cpu
# Name of Gluon model to compile
# The ``start_pack`` and ``stop_pack`` labels indicate where
# to start and end the graph packing relay pass: in other words
# where to start and finish offloading to VTA.
network = "resnet50_v2"
start_pack = "nn.max_pool2d"
stop_pack = "nn.global_avg_pool2d"
# Tuning option: measurements run remotely via the RPC tracker.
log_file = "%s.alu.%s.log" % (device, network)
tuning_option = {
    "log_filename": log_file,
    "tuner": "random",
    "n_trial": 1000,
    "early_stopping": None,
    "measure_option": autotvm.measure_option(
        builder=autotvm.LocalBuilder(n_parallel=1),
        runner=autotvm.RPCRunner(
            env.TARGET,
            host=tracker_host,
            port=tracker_port,
            number=5,
            timeout=60,
            # check_correctness=True, # TODO: re-enable when check_correctness works again.
        ),
    ),
}
def log_to_file(file_out, protocol="json"):
    """Log the tuning records into file.
    The rows of the log are stored in the format of autotvm.record.encode.
    for lhs == rhs, we add an extra rhs = [] record
    Parameters
    ----------
    file_out : str
        The file to log to.
    protocol: str, optional
        The log protocol. Can be 'json' or 'pickle'
    Returns
    -------
    callback : callable
        Callback function to do the logging.
    """
    def _callback(_, inputs, results):
        # Append each measured (input, result) pair to the log file.
        with open(file_out, "a") as f:
            for inp, result in zip(inputs, results):
                f.write(record.encode(inp, result, protocol) + "\n")
                # we only consider task with same lhs and rhs
                if inp.task.args[0] == inp.task.args[1]:
                    # Duplicate the record with an empty rhs shape so the
                    # lhs==rhs variant of the workload can also be matched.
                    args = list(inp.task.args)
                    args[1] = (args[0][0], (), args[0][2])
                    inp_copy = copy.deepcopy(inp)
                    inp_copy.task.args = tuple(args)
                    f.write(record.encode(inp_copy, result, protocol) + "\n")
    return _callback
def tune_tasks(
    tasks,
    measure_option,
    tuner="xgb",
    n_trial=10,
    early_stopping=None,
    log_filename="tuning.log",
    use_transfer_learning=True,
):
    """Tune a list of autotvm tasks and keep only the best records.

    Results accumulate in ``<log_filename>.tmp`` while tuning; the best
    entry per workload is distilled into ``log_filename`` at the end.
    """
    # Map each tuner name to a factory so selection is a single dict lookup
    # instead of a long if/elif chain.
    tuner_factories = {
        "xgb": lambda t: XGBTuner(t, loss_type="reg"),
        "xgb_knob": lambda t: XGBTuner(t, loss_type="reg", feature_type="knob"),
        "xgb_itervar": lambda t: XGBTuner(t, loss_type="reg", feature_type="itervar"),
        "xgb_curve": lambda t: XGBTuner(t, loss_type="reg", feature_type="curve"),
        "xgb_rank": lambda t: XGBTuner(t, loss_type="rank"),
        "xgb_rank_knob": lambda t: XGBTuner(t, loss_type="rank", feature_type="knob"),
        "xgb_rank_itervar": lambda t: XGBTuner(t, loss_type="rank", feature_type="itervar"),
        "xgb_rank_curve": lambda t: XGBTuner(t, loss_type="rank", feature_type="curve"),
        "xgb_rank_binary": lambda t: XGBTuner(t, loss_type="rank-binary"),
        "xgb_rank_binary_knob": lambda t: XGBTuner(t, loss_type="rank-binary", feature_type="knob"),
        "xgb_rank_binary_itervar": lambda t: XGBTuner(t, loss_type="rank-binary", feature_type="itervar"),
        "xgb_rank_binary_curve": lambda t: XGBTuner(t, loss_type="rank-binary", feature_type="curve"),
        "ga": lambda t: GATuner(t, pop_size=50),
        "random": RandomTuner,
        "gridsearch": GridSearchTuner,
    }

    # Start from a clean temporary log.
    tmp_log_file = log_filename + ".tmp"
    if os.path.exists(tmp_log_file):
        os.remove(tmp_log_file)

    total = len(tasks)
    for index, task in enumerate(reversed(tasks)):
        prefix = "[Task %2d/%2d] " % (index + 1, total)

        factory = tuner_factories.get(tuner)
        if factory is None:
            raise ValueError("Invalid tuner: " + tuner)
        tuner_obj = factory(task)

        # Seed the tuner with everything measured so far in this run.
        if use_transfer_learning and os.path.isfile(tmp_log_file):
            tuner_obj.load_history(autotvm.record.load_from_file(tmp_log_file))

        # Never request more trials than the search space contains.
        trial_budget = min(n_trial, len(task.config_space))
        tuner_obj.tune(
            n_trial=trial_budget,
            early_stopping=early_stopping,
            measure_option=measure_option,
            callbacks=[
                autotvm.callback.progress_bar(trial_budget, prefix=prefix),
                log_to_file(tmp_log_file),
            ],
        )

    # Distill the best record per workload into the final cache file.
    autotvm.record.pick_best(tmp_log_file, log_filename)
    os.remove(tmp_log_file)
########################################################################
# Register VTA-specific tuning tasks
def register_vta_tuning_tasks():
    """Register the packed "add" and "multiply" ALU ops as autotvm templates."""
    from tvm.autotvm.task import TaskExtractEnv

    @tvm.te.tag_scope(tag=topi.tag.ELEMWISE)
    def my_clip(x, a_min, a_max):
        """Unlike topi's current clip, put min and max into two stages."""
        const_min = tvm.tir.const(a_min, x.dtype)
        const_max = tvm.tir.const(a_max, x.dtype)
        x = te.compute(x.shape, lambda *i: tvm.te.min(x(*i), const_max), name="clipA")
        x = te.compute(x.shape, lambda *i: tvm.te.max(x(*i), const_min), name="clipB")
        return x

    # init autotvm env to register VTA operator
    TaskExtractEnv()

    @autotvm.template("add.vta")
    def _topi_add(*args, **kwargs):
        assert not kwargs, "Do not support kwargs in template function call"
        A, B = args[:2]
        with tvm.target.vta():
            # Compute the packed add, clip into int8 range and cast down.
            res = vta.top.op.add_packed(*args, **kwargs)
            res = my_clip(res, 0, 127)
            res = topi.cast(res, "int8")
        if tvm.target.Target.current().device_name == "vta":
            s = vta.top.op.schedule_add_packed([res])
        else:
            s = te.create_schedule([res.op])
        return s, [A, B, res]

    @autotvm.template("multiply.vta")
    def _topi_multiply(*args, **kwargs):
        assert not kwargs, "Do not support kwargs in template function call"
        A, B = args[:2]
        with tvm.target.vta():
            # Compute the packed multiply, clip into int8 range and cast down.
            res = vta.top.op.multiply_packed(*args, **kwargs)
            res = my_clip(res, 0, 127)
            res = topi.cast(res, "int8")
        if tvm.target.Target.current().device_name == "vta":
            s = vta.top.op.schedule_multiply_packed([res])
        else:
            s = te.create_schedule([res.op])
        return s, [A, B, res]
########################################################################
# Finally, we launch tuning jobs and evaluate the end-to-end performance.
def tune_and_evaluate(tuning_opt):
    """Extract the packed ALU tasks from the network and tune them."""
    if env.TARGET != "intelfocl":
        print("ALU only op only available for intelfocl target")
        return

    # Register VTA tuning tasks
    register_vta_tuning_tasks()

    # Perform task extraction on Relay program
    print("Extract tasks...")
    relay_prog, params = compile_network(env, target, network, start_pack, stop_pack)
    mod = tvm.IRModule.from_expr(relay_prog)
    tasks = autotvm.task.extract_from_program(
        mod,
        params=params,
        ops=(
            relay.op.get("add"),
            relay.op.get("multiply"),
        ),
        target=tvm.target.Target(target, host=env.target_host),
    )

    # filter out non-packed alu task
    tasks = list(filter(lambda t: len(t.args[0][1]) > 4, tasks))
    # filter out float alu task
    tasks = list(filter(lambda t: t.args[0][2] != "float32", tasks))

    # We should have extracted 10 convolution tasks
    tasks_set = {}
    print("Extracted {} alu tasks:".format(len(tasks)))
    for tsk in tasks:
        print("tsk = ", tsk)
        # Normalize tasks with an empty rhs shape to lhs == rhs so that
        # duplicates collapse into a single entry below.
        if len(tsk.args[1][1]) == 0:
            args = list(tsk.args)
            args[1] = args[0]
            tsk.args = tuple(args)
        if (tsk.name, tsk.args) in tasks_set:
            print("task {} already exists".format(tsk))
        # De-duplicate by (name, args).
        tasks_set[(tsk.name, tsk.args)] = tsk

    tasks = list(tasks_set.values())
    print("After merged, final #tasks={}, tasks = {}".format(len(tasks), tasks))

    # run tuning tasks
    print("Tuning...")
    tune_tasks(tasks, **tuning_opt)

# Run the tuning and evaluate the results
tune_and_evaluate(tuning_option)
| 11,793 | 33.58651 | 95 | py |
tvm | tvm-main/vta/tutorials/autotvm/tune_relay_vta.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Auto-tuning a convolutional network on VTA
==========================================
**Author**: `Lianmin Zheng <https://github.com/merrymercy>`_, `Thierry Moreau <https://homes.cs.washington.edu/~moreau/>`_
Auto-tuning for a specific accelerator design is critical for getting the best
performance for any given operator. This is a tutorial showcases how to tune a
whole convolutional network on VTA.
The operator implementation for VTA in TVM is written in template form.
The template has many tunable knobs (tile factor, virtual threads, etc).
We will tune all convolution operators in the neural network. After tuning,
we produce a log file which stores the best schedule parameters for all tuned
operators. When the TVM compiler compiles these operators, it will query this
log file to get the best knob parameters.
"""
######################################################################
# Install dependencies
# --------------------
# To use the autotvm package in tvm, we need to install some extra dependencies.
# (change "3" to "2" if you use python2):
#
# .. code-block:: bash
#
# pip3 install --user psutil xgboost tornado mxnet requests "Pillow<7" cloudpickle
#
# To make TVM run faster during tuning, it is recommended to use cython
# as FFI of TVM. In the root directory of TVM, execute
# (change "3" to "2" if you use python2):
#
# .. code-block:: bash
#
# pip3 install --user cython
# sudo make cython3
#
# Now return to python code. Import packages.
import os
from mxnet.gluon.model_zoo import vision
import numpy as np
from PIL import Image
from tvm import topi
import tvm
from tvm import te
from tvm import rpc, autotvm, relay
from tvm.contrib import graph_executor, utils, download
from tvm.autotvm.measure.measure_methods import request_remote
from tvm.autotvm.tuner import XGBTuner, GATuner, RandomTuner, GridSearchTuner
import vta
from vta.testing import simulator
from vta.top import graph_pack
#################################################################
# Compile network
# ---------------
# Perform vta-specific compilation with Relay from a Gluon model
def compile_network(env, target, model, start_pack, stop_pack):
    """Import a Gluon model, quantize it, and (for VTA) graph-pack it.

    Parameters
    ----------
    env : vta.Environment
        VTA hardware configuration (provides BATCH / BLOCK_OUT / WGT_WIDTH).
    target : tvm.target.Target
        Compilation target; graph packing is applied only when its
        device_name is ``"vta"``.
    model : str
        Name of the Gluon model-zoo model to fetch (pretrained).
    start_pack, stop_pack : str
        Relay op names delimiting the region offloaded to VTA.

    Returns
    -------
    (relay.Function, dict)
        The Relay program and its parameter dictionary.
    """
    # Populate the shape and data type dictionary
    dtype_dict = {"data": "float32"}
    shape_dict = {"data": (env.BATCH, 3, 224, 224)}

    # Get off the shelf gluon model, and convert to relay
    gluon_model = vision.get_model(model, pretrained=True)
    mod, params = relay.frontend.from_mxnet(gluon_model, shape_dict)

    # Update shape and type dictionary
    shape_dict.update({k: v.shape for k, v in params.items()})
    dtype_dict.update({k: str(v.dtype) for k, v in params.items()})

    # Perform quantization in Relay
    # Note: We set opt_level to 3 in order to fold batch norm
    with tvm.transform.PassContext(opt_level=3):
        with relay.quantize.qconfig(global_scale=8.0, skip_conv_layers=[0]):
            mod = relay.quantize.quantize(mod, params=params)

    # Perform graph packing and constant folding for VTA target
    if target.device_name == "vta":
        assert env.BLOCK_IN == env.BLOCK_OUT
        relay_prog = graph_pack(
            mod["main"],
            env.BATCH,
            env.BLOCK_OUT,
            env.WGT_WIDTH,
            start_name=start_pack,
            stop_name=stop_pack,
        )
    else:
        # BUGFIX: the original left ``relay_prog`` unbound on the non-VTA
        # (arm_cpu) path, raising NameError at the return below.  Fall back
        # to the unpacked main function, matching the script variant of this
        # helper elsewhere in the repo.
        relay_prog = mod["main"]
    return relay_prog, params
#################################################################
# Start RPC Tracker
# -----------------
# TVM uses an RPC session to communicate with Pynq boards.
# During tuning, the tuner will send the generated code to the board and
# measure the speed of code on the board.
#
# To scale up tuning, TVM uses an RPC Tracker to manage multiple devices.
# The RPC Tracker is a centralized controller node. We can register all devices to
# the tracker. For example, if we have 10 Pynq boards, we can register all of them
# to the tracker, and run 10 measurements in parallel, accelerating the tuning process.
#
# To start an RPC tracker, run this command on the host machine. The tracker is
# required during the whole tuning process, so we need to open a new terminal for
# this command:
#
# .. code-block:: bash
#
# python -m tvm.exec.rpc_tracker --host=0.0.0.0 --port=9190
#
# The expected output is:
#
# .. code-block:: bash
#
# INFO:RPCTracker:bind to 0.0.0.0:9190
#################################################################
# Register devices to RPC Tracker
# -----------------------------------
# Now we can register our devices to the tracker. The first step is to
# build the TVM runtime for the Pynq devices.
#
# Follow :ref:`vta-index`
# to build the TVM runtime on the device. Then register the device to the tracker with:
#
# .. code-block:: bash
#
# python -m tvm.exec.rpc_server --tracker=[HOST_IP]:9190 --key=pynq
#
# (replace :code:`[HOST_IP]` with the IP address of your host machine)
#
# After registering devices, we can confirm it by querying the rpc_tracker:
#
# .. code-block:: bash
#
# python -m tvm.exec.query_rpc_tracker --host=0.0.0.0 --port=9190
#
# For example, if we have 6 Pynq boards and 11 Raspberry Pi 3B,
# the output can be
#
# .. code-block:: bash
#
# Queue Status
# ----------------------------------
# key total free pending
# ----------------------------------
# pynq 6 6 0
# rpi3b 11 11 0
# ----------------------------------
#
# You can register multiple devices to the tracker to accelerate tuning.
###########################################
# Set Tuning Options
# ------------------
# Before tuning, we should apply some configurations.
# Here we use a Pynq-Z1 board as an example.
# Tracker host and port can be set by your environment
# Tracker host and port can be set by your environment; these defaults assume
# a tracker running locally on the conventional port.
tracker_host = os.environ.get("TVM_TRACKER_HOST", "127.0.0.1")
tracker_port = int(os.environ.get("TVM_TRACKER_PORT", 9190))

# Load VTA parameters from the 3rdparty/vta-hw/config/vta_config.json file
env = vta.get_env()

# This target is used for cross compilation. You can query it by :code:`gcc -v` on your device.
# Set ``device=arm_cpu`` to run inference on the CPU
# or ``device=vta`` to run inference on the FPGA.
device = "vta"
target = env.target if device == "vta" else env.target_vta_cpu

# Name of Gluon model to compile
# The ``start_pack`` and ``stop_pack`` labels indicate where
# to start and end the graph packing relay pass: in other words
# where to start and finish offloading to VTA.
network = "resnet18_v1"
start_pack = "nn.max_pool2d"
stop_pack = "nn.global_avg_pool2d"

# Tuning options consumed by ``tune_tasks`` below.  Measurements are run
# remotely through the RPC tracker registered above.
log_file = "%s.%s.log" % (device, network)
tuning_option = {
    "log_filename": log_file,
    "tuner": "random",
    "n_trial": 1000,
    "early_stopping": None,
    "measure_option": autotvm.measure_option(
        builder=autotvm.LocalBuilder(),
        runner=autotvm.RPCRunner(
            env.TARGET,
            host=tracker_host,
            port=tracker_port,
            number=5,
            timeout=60,
            module_loader=vta.module_loader(),
            # check_correctness=True, # TODO: re-enable when check_correctness works again.
        ),
    ),
}
####################################################################
#
# .. note:: How to set tuning options
#
# In general, the default values provided here work well.
# If you have enough time budget, you can set :code:`n_trial`, :code:`early_stopping`
# to larger values, makes the tuning run for longer.
# If your device is under-powered or your conv2d operators are large, consider
# setting a longer timeout.
#
###################################################################
# Begin Tuning
# ------------
# Now we can extract tuning tasks from the network and begin tuning.
# Here, we provide a simple utility function to tune a list of tasks.
# This function is just an initial implementation which tunes them in sequential order.
# We will introduce a more sophisticated tuning scheduler in the future.
#
# Given that the tuning will be done on Pynq FPGA boards, make sure that
# the ``TARGET`` entry in the ``vta_config.json`` file is set to ``pynq``.
# You can skip the implementation of this function for this tutorial.
def tune_tasks(
    tasks,
    measure_option,
    tuner="xgb",
    n_trial=1000,
    early_stopping=None,
    log_filename="tuning.log",
    use_transfer_learning=True,
):
    """Tune a list of AutoTVM tasks sequentially and keep the best records.

    Parameters
    ----------
    tasks : list of autotvm.task.Task
        Tasks to tune; processed in reverse order.
    measure_option : dict
        Result of ``autotvm.measure_option`` describing how to build/run.
    tuner : str
        Search strategy name; must be a key of ``tuner_table`` below.
    n_trial : int
        Maximum trials per task (capped by the task's config-space size).
    early_stopping : int, optional
        Stop a task after this many trials without improvement.
    log_filename : str
        File receiving the best record of every tuned task.
    use_transfer_learning : bool
        If True, seed each tuner with history from previously tuned tasks.
    """
    # Tuner name -> (class, constructor kwargs).  A dispatch table replaces
    # the original 16-branch if/elif chain; behavior is unchanged.
    tuner_table = {
        "xgb": (XGBTuner, {"loss_type": "reg"}),
        "xgb_knob": (XGBTuner, {"loss_type": "reg", "feature_type": "knob"}),
        "xgb_itervar": (XGBTuner, {"loss_type": "reg", "feature_type": "itervar"}),
        "xgb_curve": (XGBTuner, {"loss_type": "reg", "feature_type": "curve"}),
        "xgb_rank": (XGBTuner, {"loss_type": "rank"}),
        "xgb_rank_knob": (XGBTuner, {"loss_type": "rank", "feature_type": "knob"}),
        "xgb_rank_itervar": (XGBTuner, {"loss_type": "rank", "feature_type": "itervar"}),
        "xgb_rank_curve": (XGBTuner, {"loss_type": "rank", "feature_type": "curve"}),
        "xgb_rank_binary": (XGBTuner, {"loss_type": "rank-binary"}),
        "xgb_rank_binary_knob": (XGBTuner, {"loss_type": "rank-binary", "feature_type": "knob"}),
        "xgb_rank_binary_itervar": (
            XGBTuner,
            {"loss_type": "rank-binary", "feature_type": "itervar"},
        ),
        "xgb_rank_binary_curve": (XGBTuner, {"loss_type": "rank-binary", "feature_type": "curve"}),
        "ga": (GATuner, {"pop_size": 50}),
        "random": (RandomTuner, {}),
        "gridsearch": (GridSearchTuner, {}),
    }

    # Tune into a temporary log; the best entries are distilled at the end.
    tmp_log_file = log_filename + ".tmp"
    if os.path.exists(tmp_log_file):
        os.remove(tmp_log_file)

    for i, tsk in enumerate(reversed(tasks)):
        prefix = "[Task %2d/%2d] " % (i + 1, len(tasks))

        # create tuner (validated per task, as in the original chain)
        if tuner not in tuner_table:
            raise ValueError("Invalid tuner: " + tuner)
        tuner_cls, tuner_kwargs = tuner_table[tuner]
        tuner_obj = tuner_cls(tsk, **tuner_kwargs)

        # Warm-start from measurements of previously tuned tasks.
        if use_transfer_learning:
            if os.path.isfile(tmp_log_file):
                tuner_obj.load_history(autotvm.record.load_from_file(tmp_log_file))

        # do tuning
        tsk_trial = min(n_trial, len(tsk.config_space))
        tuner_obj.tune(
            n_trial=tsk_trial,
            early_stopping=early_stopping,
            measure_option=measure_option,
            callbacks=[
                autotvm.callback.progress_bar(tsk_trial, prefix=prefix),
                autotvm.callback.log_to_file(tmp_log_file),
            ],
        )

    # pick best records to a cache file
    autotvm.record.pick_best(tmp_log_file, log_filename)
    os.remove(tmp_log_file)
########################################################################
# Register VTA-specific tuning tasks
def register_vta_tuning_tasks():
    """Register the packed VTA conv2d template with AutoTVM.

    Side effects only: instantiating ``TaskExtractEnv`` and applying the
    ``autotvm.template`` decorator makes the template discoverable by
    ``autotvm.task.extract_from_program`` later in this script.
    """
    from tvm.autotvm.task import TaskExtractEnv

    @tvm.te.tag_scope(tag=topi.tag.ELEMWISE)
    def my_clip(x, a_min, a_max):
        """Unlike topi's current clip, put min and max into two stages."""
        const_min = tvm.tir.const(a_min, x.dtype)
        const_max = tvm.tir.const(a_max, x.dtype)
        x = te.compute(x.shape, lambda *i: tvm.te.min(x(*i), const_max), name="clipA")
        x = te.compute(x.shape, lambda *i: tvm.te.max(x(*i), const_min), name="clipB")
        return x

    # init autotvm env to register VTA operator
    TaskExtractEnv()

    @autotvm.template("conv2d_packed.vta")
    def _topi_nn_conv2d(*args, **kwargs):
        """AutoTVM template: packed conv2d + requantization (shift/clip/cast).

        Returns the schedule plus the (input, weight, output) tensors.
        """
        assert not kwargs, "Do not support kwargs in template function call"
        A, W = args[:2]

        with tvm.target.vta():
            res = vta.top.conv2d_packed(*args, **kwargs)
            # Requantize the int32 accumulator back into int8 range.
            res = topi.right_shift(res, 8)
            res = my_clip(res, 0, 127)
            res = topi.cast(res, "int8")

        # Use the VTA-specific schedule only when actually targeting VTA.
        if tvm.target.Target.current().device_name == "vta":
            s = vta.top.schedule_conv2d_packed([res])
        else:
            s = te.create_schedule([res.op])
        return s, [A, W, res]
########################################################################
# Finally, we launch tuning jobs and evaluate the end-to-end performance.
def tune_and_evaluate(tuning_opt):
    """Extract conv2d tasks from the network, tune them, and benchmark.

    NOTE: an unconditional ``return`` below skips the tuning/benchmark phase;
    it is left in deliberately so the docs build stays fast.  Reads several
    module-level globals (env, target, network, start_pack, stop_pack,
    tracker_host, tracker_port, log_file, device).
    """
    # Register VTA tuning tasks
    register_vta_tuning_tasks()

    # Perform task extraction on Relay program
    print("Extract tasks...")
    relay_prog, params = compile_network(env, target, network, start_pack, stop_pack)
    mod = tvm.IRModule.from_expr(relay_prog)
    tasks = autotvm.task.extract_from_program(
        mod,
        params=params,
        ops=(relay.op.get("nn.conv2d"),),
        target=target,
        target_host=env.target_host,
    )

    # filter out non-packed conv2d task: packed tensors have >4 dims
    tasks = list(filter(lambda t: len(t.args[0][1]) > 4 and "conv" in t.name, tasks))

    # We should have extracted 10 convolution tasks
    assert len(tasks) == 10
    print("Extracted {} conv2d tasks:".format(len(tasks)))
    for tsk in tasks:
        # Decode the packed NCHWnc layout back into logical conv2d shapes
        # for a human-readable summary line.
        inp = tsk.args[0][1]
        wgt = tsk.args[1][1]
        batch = inp[0] * inp[4]
        in_filter = inp[1] * inp[5]
        out_filter = wgt[0] * wgt[4]
        height, width = inp[2], inp[3]
        hkernel, wkernel = wgt[2], wgt[3]
        hstride, wstride = tsk.args[2][0], tsk.args[2][1]
        hpad, wpad = tsk.args[3][0], tsk.args[3][1]
        print(
            "({}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {})".format(
                batch,
                height,
                width,
                in_filter,
                out_filter,
                hkernel,
                wkernel,
                hpad,
                wpad,
                hstride,
                wstride,
            )
        )

    # We do not run the tuning in our webpage server since it takes too long.
    # Comment the following line to run it by yourself.
    return

    # run tuning tasks
    print("Tuning...")
    tune_tasks(tasks, **tuning_opt)

    # evaluate with tuning history
    if env.TARGET != "sim":
        # Get remote from fleet node
        remote = autotvm.measure.request_remote(
            env.TARGET, tracker_host, tracker_port, timeout=10000
        )
        # Reconfigure the JIT runtime and FPGA.
        vta.reconfig_runtime(remote)
        vta.program_fpga(remote, bitstream=None)
    else:
        # In simulation mode, host the RPC server locally.
        remote = rpc.LocalSession()

    # compile kernels with history best records
    with autotvm.tophub.context(target, extra_files=[log_file]):
        # Compile network
        print("Compile...")
        if target.device_name != "vta":
            with tvm.transform.PassContext(opt_level=3, disabled_pass={"AlterOpLayout"}):
                lib = relay.build(
                    relay_prog, target=target, params=params, target_host=env.target_host
                )
        else:
            # VTA needs its own build_config wrapper around relay.build.
            with vta.build_config(opt_level=3, disabled_pass={"AlterOpLayout"}):
                lib = relay.build(
                    relay_prog, target=target, params=params, target_host=env.target_host
                )

        # Export library and push it to the remote device over RPC.
        print("Upload...")
        temp = utils.tempdir()
        lib.export_library(temp.relpath("graphlib.tar"))
        remote.upload(temp.relpath("graphlib.tar"))
        lib = remote.load_module("graphlib.tar")

        # Generate the graph executor on the remote context.
        ctx = remote.ext_dev(0) if device == "vta" else remote.cpu(0)
        m = graph_executor.GraphModule(lib["default"](ctx))

        # upload parameters to device (synthetic input; values don't matter
        # for timing)
        image = tvm.nd.array((np.random.uniform(size=(1, 3, 224, 224))).astype("float32"))
        m.set_input("data", image)

        # evaluate
        print("Evaluate inference time cost...")
        timer = m.module.time_evaluator("run", ctx, number=1, repeat=10)
        tcost = timer()
        prof_res = np.array(tcost.results) * 1000  # convert to millisecond
        print(
            "Mean inference time (std dev): %.2f ms (%.2f ms)"
            % (np.mean(prof_res), np.std(prof_res))
        )
# Run the tuning and evaluate the results (note: tuning itself is skipped by
# an early ``return`` inside tune_and_evaluate; see that function).
tune_and_evaluate(tuning_option)
######################################################################
# Sample Output
# -------------
# The tuning needs to compile many programs and extract feature from them.
# So a high performance CPU is recommended.
# One sample output is listed below.
# It takes about 2 hours on a 16T CPU, and 6 Pynq boards.
#
# .. code-block:: bash
#
# Extract tasks...
# [Warning] Invalid shape during AutoTVM task creation
# Extracted 10 conv2d tasks:
# Task(func_name=topi_nn_conv2d, args=(('TENSOR', (1, 16, 14, 14, 1, 16), 'int8'), ('TENSOR', (32, 16, 1, 1, 16, 16), 'int8'), (2, 2), (0, 0), (1, 1), 'NCHW1n16c', 'int32'), kwargs={}, workload=('conv2d', (1, 16, 14, 14, 1, 16, 'int8'), (32, 16, 1, 1, 16, 16, 'int8'), (2, 2), (0, 0), (1, 1), 'NCHW1n16c', 'int32'))
# Task(func_name=topi_nn_conv2d, args=(('TENSOR', (1, 8, 28, 28, 1, 16), 'int8'), ('TENSOR', (16, 8, 1, 1, 16, 16), 'int8'), (2, 2), (0, 0), (1, 1), 'NCHW1n16c', 'int32'), kwargs={}, workload=('conv2d', (1, 8, 28, 28, 1, 16, 'int8'), (16, 8, 1, 1, 16, 16, 'int8'), (2, 2), (0, 0), (1, 1), 'NCHW1n16c', 'int32'))
# Task(func_name=topi_nn_conv2d, args=(('TENSOR', (1, 4, 56, 56, 1, 16), 'int8'), ('TENSOR', (8, 4, 1, 1, 16, 16), 'int8'), (2, 2), (0, 0), (1, 1), 'NCHW1n16c', 'int32'), kwargs={}, workload=('conv2d', (1, 4, 56, 56, 1, 16, 'int8'), (8, 4, 1, 1, 16, 16, 'int8'), (2, 2), (0, 0), (1, 1), 'NCHW1n16c', 'int32'))
# Task(func_name=topi_nn_conv2d, args=(('TENSOR', (1, 4, 56, 56, 1, 16), 'int8'), ('TENSOR', (4, 4, 3, 3, 16, 16), 'int8'), (1, 1), (1, 1), (1, 1), 'NCHW1n16c', 'int32'), kwargs={}, workload=('conv2d', (1, 4, 56, 56, 1, 16, 'int8'), (4, 4, 3, 3, 16, 16, 'int8'), (1, 1), (1, 1), (1, 1), 'NCHW1n16c', 'int32'))
# Task(func_name=topi_nn_conv2d, args=(('TENSOR', (1, 8, 28, 28, 1, 16), 'int8'), ('TENSOR', (8, 8, 3, 3, 16, 16), 'int8'), (1, 1), (1, 1), (1, 1), 'NCHW1n16c', 'int32'), kwargs={}, workload=('conv2d', (1, 8, 28, 28, 1, 16, 'int8'), (8, 8, 3, 3, 16, 16, 'int8'), (1, 1), (1, 1), (1, 1), 'NCHW1n16c', 'int32'))
# Task(func_name=topi_nn_conv2d, args=(('TENSOR', (1, 4, 56, 56, 1, 16), 'int8'), ('TENSOR', (8, 4, 3, 3, 16, 16), 'int8'), (2, 2), (1, 1), (1, 1), 'NCHW1n16c', 'int32'), kwargs={}, workload=('conv2d', (1, 4, 56, 56, 1, 16, 'int8'), (8, 4, 3, 3, 16, 16, 'int8'), (2, 2), (1, 1), (1, 1), 'NCHW1n16c', 'int32'))
# Task(func_name=topi_nn_conv2d, args=(('TENSOR', (1, 16, 14, 14, 1, 16), 'int8'), ('TENSOR', (16, 16, 3, 3, 16, 16), 'int8'), (1, 1), (1, 1), (1, 1), 'NCHW1n16c', 'int32'), kwargs={}, workload=('conv2d', (1, 16, 14, 14, 1, 16, 'int8'), (16, 16, 3, 3, 16, 16, 'int8'), (1, 1), (1, 1), (1, 1), 'NCHW1n16c', 'int32'))
# Task(func_name=topi_nn_conv2d, args=(('TENSOR', (1, 8, 28, 28, 1, 16), 'int8'), ('TENSOR', (16, 8, 3, 3, 16, 16), 'int8'), (2, 2), (1, 1), (1, 1), 'NCHW1n16c', 'int32'), kwargs={}, workload=('conv2d', (1, 8, 28, 28, 1, 16, 'int8'), (16, 8, 3, 3, 16, 16, 'int8'), (2, 2), (1, 1), (1, 1), 'NCHW1n16c', 'int32'))
# Task(func_name=topi_nn_conv2d, args=(('TENSOR', (1, 32, 7, 7, 1, 16), 'int8'), ('TENSOR', (32, 32, 3, 3, 16, 16), 'int8'), (1, 1), (1, 1), (1, 1), 'NCHW1n16c', 'int32'), kwargs={}, workload=('conv2d', (1, 32, 7, 7, 1, 16, 'int8'), (32, 32, 3, 3, 16, 16, 'int8'), (1, 1), (1, 1), (1, 1), 'NCHW1n16c', 'int32'))
# Task(func_name=topi_nn_conv2d, args=(('TENSOR', (1, 16, 14, 14, 1, 16), 'int8'), ('TENSOR', (32, 16, 3, 3, 16, 16), 'int8'), (2, 2), (1, 1), (1, 1), 'NCHW1n16c', 'int32'), kwargs={}, workload=('conv2d', (1, 16, 14, 14, 1, 16, 'int8'), (32, 16, 3, 3, 16, 16, 'int8'), (2, 2), (1, 1), (1, 1), 'NCHW1n16c', 'int32'))
# Tuning...
# [Task 1/10] Current/Best: 0.72/ 23.24 GFLOPS | Progress: (480/1000) | 640.31 s Done.
# [Task 2/10] Current/Best: 0.00/ 27.69 GFLOPS | Progress: (576/1000) | 810.09 s Done.
# [Task 3/10] Current/Best: 0.00/ 22.97 GFLOPS | Progress: (1000/1000) | 1125.37 s Done.
# [Task 4/10] Current/Best: 0.00/ 31.26 GFLOPS | Progress: (1000/1000) | 1025.52 s Done.
# [Task 5/10] Current/Best: 0.00/ 15.15 GFLOPS | Progress: (1000/1000) | 1236.58 s Done.
# [Task 6/10] Current/Best: 0.00/ 22.74 GFLOPS | Progress: (1000/1000) | 906.60 s Done.
# [Task 7/10] Current/Best: 0.00/ 15.27 GFLOPS | Progress: (1000/1000) | 1056.25 s Done.
# [Task 8/10] Current/Best: 0.00/ 2.18 GFLOPS | Progress: (1000/1000) | 2275.29 s Done.
# [Task 9/10] Current/Best: 2.23/ 3.99 GFLOPS | Progress: (1000/1000) | 2527.25 s Done.
# [Task 10/10] Current/Best: 1.56/ 6.32 GFLOPS | Progress: (480/1000) | 1304.84 s Done.
# Compile...
# Upload...
# Evaluate inference time cost...
# Mean inference time (std dev): 621.79 ms (0.14 ms)
######################################################################
#
# .. note:: **Experiencing Difficulties?**
#
# The auto tuning module is error-prone. If you always see " 0.00/ 0.00 GFLOPS",
# then there must be something wrong.
#
# First, make sure you set the correct configuration of your device.
# Then, you can print debug information by adding these lines in the beginning
# of the script. It will print every measurement result, where you can find useful
# error messages.
#
# .. code-block:: python
#
# import logging
# logging.getLogger('autotvm').setLevel(logging.DEBUG)
#
# Finally, always feel free to ask our community for help on https://discuss.tvm.apache.org
| 22,597 | 41.397749 | 322 | py |
tvm | tvm-main/vta/scripts/tune_resnet.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Perform ResNet autoTVM tuning on VTA using Relay."""
import argparse, os, time
from mxnet.gluon.model_zoo import vision
import numpy as np
from PIL import Image
from tvm import topi
import tvm
from tvm import te
from tvm import rpc, autotvm, relay
from tvm.autotvm.measure.measure_methods import request_remote
from tvm.autotvm.tuner import XGBTuner, GATuner, RandomTuner, GridSearchTuner
from tvm.contrib import graph_executor, utils, download
from tvm.contrib.debugger import debug_executor
import vta
from vta.testing import simulator
from vta.top import graph_pack
from tvm.autotvm.task import extract_from_program
def parse_arguments(argv=None):
    """Parse command-line options for the ResNet autotuning script.

    Parameters
    ----------
    argv : list of str, optional
        Argument list to parse.  Defaults to ``sys.argv[1:]`` when None
        (argparse's own default), so existing callers are unaffected;
        accepting an explicit list makes the parser unit-testable.

    Returns
    -------
    argparse.Namespace
        Parsed options (model, start/stop pack names, device, tuner, etc.).
    """
    parser = argparse.ArgumentParser(description="Train a model for image classification.")
    parser.add_argument(
        "--model",
        type=str,
        default="resnet18_v1",
        choices=["resnet18_v1"],
        help="Input model name.",
    )
    parser.add_argument(
        "--start-name",
        type=str,
        default="nn.max_pool2d",
        help="The name of the node where packing starts",
    )
    parser.add_argument(
        "--stop-name",
        type=str,
        default="nn.global_avg_pool2d",
        help="The name of the node where packing stops",
    )
    parser.add_argument(
        "--debug-profile", action="store_true", help="Show layer-wise time cost profiling results"
    )
    parser.add_argument(
        "--device", default="vta", choices=["vta", "arm_cpu"], help="Select device target"
    )
    parser.add_argument(
        "--measurements", type=int, default=1, help="Number of measurements during AutoTVM search"
    )
    parser.add_argument("--tuner", type=str, default="random", help="AutoTVM search strategy")
    parser.add_argument(
        "--log-filename", type=str, default="resnet-18.log", help="AutoTVM log file name"
    )
    return parser.parse_args(argv)
def register_vta_tuning_tasks():
    """Register VTA-aware conv2d/dense AutoTVM tasks (side effects only).

    Overrides the generic ``topi_nn_conv2d`` / ``topi_nn_dense`` task
    templates so that task extraction below measures the VTA versions.
    """
    from tvm.autotvm.task.topi_integration import TaskExtractEnv, deserialize_args

    @tvm.te.tag_scope(tag=topi.tag.ELEMWISE)
    def my_clip(x, a_min, a_max):
        """Unlike topi's current clip, put min and max into two stages."""
        const_min = tvm.tir.const(a_min, x.dtype)
        const_max = tvm.tir.const(a_max, x.dtype)
        x = te.compute(x.shape, lambda *i: tvm.te.min(x(*i), const_max), name="clipA")
        x = te.compute(x.shape, lambda *i: tvm.te.max(x(*i), const_min), name="clipB")
        return x

    # init autotvm env to register VTA operator
    TaskExtractEnv()

    @autotvm.task.register("topi_nn_conv2d", override=True)
    def _topi_nn_conv2d(*args, **kwargs):
        """Task template: conv2d + int8 requantization (shift/clip/cast)."""
        assert not kwargs, "Do not support kwargs in template function call"
        args = deserialize_args(args)
        A, W = args[:2]

        with tvm.target.vta():
            res = topi.nn.conv2d(*args, **kwargs)
            res = topi.right_shift(res, 8)
            res = my_clip(res, 0, 127)
            res = topi.cast(res, "int8")

        # Pick the VTA schedule only when the current target is VTA.
        if tvm.target.Target.current().device_name == "vta":
            s = topi.generic.schedule_conv2d_nchw([res])
        else:
            s = te.create_schedule([res.op])
        return s, [A, W, res]

    @autotvm.task.register("topi_nn_dense", override=True)
    def _topi_nn_dense(*args, **kwargs):
        """Task template: dense + int8 requantization (shift/clip/cast)."""
        assert not kwargs, "Do not support kwargs in template function call"
        args = deserialize_args(args)
        A, W = args[:2]

        with tvm.target.vta():
            res = topi.nn.dense(*args, **kwargs)
            res = topi.right_shift(res, 8)
            res = my_clip(res, 0, 127)
            res = topi.cast(res, "int8")

        if tvm.target.Target.current().device_name == "vta":
            s = topi.generic.schedule_dense([res])
        else:
            s = te.create_schedule([res.op])
        return s, [A, W, res]
def compile_network(opt, env, target):
    """Import, quantize, and (for VTA) graph-pack the requested Gluon model.

    Returns the Relay program together with its parameter dictionary.
    """
    # Input tensor shape/dtype for the network.
    shape_dict = {"data": (env.BATCH, 3, 224, 224)}
    dtype_dict = {"data": "float32"}

    # Fetch the pretrained Gluon model and translate it into Relay.
    gluon_model = vision.get_model(opt.model, pretrained=True)
    mod, params = relay.frontend.from_mxnet(gluon_model, shape_dict)

    # Extend the dictionaries with every parameter's shape and dtype.
    for key, value in params.items():
        shape_dict[key] = value.shape
        dtype_dict[key] = str(value.dtype)

    # Quantize in Relay; opt_level=3 folds batch norms before quantization.
    with tvm.transform.PassContext(opt_level=3):
        with relay.quantize.qconfig(global_scale=8.0, skip_conv_layers=[0]):
            relay_prog = relay.quantize.quantize(mod["main"], params=params)

    # Pack the graph so tensor shapes match the VTA block layout.
    if target.device_name == "vta":
        assert env.BLOCK_IN == env.BLOCK_OUT
        relay_prog = graph_pack(
            relay_prog,
            env.BATCH,
            env.BLOCK_OUT,
            env.WGT_WIDTH,
            start_name=opt.start_name,
            stop_name=opt.stop_name,
        )
    return relay_prog, params
def tune_tasks(
    tasks,
    measure_option,
    tuner="xgb",
    n_trial=1000,
    early_stopping=None,
    log_filename="tuning.log",
    use_transfer_learning=True,
    try_winograd=True,
):
    """Tune each AutoTVM task in sequence and distill the best records.

    Parameters
    ----------
    tasks : list of autotvm.task.Task
        Tasks to tune; processed in reverse order.
    measure_option : dict
        Result of ``autotvm.measure_option`` describing how to build/run.
    tuner : str
        Search strategy; must be a key of ``tuner_table`` below.
    n_trial : int
        Maximum trials per task (capped by the task's config-space size).
    early_stopping : int, optional
        Stop a task after this many trials without improvement.
    log_filename : str
        File receiving the best record of every tuned task.
    use_transfer_learning : bool
        If True, seed each tuner with history from previously tuned tasks.
    try_winograd : bool
        Unused; retained for backward compatibility with existing callers.
    """
    # Tuner name -> (class, constructor kwargs).  A dispatch table replaces
    # the original 16-branch if/elif chain; behavior is unchanged.
    tuner_table = {
        "xgb": (XGBTuner, {"loss_type": "reg"}),
        "xgb_knob": (XGBTuner, {"loss_type": "reg", "feature_type": "knob"}),
        "xgb_itervar": (XGBTuner, {"loss_type": "reg", "feature_type": "itervar"}),
        "xgb_curve": (XGBTuner, {"loss_type": "reg", "feature_type": "curve"}),
        "xgb_rank": (XGBTuner, {"loss_type": "rank"}),
        "xgb_rank_knob": (XGBTuner, {"loss_type": "rank", "feature_type": "knob"}),
        "xgb_rank_itervar": (XGBTuner, {"loss_type": "rank", "feature_type": "itervar"}),
        "xgb_rank_curve": (XGBTuner, {"loss_type": "rank", "feature_type": "curve"}),
        "xgb_rank_binary": (XGBTuner, {"loss_type": "rank-binary"}),
        "xgb_rank_binary_knob": (XGBTuner, {"loss_type": "rank-binary", "feature_type": "knob"}),
        "xgb_rank_binary_itervar": (
            XGBTuner,
            {"loss_type": "rank-binary", "feature_type": "itervar"},
        ),
        "xgb_rank_binary_curve": (XGBTuner, {"loss_type": "rank-binary", "feature_type": "curve"}),
        "ga": (GATuner, {"pop_size": 50}),
        "random": (RandomTuner, {}),
        "gridsearch": (GridSearchTuner, {}),
    }

    # Tune into a temporary log; the best entries are distilled at the end.
    tmp_log_file = log_filename + ".tmp"
    if os.path.exists(tmp_log_file):
        os.remove(tmp_log_file)

    for i, tsk in enumerate(reversed(tasks)):
        prefix = "[Task %2d/%2d] " % (i + 1, len(tasks))

        # create tuner (validated per task, as in the original chain)
        if tuner not in tuner_table:
            raise ValueError("Invalid tuner: " + tuner)
        tuner_cls, tuner_kwargs = tuner_table[tuner]
        tuner_obj = tuner_cls(tsk, **tuner_kwargs)

        # Warm-start from measurements of previously tuned tasks.
        if use_transfer_learning:
            if os.path.isfile(tmp_log_file):
                tuner_obj.load_history(autotvm.record.load_from_file(tmp_log_file))

        # do tuning
        n_trial_ = min(n_trial, len(tsk.config_space))
        tuner_obj.tune(
            n_trial_,
            early_stopping=early_stopping,
            measure_option=measure_option,
            callbacks=[
                autotvm.callback.progress_bar(n_trial_, prefix=prefix),
                autotvm.callback.log_to_file(tmp_log_file),
            ],
        )

    # pick best records to a cache file
    autotvm.record.pick_best(tmp_log_file, log_filename)
    os.remove(tmp_log_file)
if __name__ == "__main__":
    opt = parse_arguments()

    # Make sure that TVM was compiled with RPC=1
    assert tvm.runtime.enabled("rpc")

    # Read in VTA environment
    env = vta.get_env()

    # The tracker must be reachable; bail out early with a hint otherwise.
    tracker_host = os.environ.get("TVM_TRACKER_HOST", None)
    tracker_port = os.environ.get("TVM_TRACKER_PORT", None)
    if not tracker_host or not tracker_port:
        print("Set your AutoTVM tracker node host and port variables to run the autotuner")
        exit()

    # Get remote
    if env.TARGET != "sim":

        # Measure build start time
        reconfig_start = time.time()

        # Get remote from fleet node
        remote = autotvm.measure.request_remote(
            env.TARGET, tracker_host, int(tracker_port), timeout=10000
        )

        # Reconfigure the JIT runtime and FPGA.
        # You can program the FPGA with your own custom bitstream
        # by passing the path to the bitstream file instead of None.
        vta.reconfig_runtime(remote)
        vta.program_fpga(remote, bitstream=None)

        # Report on reconfiguration time
        reconfig_time = time.time() - reconfig_start
        print("Reconfigured FPGA and RPC runtime in {0:.2f}s!".format(reconfig_time))

    # In simulation mode, host the RPC server locally.
    else:
        remote = rpc.LocalSession()

    # VTA target and execution context
    target = env.target if opt.device == "vta" else env.target_vta_cpu
    ctx = remote.ext_dev(0) if opt.device == "vta" else remote.cpu(0)

    # Compile Relay program (quantized + graph-packed for VTA)
    print("Initial compile...")
    relay_prog, params = compile_network(opt, env, target)

    # Register VTA tuning tasks
    register_vta_tuning_tasks()

    # Perform task extraction on Relay program
    print("Extracting tasks...")
    tasks = extract_from_program(
        func=relay_prog,
        params=params,
        ops=(relay.op.get("nn.conv2d"),),
        target=tvm.target.Target(target, host=env.target_host),
    )

    # Perform Autotuning
    print("Tuning...")
    tuning_opt = {
        "log_filename": opt.log_filename,
        "tuner": opt.tuner,
        # 1e9 effectively means "exhaust the config space" (tune_tasks caps
        # the trial count at the config-space size).
        "n_trial": 1e9,
        "early_stopping": None,
        "measure_option": autotvm.measure_option(
            builder=autotvm.LocalBuilder(build_func=vta.vta_autotvm_build_func),
            runner=autotvm.RPCRunner(
                env.TARGET,
                tracker_host,
                # NOTE(review): tracker_port is still the raw environment
                # string here (only request_remote above converts it with
                # int()) — confirm RPCRunner accepts a string port.
                tracker_port,
                number=4,
                min_repeat_ms=150,
                repeat=opt.measurements,
                timeout=60,
                # check_correctness=True, # TODO: re-enable when check_correctness works again.
            ),
        ),
    }
    tune_tasks(tasks, **tuning_opt)

    # Compile kernels with history best records
    with autotvm.tophub.context(target, extra_files=[opt.log_filename]):

        # Compile network
        print("Compiling network with best tuning parameters...")
        if target.device_name != "vta":
            with tvm.transform.PassContext(opt_level=3, disabled_pass={"AlterOpLayout"}):
                graph, lib, params = relay.build(
                    relay_prog,
                    target=tvm.target.Target(target, host=env.target_host),
                    params=params,
                )
        else:
            # VTA needs its own build_config wrapper around relay.build.
            with vta.build_config(opt_level=3, disabled_pass={"AlterOpLayout"}):
                graph, lib, params = relay.build(
                    relay_prog,
                    target=tvm.target.Target(target, host=env.target_host),
                    params=params,
                )

        # Export library and push it to the remote device over RPC.
        temp = utils.tempdir()
        lib.save(temp.relpath("graphlib.o"))
        remote.upload(temp.relpath("graphlib.o"))
        lib = remote.load_module("graphlib.o")

        # If detailed runtime info is needed build with debug runtime
        if opt.debug_profile:
            m = debug_executor.create(graph, lib, ctx)
        else:
            m = graph_executor.create(graph, lib, ctx)

        # Set the network parameters and synthetic input (values are random;
        # only timing is measured below).
        image = tvm.nd.array((np.random.uniform(size=(1, 3, 224, 224))).astype("float32"))
        m.set_input(**params)
        m.set_input("data", image)

        # Perform inference
        timer = m.module.time_evaluator("run", ctx, number=4, repeat=opt.measurements)
        tcost = timer()
        prof_res = np.array(tcost.results) * 1000 # convert to millisecond
        print(
            "Mean inference time (std dev): %.2f ms (%.2f ms)"
            % (np.mean(prof_res), np.std(prof_res))
        )

        # Display profile information (debug executor prints per-layer cost)
        if opt.debug_profile:
            m.run()
| 13,325 | 34.631016 | 98 | py |
tvm | tvm-main/docs/legacy_redirect.py | # -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from string import Template
import json
import os
legacy_redirects = [
["dev/benchmark.html", "../arch/benchmark.html"],
["dev/convert_layout.html", "../arch/convert_layout.html"],
["dev/debugger.html", "../arch/debugger.html"],
["dev/device_target_interactions.html", "../arch/device_target_interactions.html"],
["dev/frontend/tensorflow.html", "../../arch/frontend/tensorflow.html"],
["dev/hybrid_script.html", "../arch/hybrid_script.html"],
["dev/index.html", "../arch/index.html"],
["dev/inferbound.html", "../arch/inferbound.html"],
[
"dev/introduction_to_module_serialization.html",
"../arch/introduction_to_module_serialization.html",
],
["dev/microtvm_design.html", "../arch/microtvm_design.html"],
["dev/model_library_format.html", "../arch/model_library_format.html"],
["dev/pass_infra.html", "../arch/pass_infra.html"],
["dev/relay_intro.html", "../arch/relay_intro.html"],
["dev/relay_op_strategy.html", "../arch/relay_op_strategy.html"],
["dev/runtime.html", "../arch/runtime.html"],
["dev/runtimes/vulkan.html", "../../arch/runtimes/vulkan.html"],
["dev/security.html", "../arch/security.html"],
["dev/virtual_machine.html", "../arch/virtual_machine.html"],
["dev/how_to.html", "index.html"],
["dev/pytest_target_parametrization.html", "how_to/pytest_target_parametrization.html"],
["dev/relay_add_op.html", "how_to/relay_add_op.html"],
["dev/relay_add_pass.html", "how_to/relay_add_pass.html"],
["dev/relay_bring_your_own_codegen.html", "how_to/relay_bring_your_own_codegen.html"],
["dev/codebase_walkthrough.html", "tutorial/codebase_walkthrough.html"],
["deploy/android.html", "../how_to/deploy/android.html"],
["deploy/arm_compute_lib.html", "../how_to/deploy/arm_compute_lib.html"],
["deploy/bnns.html", "../how_to/deploy/bnns.html"],
["deploy/cpp_deploy.html", "../how_to/deploy/cpp_deploy.html"],
["deploy/hls.html", "../how_to/deploy/hls.html"],
["deploy/index.html", "../how_to/deploy/index.html"],
["deploy/integrate.html", "../how_to/deploy/integrate.html"],
["deploy/tensorrt.html", "../how_to/deploy/tensorrt.html"],
["deploy/vitis_ai.html", "../how_to/deploy/vitis_ai.html"],
["profiling/index.html", "../how_to/profile/index.html"],
["profiling/papi.html", "../how_to/profile/papi.html"],
["api/links.html", "../reference/api/links.html"],
["api/python/auto_scheduler.html", "../../reference/api/python/auto_scheduler.html"],
["api/python/autotvm.html", "../../reference/api/python/autotvm.html"],
["api/python/contrib.html", "../../reference/api/python/contrib.html"],
["api/python/driver.html", "../../reference/api/python/driver.html"],
["api/python/error.html", "../../reference/api/python/error.html"],
["api/python/graph_executor.html", "../../reference/api/python/graph_executor.html"],
["api/python/index.html", "../../reference/api/python/index.html"],
["api/python/ir.html", "../../reference/api/python/ir.html"],
["api/python/micro.html", "../../reference/api/python/micro.html"],
["api/python/ndarray.html", "../../reference/api/python/ndarray.html"],
["api/python/relay/analysis.html", "../../../reference/api/python/relay/analysis.html"],
["api/python/relay/backend.html", "../../../reference/api/python/relay/backend.html"],
[
"api/python/relay/dataflow_pattern.html",
"../../../reference/api/python/relay/dataflow_pattern.html",
],
["api/python/relay/frontend.html", "../../../reference/api/python/relay/frontend.html"],
["api/python/relay/image.html", "../../../reference/api/python/relay/image.html"],
["api/python/relay/index.html", "../../../reference/api/python/relay/index.html"],
["api/python/relay/nn.html", "../../../reference/api/python/relay/nn.html"],
["api/python/relay/testing.html", "../../../reference/api/python/relay/testing.html"],
["api/python/relay/transform.html", "../../../reference/api/python/relay/transform.html"],
["api/python/relay/vision.html", "../../../reference/api/python/relay/vision.html"],
["api/python/rpc.html", "../../reference/api/python/rpc.html"],
["api/python/runtime.html", "../../reference/api/python/runtime.html"],
["api/python/target.html", "../../reference/api/python/target.html"],
["api/python/te.html", "../../reference/api/python/te.html"],
["api/python/tir.html", "../../reference/api/python/tir.html"],
["api/python/topi.html", "../../reference/api/python/topi.html"],
["api/python/vta/index.html", "../../../reference/api/python/vta/index.html"],
["langref/hybrid_script.html", "../reference/langref/hybrid_script.html"],
["langref/index.html", "../reference/langref/index.html"],
["langref/relay_adt.html", "../reference/langref/relay_adt.html"],
["langref/relay_expr.html", "../reference/langref/relay_expr.html"],
["langref/relay_op.html", "../reference/langref/relay_op.html"],
["langref/relay_pattern.html", "../reference/langref/relay_pattern.html"],
["langref/relay_type.html", "../reference/langref/relay_type.html"],
["microtvm/index.html", "../topic/microtvm/index.html"],
["vta/dev/config.html", "../../topic/vta/dev/config.html"],
["vta/dev/hardware.html", "../../topic/vta/dev/hardware.html"],
["vta/dev/index.html", "../../topic/vta/dev/index.html"],
["vta/index.html", "../topic/vta/index.html"],
["vta/install.html", "../topic/vta/install.html"],
["tutorials/index.html", "../tutorial/index.html"],
["tutorials/frontend/from_caffe2.html", "../../how_to/compile_models/from_caffe2.html"],
["tutorials/frontend/from_coreml.html", "../../how_to/compile_models/from_coreml.html"],
["tutorials/frontend/from_darknet.html", "../../how_to/compile_models/from_darknet.html"],
["tutorials/frontend/from_keras.html", "../../how_to/compile_models/from_keras.html"],
["tutorials/frontend/from_mxnet.html", "../../how_to/compile_models/from_mxnet.html"],
["tutorials/frontend/from_onnx.html", "../../how_to/compile_models/from_onnx.html"],
["tutorials/frontend/from_paddle.html", "../../how_to/compile_models/from_paddle.html"],
["tutorials/frontend/from_pytorch.html", "../../how_to/compile_models/from_pytorch.html"],
["tutorials/frontend/from_tensorflow.html", "../../how_to/compile_models/from_tensorflow.html"],
["tutorials/frontend/from_tflite.html", "../../how_to/compile_models/from_tflite.html"],
[
"tutorials/frontend/deploy_model_on_android.html",
"../../how_to/deploy_models/deploy_model_on_android.html",
],
[
"tutorials/frontend/deploy_model_on_rasp.html",
"../../how_to/deploy_models/deploy_model_on_rasp.html",
],
[
"tutorials/frontend/deploy_object_detection_pytorch.html",
"../../how_to/deploy_models/deploy_object_detection_pytorch.html",
],
[
"tutorials/frontend/deploy_prequantized.html",
"../../how_to/deploy_models/deploy_prequantized.html",
],
[
"tutorials/frontend/deploy_prequantized_tflite.html",
"../../how_to/deploy_models/deploy_prequantized_tflite.html",
],
[
"tutorials/frontend/deploy_quantized.html",
"../../how_to/deploy_models/deploy_quantized.html",
],
["tutorials/frontend/deploy_sparse.html", "../../how_to/deploy_models/deploy_sparse.html"],
[
"tutorials/dev/bring_your_own_datatypes.html",
"../../how_to/extend_tvm/bring_your_own_datatypes.html",
],
[
"tutorials/dev/low_level_custom_pass.html",
"../../how_to/extend_tvm/low_level_custom_pass.html",
],
["tutorials/dev/use_pass_infra.html", "../../how_to/extend_tvm/use_pass_infra.html"],
["tutorials/dev/use_pass_instrument.html", "../../how_to/extend_tvm/use_pass_instrument.html"],
["tutorials/optimize/opt_conv_cuda.html", "../../how_to/optimize_operators/opt_conv_cuda.html"],
[
"tutorials/optimize/opt_conv_tensorcore.html",
"../../how_to/optimize_operators/opt_conv_tensorcore.html",
],
["tutorials/optimize/opt_gemm.html", "../../how_to/optimize_operators/opt_gemm.html"],
[
"tutorials/auto_scheduler/tune_conv2d_layer_cuda.html",
"../../how_to/tune_with_autoscheduler/tune_conv2d_layer_cuda.html",
],
[
"tutorials/auto_scheduler/tune_network_arm.html",
"../../how_to/tune_with_autoscheduler/tune_network_arm.html",
],
[
"tutorials/auto_scheduler/tune_network_cuda.html",
"../../how_to/tune_with_autoscheduler/tune_network_cuda.html",
],
[
"tutorials/auto_scheduler/tune_network_mali.html",
"../../how_to/tune_with_autoscheduler/tune_network_mali.html",
],
[
"tutorials/auto_scheduler/tune_network_x86.html",
"../../how_to/tune_with_autoscheduler/tune_network_x86.html",
],
[
"tutorials/auto_scheduler/tune_sparse_x86.html",
"../../how_to/tune_with_autoscheduler/tune_sparse_x86.html",
],
[
"tutorials/autotvm/tune_conv2d_cuda.html",
"../../how_to/tune_with_autotvm/tune_conv2d_cuda.html",
],
["tutorials/autotvm/tune_relay_arm.html", "../../how_to/tune_with_autotvm/tune_relay_arm.html"],
[
"tutorials/autotvm/tune_relay_cuda.html",
"../../how_to/tune_with_autotvm/tune_relay_cuda.html",
],
[
"tutorials/autotvm/tune_relay_mobile_gpu.html",
"../../how_to/tune_with_autotvm/tune_relay_mobile_gpu.html",
],
["tutorials/autotvm/tune_relay_x86.html", "../../how_to/tune_with_autotvm/tune_relay_x86.html"],
["tutorials/micro/micro_autotune.html", "../../how_to/work_with_microtvm/micro_autotune.html"],
[
"tutorials/micro/micro_reference_vm.html",
"../../how_to/work_with_microtvm/micro_reference_vm.html",
],
["tutorials/micro/micro_tflite.html", "../../how_to/work_with_microtvm/micro_tflite.html"],
["tutorials/frontend/build_gcn.html", "../../how_to/work_with_relay/build_gcn.html"],
[
"tutorials/frontend/using_external_lib.html",
"../../how_to/work_with_relay/using_external_lib.html",
],
["tutorials/language/extern_op.html", "../../how_to/work_with_schedules/extern_op.html"],
["tutorials/language/intrin_math.html", "../../how_to/work_with_schedules/intrin_math.html"],
["tutorials/language/reduction.html", "../../how_to/work_with_schedules/reduction.html"],
["tutorials/language/scan.html", "../../how_to/work_with_schedules/scan.html"],
[
"tutorials/language/schedule_primitives.html",
"../../how_to/work_with_schedules/schedule_primitives.html",
],
["tutorials/language/tedd.html", "../../how_to/work_with_schedules/tedd.html"],
["tutorials/language/tensorize.html", "../../how_to/work_with_schedules/tensorize.html"],
["tutorials/language/tuple_inputs.html", "../../how_to/work_with_schedules/tuple_inputs.html"],
[
"tutorials/get_started/auto_scheduler_matmul_x86.html",
"../../tutorial/auto_scheduler_matmul_x86.html",
],
["tutorials/get_started/autotvm_matmul_x86.html", "../../tutorial/autotvm_matmul_x86.html"],
["tutorials/get_started/autotvm_relay_x86.html", "../../tutorial/autotvm_relay_x86.html"],
[
"tutorials/get_started/cross_compilation_and_rpc.html",
"../../tutorial/cross_compilation_and_rpc.html",
],
["tutorials/get_started/install.html", "../../tutorial/install.html"],
["tutorials/topi/intro_topi.html", "../../tutorial/intro_topi.html"],
["tutorials/get_started/introduction.html", "../../tutorial/introduction.html"],
["tutorials/get_started/relay_quick_start.html", "../../tutorial/relay_quick_start.html"],
[
"tutorials/get_started/tensor_expr_get_started.html",
"../../tutorial/tensor_expr_get_started.html",
],
[
"tutorials/get_started/tvmc_command_line_driver.html",
"../../tutorial/tvmc_command_line_driver.html",
],
[
"tutorials/get_started/tvmc_python.html",
"../../tutorial/tvmc_python.html",
],
]
redirect_template = """
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="refresh" content="1; url=$to" />
<script>
window.location.href = "$to"
</script>
</head>
</html>
"""
def build_legacy_redirect(tvm_path):
    """Create a Sphinx callback that writes HTML redirect stubs for moved pages.

    For every ``(old, new)`` pair in ``legacy_redirects``, an HTML file is
    written at the old location under ``docs/_build/html`` whose content
    immediately redirects the browser to the new location.

    Parameters
    ----------
    tvm_path : pathlib.Path
        Path to the TVM repository root.

    Returns
    -------
    callable
        A ``(app, docname)`` callback matching Sphinx's two-argument event
        signature; it acts only when the HTML builder is in use.
    """

    def legacy_redirect(app, docname):  # Sphinx expects two arguments
        if app.builder.name != "html":
            return
        src = Template(redirect_template)
        for frm, to in legacy_redirects:
            frm = tvm_path.resolve() / "docs" / "_build" / "html" / frm
            # Redirect stubs may nest arbitrarily deep; create parents lazily.
            # Use pathlib throughout instead of mixing in os.path/os.makedirs.
            frm.parent.mkdir(parents=True, exist_ok=True)
            frm.write_text(src.substitute({"to": to}))

    return legacy_redirect
| 13,700 | 49.00365 | 100 | py |
tvm | tvm-main/docs/conf.py | # -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# documentation build configuration file, created by
# sphinx-quickstart on Thu Jul 23 19:40:08 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from functools import partial
import gc
from importlib import import_module
import inspect
from hashlib import md5
import os
from pathlib import Path
import re
import sys
from textwrap import dedent, indent
from unittest.mock import patch
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
curr_path = Path(__file__).expanduser().absolute().parent
if curr_path.name == "_staging":
# Can't use curr_path.parent, because sphinx_gallery requires a relative path.
tvm_path = Path(os.pardir, os.pardir)
else:
tvm_path = Path(os.pardir)
sys.path.insert(0, str(tvm_path.resolve() / "python"))
sys.path.insert(0, str(tvm_path.resolve() / "vta" / "python"))
sys.path.insert(0, str(tvm_path.resolve() / "docs"))
# -- General configuration ------------------------------------------------
# General information about the project.
project = "tvm"
author = "Apache Software Foundation"
copyright = "2020 - 2023, %s" % author
github_doc_root = "https://github.com/apache/tvm/tree/main/docs/"
os.environ["TVM_BUILD_DOC"] = "1"
def git_describe_version(original_version):
    """Return the git-describe-based version string for the docs build.

    Executes the repository's ``version.py`` in an isolated namespace and
    queries its ``git_describe_version()`` helper.

    Parameters
    ----------
    original_version : str
        The version reported by the imported ``tvm`` package.

    Returns
    -------
    str
        The git-describe derived version (may equal ``original_version``).
    """
    ver_py = tvm_path.joinpath("version.py")
    libver = {"__file__": ver_py}
    # Read via a context manager so the file handle is closed deterministically
    # (the original `exec(compile(open(...).read()))` leaked the handle to GC).
    with open(ver_py, "rb") as f:
        source = f.read()
    exec(compile(source, ver_py, "exec"), libver, libver)
    _, gd_version = libver["git_describe_version"]()
    if gd_version != original_version:
        print("Use git describe based version %s" % gd_version)
    return gd_version
# Version information.
import tvm
from tvm import topi
from tvm import te
from tvm import testing
version = git_describe_version(tvm.__version__)
release = version
def monkey_patch(module_name, func_name):
    """Decorator factory for monkey-patching library functions.

    Used to modify a few sphinx-gallery behaviors to make the "Open in Colab"
    button work correctly. The replacement function receives the original
    implementation through a ``real_func`` keyword argument. Note this behaves
    differently from unittest's ``@mock.patch``: the decorator is placed on
    the *new* version of the function.
    """
    target_module = import_module(module_name)
    wrapped = getattr(target_module, func_name)

    def install(replacement):
        # Bind the original implementation so the replacement can delegate.
        patched = partial(replacement, real_func=wrapped)
        setattr(target_module, func_name, patched)
        return patched

    return install
CURRENT_FILE_CONF = None
@monkey_patch("sphinx_gallery.py_source_parser", "split_code_and_text_blocks")
def split_code_and_text_blocks(source_file, return_node, real_func):
    """Monkey-patch split_code_and_text_blocks to expose sphinx-gallery's file-level config.

    It's kinda gross, but we need access to file_conf to detect the requires_cuda flag.
    """
    global CURRENT_FILE_CONF
    conf, code_blocks, node = real_func(source_file, return_node)
    # Stash the per-file configuration so later hooks can inspect its flags.
    CURRENT_FILE_CONF = conf
    return (conf, code_blocks, node)
# This header replaces the default sphinx-gallery one in sphinx_gallery/gen_rst.py.
COLAB_HTML_HEADER = """
.. DO NOT EDIT. THIS FILE WAS AUTOMATICALLY GENERATED BY
.. TVM'S MONKEY-PATCHED VERSION OF SPHINX-GALLERY. TO MAKE
.. CHANGES, EDIT THE SOURCE PYTHON FILE:
.. "{python_file}"
.. only:: html
.. note::
:class: sphx-glr-download-link-note
This tutorial can be used interactively with Google Colab! You can also click
:ref:`here <sphx_glr_download_{ref_name}>` to run the Jupyter notebook locally.
.. image:: {button_svg}
:align: center
:target: {colab_url}
:width: 300px
.. rst-class:: sphx-glr-example-title
.. _sphx_glr_{ref_name}:
"""
# Google Colab allows opening .ipynb files on GitHub by appending a GitHub path to this base URL.
COLAB_URL_BASE = "https://colab.research.google.com/github"
# The GitHub path where the site is automatically deployed by tvm-bot.
IPYTHON_GITHUB_BASE = "apache/tvm-site/blob/asf-site/docs/_downloads/"
# The SVG image of the "Open in Colab" button.
BUTTON = (
"https://raw.githubusercontent.com/tlc-pack/web-data/main/images/utilities/colab_button.svg"
)
@monkey_patch("sphinx_gallery.gen_rst", "save_rst_example")
def save_rst_example(example_rst, example_file, time_elapsed, memory_used, gallery_conf, real_func):
    """Monkey-patch save_rst_example to include the "Open in Colab" button."""
    rel_path = os.path.relpath(example_file, gallery_conf["src_dir"])
    anchor_name = rel_path.replace(os.path.sep, "_")
    # The deployed notebook URL is keyed by the md5 hash of its relative path.
    ipynb_rel = rel_path[:-2] + "ipynb"
    path_hash = md5(ipynb_rel.encode()).hexdigest()

    # Fixed documentation versions must link to different (earlier) .ipynb notebooks.
    url_parts = [f"{COLAB_URL_BASE}/{IPYTHON_GITHUB_BASE}"]
    if "dev" not in version:
        url_parts.append(version + "/")
    url_parts.append(path_hash + "/" + os.path.basename(ipynb_rel))
    colab_url = "".join(url_parts)

    header = COLAB_HTML_HEADER.format(
        python_file=rel_path, ref_name=anchor_name, colab_url=colab_url, button_svg=BUTTON
    )
    # Temporarily swap sphinx-gallery's header template while the real writer runs.
    with patch("sphinx_gallery.gen_rst.EXAMPLE_HEADER", header):
        real_func(example_rst, example_file, time_elapsed, memory_used, gallery_conf)
INCLUDE_DIRECTIVE_RE = re.compile(r"^([ \t]*)\.\. include::\s*(.+)\n", flags=re.M)
COMMENT_DIRECTIVE_RE = re.compile(r"^\.\.(?: .*)?\n(?:(?: .*)?\n)*", flags=re.M)
ADMONITION_DIRECTIVE_RE = re.compile(rf"^\.\. admonition:: *(.*)\n((?:(?: .*)?\n)*)\n", flags=re.M)
@monkey_patch("sphinx_gallery.notebook", "rst2md")
def rst2md(text, gallery_conf, target_dir, heading_levels, real_func):
    """Monkey-patch rst2md to support comments and some include directives.

    Currently, only include directives without any parameters are supported. Also, note that in
    reStructuredText any unrecognized explicit markup block is treated as a comment (see
    https://docutils.sourceforge.io/docs/ref/rst/restructuredtext.html#comments).

    For callouts, we only replace generic "admonition" directives. All others should be replaced
    by sphinx-gallery's rst2md. Note that the "alert" and "alert-info" tags are supported in most
    IPython notebooks, but they render kinda funky on Colab.
    """

    def expand_include(match):
        # Inline the referenced file, preserving the directive's indentation.
        included = os.path.join(target_dir, match.group(2))
        with open(included) as handle:
            body = handle.read()
        return indent(body, match.group(1)) + "\n"

    def admonition_to_html(match):
        # Generic, titled admonitions become an HTML callout box; other
        # admonitions (e.g. .. note::) are handled by sphinx-gallery itself.
        heading, body = match.groups()
        flattened = dedent(body).strip()
        return f'<div class="alert alert-info"><h4>{heading}</h4><p>{flattened}</p></div>'

    text = INCLUDE_DIRECTIVE_RE.sub(expand_include, text)
    text = ADMONITION_DIRECTIVE_RE.sub(admonition_to_html, text)
    # Run the real converter, then strip any remaining directives (i.e. comments).
    text = real_func(text, gallery_conf, target_dir, heading_levels)
    text = COMMENT_DIRECTIVE_RE.sub("", text)
    return text
def install_request_hook(gallery_conf, fname):
    """Sphinx-gallery reset hook: install TVM's testing request hook for each example.

    The ``(gallery_conf, fname)`` signature is required by sphinx-gallery's
    reset-module callback convention; both arguments are unused here.
    ``depth=3`` is forwarded verbatim — presumably the number of caller
    frames to skip; confirm against ``tvm.testing.utils.install_request_hook``.
    """
    testing.utils.install_request_hook(depth=3)
# Notebook "install TVM" cells. The *_FIXED variants interpolate the release
# version via f-strings; the *_DEV variants have no placeholders, so they are
# plain strings (the original spurious f-prefixes are dropped).
INSTALL_TVM_DEV = """\
%%shell
# Installs the latest dev build of TVM from PyPI. If you wish to build
# from source, see https://tvm.apache.org/docs/install/from_source.html
pip install apache-tvm --pre"""

INSTALL_TVM_FIXED = f"""\
%%shell
# Installs TVM version {version} from PyPI. If you wish to build
# from source, see https://tvm.apache.org/docs/install/from_source.html
pip install apache-tvm=={version}"""

INSTALL_TVM_CUDA_DEV = """\
%%shell
# Installs the latest dev build of TVM from PyPI, with CUDA enabled. To use this,
# you must request a Google Colab instance with a GPU by going to Runtime ->
# Change runtime type -> Hardware accelerator -> GPU. If you wish to build from
# source, see https://tvm.apache.org/docs/install/from_source.html
pip install tlcpack-nightly-cu113 --pre -f https://tlcpack.ai/wheels"""

INSTALL_TVM_CUDA_FIXED = f"""\
%%shell
# Installs TVM version {version} from PyPI, with CUDA enabled. To use this,
# you must request a Google Colab instance with a GPU by going to Runtime ->
# Change runtime type -> Hardware accelerator -> GPU. If you wish to build from
# source, see https://tvm.apache.org/docs/install/from_source.html
pip install apache-tvm-cu113=={version} -f https://tlcpack.ai/wheels"""
@monkey_patch("sphinx_gallery.gen_rst", "jupyter_notebook")
def jupyter_notebook(script_blocks, gallery_conf, target_dir, real_func):
    """Monkey-patch sphinx-gallery to add a TVM import block to each IPython notebook.

    If we had only one import block, we could skip the patching and just set
    first_notebook_cell. However, how we import TVM depends on if we are using a
    fixed or dev version, and whether we will use the GPU.

    Tutorials requiring a CUDA-enabled build of TVM should use the flag:
    # sphinx_gallery_requires_cuda = True
    """
    requires_cuda = CURRENT_FILE_CONF.get("requires_cuda", False)
    fixed_version = "dev" not in version  # idiomatic form of `not "dev" in version`
    # Pick the install cell for the (fixed?, cuda?) combination.
    if fixed_version:
        install_block = INSTALL_TVM_CUDA_FIXED if requires_cuda else INSTALL_TVM_FIXED
    else:
        install_block = INSTALL_TVM_CUDA_DEV if requires_cuda else INSTALL_TVM_DEV
    new_conf = {**gallery_conf, "first_notebook_cell": install_block}
    return real_func(script_blocks, new_conf, target_dir)
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.intersphinx",
"sphinx.ext.napoleon",
"sphinx.ext.mathjax",
"sphinx_gallery.gen_gallery",
"autodocsumm",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = [".rst", ".md"]
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# generate autosummary even if no references
autosummary_generate = True
# The main toctree document.
main_doc = "index"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build", "_staging"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme is set by the make target
html_theme = os.environ.get("TVM_THEME", "rtd")
on_rtd = os.environ.get("READTHEDOCS", None) == "True"
# only import rtd theme and set it if want to build docs locally
if not on_rtd and html_theme == "rtd":
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
html_theme_options = {
"analytics_id": "UA-75982049-2",
"logo_only": True,
}
html_logo = "_static/img/tvm-logo-small.png"
html_favicon = "_static/img/tvm-logo-square.png"
# Output file base name for HTML help builder.
htmlhelp_basename = project + "doc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(main_doc, "%s.tex" % project, project, author, "manual"),
]
intersphinx_mapping = {
"python": ("https://docs.python.org/{.major}".format(sys.version_info), None),
# "numpy": ("https://numpy.org/doc/stable", None),
# "scipy": ("https://docs.scipy.org/doc/scipy", None),
# "matplotlib": ("https://matplotlib.org/", None),
}
from sphinx_gallery.sorting import ExplicitOrder
examples_dirs = [
tvm_path.joinpath("gallery", "tutorial"),
tvm_path.joinpath("gallery", "how_to", "compile_models"),
tvm_path.joinpath("gallery", "how_to", "deploy_models"),
tvm_path.joinpath("gallery", "how_to", "work_with_relay"),
tvm_path.joinpath("gallery", "how_to", "work_with_schedules"),
tvm_path.joinpath("gallery", "how_to", "optimize_operators"),
tvm_path.joinpath("gallery", "how_to", "tune_with_autotvm"),
tvm_path.joinpath("gallery", "how_to", "tune_with_autoscheduler"),
tvm_path.joinpath("gallery", "how_to", "work_with_microtvm"),
tvm_path.joinpath("gallery", "how_to", "extend_tvm"),
tvm_path.joinpath("vta", "tutorials"),
]
gallery_dirs = [
"tutorial",
"how_to/compile_models",
"how_to/deploy_models",
"how_to/work_with_relay",
"how_to/work_with_schedules",
"how_to/optimize_operators",
"how_to/tune_with_autotvm",
"how_to/tune_with_autoscheduler",
"how_to/work_with_microtvm",
"how_to/extend_tvm",
"topic/vta/tutorials",
]
subsection_order = ExplicitOrder(
str(p)
for p in [
tvm_path / "vta" / "tutorials" / "frontend",
tvm_path / "vta" / "tutorials" / "optimize",
tvm_path / "vta" / "tutorials" / "autotvm",
]
)
# Explicitly define the order within a subsection.
# The listed files are sorted according to the list.
# The unlisted files are sorted by filenames.
# The unlisted files always appear after listed files.
within_subsection_order = {
"tutorial": [
"introduction.py",
"install.py",
"tvmc_command_line_driver.py",
"tvmc_python.py",
"autotvm_relay_x86.py",
"tensor_expr_get_started.py",
"autotvm_matmul_x86.py",
"auto_scheduler_matmul_x86.py",
"tensor_ir_blitz_course.py",
"topi.pi",
"cross_compilation_and_rpc.py",
"relay_quick_start.py",
"uma.py",
],
"compile_models": [
"from_pytorch.py",
"from_tensorflow.py",
"from_mxnet.py",
"from_onnx.py",
"from_keras.py",
"from_tflite.py",
"from_coreml.py",
"from_darknet.py",
"from_caffe2.py",
"from_paddle.py",
],
"work_with_schedules": [
"schedule_primitives.py",
"reduction.py",
"intrin_math.py",
"scan.py",
"extern_op.py",
"tensorize.py",
"tuple_inputs.py",
"tedd.py",
],
"optimize_operators": [
"opt_gemm.py",
"opt_conv_cuda.py",
"opt_conv_tensorcore.py",
],
"tune_with_autotvm": [
"tune_conv2d_cuda.py",
"tune_relay_cuda.py",
"tune_relay_x86.py",
"tune_relay_arm.py",
"tune_relay_mobile_gpu.py",
],
"tune_with_autoscheduler": [
"tune_matmul_x86.py",
"tune_conv2d_layer_cuda.py",
"tune_network_x86.py",
"tune_network_cuda.py",
],
"extend_tvm": [
"low_level_custom_pass.py",
"use_pass_infra.py",
"use_pass_instrument.py",
"bring_your_own_datatypes.py",
],
"work_with_microtvm": [
"micro_tvmc.py",
"micro_tflite.py",
"micro_aot.py",
"micro_pytorch.py",
"micro_train.py",
"micro_autotune.py",
"micro_ethosu.py",
"micro_mlperftiny.py",
],
}
class WithinSubsectionOrder:
    """Sort-key callable that orders gallery files within one subsection.

    Files listed in ``within_subsection_order`` for this subsection sort
    first, in list order; all other files fall back to filename order.
    """

    def __init__(self, src_dir):
        # Only the final path component identifies the subsection.
        self.src_dir = src_dir.split("/")[-1]

    def __call__(self, filename):
        explicit = within_subsection_order.get(self.src_dir)
        # If an explicit order is provided for this file, use it.
        if explicit is not None and filename in explicit:
            position = explicit.index(filename)
            assert position < 1e10
            # The "\0" prefix sorts listed files ahead of any real filename.
            return "\0%010d" % position
        # Otherwise, sort by filename.
        return filename
# When running the tutorials on GPUs we are dependent on the Python garbage collector
# collecting TVM packed function closures for any device memory to also be released. This
# is not a good setup for machines with lots of CPU ram but constrained GPU ram, so force
# a gc after each example.
def force_gc(gallery_conf, fname):
    """Sphinx-gallery reset hook: force a garbage-collection pass per example.

    Device memory held by TVM packed-function closures is only released once
    the closures are collected, so a forced pass keeps GPU RAM bounded across
    tutorials. Both arguments are required by the hook signature but unused.
    """
    gc.collect()
# Skips certain files to avoid dependency issues
filename_pattern_default = "^(?!.*micro_mlperftiny.py).*$"

sphinx_gallery_conf = {
    "backreferences_dir": "gen_modules/backreferences",
    "doc_module": ("tvm", "numpy"),
    "reference_url": {
        "tvm": None,
        # "matplotlib": "https://matplotlib.org/",
        # "numpy": "https://numpy.org/doc/stable",
    },
    "examples_dirs": examples_dirs,
    "within_subsection_order": WithinSubsectionOrder,
    "gallery_dirs": gallery_dirs,
    "subsection_order": subsection_order,
    "filename_pattern": os.environ.get("TVM_TUTORIAL_EXEC_PATTERN", filename_pattern_default),
    "download_all_examples": False,
    "min_reported_time": 60,
    "expected_failing_examples": [],
    # BUG FIX: "reset_modules" previously appeared twice in this dict; the
    # second entry silently replaced the first, dropping the matplotlib /
    # seaborn resets and the forced gc. All resetters now live in one tuple.
    "reset_modules": ("matplotlib", "seaborn", force_gc, install_request_hook),
    "promote_jupyter_magic": True,
}
autodoc_default_options = {
"member-order": "bysource",
}
# Maps the original namespace to list of potential modules
# that we can import alias from.
tvm_alias_check_map = {
"tvm.te": ["tvm.tir"],
"tvm.tir": ["tvm.ir", "tvm.runtime"],
"tvm.relay": ["tvm.ir", "tvm.tir"],
}
## Setup header and other configs
import tlcpack_sphinx_addon
footer_copyright = "© 2023 Apache Software Foundation | All rights reserved"
footer_note = " ".join(
"""
Copyright © 2023 The Apache Software Foundation. Apache TVM, Apache, the Apache feather,
and the Apache TVM project logo are either trademarks or registered trademarks of
the Apache Software Foundation.""".split(
"\n"
)
).strip()
header_logo = "https://tvm.apache.org/assets/images/logo.svg"
header_logo_link = "https://tvm.apache.org/"
header_links = [
("Community", "https://tvm.apache.org/community"),
("Download", "https://tvm.apache.org/download"),
("VTA", "https://tvm.apache.org/vta"),
("Blog", "https://tvm.apache.org/blog"),
("Docs", "https://tvm.apache.org/docs"),
("Conference", "https://tvmconf.org"),
("Github", "https://github.com/apache/tvm/"),
]
header_dropdown = {
"name": "ASF",
"items": [
("Apache Homepage", "https://apache.org/"),
("License", "https://www.apache.org/licenses/"),
("Sponsorship", "https://www.apache.org/foundation/sponsorship.html"),
("Security", "https://www.apache.org/security/"),
("Thanks", "https://www.apache.org/foundation/thanks.html"),
("Events", "https://www.apache.org/events/current-event"),
],
}
def fixup_tutorials(original_url: str) -> str:
    """Map a generated tutorial ``.rst`` URL back to its true source file.

    Tutorials are authored as Python scripts (with README.txt index pages),
    but Sphinx only ever sees the generated .rst files, so "edit this page"
    links must be rewritten to point at the real sources.
    """
    # Non-tutorial pages need no rewriting.
    if "docs/tutorial" not in original_url:
        return original_url

    # Index pages map to the gallery README; everything else maps back to
    # the Python script the tutorial was generated from.
    if original_url.endswith("index.rst"):
        pattern, replacement = r"docs/tutorial/(.*)index\.rst", "gallery/tutorial/\\1README.txt"
    else:
        pattern, replacement = r"docs/tutorial/(.*)\.rst", "gallery/tutorial/\\1.py"
    return re.sub(pattern, replacement, original_url)
# Variables exposed to the HTML templates (tlcpack_sphinx_addon theme).
html_context = {
    "footer_copyright": footer_copyright,
    "footer_note": footer_note,
    "header_links": header_links,
    "header_dropdown": header_dropdown,
    "header_logo": header_logo,
    "header_logo_link": header_logo_link,
    # entries offered in the docs version-switcher dropdown
    "version_prefixes": ["main", "v0.8.0/", "v0.9.0/", "v0.10.0/", "v0.11.0/", "v0.12.0/"],
    "display_github": True,
    "github_user": "apache",
    "github_repo": "tvm",
    "github_version": "main/docs/",
    "theme_vcs_pageview_mode": "edit",
    # rewrites "edit this page" links for generated tutorial pages
    "edit_link_hook_fn": fixup_tutorials,
}

# add additional overrides
templates_path += [tlcpack_sphinx_addon.get_templates_path()]
html_static_path += [tlcpack_sphinx_addon.get_static_path()]
def update_alias_docstring(name, obj, lines):
    """Update the docstring of alias functions.

    If *obj*, documented under *name*, is actually defined in one of the
    modules listed for its namespace in ``tvm_alias_check_map``, append an
    "Alias of" rubric to its docstring.

    Parameters
    ----------
    name : str
        The full name of the object in the doc.
    obj : object
        The original object.
    lines : list
        The docstring lines, need to be modified inplace.
    """
    parts = name.rsplit(".", 1)
    if len(parts) != 2:
        return
    doc_mod, symbol = parts

    candidate_mods = tvm_alias_check_map.get(doc_mod)
    if candidate_mods is None or not hasattr(obj, "__module__"):
        return

    for alias_mod in candidate_mods:
        # the object must really live under the candidate module and the
        # candidate module must re-export a symbol with the same name
        if obj.__module__.startswith(alias_mod) and hasattr(sys.modules[alias_mod], symbol):
            role = ":py:func" if callable(obj) else ":py:class"
            lines.append(".. rubric:: Alias of %s:`%s.%s`" % (role, alias_mod, symbol))
def process_docstring(app, what, name, obj, options, lines):
    """Sphinx ``autodoc-process-docstring`` callback: annotate alias objects."""
    # only callables and classes can meaningfully be aliases
    if not (callable(obj) or inspect.isclass(obj)):
        return
    update_alias_docstring(name, obj, lines)
from legacy_redirect import build_legacy_redirect
def strip_ipython_magic(app, docname, source):
    """Prevents IPython magic commands from being rendered in HTML files.

    Sphinx passes ``source`` as a mutable list (typically one string per
    document) that must be modified in place; every ``%%...`` cell-magic line
    and its trailing indentation is stripped from each entry.

    TODO rework this function to remove IPython magic commands from include directives too.
    """
    # enumerate instead of range(len(...)): the index is still needed because
    # Sphinx requires the list to be mutated in place.
    for i, text in enumerate(source):
        source[i] = re.sub(r"%%.*\n\s*", "", text)
def setup(app):
    """Register Sphinx event hooks for the TVM documentation build."""
    hooks = (
        ("source-read", strip_ipython_magic),
        ("autodoc-process-docstring", process_docstring),
        ("build-finished", build_legacy_redirect(tvm_path)),
    )
    for event, handler in hooks:
        app.connect(event, handler)
| 24,952 | 33.323246 | 100 | py |
tvm | tvm-main/rust/tvm/examples/resnet/src/build_resnet.py | #!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import argparse
import ast
import csv
import logging
import shutil
import sys
from os import path as osp

import numpy as np
from mxnet.gluon.model_zoo.vision import get_model
from PIL import Image

import tvm
from tvm import te
from tvm import relay, runtime
from tvm.relay import testing
from tvm.contrib import graph_executor, cc
from tvm.contrib.download import download_testdata
# Log INFO and above with timestamps so build progress is visible.
logging.basicConfig(
    level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
)
logger = logging.getLogger(__name__)

# Command-line interface for the build script.
parser = argparse.ArgumentParser(description="Resnet build example")
aa = parser.add_argument  # shorthand for declaring arguments
aa("--build-dir", type=str, required=True, help="directory to put the build artifacts")
aa("--batch-size", type=int, default=1, help="input image batch size")
aa(
    "--opt-level",
    type=int,
    default=3,
    help="level of optimization. 0 is unoptimized and 3 is the highest level",
)
aa("--target", type=str, default="llvm", help="target for compilation")
aa("--image-shape", type=str, default="3,224,224", help="input image dimensions")
aa("--image-name", type=str, default="cat.png", help="name of input image to download")
args = parser.parse_args()

# Derived configuration used by the build/test steps below.
build_dir = args.build_dir
batch_size = args.batch_size
opt_level = args.opt_level
# NOTE(review): tvm.target.create() is a legacy API; newer TVM releases expose
# this as tvm.target.Target — confirm against the TVM version pinned here.
target = tvm.target.create(args.target)
image_shape = tuple(map(int, args.image_shape.split(",")))
data_shape = (batch_size,) + image_shape  # NCHW input shape, e.g. (1, 3, 224, 224)
def build(target_dir):
    """Compile ResNet-18 with TVM and write the deployable artifacts.

    Downloads the pretrained MXNet model, appends a softmax classification
    layer, compiles it for the configured target, and writes deploy_lib.so /
    deploy_graph.json / deploy_param.params into ``target_dir``.

    Parameters
    ----------
    target_dir : str
        Directory that receives the build artifacts.
    """
    # Download the pretrained model in MxNet's format.
    block = get_model("resnet18_v1", pretrained=True)

    # Use the shape derived from --batch-size/--image-shape instead of a
    # hard-coded (1, 3, 224, 224): previously those CLI options were ignored.
    shape_dict = {"data": data_shape}
    mod, params = relay.frontend.from_mxnet(block, shape_dict)
    # Add softmax to do classification in last layer.
    func = mod["main"]
    func = relay.Function(
        func.params, relay.nn.softmax(func.body), None, func.type_params, func.attrs
    )

    # Honour the --target and --opt-level CLI options: the previous code
    # shadowed the module-level ``target`` with a hard-coded "llvm" and
    # hard-coded opt_level=3, making both flags dead.
    with tvm.transform.PassContext(opt_level=opt_level):
        graph, lib, params = relay.build(func, target, params=params)

    # save the model artifacts
    deploy_lib = osp.join(target_dir, "deploy_lib.o")
    lib.save(deploy_lib)
    cc.create_shared(osp.join(target_dir, "deploy_lib.so"), [deploy_lib])

    with open(osp.join(target_dir, "deploy_graph.json"), "w") as fo:
        fo.write(graph)
    with open(osp.join(target_dir, "deploy_param.params"), "wb") as fo:
        fo.write(runtime.save_param_dict(params))
def download_img_labels():
    """Download the ImageNet-1k class-label mapping for testing.

    Writes the human-readable labels to ``synset.txt`` (one per line) and
    returns the mapping.

    Returns
    -------
    dict
        Mapping from ImageNet class id to human-readable label.
    """
    synset_url = "".join(
        [
            "https://gist.githubusercontent.com/zhreshold/",
            "4d0b62f3d01426887599d4f7ede23ee5/raw/",
            "596b27d23537e5a1b5751d2b0481ef172f58b539/",
            "imagenet1000_clsid_to_human.txt",
        ]
    )
    synset_name = "synset.txt"
    synset_path = download_testdata(synset_url, synset_name + ".raw", module="data", overwrite=True)
    with open(synset_path) as fin:
        data = fin.read()
    # The file is a Python dict literal; parse it with ast.literal_eval
    # instead of eval() so downloaded content can never execute code.
    synset = ast.literal_eval(data)
    with open(synset_name, "w") as f:
        for key in synset:
            f.write(synset[key])
            f.write("\n")
    print(synset_path)
    print(synset_name)
    return synset
def transform_image(image):
    """Normalize an HWC RGB image and convert it to an NCHW float batch."""
    mean = np.array([123.0, 117.0, 104.0])
    scale = np.array([58.395, 57.12, 57.375])
    normalized = (np.array(image) - mean) / scale
    # HWC -> CHW, then prepend a batch axis.
    return normalized.transpose((2, 0, 1))[np.newaxis, :]
def get_cat_image():
    """Download the test cat image and return it as a normalized NCHW batch."""
    url = "https://github.com/dmlc/mxnet.js/blob/main/data/cat.png?raw=true"
    local_path = download_testdata(url, "cat.png", module="data")
    # Keep a copy next to the script so the Rust example can load it too.
    shutil.copyfile(local_path, "cat.png")
    resized = Image.open(local_path).resize((224, 224))
    return transform_image(resized)
def test_build(build_dir):
    """Sanity check with the cat image we download.

    Loads the artifacts produced by :func:`build`, runs one inference on the
    cat image and prints the top-1 prediction.

    Parameters
    ----------
    build_dir : str
        Directory containing deploy_graph.json / deploy_lib.so / deploy_param.params.
    """
    # Use context managers so the artifact files are always closed
    # (previously both handles were leaked).
    with open(osp.join(build_dir, "deploy_graph.json")) as graph_file:
        graph = graph_file.read()
    lib = tvm.runtime.load_module(osp.join(build_dir, "deploy_lib.so"))
    with open(osp.join(build_dir, "deploy_param.params"), "rb") as params_file:
        params = bytearray(params_file.read())

    input_data = get_cat_image()

    dev = tvm.cpu()
    module = graph_executor.create(graph, lib, dev)
    module.load_params(params)
    module.run(data=input_data)
    out = module.get_output(0).numpy()
    top1 = np.argmax(out[0])

    synset = download_img_labels()
    print("TVM prediction top-1:", top1, synset[top1])
if __name__ == "__main__":
    logger.info("Compiling the model to graph executor.")
    build(build_dir)
    # Log-message typo fixed: "predication" -> "prediction".
    logger.info("Testing the model's prediction on test data.")
    test_build(build_dir)
| 5,428 | 32.720497 | 100 | py |
TPlinker-joint-extraction | TPlinker-joint-extraction-master/common/components.py | from IPython.core.debugger import set_trace
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
import math
class LayerNorm(nn.Module):
    """(Conditional) Layer Normalization over the last dimension.

    input_dim: inputs.shape[-1]
    cond_dim: cond.shape[-1]

    When ``conditional`` is True, beta/gamma receive an additive correction
    predicted from the condition tensor through zero-initialized dense
    layers, so the module initially behaves exactly like an unconditional
    LayerNorm (useful when fine-tuning on top of pretrained weights).
    """

    def __init__(self, input_dim, cond_dim = 0, center = True, scale = True, epsilon = None, conditional = False,
                 hidden_units = None, hidden_activation = 'linear', hidden_initializer = 'xavier', **kwargs):
        # BUG FIX: the default used to be the misspelled 'xaiver', which
        # matched neither branch in initialize_weights(), so the hidden dense
        # layer silently kept PyTorch's default initialization.
        super(LayerNorm, self).__init__()
        self.center = center
        self.scale = scale
        self.conditional = conditional
        self.hidden_units = hidden_units
        # In keras, activation 'linear' returns the tensor unchanged, so it
        # is not modelled here.
        self.hidden_initializer = hidden_initializer
        self.epsilon = epsilon or 1e-12
        self.input_dim = input_dim
        self.cond_dim = cond_dim

        if self.center:
            self.beta = Parameter(torch.zeros(input_dim))
        if self.scale:
            self.gamma = Parameter(torch.ones(input_dim))

        if self.conditional:
            if self.hidden_units is not None:
                self.hidden_dense = nn.Linear(in_features = self.cond_dim, out_features = self.hidden_units, bias=False)
            if self.center:
                self.beta_dense = nn.Linear(in_features = self.cond_dim, out_features = input_dim, bias=False)
            if self.scale:
                self.gamma_dense = nn.Linear(in_features = self.cond_dim, out_features = input_dim, bias=False)

        self.initialize_weights()

    def initialize_weights(self):
        """Initialize the conditional projection layers (no-op otherwise)."""
        if not self.conditional:
            return
        if self.hidden_units is not None:
            if self.hidden_initializer == 'normal':
                # torch.nn.init.normal is deprecated; use the in-place variant.
                torch.nn.init.normal_(self.hidden_dense.weight)
            elif self.hidden_initializer in ('xavier', 'xaiver'):  # glorot_uniform; 'xaiver' kept for b/c
                torch.nn.init.xavier_uniform_(self.hidden_dense.weight)
        # The beta/gamma projections are zero-initialized on purpose: a single
        # linear layer may be all-zero initialized (only stacked layers must
        # not be), and it keeps the module equivalent to a plain LayerNorm at
        # the start of training, so pretrained weights are not disturbed.
        if self.center:
            torch.nn.init.constant_(self.beta_dense.weight, 0)
        if self.scale:
            torch.nn.init.constant_(self.gamma_dense.weight, 0)

    def forward(self, inputs, cond=None):
        """Apply (conditional) layer norm; ``cond`` is required iff conditional."""
        if self.conditional:
            if self.hidden_units is not None:
                cond = self.hidden_dense(cond)
            # cond may be (batch_size, cond_dim): insert singleton axes after
            # the batch dim until it broadcasts against `inputs`.
            for _ in range(len(inputs.shape) - len(cond.shape)):
                cond = cond.unsqueeze(1)
            # Project cond to the input feature dim before adding to beta/gamma.
            if self.center:
                beta = self.beta_dense(cond) + self.beta
            if self.scale:
                gamma = self.gamma_dense(cond) + self.gamma
        else:
            if self.center:
                beta = self.beta
            if self.scale:
                gamma = self.gamma

        outputs = inputs
        if self.center:
            mean = torch.mean(outputs, dim=-1).unsqueeze(-1)
            outputs = outputs - mean
        if self.scale:
            variance = torch.mean(outputs**2, dim=-1).unsqueeze(-1)
            # BUG FIX: this used to read (variance + epsilon) ** 2, dividing
            # by the *squared* variance instead of the standard deviation.
            # Standard layer norm (and the bert4keras original) uses sqrt.
            std = (variance + self.epsilon) ** 0.5
            outputs = outputs / std
            outputs = outputs * gamma
        if self.center:
            outputs = outputs + beta
        return outputs
class HandshakingKernel(nn.Module):
    """Builds TPLinker's "handshaking" sequence.

    Every token is paired with itself and every later token; the flattened
    upper-triangular sequence of pair representations has length
    seq_len * (seq_len + 1) / 2.

    shaking_type — how a pair (token_i, token_j) is combined:
        "cat"      : concatenate and project
        "cat_plus" : additionally concatenate the encoded in-between context
        "cln"      : conditional layer norm of token_j conditioned on token_i
        "cln_plus" : cln, then a second cln conditioned on the inner context
    inner_enc_type — encoder for the in-between context used by the
        "*_plus" variants: "mean_pooling" / "max_pooling" / "mix_pooling" / "lstm".
    """
    def __init__(self, hidden_size, shaking_type, inner_enc_type):
        super().__init__()
        self.shaking_type = shaking_type
        if shaking_type == "cat":
            self.combine_fc = nn.Linear(hidden_size * 2, hidden_size)
        elif shaking_type == "cat_plus":
            self.combine_fc = nn.Linear(hidden_size * 3, hidden_size)
        elif shaking_type == "cln":
            self.tp_cln = LayerNorm(hidden_size, hidden_size, conditional = True)
        elif shaking_type == "cln_plus":
            self.tp_cln = LayerNorm(hidden_size, hidden_size, conditional = True)
            self.inner_context_cln = LayerNorm(hidden_size, hidden_size, conditional = True)

        self.inner_enc_type = inner_enc_type
        if inner_enc_type == "mix_pooling":
            # learnable per-feature gate between mean pooling and max pooling
            self.lamtha = Parameter(torch.rand(hidden_size))
        elif inner_enc_type == "lstm":
            self.inner_context_lstm = nn.LSTM(hidden_size,
                                              hidden_size,
                                              num_layers = 1,
                                              bidirectional = False,
                                              batch_first = True)

    def enc_inner_hiddens(self, seq_hiddens, inner_enc_type = "lstm"):
        # seq_hiddens: (batch_size, seq_len, hidden_size)
        # Returns one context vector per position i summarizing tokens 0..i.
        def pool(seqence, pooling_type):
            if pooling_type == "mean_pooling":
                pooling = torch.mean(seqence, dim = -2)
            elif pooling_type == "max_pooling":
                pooling, _ = torch.max(seqence, dim = -2)
            elif pooling_type == "mix_pooling":
                pooling = self.lamtha * torch.mean(seqence, dim = -2) + (1 - self.lamtha) * torch.max(seqence, dim = -2)[0]
            return pooling
        if "pooling" in inner_enc_type:
            # pool over every prefix [0..i]; O(seq_len^2) but simple
            inner_context = torch.stack([pool(seq_hiddens[:, :i+1, :], inner_enc_type) for i in range(seq_hiddens.size()[1])], dim = 1)
        elif inner_enc_type == "lstm":
            # a unidirectional LSTM's step-i hidden state already summarizes
            # tokens 0..i, so a single pass suffices
            inner_context, _ = self.inner_context_lstm(seq_hiddens)

        return inner_context

    def forward(self, seq_hiddens):
        '''
        seq_hiddens: (batch_size, seq_len, hidden_size)
        return:
            shaking_hiddenss: (batch_size, (1 + seq_len) * seq_len / 2, hidden_size) (32, 5+4+3+2+1, 5)
        '''
        seq_len = seq_hiddens.size()[-2]
        shaking_hiddens_list = []
        for ind in range(seq_len):
            hidden_each_step = seq_hiddens[:, ind, :]
            visible_hiddens = seq_hiddens[:, ind:, :] # ind: only look back
            # broadcast token `ind` against every token it may pair with
            repeat_hiddens = hidden_each_step[:, None, :].repeat(1, seq_len - ind, 1)

            if self.shaking_type == "cat":
                shaking_hiddens = torch.cat([repeat_hiddens, visible_hiddens], dim = -1)
                shaking_hiddens = torch.tanh(self.combine_fc(shaking_hiddens))
            elif self.shaking_type == "cat_plus":
                inner_context = self.enc_inner_hiddens(visible_hiddens, self.inner_enc_type)
                shaking_hiddens = torch.cat([repeat_hiddens, visible_hiddens, inner_context], dim = -1)
                shaking_hiddens = torch.tanh(self.combine_fc(shaking_hiddens))
            elif self.shaking_type == "cln":
                shaking_hiddens = self.tp_cln(visible_hiddens, repeat_hiddens)
            elif self.shaking_type == "cln_plus":
                inner_context = self.enc_inner_hiddens(visible_hiddens, self.inner_enc_type)
                shaking_hiddens = self.tp_cln(visible_hiddens, repeat_hiddens)
                shaking_hiddens = self.inner_context_cln(shaking_hiddens, inner_context)

            shaking_hiddens_list.append(shaking_hiddens)
        long_shaking_hiddens = torch.cat(shaking_hiddens_list, dim = 1)
        return long_shaking_hiddens
TPlinker-joint-extraction | TPlinker-joint-extraction-master/tplinker/tplinker.py | import re
from tqdm import tqdm
from IPython.core.debugger import set_trace
import copy
import torch
import torch.nn as nn
import json
from torch.nn.parameter import Parameter
from common.components import HandshakingKernel
import math
class HandshakingTaggingScheme(object):
    """TPLinker handshaking tagging scheme.

    Encodes the entities and relations of a sample as tags over the flattened
    upper triangle ("shaking sequence") of a max_seq_len x max_seq_len
    token-pair matrix, and decodes model predictions back into relation
    triples.
    """
    def __init__(self, rel2id, max_seq_len):
        super(HandshakingTaggingScheme, self).__init__()
        self.rel2id = rel2id
        self.id2rel = {ind:rel for rel, ind in rel2id.items()}

        # entity tags: a single link from entity head token to tail token
        self.tag2id_ent = {
            "O": 0,
            "ENT-H2T": 1, # entity head to entity tail
        }
        self.id2tag_ent = {id_:tag for tag, id_ in self.tag2id_ent.items()}

        # head-relation tags: link between subject head and object head; the
        # tag value records which of the two appears first in the text
        self.tag2id_head_rel = {
            "O": 0,
            "REL-SH2OH": 1, # subject head to object head
            "REL-OH2SH": 2, # object head to subject head
        }
        self.id2tag_head_rel = {id_:tag for tag, id_ in self.tag2id_head_rel.items()}

        # tail-relation tags: link between subject tail and object tail
        self.tag2id_tail_rel = {
            "O": 0,
            "REL-ST2OT": 1, # subject tail to object tail
            "REL-OT2ST": 2, # object tail to subject tail
        }
        self.id2tag_tail_rel = {id_:tag for tag, id_ in self.tag2id_tail_rel.items()}

        # mapping shaking sequence and matrix
        self.matrix_size = max_seq_len
        # flat index -> (row, col), e.g. [(0, 0), (0, 1), (0, 2), (1, 1), (1, 2), (2, 2)]
        self.shaking_ind2matrix_ind = [(ind, end_ind) for ind in range(self.matrix_size) for end_ind in list(range(self.matrix_size))[ind:]]

        # inverse mapping: (row, col) in the upper triangle -> flat index
        self.matrix_ind2shaking_ind = [[0 for i in range(self.matrix_size)] for j in range(self.matrix_size)]
        for shaking_ind, matrix_ind in enumerate(self.shaking_ind2matrix_ind):
            self.matrix_ind2shaking_ind[matrix_ind[0]][matrix_ind[1]] = shaking_ind

    def get_spots(self, sample):
        '''
        entity spot and tail_rel spot: (span_pos1, span_pos2, tag_id)
        head_rel spot: (rel_id, span_pos1, span_pos2, tag_id)
        '''
        ent_matrix_spots, head_rel_matrix_spots, tail_rel_matrix_spots = [], [], []

        for rel in sample["relation_list"]:
            subj_tok_span = rel["subj_tok_span"]
            obj_tok_span = rel["obj_tok_span"]
            # token spans are end-exclusive, hence "- 1" for tail positions
            ent_matrix_spots.append((subj_tok_span[0], subj_tok_span[1] - 1, self.tag2id_ent["ENT-H2T"]))
            ent_matrix_spots.append((obj_tok_span[0], obj_tok_span[1] - 1, self.tag2id_ent["ENT-H2T"]))

            # only the upper triangle is stored, so always put the smaller
            # index first and encode the original direction in the tag id
            if subj_tok_span[0] <= obj_tok_span[0]:
                head_rel_matrix_spots.append((self.rel2id[rel["predicate"]], subj_tok_span[0], obj_tok_span[0], self.tag2id_head_rel["REL-SH2OH"]))
            else:
                head_rel_matrix_spots.append((self.rel2id[rel["predicate"]], obj_tok_span[0], subj_tok_span[0], self.tag2id_head_rel["REL-OH2SH"]))

            if subj_tok_span[1] <= obj_tok_span[1]:
                tail_rel_matrix_spots.append((self.rel2id[rel["predicate"]], subj_tok_span[1] - 1, obj_tok_span[1] - 1, self.tag2id_tail_rel["REL-ST2OT"]))
            else:
                tail_rel_matrix_spots.append((self.rel2id[rel["predicate"]], obj_tok_span[1] - 1, subj_tok_span[1] - 1, self.tag2id_tail_rel["REL-OT2ST"]))

        return ent_matrix_spots, head_rel_matrix_spots, tail_rel_matrix_spots

    def sharing_spots2shaking_tag(self, spots):
        '''
        convert spots to shaking seq tag
        spots: [(start_ind, end_ind, tag_id), ], for entiy
        return:
            shake_seq_tag: (shaking_seq_len, )
        '''
        shaking_seq_len = self.matrix_size * (self.matrix_size + 1) // 2
        shaking_seq_tag = torch.zeros(shaking_seq_len).long()
        for sp in spots:
            shaking_ind = self.matrix_ind2shaking_ind[sp[0]][sp[1]]
            shaking_seq_tag[shaking_ind] = sp[2]
        return shaking_seq_tag

    def spots2shaking_tag(self, spots):
        '''
        convert spots to shaking seq tag
        spots: [(rel_id, start_ind, end_ind, tag_id), ], for head relation and tail relation
        return:
            shake_seq_tag: (rel_size, shaking_seq_len, )
        '''
        shaking_seq_len = self.matrix_size * (self.matrix_size + 1) // 2
        shaking_seq_tag = torch.zeros(len(self.rel2id), shaking_seq_len).long()
        for sp in spots:
            shaking_ind = self.matrix_ind2shaking_ind[sp[1]][sp[2]]
            shaking_seq_tag[sp[0]][shaking_ind] = sp[3]
        return shaking_seq_tag

    def sharing_spots2shaking_tag4batch(self, batch_spots):
        '''
        convert spots to batch shaking seq tag
        Stacking long sequences is expensive, so this builds the whole batch
        tag tensor directly: generating one shaking tag per sample and then
        stacking costs ~1s for a batch of 32, which is too expensive.
        spots: [(start_ind, end_ind, tag_id), ], for entiy
        return:
            batch_shake_seq_tag: (batch_size, shaking_seq_len)
        '''
        shaking_seq_len = self.matrix_size * (self.matrix_size + 1) // 2
        batch_shaking_seq_tag = torch.zeros(len(batch_spots), shaking_seq_len).long()
        for batch_id, spots in enumerate(batch_spots):
            for sp in spots:
                shaking_ind = self.matrix_ind2shaking_ind[sp[0]][sp[1]]
                tag_id = sp[2]
                batch_shaking_seq_tag[batch_id][shaking_ind] = tag_id
        return batch_shaking_seq_tag

    def spots2shaking_tag4batch(self, batch_spots):
        '''
        convert spots to batch shaking seq tag
        spots: [(rel_id, start_ind, end_ind, tag_id), ], for head relation and tail_relation
        return:
            batch_shake_seq_tag: (batch_size, rel_size, shaking_seq_len)
        '''
        shaking_seq_len = self.matrix_size * (self.matrix_size + 1) // 2
        batch_shaking_seq_tag = torch.zeros(len(batch_spots), len(self.rel2id), shaking_seq_len).long()
        for batch_id, spots in enumerate(batch_spots):
            for sp in spots:
                shaking_ind = self.matrix_ind2shaking_ind[sp[1]][sp[2]]
                tag_id = sp[3]
                rel_id = sp[0]
                batch_shaking_seq_tag[batch_id][rel_id][shaking_ind] = tag_id
        return batch_shaking_seq_tag

    def get_spots_fr_shaking_tag(self, shaking_tag):
        '''
        shaking_tag -> spots
        shaking_tag: (rel_size, shaking_seq_len)
        spots: [(rel_id, start_ind, end_ind, tag_id), ]
        '''
        spots = []

        # nonzero() yields one (rel_id, flat_ind) pair per tagged cell
        for shaking_inds in shaking_tag.nonzero():
            rel_id = shaking_inds[0].item()
            tag_id = shaking_tag[rel_id][shaking_inds[1]].item()
            matrix_inds = self.shaking_ind2matrix_ind[shaking_inds[1]]
            spot = (rel_id, matrix_inds[0], matrix_inds[1], tag_id)
            spots.append(spot)
        return spots

    def get_sharing_spots_fr_shaking_tag(self, shaking_tag):
        '''
        shaking_tag -> spots
        shaking_tag: (shaking_seq_len, )
        spots: [(start_ind, end_ind, tag_id), ]
        '''
        spots = []

        for shaking_ind in shaking_tag.nonzero():
            shaking_ind_ = shaking_ind[0].item()
            # NOTE(review): tag_id stays a 0-dim tensor here, while the
            # relation variant above calls .item(); downstream equality
            # checks still work through tensor comparison — confirm the
            # asymmetry is intentional.
            tag_id = shaking_tag[shaking_ind_]
            matrix_inds = self.shaking_ind2matrix_ind[shaking_ind_]
            spot = (matrix_inds[0], matrix_inds[1], tag_id)
            spots.append(spot)
        return spots

    def decode_rel_fr_shaking_tag(self,
                                  text,
                                  ent_shaking_tag,
                                  head_rel_shaking_tag,
                                  tail_rel_shaking_tag,
                                  tok2char_span,
                                  tok_offset = 0, char_offset = 0):
        '''
        ent shaking tag: (shaking_seq_len, )
        head rel and tail rel shaking_tag: size = (rel_size, shaking_seq_len, )

        Decoding strategy: collect entities keyed by their head token, record
        all predicted tail-tail links in a memory set, then for every
        head-head link enumerate candidate (subject, object) entity pairs and
        keep those whose tail link is also predicted.
        '''
        rel_list = []

        ent_matrix_spots = self.get_sharing_spots_fr_shaking_tag(ent_shaking_tag)
        head_rel_matrix_spots = self.get_spots_fr_shaking_tag(head_rel_shaking_tag)
        tail_rel_matrix_spots = self.get_spots_fr_shaking_tag(tail_rel_shaking_tag)

        # entity
        head_ind2entities = {}
        for sp in ent_matrix_spots:
            tag_id = sp[2]
            if tag_id != self.tag2id_ent["ENT-H2T"]:
                continue

            # map token span back to character span to recover the surface text
            char_span_list = tok2char_span[sp[0]:sp[1] + 1]
            char_sp = [char_span_list[0][0], char_span_list[-1][1]]
            ent_text = text[char_sp[0]:char_sp[1]]

            head_key = sp[0] # take head as the key to entity list start with the head token
            if head_key not in head_ind2entities:
                head_ind2entities[head_key] = []
            head_ind2entities[head_key].append({
                "text": ent_text,
                "tok_span": [sp[0], sp[1] + 1],
                "char_span": char_sp,
            })

        # tail relation
        tail_rel_memory_set = set()
        for sp in tail_rel_matrix_spots:
            rel_id = sp[0]
            tag_id = sp[3]
            # normalize both directions to "rel-subj_tail-obj_tail" keys
            if tag_id == self.tag2id_tail_rel["REL-ST2OT"]:
                tail_rel_memory = "{}-{}-{}".format(rel_id, sp[1], sp[2])
                tail_rel_memory_set.add(tail_rel_memory)
            elif tag_id == self.tag2id_tail_rel["REL-OT2ST"]:
                tail_rel_memory = "{}-{}-{}".format(rel_id, sp[2], sp[1])
                tail_rel_memory_set.add(tail_rel_memory)

        # head relation
        for sp in head_rel_matrix_spots:
            rel_id = sp[0]
            tag_id = sp[3]

            if tag_id == self.tag2id_head_rel["REL-SH2OH"]:
                subj_head_key, obj_head_key = sp[1], sp[2]
            elif tag_id == self.tag2id_head_rel["REL-OH2SH"]:
                subj_head_key, obj_head_key = sp[2], sp[1]

            if subj_head_key not in head_ind2entities or obj_head_key not in head_ind2entities:
                # no entity start with subj_head_key and obj_head_key
                continue

            subj_list = head_ind2entities[subj_head_key] # all entities start with this subject head
            obj_list = head_ind2entities[obj_head_key] # all entities start with this object head

            # go over all subj-obj pair to check whether the relation exists
            for subj in subj_list:
                for obj in obj_list:
                    tail_rel_memory = "{}-{}-{}".format(rel_id, subj["tok_span"][1] - 1, obj["tok_span"][1] - 1)
                    if tail_rel_memory not in tail_rel_memory_set:
                        # no such relation
                        continue

                    rel_list.append({
                        "subject": subj["text"],
                        "object": obj["text"],
                        "subj_tok_span": [subj["tok_span"][0] + tok_offset, subj["tok_span"][1] + tok_offset],
                        "obj_tok_span": [obj["tok_span"][0] + tok_offset, obj["tok_span"][1] + tok_offset],
                        "subj_char_span": [subj["char_span"][0] + char_offset, subj["char_span"][1] + char_offset],
                        "obj_char_span": [obj["char_span"][0] + char_offset, obj["char_span"][1] + char_offset],
                        "predicate": self.id2rel[rel_id],
                    })
        return rel_list
class DataMaker4Bert():
    """Turns raw samples into BERT-indexed tensors and collates batches."""

    def __init__(self, tokenizer, handshaking_tagger):
        self.tokenizer = tokenizer
        self.handshaking_tagger = handshaking_tagger

    def get_indexed_data(self, data, max_seq_len, data_type = "train"):
        """Tokenize every sample; gold tag spots are attached unless test data."""
        indexed_samples = []
        for _, sample in tqdm(enumerate(data), desc = "Generate indexed train or valid data"):
            # codes for bert input
            codes = self.tokenizer.encode_plus(
                sample["text"],
                return_offsets_mapping = True,
                add_special_tokens = False,
                max_length = max_seq_len,
                truncation = True,
                pad_to_max_length = True,
            )

            # tag spots only exist when gold relations are available
            spots_tuple = self.handshaking_tagger.get_spots(sample) if data_type != "test" else None

            indexed_samples.append((
                sample,
                torch.tensor(codes["input_ids"]).long(),
                torch.tensor(codes["attention_mask"]).long(),
                torch.tensor(codes["token_type_ids"]).long(),
                codes["offset_mapping"],
                spots_tuple,
            ))
        return indexed_samples

    def generate_batch(self, batch_data, data_type = "train"):
        """Collate indexed samples into batch tensors; tags are None for test data."""
        sample_list = [tp[0] for tp in batch_data]
        batch_input_ids = torch.stack([tp[1] for tp in batch_data], dim = 0)
        batch_attention_mask = torch.stack([tp[2] for tp in batch_data], dim = 0)
        batch_token_type_ids = torch.stack([tp[3] for tp in batch_data], dim = 0)
        tok2char_span_list = [tp[4] for tp in batch_data]

        batch_ent_shaking_tag, batch_head_rel_shaking_tag, batch_tail_rel_shaking_tag = None, None, None
        if data_type != "test":
            ent_spots_list = [tp[5][0] for tp in batch_data]
            head_rel_spots_list = [tp[5][1] for tp in batch_data]
            tail_rel_spots_list = [tp[5][2] for tp in batch_data]
            batch_ent_shaking_tag = self.handshaking_tagger.sharing_spots2shaking_tag4batch(ent_spots_list)
            batch_head_rel_shaking_tag = self.handshaking_tagger.spots2shaking_tag4batch(head_rel_spots_list)
            batch_tail_rel_shaking_tag = self.handshaking_tagger.spots2shaking_tag4batch(tail_rel_spots_list)

        return sample_list, \
               batch_input_ids, batch_attention_mask, batch_token_type_ids, tok2char_span_list, \
               batch_ent_shaking_tag, batch_head_rel_shaking_tag, batch_tail_rel_shaking_tag
class DataMaker4BiLSTM():
    """Indexes samples for the BiLSTM encoder and collates batches."""

    def __init__(self, text2indices, get_tok2char_span_map, handshaking_tagger):
        self.text2indices = text2indices
        self.handshaking_tagger = handshaking_tagger
        self.get_tok2char_span_map = get_tok2char_span_map

    def get_indexed_data(self, data, max_seq_len, data_type = "train"):
        """Convert each sample's text to index tensors plus its char-span map."""
        indexed_samples = []
        for _, sample in tqdm(enumerate(data), desc = "Generate indexed train or valid data"):
            text = sample["text"]

            # tag spots only exist when gold relations are available
            spots_tuple = self.handshaking_tagger.get_spots(sample) if data_type != "test" else None

            # pad the token->char map with sentinel spans up to max_seq_len
            tok2char_span = self.get_tok2char_span_map(text)
            tok2char_span.extend([(-1, -1)] * (max_seq_len - len(tok2char_span)))
            input_ids = self.text2indices(text, max_seq_len)

            indexed_samples.append((sample, input_ids, tok2char_span, spots_tuple))
        return indexed_samples

    def generate_batch(self, batch_data, data_type = "train"):
        """Collate indexed samples; shaking tags are None for test data."""
        sample_list = [tp[0] for tp in batch_data]
        batch_input_ids = torch.stack([tp[1] for tp in batch_data], dim = 0)
        tok2char_span_list = [tp[2] for tp in batch_data]

        batch_ent_shaking_tag, batch_head_rel_shaking_tag, batch_tail_rel_shaking_tag = None, None, None
        if data_type != "test":
            ent_spots_list = [tp[3][0] for tp in batch_data]
            head_rel_spots_list = [tp[3][1] for tp in batch_data]
            tail_rel_spots_list = [tp[3][2] for tp in batch_data]
            batch_ent_shaking_tag = self.handshaking_tagger.sharing_spots2shaking_tag4batch(ent_spots_list)
            batch_head_rel_shaking_tag = self.handshaking_tagger.spots2shaking_tag4batch(head_rel_spots_list)
            batch_tail_rel_shaking_tag = self.handshaking_tagger.spots2shaking_tag4batch(tail_rel_spots_list)

        return sample_list, \
               batch_input_ids, tok2char_span_list, \
               batch_ent_shaking_tag, batch_head_rel_shaking_tag, batch_tail_rel_shaking_tag
class TPLinkerBert(nn.Module):
    """TPLinker model with a BERT-style encoder.

    Produces three sets of handshaking-sequence logits: entity (2 classes)
    plus relation head-to-head and tail-to-tail (3 classes each, with one
    classifier per relation type).
    """
    def __init__(self, encoder,
                 rel_size,
                 shaking_type,
                 inner_enc_type,
                 dist_emb_size,
                 ent_add_dist,
                 rel_add_dist
                 ):
        super().__init__()
        self.encoder = encoder
        hidden_size = encoder.config.hidden_size

        self.ent_fc = nn.Linear(hidden_size, 2)
        # one 3-way classifier per relation type, for heads and for tails
        self.head_rel_fc_list = [nn.Linear(hidden_size, 3) for _ in range(rel_size)]
        self.tail_rel_fc_list = [nn.Linear(hidden_size, 3) for _ in range(rel_size)]

        # plain Python lists are invisible to nn.Module, so register every
        # classifier's weight/bias by hand to keep them trainable/saveable
        for ind, fc in enumerate(self.head_rel_fc_list):
            self.register_parameter("weight_4_head_rel{}".format(ind), fc.weight)
            self.register_parameter("bias_4_head_rel{}".format(ind), fc.bias)
        for ind, fc in enumerate(self.tail_rel_fc_list):
            self.register_parameter("weight_4_tail_rel{}".format(ind), fc.weight)
            self.register_parameter("bias_4_tail_rel{}".format(ind), fc.bias)

        # handshaking kernel
        self.handshaking_kernel = HandshakingKernel(hidden_size, shaking_type, inner_enc_type)

        # distance embedding
        self.dist_emb_size = dist_emb_size
        self.dist_embbedings = None # it will be set in the first forwarding

        self.ent_add_dist = ent_add_dist
        self.rel_add_dist = rel_add_dist

    def forward(self, input_ids, attention_mask, token_type_ids):
        # input_ids, attention_mask, token_type_ids: (batch_size, seq_len)
        context_outputs = self.encoder(input_ids, attention_mask, token_type_ids)
        # last_hidden_state: (batch_size, seq_len, hidden_size)
        last_hidden_state = context_outputs[0]

        # shaking_hiddens: (batch_size, 1 + ... + seq_len, hidden_size)
        shaking_hiddens = self.handshaking_kernel(last_hidden_state)
        shaking_hiddens4ent = shaking_hiddens
        shaking_hiddens4rel = shaking_hiddens

        # add distance embeddings if it is set
        if self.dist_emb_size != -1:
            # lazily build sinusoidal distance embeddings once, laid out to
            # match the flattened upper-triangle order of the shaking sequence
            hidden_size = shaking_hiddens.size()[-1]
            if self.dist_embbedings is None:
                dist_emb = torch.zeros([self.dist_emb_size, hidden_size]).to(shaking_hiddens.device)
                for d in range(self.dist_emb_size):
                    for i in range(hidden_size):
                        if i % 2 == 0:
                            dist_emb[d][i] = math.sin(d / 10000**(i / hidden_size))
                        else:
                            dist_emb[d][i] = math.cos(d / 10000**((i - 1) / hidden_size))
                seq_len = input_ids.size()[1]
                dist_embbeding_segs = []
                # row `ind` of the pair matrix contributes distances 0..seq_len-ind-1
                for after_num in range(seq_len, 0, -1):
                    dist_embbeding_segs.append(dist_emb[:after_num, :])
                self.dist_embbedings = torch.cat(dist_embbeding_segs, dim = 0)

            if self.ent_add_dist:
                shaking_hiddens4ent = shaking_hiddens + self.dist_embbedings[None,:,:].repeat(shaking_hiddens.size()[0], 1, 1)
            if self.rel_add_dist:
                shaking_hiddens4rel = shaking_hiddens + self.dist_embbedings[None,:,:].repeat(shaking_hiddens.size()[0], 1, 1)

        ent_shaking_outputs = self.ent_fc(shaking_hiddens4ent)

        head_rel_shaking_outputs_list = []
        for fc in self.head_rel_fc_list:
            head_rel_shaking_outputs_list.append(fc(shaking_hiddens4rel))

        tail_rel_shaking_outputs_list = []
        for fc in self.tail_rel_fc_list:
            tail_rel_shaking_outputs_list.append(fc(shaking_hiddens4rel))

        head_rel_shaking_outputs = torch.stack(head_rel_shaking_outputs_list, dim = 1)
        tail_rel_shaking_outputs = torch.stack(tail_rel_shaking_outputs_list, dim = 1)

        return ent_shaking_outputs, head_rel_shaking_outputs, tail_rel_shaking_outputs
class TPLinkerBiLSTM(nn.Module):
    def __init__(self, init_word_embedding_matrix,
                 emb_dropout_rate,
                 enc_hidden_size,
                 dec_hidden_size,
                 rnn_dropout_rate,
                 rel_size,
                 shaking_type,
                 inner_enc_type,
                 dist_emb_size,
                 ent_add_dist,
                 rel_add_dist):
        """BiLSTM variant of TPLinker.

        Stacks two bidirectional LSTMs (encoder + decoder) over pretrained
        word embeddings, then applies the same handshaking classifiers as the
        BERT variant.
        """
        super().__init__()
        # embeddings are fine-tuned together with the model (freeze = False)
        self.word_embeds = nn.Embedding.from_pretrained(init_word_embedding_matrix, freeze = False)
        self.emb_dropout = nn.Dropout(emb_dropout_rate)
        # hidden sizes are halved because both LSTMs are bidirectional
        self.enc_lstm = nn.LSTM(init_word_embedding_matrix.size()[-1],
                                enc_hidden_size // 2,
                                num_layers = 1,
                                bidirectional = True,
                                batch_first = True)
        self.dec_lstm = nn.LSTM(enc_hidden_size,
                                dec_hidden_size // 2,
                                num_layers = 1,
                                bidirectional = True,
                                batch_first = True)
        self.rnn_dropout = nn.Dropout(rnn_dropout_rate)

        hidden_size = dec_hidden_size

        self.ent_fc = nn.Linear(hidden_size, 2)
        # one 3-way classifier per relation type, for heads and for tails
        self.head_rel_fc_list = [nn.Linear(hidden_size, 3) for _ in range(rel_size)]
        self.tail_rel_fc_list = [nn.Linear(hidden_size, 3) for _ in range(rel_size)]

        # plain Python lists are invisible to nn.Module, so register every
        # classifier's weight/bias by hand to keep them trainable/saveable
        for ind, fc in enumerate(self.head_rel_fc_list):
            self.register_parameter("weight_4_head_rel{}".format(ind), fc.weight)
            self.register_parameter("bias_4_head_rel{}".format(ind), fc.bias)
        for ind, fc in enumerate(self.tail_rel_fc_list):
            self.register_parameter("weight_4_tail_rel{}".format(ind), fc.weight)
            self.register_parameter("bias_4_tail_rel{}".format(ind), fc.bias)

        # handshaking kernel
        self.handshaking_kernel = HandshakingKernel(hidden_size, shaking_type, inner_enc_type)

        # distance embedding
        self.dist_emb_size = dist_emb_size
        self.dist_embbedings = None # it will be set in the first forwarding

        self.ent_add_dist = ent_add_dist
        self.rel_add_dist = rel_add_dist
def forward(self, input_ids):
# input_ids: (batch_size, seq_len)
# embedding: (batch_size, seq_len, emb_dim)
embedding = self.word_embeds(input_ids)
embedding = self.emb_dropout(embedding)
# lstm_outputs: (batch_size, seq_len, enc_hidden_size)
lstm_outputs, _ = self.enc_lstm(embedding)
lstm_outputs = self.rnn_dropout(lstm_outputs)
# lstm_outputs: (batch_size, seq_len, dec_hidden_size)
lstm_outputs, _ = self.dec_lstm(lstm_outputs)
lstm_outputs = self.rnn_dropout(lstm_outputs)
# shaking_hiddens: (batch_size, 1 + ... + seq_len, hidden_size)
shaking_hiddens = self.handshaking_kernel(lstm_outputs)
shaking_hiddens4ent = shaking_hiddens
shaking_hiddens4rel = shaking_hiddens
# add distance embeddings if it is set
if self.dist_emb_size != -1:
# set self.dist_embbedings
hidden_size = shaking_hiddens.size()[-1]
if self.dist_embbedings is None:
dist_emb = torch.zeros([self.dist_emb_size, hidden_size]).to(shaking_hiddens.device)
for d in range(self.dist_emb_size):
for i in range(hidden_size):
if i % 2 == 0:
dist_emb[d][i] = math.sin(d / 10000**(i / hidden_size))
else:
dist_emb[d][i] = math.cos(d / 10000**((i - 1) / hidden_size))
seq_len = input_ids.size()[1]
dist_embbeding_segs = []
for after_num in range(seq_len, 0, -1):
dist_embbeding_segs.append(dist_emb[:after_num, :])
self.dist_embbedings = torch.cat(dist_embbeding_segs, dim = 0)
if self.ent_add_dist:
shaking_hiddens4ent = shaking_hiddens + self.dist_embbedings[None,:,:].repeat(shaking_hiddens.size()[0], 1, 1)
if self.rel_add_dist:
shaking_hiddens4rel = shaking_hiddens + self.dist_embbedings[None,:,:].repeat(shaking_hiddens.size()[0], 1, 1)
ent_shaking_outputs = self.ent_fc(shaking_hiddens4ent)
head_rel_shaking_outputs_list = []
for fc in self.head_rel_fc_list:
head_rel_shaking_outputs_list.append(fc(shaking_hiddens4rel))
tail_rel_shaking_outputs_list = []
for fc in self.tail_rel_fc_list:
tail_rel_shaking_outputs_list.append(fc(shaking_hiddens4rel))
head_rel_shaking_outputs = torch.stack(head_rel_shaking_outputs_list, dim = 1)
tail_rel_shaking_outputs = torch.stack(tail_rel_shaking_outputs_list, dim = 1)
return ent_shaking_outputs, head_rel_shaking_outputs, tail_rel_shaking_outputs
class MetricsCalculator():
    """Sample-level accuracy and corpus-level P/R/F1 for handshaking outputs."""
    def __init__(self, handshaking_tagger):
        # Decoder used to turn predicted shaking tags back into relation lists.
        self.handshaking_tagger = handshaking_tagger

    def get_sample_accuracy(self, pred, truth):
        '''
        Fraction of samples in the batch whose predicted tag sequence matches
        the ground truth at EVERY position (exact-sequence accuracy).
        '''
        # (batch_size, ..., seq_len, tag_size) -> (batch_size, ..., seq_len)
        pred_id = torch.argmax(pred, dim = -1)
        # (batch_size, ..., seq_len) -> (batch_size, -1): flatten each sample into one sequence
        pred_id = pred_id.view(pred_id.size()[0], -1)
        truth = truth.view(truth.size()[0], -1)
        # (batch_size, ): number of positions where pred agrees with truth
        correct_tag_num = torch.sum(torch.eq(truth, pred_id).float(), dim = 1)
        # a sample counts as correct only when correct_tag_num equals the
        # full sequence length, i.e. all tags match
        sample_acc_ = torch.eq(correct_tag_num, torch.ones_like(correct_tag_num) * truth.size()[-1]).float()
        sample_acc = torch.mean(sample_acc_)
        return sample_acc

    def get_rel_cpg(self, sample_list, tok2char_span_list,
                    batch_pred_ent_shaking_outputs,
                    batch_pred_head_rel_shaking_outputs,
                    batch_pred_tail_rel_shaking_outputs,
                    pattern = "only_head_text"):
        """Return (correct, pred, gold) relation counts for one batch.

        `pattern` selects how a relation is serialized for set comparison.
        NOTE(review): an unknown `pattern` leaves gold_rel_set/pred_rel_set
        unbound and raises NameError — confirm callers only pass the four
        supported values.
        """
        # argmax over the tag dimension gives the discrete predicted tags
        batch_pred_ent_shaking_tag = torch.argmax(batch_pred_ent_shaking_outputs, dim = -1)
        batch_pred_head_rel_shaking_tag = torch.argmax(batch_pred_head_rel_shaking_outputs, dim = -1)
        batch_pred_tail_rel_shaking_tag = torch.argmax(batch_pred_tail_rel_shaking_outputs, dim = -1)
        correct_num, pred_num, gold_num = 0, 0, 0
        for ind in range(len(sample_list)):
            sample = sample_list[ind]
            text = sample["text"]
            tok2char_span = tok2char_span_list[ind]
            pred_ent_shaking_tag = batch_pred_ent_shaking_tag[ind]
            pred_head_rel_shaking_tag = batch_pred_head_rel_shaking_tag[ind]
            pred_tail_rel_shaking_tag = batch_pred_tail_rel_shaking_tag[ind]
            # decode the predicted tags into a relation list for this sample
            pred_rel_list = self.handshaking_tagger.decode_rel_fr_shaking_tag(text,
                                                                             pred_ent_shaking_tag,
                                                                             pred_head_rel_shaking_tag,
                                                                             pred_tail_rel_shaking_tag,
                                                                             tok2char_span)
            gold_rel_list = sample["relation_list"]
            # \u2E80 is used as a field separator unlikely to occur in text
            if pattern == "only_head_index":
                gold_rel_set = set(["{}\u2E80{}\u2E80{}".format(rel["subj_tok_span"][0], rel["predicate"], rel["obj_tok_span"][0]) for rel in gold_rel_list])
                pred_rel_set = set(["{}\u2E80{}\u2E80{}".format(rel["subj_tok_span"][0], rel["predicate"], rel["obj_tok_span"][0]) for rel in pred_rel_list])
            elif pattern == "whole_span":
                gold_rel_set = set(["{}\u2E80{}\u2E80{}\u2E80{}\u2E80{}".format(rel["subj_tok_span"][0], rel["subj_tok_span"][1], rel["predicate"], rel["obj_tok_span"][0], rel["obj_tok_span"][1]) for rel in gold_rel_list])
                pred_rel_set = set(["{}\u2E80{}\u2E80{}\u2E80{}\u2E80{}".format(rel["subj_tok_span"][0], rel["subj_tok_span"][1], rel["predicate"], rel["obj_tok_span"][0], rel["obj_tok_span"][1]) for rel in pred_rel_list])
            elif pattern == "whole_text":
                gold_rel_set = set(["{}\u2E80{}\u2E80{}".format(rel["subject"], rel["predicate"], rel["object"]) for rel in gold_rel_list])
                pred_rel_set = set(["{}\u2E80{}\u2E80{}".format(rel["subject"], rel["predicate"], rel["object"]) for rel in pred_rel_list])
            elif pattern == "only_head_text":
                gold_rel_set = set(["{}\u2E80{}\u2E80{}".format(rel["subject"].split(" ")[0], rel["predicate"], rel["object"].split(" ")[0]) for rel in gold_rel_list])
                pred_rel_set = set(["{}\u2E80{}\u2E80{}".format(rel["subject"].split(" ")[0], rel["predicate"], rel["object"].split(" ")[0]) for rel in pred_rel_list])
            for rel_str in pred_rel_set:
                if rel_str in gold_rel_set:
                    correct_num += 1
            pred_num += len(pred_rel_set)
            gold_num += len(gold_rel_set)
        return correct_num, pred_num, gold_num

    def get_prf_scores(self, correct_num, pred_num, gold_num):
        """Precision / recall / F1 from accumulated relation counts."""
        minimini = 1e-10  # smoothing term to avoid division by zero
        precision = correct_num / (pred_num + minimini)
        recall = correct_num / (gold_num + minimini)
        f1 = 2 * precision * recall / (precision + recall + minimini)
        return precision, recall, f1
TPlinker-joint-extraction | TPlinker-joint-extraction-master/tplinker/train.py | #!/usr/bin/env python
# coding: utf-8
# In[ ]:
import json
import os
from tqdm import tqdm
import re
from IPython.core.debugger import set_trace
from pprint import pprint
from transformers import AutoModel, BertTokenizerFast
import copy
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, Dataset
import torch.optim as optim
import glob
import time
import logging
from common.utils import Preprocessor, DefaultLogger
from tplinker import (HandshakingTaggingScheme,
DataMaker4Bert,
DataMaker4BiLSTM,
TPLinkerBert,
TPLinkerBiLSTM,
MetricsCalculator)
import wandb
import config
from glove import Glove
import numpy as np
# In[ ]:
# try:
# from yaml import CLoader as Loader, CDumper as Dumper
# except ImportError:
# from yaml import Loader, Dumper
# config = yaml.load(open("train_config.yaml", "r"), Loader = yaml.FullLoader)
# In[ ]:
# Resolve the concrete training configuration and its hyper-parameters.
config = config.train_config
hyper_parameters = config["hyper_parameters"]
# In[ ]:
os.environ["TOKENIZERS_PARALLELISM"] = "true"
# Pin the visible GPU before torch selects a device.
os.environ["CUDA_VISIBLE_DEVICES"] = str(config["device_num"])
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# In[ ]:
# for reproducibility
torch.manual_seed(hyper_parameters["seed"]) # pytorch random seed
torch.backends.cudnn.deterministic = True
# In[ ]:
# Dataset layout: <data_home>/<exp_name>/{train,valid,rel2id} files.
data_home = config["data_home"]
experiment_name = config["exp_name"]
train_data_path = os.path.join(data_home, experiment_name, config["train_data"])
valid_data_path = os.path.join(data_home, experiment_name, config["valid_data"])
rel2id_path = os.path.join(data_home, experiment_name, config["rel2id"])
# In[ ]:
if config["logger"] == "wandb":
    # init wandb; checkpoints are written into the wandb run directory
    wandb.init(project = experiment_name,
               name = config["run_name"],
               config = hyper_parameters # Initialize config
              )
    wandb.config.note = config["note"]
    model_state_dict_dir = wandb.run.dir
    logger = wandb
else:
    # Fall back to a simple file logger; checkpoints go to a configured dir.
    logger = DefaultLogger(config["log_path"], experiment_name, config["run_name"], config["run_id"], hyper_parameters)
    model_state_dict_dir = config["path_to_save_model"]
    if not os.path.exists(model_state_dict_dir):
        os.makedirs(model_state_dict_dir)
# # Load Data
# In[ ]:
train_data = json.load(open(train_data_path, "r", encoding = "utf-8"))
valid_data = json.load(open(valid_data_path, "r", encoding = "utf-8"))
# # Split
# In[ ]:
# @specific
# Choose tokenization and token->char span mapping per encoder type.
if config["encoder"] == "BERT":
    tokenizer = BertTokenizerFast.from_pretrained(config["bert_path"], add_special_tokens = False, do_lower_case = False)
    tokenize = tokenizer.tokenize
    # Map each token to its (start, end) character span in the raw text.
    get_tok2char_span_map = lambda text: tokenizer.encode_plus(text, return_offsets_mapping = True, add_special_tokens = False)["offset_mapping"]
elif config["encoder"] in {"BiLSTM", }:
    # Plain whitespace tokenization for the BiLSTM encoder.
    tokenize = lambda text: text.split(" ")
    def get_tok2char_span_map(text):
        """Character spans for whitespace tokens of `text`."""
        tokens = text.split(" ")
        tok2char_span = []
        char_num = 0
        for tok in tokens:
            tok2char_span.append((char_num, char_num + len(tok)))
            char_num += len(tok) + 1 # +1: whitespace
        return tok2char_span
# In[ ]:
preprocessor = Preprocessor(tokenize_func = tokenize,
                            get_tok2char_span_map_func = get_tok2char_span_map)
# In[ ]:
# train and valid max token num
max_tok_num = 0
all_data = train_data + valid_data
for sample in all_data:
    tokens = tokenize(sample["text"])
    max_tok_num = max(max_tok_num, len(tokens))
max_tok_num # notebook residue: bare expression, no effect when run as a script
# In[ ]:
# Split long documents into overlapping short samples when they exceed the
# configured max sequence length.
if max_tok_num > hyper_parameters["max_seq_len"]:
    train_data = preprocessor.split_into_short_samples(train_data,
                                                       hyper_parameters["max_seq_len"],
                                                       sliding_len = hyper_parameters["sliding_len"],
                                                       encoder = config["encoder"]
                                                      )
    valid_data = preprocessor.split_into_short_samples(valid_data,
                                                       hyper_parameters["max_seq_len"],
                                                       sliding_len = hyper_parameters["sliding_len"],
                                                       encoder = config["encoder"]
                                                      )
# In[ ]:
print("train: {}".format(len(train_data)), "valid: {}".format(len(valid_data)))
# # Tagger (Decoder)
# In[ ]:
# Effective sequence length: never longer than the data actually requires.
max_seq_len = min(max_tok_num, hyper_parameters["max_seq_len"])
rel2id = json.load(open(rel2id_path, "r", encoding = "utf-8"))
handshaking_tagger = HandshakingTaggingScheme(rel2id = rel2id, max_seq_len = max_seq_len)
# # Dataset
# In[ ]:
# Build the data maker matching the chosen encoder.
if config["encoder"] == "BERT":
    tokenizer = BertTokenizerFast.from_pretrained(config["bert_path"], add_special_tokens = False, do_lower_case = False)
    data_maker = DataMaker4Bert(tokenizer, handshaking_tagger)
elif config["encoder"] in {"BiLSTM", }:
    token2idx_path = os.path.join(data_home, experiment_name, config["token2idx"])
    token2idx = json.load(open(token2idx_path, "r", encoding = "utf-8"))
    idx2token = {idx:tok for tok, idx in token2idx.items()}
    def text2indices(text, max_seq_len):
        """Map whitespace tokens to vocab ids; pad with <PAD> / truncate to max_seq_len."""
        input_ids = []
        tokens = text.split(" ")
        for tok in tokens:
            if tok not in token2idx:
                input_ids.append(token2idx['<UNK>'])
            else:
                input_ids.append(token2idx[tok])
        if len(input_ids) < max_seq_len:
            input_ids.extend([token2idx['<PAD>']] * (max_seq_len - len(input_ids)))
        input_ids = torch.tensor(input_ids[:max_seq_len])
        return input_ids
    data_maker = DataMaker4BiLSTM(text2indices, get_tok2char_span_map, handshaking_tagger)
# In[ ]:
class MyDataset(Dataset):
    """Thin Dataset wrapper exposing a pre-indexed sample list to a DataLoader."""

    def __init__(self, data):
        self.data = data

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        return self.data[index]
# In[ ]:
# Pre-compute model inputs and shaking tags for every sample.
indexed_train_data = data_maker.get_indexed_data(train_data, max_seq_len)
indexed_valid_data = data_maker.get_indexed_data(valid_data, max_seq_len)
# In[ ]:
train_dataloader = DataLoader(MyDataset(indexed_train_data),
                              batch_size = hyper_parameters["batch_size"],
                              shuffle = True,
                              num_workers = 6,
                              drop_last = False,
                              collate_fn = data_maker.generate_batch,
                             )
# NOTE(review): shuffle=True on the validation loader does not change the
# aggregated metrics but is unnecessary — confirm it is intentional.
valid_dataloader = DataLoader(MyDataset(indexed_valid_data),
                              batch_size = hyper_parameters["batch_size"],
                              shuffle = True,
                              num_workers = 6,
                              drop_last = False,
                              collate_fn = data_maker.generate_batch,
                             )
# In[ ]:
# # have a look at dataloader
# train_data_iter = iter(train_dataloader)
# batch_data = next(train_data_iter)
# text_id_list, text_list, batch_input_ids, \
# batch_attention_mask, batch_token_type_ids, \
# offset_map_list, batch_ent_shaking_tag, \
# batch_head_rel_shaking_tag, batch_tail_rel_shaking_tag = batch_data
# print(text_list[0])
# print()
# print(tokenizer.decode(batch_input_ids[0].tolist()))
# print(batch_input_ids.size())
# print(batch_attention_mask.size())
# print(batch_token_type_ids.size())
# print(len(offset_map_list))
# print(batch_ent_shaking_tag.size())
# print(batch_head_rel_shaking_tag.size())
# print(batch_tail_rel_shaking_tag.size())
# # Model
# In[ ]:
# Instantiate the relation extractor for the configured encoder.
if config["encoder"] == "BERT":
    encoder = AutoModel.from_pretrained(config["bert_path"])
    hidden_size = encoder.config.hidden_size
    # fake_inputs is only built here; presumably a leftover for shape checks — TODO confirm
    fake_inputs = torch.zeros([hyper_parameters["batch_size"], max_seq_len, hidden_size]).to(device)
    rel_extractor = TPLinkerBert(encoder,
                                 len(rel2id),
                                 hyper_parameters["shaking_type"],
                                 hyper_parameters["inner_enc_type"],
                                 hyper_parameters["dist_emb_size"],
                                 hyper_parameters["ent_add_dist"],
                                 hyper_parameters["rel_add_dist"],
                                )
elif config["encoder"] in {"BiLSTM", }:
    glove = Glove()
    glove = glove.load(config["pretrained_word_embedding_path"])
    # prepare embedding matrix
    word_embedding_init_matrix = np.random.normal(-1, 1, size=(len(token2idx), hyper_parameters["word_embedding_dim"]))
    count_in = 0
    # Tokens found in the pretrained vocabulary take the pretrained vector;
    # out-of-vocabulary tokens keep their random initialization.
    for ind, tok in tqdm(idx2token.items(), desc="Embedding matrix initializing..."):
        if tok in glove.dictionary:
            count_in += 1
            word_embedding_init_matrix[ind] = glove.word_vectors[glove.dictionary[tok]]
    print("{:.4f} tokens are in the pretrain word embedding matrix".format(count_in / len(idx2token))) # fraction of tokens covered by pretrained vectors
    word_embedding_init_matrix = torch.FloatTensor(word_embedding_init_matrix)
    fake_inputs = torch.zeros([hyper_parameters["batch_size"], max_seq_len, hyper_parameters["dec_hidden_size"]]).to(device)
    rel_extractor = TPLinkerBiLSTM(word_embedding_init_matrix,
                                   hyper_parameters["emb_dropout"],
                                   hyper_parameters["enc_hidden_size"],
                                   hyper_parameters["dec_hidden_size"],
                                   hyper_parameters["rnn_dropout"],
                                   len(rel2id),
                                   hyper_parameters["shaking_type"],
                                   hyper_parameters["inner_enc_type"],
                                   hyper_parameters["dist_emb_size"],
                                   hyper_parameters["ent_add_dist"],
                                   hyper_parameters["rel_add_dist"],
                                  )
rel_extractor = rel_extractor.to(device)
# In[ ]:
# all_paras = sum(x.numel() for x in rel_extractor.parameters())
# enc_paras = sum(x.numel() for x in encoder.parameters())
# In[ ]:
# print(all_paras, enc_paras)
# print(all_paras - enc_paras)
# # Metrics
# In[ ]:
def bias_loss(weights = None):
    """Build a cross-entropy loss over flattened (..., tag_size) predictions.

    weights: optional per-class weights; moved to the global `device` when given.
    Returns a callable loss(pred, target).
    """
    weight_tensor = None
    if weights is not None:
        weight_tensor = torch.FloatTensor(weights).to(device)
    criterion = nn.CrossEntropyLoss(weight = weight_tensor)

    def _loss(pred, target):
        # collapse every leading dimension so CE sees (N, tag_size) vs (N,)
        return criterion(pred.view(-1, pred.size()[-1]), target.view(-1))

    return _loss
loss_func = bias_loss()  # unweighted cross entropy over flattened tag logits
# In[ ]:
metrics = MetricsCalculator(handshaking_tagger)
# # Train
# In[ ]:
# train step
def train_step(batch_train_data, optimizer, loss_weights):
    """Run one optimization step on a batch.

    Unpacks the batch for the configured encoder, computes the weighted sum
    of the entity / head-relation / tail-relation losses, backpropagates, and
    returns (loss, ent_acc, head_rel_acc, tail_rel_acc) as Python floats.
    Relies on module globals: config, device, rel_extractor, loss_func, metrics.
    """
    if config["encoder"] == "BERT":
        sample_list, batch_input_ids, batch_attention_mask, batch_token_type_ids, tok2char_span_list, batch_ent_shaking_tag, batch_head_rel_shaking_tag, batch_tail_rel_shaking_tag = batch_train_data
        batch_input_ids, batch_attention_mask, batch_token_type_ids, batch_ent_shaking_tag, batch_head_rel_shaking_tag, batch_tail_rel_shaking_tag = (batch_input_ids.to(device),
                                                                                                                                                     batch_attention_mask.to(device),
                                                                                                                                                     batch_token_type_ids.to(device),
                                                                                                                                                     batch_ent_shaking_tag.to(device),
                                                                                                                                                     batch_head_rel_shaking_tag.to(device),
                                                                                                                                                     batch_tail_rel_shaking_tag.to(device)
                                                                                                                                                    )
    elif config["encoder"] in {"BiLSTM", }:
        sample_list, batch_input_ids, tok2char_span_list, batch_ent_shaking_tag, batch_head_rel_shaking_tag, batch_tail_rel_shaking_tag = batch_train_data
        batch_input_ids, batch_ent_shaking_tag, batch_head_rel_shaking_tag, batch_tail_rel_shaking_tag = (batch_input_ids.to(device),
                                                                                                         batch_ent_shaking_tag.to(device),
                                                                                                         batch_head_rel_shaking_tag.to(device),
                                                                                                         batch_tail_rel_shaking_tag.to(device)
                                                                                                        )
    # zero the parameter gradients
    optimizer.zero_grad()
    if config["encoder"] == "BERT":
        ent_shaking_outputs, head_rel_shaking_outputs, tail_rel_shaking_outputs = rel_extractor(batch_input_ids,
                                                                                               batch_attention_mask,
                                                                                               batch_token_type_ids,
                                                                                              )
    elif config["encoder"] in {"BiLSTM", }:
        ent_shaking_outputs, head_rel_shaking_outputs, tail_rel_shaking_outputs = rel_extractor(batch_input_ids)
    # weighted multi-task loss: entity tags + head/tail relation links
    w_ent, w_rel = loss_weights["ent"], loss_weights["rel"]
    loss = w_ent * loss_func(ent_shaking_outputs, batch_ent_shaking_tag) + w_rel * loss_func(head_rel_shaking_outputs, batch_head_rel_shaking_tag) + w_rel * loss_func(tail_rel_shaking_outputs, batch_tail_rel_shaking_tag)
    loss.backward()
    optimizer.step()
    ent_sample_acc = metrics.get_sample_accuracy(ent_shaking_outputs,
                                                 batch_ent_shaking_tag)
    head_rel_sample_acc = metrics.get_sample_accuracy(head_rel_shaking_outputs,
                                                      batch_head_rel_shaking_tag)
    tail_rel_sample_acc = metrics.get_sample_accuracy(tail_rel_shaking_outputs,
                                                      batch_tail_rel_shaking_tag)
    return loss.item(), ent_sample_acc.item(), head_rel_sample_acc.item(), tail_rel_sample_acc.item()
# valid step
def valid_step(batch_valid_data):
    """Evaluate one batch without gradient tracking.

    Returns (ent_acc, head_rel_acc, tail_rel_acc, rel_cpg) where rel_cpg is
    the (correct, pred, gold) relation-count triple used for corpus P/R/F1.
    Relies on module globals: config, device, rel_extractor, metrics,
    hyper_parameters.
    """
    if config["encoder"] == "BERT":
        sample_list, batch_input_ids, batch_attention_mask, batch_token_type_ids, tok2char_span_list, batch_ent_shaking_tag, batch_head_rel_shaking_tag, batch_tail_rel_shaking_tag = batch_valid_data
        batch_input_ids, batch_attention_mask, batch_token_type_ids, batch_ent_shaking_tag, batch_head_rel_shaking_tag, batch_tail_rel_shaking_tag = (batch_input_ids.to(device),
                                                                                                                                                     batch_attention_mask.to(device),
                                                                                                                                                     batch_token_type_ids.to(device),
                                                                                                                                                     batch_ent_shaking_tag.to(device),
                                                                                                                                                     batch_head_rel_shaking_tag.to(device),
                                                                                                                                                     batch_tail_rel_shaking_tag.to(device)
                                                                                                                                                    )
    elif config["encoder"] in {"BiLSTM", }:
        sample_list, batch_input_ids, tok2char_span_list, batch_ent_shaking_tag, batch_head_rel_shaking_tag, batch_tail_rel_shaking_tag = batch_valid_data
        batch_input_ids, batch_ent_shaking_tag, batch_head_rel_shaking_tag, batch_tail_rel_shaking_tag = (batch_input_ids.to(device),
                                                                                                         batch_ent_shaking_tag.to(device),
                                                                                                         batch_head_rel_shaking_tag.to(device),
                                                                                                         batch_tail_rel_shaking_tag.to(device)
                                                                                                        )
    with torch.no_grad():
        if config["encoder"] == "BERT":
            ent_shaking_outputs, head_rel_shaking_outputs, tail_rel_shaking_outputs = rel_extractor(batch_input_ids,
                                                                                                   batch_attention_mask,
                                                                                                   batch_token_type_ids,
                                                                                                  )
        elif config["encoder"] in {"BiLSTM", }:
            ent_shaking_outputs, head_rel_shaking_outputs, tail_rel_shaking_outputs = rel_extractor(batch_input_ids)
    ent_sample_acc = metrics.get_sample_accuracy(ent_shaking_outputs,
                                                 batch_ent_shaking_tag)
    head_rel_sample_acc = metrics.get_sample_accuracy(head_rel_shaking_outputs,
                                                      batch_head_rel_shaking_tag)
    tail_rel_sample_acc = metrics.get_sample_accuracy(tail_rel_shaking_outputs,
                                                      batch_tail_rel_shaking_tag)
    # decode predictions and count (correct, pred, gold) relations
    rel_cpg = metrics.get_rel_cpg(sample_list, tok2char_span_list,
                                  ent_shaking_outputs,
                                  head_rel_shaking_outputs,
                                  tail_rel_shaking_outputs,
                                  hyper_parameters["match_pattern"]
                                 )
    return ent_sample_acc.item(), head_rel_sample_acc.item(), tail_rel_sample_acc.item(), rel_cpg
# In[ ]:
max_f1 = 0.  # best validation F1 seen so far (updated inside train_n_valid)
def train_n_valid(train_dataloader, dev_dataloader, optimizer, scheduler, num_epoch):
    """Main loop: each epoch trains on train_dataloader, validates, and
    checkpoints whenever the validation F1 improves past config["f1_2_save"].
    NOTE(review): the valid() call below uses the module-level
    valid_dataloader, not the dev_dataloader parameter — confirm intended.
    """
    def train(dataloader, ep):
        # train one epoch
        rel_extractor.train()
        t_ep = time.time()
        start_lr = optimizer.param_groups[0]['lr']
        total_loss, total_ent_sample_acc, total_head_rel_sample_acc, total_tail_rel_sample_acc = 0., 0., 0., 0.
        for batch_ind, batch_train_data in enumerate(dataloader):
            t_batch = time.time()
            z = (2 * len(rel2id) + 1)
            steps_per_ep = len(dataloader)
            total_steps = hyper_parameters["loss_weight_recover_steps"] + 1 # + 1 avoid division by zero error
            current_step = steps_per_ep * ep + batch_ind
            # Loss-weight schedule: start with the weight on the entity loss and
            # linearly shift it onto the relation losses over total_steps.
            w_ent = max(1 / z + 1 - current_step / total_steps, 1 / z)
            w_rel = min((len(rel2id) / z) * current_step / total_steps, (len(rel2id) / z))
            loss_weights = {"ent": w_ent, "rel": w_rel}
            loss, ent_sample_acc, head_rel_sample_acc, tail_rel_sample_acc = train_step(batch_train_data, optimizer, loss_weights)
            scheduler.step()
            total_loss += loss
            total_ent_sample_acc += ent_sample_acc
            total_head_rel_sample_acc += head_rel_sample_acc
            total_tail_rel_sample_acc += tail_rel_sample_acc
            avg_loss = total_loss / (batch_ind + 1)
            avg_ent_sample_acc = total_ent_sample_acc / (batch_ind + 1)
            avg_head_rel_sample_acc = total_head_rel_sample_acc / (batch_ind + 1)
            avg_tail_rel_sample_acc = total_tail_rel_sample_acc / (batch_ind + 1)
            # \r keeps the progress report on a single console line
            batch_print_format = "\rproject: {}, run_name: {}, Epoch: {}/{}, batch: {}/{}, train_loss: {}, " + "t_ent_sample_acc: {}, t_head_rel_sample_acc: {}, t_tail_rel_sample_acc: {}," + "lr: {}, batch_time: {}, total_time: {} -------------"
            print(batch_print_format.format(experiment_name, config["run_name"],
                                            ep + 1, num_epoch,
                                            batch_ind + 1, len(dataloader),
                                            avg_loss,
                                            avg_ent_sample_acc,
                                            avg_head_rel_sample_acc,
                                            avg_tail_rel_sample_acc,
                                            optimizer.param_groups[0]['lr'],
                                            time.time() - t_batch,
                                            time.time() - t_ep,
                                           ), end="")
            if config["logger"] == "wandb" and batch_ind % hyper_parameters["log_interval"] == 0:
                logger.log({
                    "train_loss": avg_loss,
                    "train_ent_seq_acc": avg_ent_sample_acc,
                    "train_head_rel_acc": avg_head_rel_sample_acc,
                    "train_tail_rel_acc": avg_tail_rel_sample_acc,
                    "learning_rate": optimizer.param_groups[0]['lr'],
                    "time": time.time() - t_ep,
                })
        if config["logger"] != "wandb": # only log once for training if logger is not wandb
            logger.log({
                "train_loss": avg_loss,
                "train_ent_seq_acc": avg_ent_sample_acc,
                "train_head_rel_acc": avg_head_rel_sample_acc,
                "train_tail_rel_acc": avg_tail_rel_sample_acc,
                "learning_rate": optimizer.param_groups[0]['lr'],
                "time": time.time() - t_ep,
            })
    def valid(dataloader, ep):
        # validate one epoch; returns the relation F1
        rel_extractor.eval()
        t_ep = time.time()
        total_ent_sample_acc, total_head_rel_sample_acc, total_tail_rel_sample_acc = 0., 0., 0.
        total_rel_correct_num, total_rel_pred_num, total_rel_gold_num = 0, 0, 0
        for batch_ind, batch_valid_data in enumerate(tqdm(dataloader, desc = "Validating")):
            ent_sample_acc, head_rel_sample_acc, tail_rel_sample_acc, rel_cpg = valid_step(batch_valid_data)
            total_ent_sample_acc += ent_sample_acc
            total_head_rel_sample_acc += head_rel_sample_acc
            total_tail_rel_sample_acc += tail_rel_sample_acc
            total_rel_correct_num += rel_cpg[0]
            total_rel_pred_num += rel_cpg[1]
            total_rel_gold_num += rel_cpg[2]
        avg_ent_sample_acc = total_ent_sample_acc / len(dataloader)
        avg_head_rel_sample_acc = total_head_rel_sample_acc / len(dataloader)
        avg_tail_rel_sample_acc = total_tail_rel_sample_acc / len(dataloader)
        # corpus-level precision/recall/F1 from the accumulated counts
        rel_prf = metrics.get_prf_scores(total_rel_correct_num, total_rel_pred_num, total_rel_gold_num)
        log_dict = {
            "val_ent_seq_acc": avg_ent_sample_acc,
            "val_head_rel_acc": avg_head_rel_sample_acc,
            "val_tail_rel_acc": avg_tail_rel_sample_acc,
            "val_prec": rel_prf[0],
            "val_recall": rel_prf[1],
            "val_f1": rel_prf[2],
            "time": time.time() - t_ep,
        }
        logger.log(log_dict)
        pprint(log_dict)
        return rel_prf[2]
    for ep in range(num_epoch):
        train(train_dataloader, ep)
        valid_f1 = valid(valid_dataloader, ep)
        global max_f1
        if valid_f1 >= max_f1:
            max_f1 = valid_f1
            if valid_f1 > config["f1_2_save"]: # save the best model
                # checkpoint index = number of checkpoints already on disk
                modle_state_num = len(glob.glob(model_state_dict_dir + "/model_state_dict_*.pt"))
                torch.save(rel_extractor.state_dict(), os.path.join(model_state_dict_dir, "model_state_dict_{}.pt".format(modle_state_num)))
                # scheduler_state_num = len(glob.glob(schedule_state_dict_dir + "/scheduler_state_dict_*.pt"))
                # torch.save(scheduler.state_dict(), os.path.join(schedule_state_dict_dir, "scheduler_state_dict_{}.pt".format(scheduler_state_num)))
        print("Current avf_f1: {}, Best f1: {}".format(valid_f1, max_f1))
# In[ ]:
# optimizer
init_learning_rate = float(hyper_parameters["lr"])
optimizer = torch.optim.Adam(rel_extractor.parameters(), lr = init_learning_rate)
if hyper_parameters["scheduler"] == "CAWR":
    # Cosine annealing with warm restarts; restart period given in epochs,
    # converted to steps via len(train_dataloader).
    T_mult = hyper_parameters["T_mult"]
    rewarm_epoch_num = hyper_parameters["rewarm_epoch_num"]
    scheduler = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(optimizer, len(train_dataloader) * rewarm_epoch_num, T_mult)
elif hyper_parameters["scheduler"] == "Step":
    # Simple step decay every decay_steps scheduler steps.
    decay_rate = hyper_parameters["decay_rate"]
    decay_steps = hyper_parameters["decay_steps"]
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size = decay_steps, gamma = decay_rate)
# In[ ]:
if not config["fr_scratch"]:
    # resume from a saved checkpoint instead of training from scratch
    model_state_path = config["model_state_dict_path"]
    rel_extractor.load_state_dict(torch.load(model_state_path))
    print("------------model state {} loaded ----------------".format(model_state_path.split("/")[-1]))
train_n_valid(train_dataloader, valid_dataloader, optimizer, scheduler, hyper_parameters["epochs"])
| 24,510 | 39.050654 | 310 | py |
TPlinker-joint-extraction | TPlinker-joint-extraction-master/tplinker_plus/tplinker_plus.py | import re
from tqdm import tqdm
import torch
from IPython.core.debugger import set_trace
import copy
import torch
import torch.nn as nn
import json
from torch.nn.parameter import Parameter
import torch.nn.functional as F
import math
from common.components import HandshakingKernel
from collections import Counter
class HandshakingTaggingScheme(object):
def __init__(self, rel2id, max_seq_len, entity_type2id):
    """Build the tag vocabulary and the shaking-sequence <-> matrix index maps.

    rel2id: relation name -> id. entity_type2id: entity type -> id.
    Tags are "<rel>\u2E80<link_type>" for the four relation link types plus
    "<ent_type>\u2E80EH2ET" for entities ("\u2E80" is the field separator).
    """
    super().__init__()
    self.rel2id = rel2id
    self.id2rel = {ind:rel for rel, ind in rel2id.items()}
    self.separator = "\u2E80"
    self.link_types = {"SH2OH", # subject head to object head
                       "OH2SH", # object head to subject head
                       "ST2OT", # subject tail to object tail
                       "OT2ST", # object tail to subject tail
                       }
    self.tags = {self.separator.join([rel, lt]) for rel in self.rel2id.keys() for lt in self.link_types}
    self.ent2id = entity_type2id
    self.id2ent = {ind:ent for ent, ind in self.ent2id.items()}
    self.tags |= {self.separator.join([ent, "EH2ET"]) for ent in self.ent2id.keys()} # EH2ET: entity head to entity tail
    # sort for a deterministic tag -> id assignment across runs
    self.tags = sorted(self.tags)
    self.tag2id = {t:idx for idx, t in enumerate(self.tags)}
    self.id2tag = {idx:t for t, idx in self.tag2id.items()}
    self.matrix_size = max_seq_len
    # map between the flattened upper-triangle index and (row, col) pairs
    # e.g. [(0, 0), (0, 1), (0, 2), (1, 1), (1, 2), (2, 2)]
    self.shaking_idx2matrix_idx = [(ind, end_ind) for ind in range(self.matrix_size) for end_ind in list(range(self.matrix_size))[ind:]]
    self.matrix_idx2shaking_idx = [[0 for i in range(self.matrix_size)] for j in range(self.matrix_size)]
    for shaking_ind, matrix_ind in enumerate(self.shaking_idx2matrix_idx):
        self.matrix_idx2shaking_idx[matrix_ind[0]][matrix_ind[1]] = shaking_ind
def get_tag_size(self):
return len(self.tag2id)
def get_spots(self, sample):
    '''
    Build matrix spots for one annotated sample.
    matrix_spots: [(tok_pos1, tok_pos2, tag_id), ]
    '''
    matrix_spots = []
    spot_memory_set = set()
    def add_spot(spot):
        # deduplicate spots via a string key
        memory = "{},{},{}".format(*spot)
        if memory not in spot_memory_set:
            matrix_spots.append(spot)
            spot_memory_set.add(memory)
    # # if entity_list exist, need to distinguish entity types
    # if self.ent2id is not None and "entity_list" in sample:
    for ent in sample["entity_list"]:
        # entity spot: (head, tail_inclusive, "<type>EH2ET" tag)
        add_spot((ent["tok_span"][0], ent["tok_span"][1] - 1, self.tag2id[self.separator.join([ent["type"], "EH2ET"])]))
    for rel in sample["relation_list"]:
        subj_tok_span = rel["subj_tok_span"]
        obj_tok_span = rel["obj_tok_span"]
        rel = rel["predicate"]
        # if self.ent2id is None: # set all entities to default type
        #     add_spot((subj_tok_span[0], subj_tok_span[1] - 1, self.tag2id[self.separator.join(["DEFAULT", "EH2ET"])]))
        #     add_spot((obj_tok_span[0], obj_tok_span[1] - 1, self.tag2id[self.separator.join(["DEFAULT", "EH2ET"])]))
        # Only the upper triangle is tagged, so choose the link direction that
        # keeps the first index <= the second.
        if subj_tok_span[0] <= obj_tok_span[0]:
            add_spot((subj_tok_span[0], obj_tok_span[0], self.tag2id[self.separator.join([rel, "SH2OH"])]))
        else:
            add_spot((obj_tok_span[0], subj_tok_span[0], self.tag2id[self.separator.join([rel, "OH2SH"])]))
        if subj_tok_span[1] <= obj_tok_span[1]:
            add_spot((subj_tok_span[1] - 1, obj_tok_span[1] - 1, self.tag2id[self.separator.join([rel, "ST2OT"])]))
        else:
            add_spot((obj_tok_span[1] - 1, subj_tok_span[1] - 1, self.tag2id[self.separator.join([rel, "OT2ST"])]))
    return matrix_spots
def spots2shaking_tag(self, spots):
'''
convert spots to matrix tag
spots: [(start_ind, end_ind, tag_id), ]
return:
shaking_tag: (shaking_seq_len, tag_size)
'''
shaking_seq_len = self.matrix_size * (self.matrix_size + 1) // 2
shaking_tag = torch.zeros(shaking_seq_len, len(self.tag2id)).long()
for sp in spots:
shaking_idx = self.matrix_idx2shaking_idx[sp[0]][sp[1]]
shaking_tag[shaking_idx][sp[2]] = 1
return shaking_tag
def spots2shaking_tag4batch(self, batch_spots):
'''
batch_spots: a batch of spots, [spots1, spots2, ...]
spots: [(start_ind, end_ind, tag_id), ]
return:
batch_shaking_tag: (batch_size, shaking_seq_len, tag_size)
'''
shaking_seq_len = self.matrix_size * (self.matrix_size + 1) // 2
batch_shaking_tag = torch.zeros(len(batch_spots), shaking_seq_len, len(self.tag2id)).long()
for batch_id, spots in enumerate(batch_spots):
for sp in spots:
shaking_idx = self.matrix_idx2shaking_idx[sp[0]][sp[1]]
batch_shaking_tag[batch_id][shaking_idx][sp[2]] = 1
return batch_shaking_tag
def get_spots_fr_shaking_tag(self, shaking_tag):
'''
shaking_tag -> spots
shaking_tag: (shaking_seq_len, tag_id)
spots: [(start_ind, end_ind, tag_id), ]
'''
spots = []
nonzero_points = torch.nonzero(shaking_tag, as_tuple = False)
for point in nonzero_points:
shaking_idx, tag_idx = point[0].item(), point[1].item()
pos1, pos2 = self.shaking_idx2matrix_idx[shaking_idx]
spot = (pos1, pos2, tag_idx)
spots.append(spot)
return spots
def decode_rel(self,
               text,
               shaking_tag,
               tok2char_span,
               tok_offset = 0, char_offset = 0):
    '''
    Decode entities and relations from a predicted shaking tag.
    shaking_tag: (shaking_seq_len, tag_id_num)
    tok_offset/char_offset shift the decoded spans back into the
    coordinates of the original (unsplit) text.
    Returns (rel_list, ent_list).
    '''
    rel_list = []
    matrix_spots = self.get_spots_fr_shaking_tag(shaking_tag)
    # entity decoding: collect EH2ET spots, indexed by their head position
    head_ind2entities = {}
    ent_list = []
    for sp in matrix_spots:
        tag = self.id2tag[sp[2]]
        ent_type, link_type = tag.split(self.separator)
        if link_type != "EH2ET" or sp[0] > sp[1]: # for an entity, the start position can not be larger than the end pos.
            continue
        char_span_list = tok2char_span[sp[0]:sp[1] + 1]
        char_sp = [char_span_list[0][0], char_span_list[-1][1]]
        ent_text = text[char_sp[0]:char_sp[1]]
        entity = {
            "type": ent_type,
            "text": ent_text,
            "tok_span": [sp[0], sp[1] + 1],
            "char_span": char_sp,
        }
        head_key = str(sp[0]) # take ent_head_pos as the key to entity list
        if head_key not in head_ind2entities:
            head_ind2entities[head_key] = []
        head_ind2entities[head_key].append(entity)
        ent_list.append(entity)
    # tail link: remember every "rel subj_tail obj_tail" combination seen
    tail_link_memory_set = set()
    for sp in matrix_spots:
        tag = self.id2tag[sp[2]]
        rel, link_type = tag.split(self.separator)
        if link_type == "ST2OT":
            tail_link_memory = self.separator.join([rel, str(sp[0]), str(sp[1])])
            tail_link_memory_set.add(tail_link_memory)
        elif link_type == "OT2ST":
            tail_link_memory = self.separator.join([rel, str(sp[1]), str(sp[0])])
            tail_link_memory_set.add(tail_link_memory)
    # head link: pair up entities whose heads are linked, then confirm the
    # corresponding tail link exists
    for sp in matrix_spots:
        tag = self.id2tag[sp[2]]
        rel, link_type = tag.split(self.separator)
        if link_type == "SH2OH":
            subj_head_key, obj_head_key = str(sp[0]), str(sp[1])
        elif link_type == "OH2SH":
            subj_head_key, obj_head_key = str(sp[1]), str(sp[0])
        else:
            continue
        if subj_head_key not in head_ind2entities or obj_head_key not in head_ind2entities:
            # no entity starts with subj_head_key or obj_head_key
            continue
        subj_list = head_ind2entities[subj_head_key] # all entities start with this subject head
        obj_list = head_ind2entities[obj_head_key] # all entities start with this object head
        # go over all subj-obj pair to check whether the tail link exists
        for subj in subj_list:
            for obj in obj_list:
                tail_link_memory = self.separator.join([rel, str(subj["tok_span"][1] - 1), str(obj["tok_span"][1] - 1)])
                if tail_link_memory not in tail_link_memory_set:
                    # no such relation
                    continue
                rel_list.append({
                    "subject": subj["text"],
                    "object": obj["text"],
                    "subj_tok_span": [subj["tok_span"][0] + tok_offset, subj["tok_span"][1] + tok_offset],
                    "obj_tok_span": [obj["tok_span"][0] + tok_offset, obj["tok_span"][1] + tok_offset],
                    "subj_char_span": [subj["char_span"][0] + char_offset, subj["char_span"][1] + char_offset],
                    "obj_char_span": [obj["char_span"][0] + char_offset, obj["char_span"][1] + char_offset],
                    "predicate": rel,
                })
    # recover the positions in the original text
    for ent in ent_list:
        ent["char_span"] = [ent["char_span"][0] + char_offset, ent["char_span"][1] + char_offset]
        ent["tok_span"] = [ent["tok_span"][0] + tok_offset, ent["tok_span"][1] + tok_offset]
    return rel_list, ent_list
def trans2ee(self, rel_list, ent_list):
sepatator = "_" # \u2E80
trigger_set, arg_iden_set, arg_class_set = set(), set(), set()
trigger_offset2vote = {}
trigger_offset2trigger_text = {}
trigger_offset2trigger_char_span = {}
# get candidate trigger types from relation
for rel in rel_list:
trigger_offset = rel["obj_tok_span"]
trigger_offset_str = "{},{}".format(trigger_offset[0], trigger_offset[1])
trigger_offset2trigger_text[trigger_offset_str] = rel["object"]
trigger_offset2trigger_char_span[trigger_offset_str] = rel["obj_char_span"]
_, event_type = rel["predicate"].split(sepatator)
if trigger_offset_str not in trigger_offset2vote:
trigger_offset2vote[trigger_offset_str] = {}
trigger_offset2vote[trigger_offset_str][event_type] = trigger_offset2vote[trigger_offset_str].get(event_type, 0) + 1
# get candidate trigger types from entity types
for ent in ent_list:
t1, t2 = ent["type"].split(sepatator)
assert t1 == "Trigger" or t1 == "Argument"
if t1 == "Trigger": # trigger
event_type = t2
trigger_span = ent["tok_span"]
trigger_offset_str = "{},{}".format(trigger_span[0], trigger_span[1])
trigger_offset2trigger_text[trigger_offset_str] = ent["text"]
trigger_offset2trigger_char_span[trigger_offset_str] = ent["char_span"]
if trigger_offset_str not in trigger_offset2vote:
trigger_offset2vote[trigger_offset_str] = {}
trigger_offset2vote[trigger_offset_str][event_type] = trigger_offset2vote[trigger_offset_str].get(event_type, 0) + 1.1 # if even, entity type makes the call
# voting
tirigger_offset2event = {}
for trigger_offet_str, event_type2score in trigger_offset2vote.items():
event_type = sorted(event_type2score.items(), key = lambda x: x[1], reverse = True)[0][0]
tirigger_offset2event[trigger_offet_str] = event_type # final event type
# generate event list
trigger_offset2arguments = {}
for rel in rel_list:
trigger_offset = rel["obj_tok_span"]
argument_role, event_type = rel["predicate"].split(sepatator)
trigger_offset_str = "{},{}".format(trigger_offset[0], trigger_offset[1])
if tirigger_offset2event[trigger_offset_str] != event_type: # filter false relations
# set_trace()
continue
# append arguments
if trigger_offset_str not in trigger_offset2arguments:
trigger_offset2arguments[trigger_offset_str] = []
trigger_offset2arguments[trigger_offset_str].append({
"text": rel["subject"],
"type": argument_role,
"char_span": rel["subj_char_span"],
"tok_span": rel["subj_tok_span"],
})
event_list = []
for trigger_offset_str, event_type in tirigger_offset2event.items():
arguments = trigger_offset2arguments[trigger_offset_str] if trigger_offset_str in trigger_offset2arguments else []
event = {
"trigger": trigger_offset2trigger_text[trigger_offset_str],
"trigger_char_span": trigger_offset2trigger_char_span[trigger_offset_str],
"trigger_tok_span": trigger_offset_str.split(","),
"trigger_type": event_type,
"argument_list": arguments,
}
event_list.append(event)
return event_list
class DataMaker4Bert():
    """Builds indexed samples and padded batches for the BERT-based TPLinker+."""

    def __init__(self, tokenizer, shaking_tagger):
        self.tokenizer = tokenizer
        self.shaking_tagger = shaking_tagger

    def get_indexed_data(self, data, max_seq_len, data_type = "train"):
        """Tokenize every sample; attach shaking-tag spots unless data_type == "test"."""
        indexed_samples = []
        for ind, sample in tqdm(enumerate(data), desc = "Generate indexed train or valid data"):
            text = sample["text"]
            # BERT input codes (padded/truncated to max_seq_len, no special tokens)
            codes = self.tokenizer.encode_plus(text,
                                    return_offsets_mapping = True,
                                    add_special_tokens = False,
                                    max_length = max_seq_len,
                                    truncation = True,
                                    pad_to_max_length = True)
            # tagging (skipped for test data, which has no gold annotations)
            matrix_spots = self.shaking_tagger.get_spots(sample) if data_type != "test" else None
            indexed_samples.append((sample,
                                    torch.tensor(codes["input_ids"]).long(),
                                    torch.tensor(codes["attention_mask"]).long(),
                                    torch.tensor(codes["token_type_ids"]).long(),
                                    codes["offset_mapping"],
                                    matrix_spots,
                                    ))
        return indexed_samples

    def generate_batch(self, batch_data, data_type = "train"):
        """Collate indexed samples into stacked tensors plus a batch shaking tag."""
        # transpose the list of per-sample tuples into per-field lists
        (sample_list,
         input_ids_list,
         attention_mask_list,
         token_type_ids_list,
         tok2char_span_list,
         matrix_spots_list) = map(list, zip(*batch_data))
        # @specific: indexed by bert tokenizer
        batch_input_ids = torch.stack(input_ids_list, dim = 0)
        batch_attention_mask = torch.stack(attention_mask_list, dim = 0)
        batch_token_type_ids = torch.stack(token_type_ids_list, dim = 0)
        batch_shaking_tag = None
        if data_type != "test":
            batch_shaking_tag = self.shaking_tagger.spots2shaking_tag4batch(matrix_spots_list)
        return sample_list, \
               batch_input_ids, batch_attention_mask, batch_token_type_ids, tok2char_span_list, \
               batch_shaking_tag
class DataMaker4BiLSTM():
    """Builds indexed samples and padded batches for the BiLSTM-based TPLinker+."""

    def __init__(self, text2indices, get_tok2char_span_map, shaking_tagger):
        self.text2indices = text2indices
        self.shaking_tagger = shaking_tagger
        self.get_tok2char_span_map = get_tok2char_span_map

    def get_indexed_data(self, data, max_seq_len, data_type = "train"):
        """Index every sample; attach shaking-tag spots unless data_type == "test"."""
        indexed_samples = []
        for ind, sample in tqdm(enumerate(data), desc = "Generate indexed train or valid data"):
            text = sample["text"]
            # tagging (skipped for test data, which has no gold annotations)
            matrix_spots = self.shaking_tagger.get_spots(sample) if data_type != "test" else None
            # token -> char span map, padded with (-1, -1) up to max_seq_len
            tok2char_span = self.get_tok2char_span_map(text)
            tok2char_span += [(-1, -1)] * (max_seq_len - len(tok2char_span))
            input_ids = self.text2indices(text, max_seq_len)
            indexed_samples.append((sample,
                                    input_ids,
                                    tok2char_span,
                                    matrix_spots,
                                    ))
        return indexed_samples

    def generate_batch(self, batch_data, data_type = "train"):
        """Collate indexed samples into a stacked id tensor plus a batch shaking tag."""
        # transpose the list of per-sample tuples into per-field lists
        (sample_list,
         input_ids_list,
         tok2char_span_list,
         matrix_spots_list) = map(list, zip(*batch_data))
        batch_input_ids = torch.stack(input_ids_list, dim = 0)
        batch_shaking_tag = None
        if data_type != "test":
            batch_shaking_tag = self.shaking_tagger.spots2shaking_tag4batch(matrix_spots_list)
        return sample_list, \
               batch_input_ids, tok2char_span_list, \
               batch_shaking_tag
class TPLinkerPlusBert(nn.Module):
    """TPLinker+ tag classifier over a BERT encoder.

    Scores every token pair produced by the handshaking kernel against
    ``tag_size`` link tags. During training, only a randomly chosen
    contiguous segment of token pairs is scored (controlled by
    ``tok_pair_sample_rate``) to bound memory usage.
    """
    def __init__(self, encoder,
                 tag_size,
                 shaking_type,
                 inner_enc_type,
                 tok_pair_sample_rate = 1):
        super().__init__()
        self.encoder = encoder
        # fraction of the shaking sequence scored per training step
        self.tok_pair_sample_rate = tok_pair_sample_rate
        shaking_hidden_size = encoder.config.hidden_size
        self.fc = nn.Linear(shaking_hidden_size, tag_size)
        # handshaking kernel
        self.handshaking_kernel = HandshakingKernel(shaking_hidden_size, shaking_type, inner_enc_type)

    def forward(self, input_ids,
                attention_mask,
                token_type_ids
               ):
        """Return (tag logits, sampled token-pair indices or None).

        input_ids, attention_mask, token_type_ids: (batch_size, seq_len)
        """
        context_outputs = self.encoder(input_ids, attention_mask, token_type_ids)
        # last_hidden_state: (batch_size, seq_len, hidden_size)
        last_hidden_state = context_outputs[0]
        # shaking_hiddens: (batch_size, shaking_seq_len, hidden_size)
        shaking_hiddens = self.handshaking_kernel(last_hidden_state)
        sampled_tok_pair_indices = None
        if self.training:
            # randomly sample a contiguous segment of token pairs
            shaking_seq_len = shaking_hiddens.size()[1]
            segment_len = int(shaking_seq_len * self.tok_pair_sample_rate)
            # BUGFIX: was math.ceil(shaking_seq_len // segment_len) — taking
            # ceil of a floor division is a no-op, so when shaking_seq_len is
            # not divisible by segment_len the trailing partial segment could
            # never be sampled.
            seg_num = math.ceil(shaking_seq_len / segment_len)
            start_ind = torch.randint(seg_num, []) * segment_len
            end_ind = min(start_ind + segment_len, shaking_seq_len)
            # sampled_tok_pair_indices: (batch_size, ~segment_len); end_ind - start_ind <= segment_len
            sampled_tok_pair_indices = torch.arange(start_ind, end_ind)[None, :].repeat(shaking_hiddens.size()[0], 1)
            sampled_tok_pair_indices = sampled_tok_pair_indices.to(shaking_hiddens.device)
            # keep only the sampled pairs: (batch_size, ~segment_len, hidden_size)
            shaking_hiddens = shaking_hiddens.gather(1, sampled_tok_pair_indices[:,:,None].repeat(1, 1, shaking_hiddens.size()[-1]))
        # outputs: (batch_size, segment_len, tag_size) or (batch_size, shaking_seq_len, tag_size)
        outputs = self.fc(shaking_hiddens)
        return outputs, sampled_tok_pair_indices
class TPLinkerPlusBiLSTM(nn.Module):
    """TPLinker+ tag classifier over a two-layer BiLSTM encoder.

    Same handshaking/sampling scheme as the BERT variant, with word
    embeddings initialized from ``init_word_embedding_matrix`` (trainable).
    """
    def __init__(self, init_word_embedding_matrix,
                 emb_dropout_rate,
                 enc_hidden_size,
                 dec_hidden_size,
                 rnn_dropout_rate,
                 tag_size,
                 shaking_type,
                 inner_enc_type,
                 tok_pair_sample_rate = 1
                ):
        super().__init__()
        self.word_embeds = nn.Embedding.from_pretrained(init_word_embedding_matrix, freeze = False)
        self.emb_dropout = nn.Dropout(emb_dropout_rate)
        # each LSTM is bidirectional, so hidden sizes are halved per direction
        self.enc_lstm = nn.LSTM(init_word_embedding_matrix.size()[-1],
                                enc_hidden_size // 2,
                                num_layers = 1,
                                bidirectional = True,
                                batch_first = True)
        self.dec_lstm = nn.LSTM(enc_hidden_size,
                                dec_hidden_size // 2,
                                num_layers = 1,
                                bidirectional = True,
                                batch_first = True)
        self.rnn_dropout = nn.Dropout(rnn_dropout_rate)
        # fraction of the shaking sequence scored per training step
        self.tok_pair_sample_rate = tok_pair_sample_rate
        shaking_hidden_size = dec_hidden_size
        self.fc = nn.Linear(shaking_hidden_size, tag_size)
        # handshaking kernel
        self.handshaking_kernel = HandshakingKernel(shaking_hidden_size, shaking_type, inner_enc_type)

    def forward(self, input_ids):
        """Return (tag logits, sampled token-pair indices or None).

        input_ids: (batch_size, seq_len)
        """
        # embedding: (batch_size, seq_len, emb_dim)
        embedding = self.word_embeds(input_ids)
        embedding = self.emb_dropout(embedding)
        # lstm_outputs: (batch_size, seq_len, enc_hidden_size)
        lstm_outputs, _ = self.enc_lstm(embedding)
        lstm_outputs = self.rnn_dropout(lstm_outputs)
        # lstm_outputs: (batch_size, seq_len, dec_hidden_size)
        lstm_outputs, _ = self.dec_lstm(lstm_outputs)
        lstm_outputs = self.rnn_dropout(lstm_outputs)
        # shaking_hiddens: (batch_size, shaking_seq_len, dec_hidden_size)
        shaking_hiddens = self.handshaking_kernel(lstm_outputs)
        sampled_tok_pair_indices = None
        if self.training:
            # randomly sample a contiguous segment of token pairs
            shaking_seq_len = shaking_hiddens.size()[1]
            segment_len = int(shaking_seq_len * self.tok_pair_sample_rate)
            # BUGFIX: was math.ceil(shaking_seq_len // segment_len) — taking
            # ceil of a floor division is a no-op, so when shaking_seq_len is
            # not divisible by segment_len the trailing partial segment could
            # never be sampled.
            seg_num = math.ceil(shaking_seq_len / segment_len)
            start_ind = torch.randint(seg_num, []) * segment_len
            end_ind = min(start_ind + segment_len, shaking_seq_len)
            # sampled_tok_pair_indices: (batch_size, ~segment_len); end_ind - start_ind <= segment_len
            sampled_tok_pair_indices = torch.arange(start_ind, end_ind)[None, :].repeat(shaking_hiddens.size()[0], 1)
            sampled_tok_pair_indices = sampled_tok_pair_indices.to(shaking_hiddens.device)
            # keep only the sampled pairs: (batch_size, ~segment_len, hidden_size)
            shaking_hiddens = shaking_hiddens.gather(1, sampled_tok_pair_indices[:,:,None].repeat(1, 1, shaking_hiddens.size()[-1]))
        # outputs: (batch_size, segment_len, tag_size) or (batch_size, shaking_seq_len, tag_size)
        outputs = self.fc(shaking_hiddens)
        return outputs, sampled_tok_pair_indices
class MetricsCalculator():
    """Loss and evaluation metrics for TPLinker+.

    Wraps a handshaking tagger so predicted shaking tags can be decoded
    back into relations/entities/events and scored against gold data.
    """
    def __init__(self, shaking_tagger):
        self.shaking_tagger = shaking_tagger
        self.last_weights = None # for exponential moving averaging

    def GHM(self, gradient, bins = 10, beta = 0.9):
        '''
        Gradient Harmonizing Mechanism: reweight examples so that densely
        populated loss bins (mostly easy examples) contribute less.
        gradient: per-example losses of this batch; (batch_size, shaking_seq_len)
        beta: EMA coefficient smoothing bin weights across training steps
        '''
        avg = torch.mean(gradient)
        std = torch.std(gradient) + 1e-12
        gradient_norm = torch.sigmoid((gradient - avg) / std) # normalization and pass through sigmoid to 0 ~ 1.
        min_, max_ = torch.min(gradient_norm), torch.max(gradient_norm)
        gradient_norm = (gradient_norm - min_) / (max_ - min_)
        gradient_norm = torch.clamp(gradient_norm, 0, 0.9999999) # ensure elements in gradient_norm != 1.
        example_sum = torch.flatten(gradient_norm).size()[0] # N
        # calculate per-bin weights: bins with more hits get smaller weights
        current_weights = torch.zeros(bins).to(gradient.device)
        count_hits = 0 # count of examples already assigned to previous bins
        for i in range(bins):
            bar = float((i + 1) / bins)
            hits = torch.sum((gradient_norm <= bar)) - count_hits
            count_hits += hits
            current_weights[i] = example_sum / bins / (hits.item() + example_sum / bins )
        # EMA: exponential moving averaging of bin weights over steps
        if self.last_weights is None:
            self.last_weights = torch.ones(bins).to(gradient.device) # init by ones
        current_weights = self.last_weights * beta + (1 - beta) * current_weights
        self.last_weights = current_weights
        # weights4examples: pick each example's weight from its bin
        weight_pk_idx = (gradient_norm / (1 / bins)).long()[:, :, None]
        weights_rp = current_weights[None, None, :].repeat(gradient_norm.size()[0], gradient_norm.size()[1], 1)
        weights4examples = torch.gather(weights_rp, -1, weight_pk_idx).squeeze(-1)
        weights4examples /= torch.sum(weights4examples)
        return weights4examples * gradient # return weighted gradients

    # loss func
    def _multilabel_categorical_crossentropy(self, y_pred, y_true, ghm = True):
        """
        y_pred: (batch_size, shaking_seq_len, type_size)
        y_true: (batch_size, shaking_seq_len, type_size)
        y_true and y_pred have the same shape; elements in y_true are either 0 or 1,
        1 tags positive classes, 0 tags negative classes (tok-pair does not have
        this type of link).
        """
        y_pred = (1 - 2 * y_true) * y_pred # -1 -> pos classes, 1 -> neg classes
        y_pred_neg = y_pred - y_true * 1e12 # mask the pred outputs of pos classes
        y_pred_pos = y_pred - (1 - y_true) * 1e12 # mask the pred outputs of neg classes
        zeros = torch.zeros_like(y_pred[..., :1]) # st - st
        y_pred_neg = torch.cat([y_pred_neg, zeros], dim = -1)
        y_pred_pos = torch.cat([y_pred_pos, zeros], dim = -1)
        neg_loss = torch.logsumexp(y_pred_neg, dim = -1)
        pos_loss = torch.logsumexp(y_pred_pos, dim = -1)
        if ghm:
            return (self.GHM(neg_loss + pos_loss, bins = 1000)).sum()
        else:
            return (neg_loss + pos_loss).mean()

    def loss_func(self, y_pred, y_true, ghm):
        """Multilabel categorical cross-entropy, optionally GHM-weighted."""
        return self._multilabel_categorical_crossentropy(y_pred, y_true, ghm = ghm)

    def get_sample_accuracy(self, pred, truth):
        '''
        Fraction of samples in this batch whose predicted tags exactly match
        the gold tags at every position.
        '''
        # (batch_size, ..., seq_len) -> (batch_size, seq_len)
        pred = pred.view(pred.size()[0], -1)
        truth = truth.view(truth.size()[0], -1)
        # (batch_size, ): number of positions where pred agrees with truth
        correct_tag_num = torch.sum(torch.eq(truth, pred).float(), dim = 1)
        # a sample counts as correct only if every tag on the sequence is right
        sample_acc_ = torch.eq(correct_tag_num, torch.ones_like(correct_tag_num) * truth.size()[-1]).float()
        sample_acc = torch.mean(sample_acc_)
        return sample_acc

    def get_mark_sets_event(self, event_list):
        """Build the four match-key sets used for event-extraction scoring."""
        trigger_iden_set, trigger_class_set, arg_iden_set, arg_class_set = set(), set(), set(), set()
        for event in event_list:
            event_type = event["trigger_type"]
            trigger_offset = event["trigger_tok_span"]
            trigger_iden_set.add("{}\u2E80{}".format(trigger_offset[0], trigger_offset[1]))
            trigger_class_set.add("{}\u2E80{}\u2E80{}".format(event_type, trigger_offset[0], trigger_offset[1]))
            for arg in event["argument_list"]:
                argument_offset = arg["tok_span"]
                argument_role = arg["type"]
                arg_iden_set.add("{}\u2E80{}\u2E80{}\u2E80{}\u2E80{}".format(event_type, trigger_offset[0], trigger_offset[1], argument_offset[0], argument_offset[1]))
                arg_class_set.add("{}\u2E80{}\u2E80{}\u2E80{}\u2E80{}\u2E80{}".format(event_type, trigger_offset[0], trigger_offset[1], argument_offset[0], argument_offset[1], argument_role))
        return trigger_iden_set, \
             trigger_class_set, \
             arg_iden_set, \
             arg_class_set

    def get_mark_sets_rel(self, rel_list, ent_list, pattern = "only_head_text"):
        """Build (rel_set, ent_set) match-key sets under the given pattern.

        Raises ValueError for an unknown pattern (previously this fell
        through and crashed with UnboundLocalError).
        """
        if pattern == "only_head_index":
            rel_set = set(["{}\u2E80{}\u2E80{}".format(rel["subj_tok_span"][0], rel["predicate"], rel["obj_tok_span"][0]) for rel in rel_list])
            ent_set = set(["{}\u2E80{}".format(ent["tok_span"][0], ent["type"]) for ent in ent_list])
        elif pattern == "whole_span":
            rel_set = set(["{}\u2E80{}\u2E80{}\u2E80{}\u2E80{}".format(rel["subj_tok_span"][0], rel["subj_tok_span"][1], rel["predicate"], rel["obj_tok_span"][0], rel["obj_tok_span"][1]) for rel in rel_list])
            ent_set = set(["{}\u2E80{}\u2E80{}".format(ent["tok_span"][0], ent["tok_span"][1], ent["type"]) for ent in ent_list])
        elif pattern == "whole_text":
            rel_set = set(["{}\u2E80{}\u2E80{}".format(rel["subject"], rel["predicate"], rel["object"]) for rel in rel_list])
            ent_set = set(["{}\u2E80{}".format(ent["text"], ent["type"]) for ent in ent_list])
        elif pattern == "only_head_text":
            rel_set = set(["{}\u2E80{}\u2E80{}".format(rel["subject"].split(" ")[0], rel["predicate"], rel["object"].split(" ")[0]) for rel in rel_list])
            ent_set = set(["{}\u2E80{}".format(ent["text"].split(" ")[0], ent["type"]) for ent in ent_list])
        else:
            raise ValueError("Unknown match pattern: {}".format(pattern))
        return rel_set, ent_set

    def _cal_cpg(self, pred_set, gold_set, cpg):
        '''
        Accumulate counts in place; cpg is a list: [correct_num, pred_num, gold_num]
        '''
        for mark_str in pred_set:
            if mark_str in gold_set:
                cpg[0] += 1
        cpg[1] += len(pred_set)
        cpg[2] += len(gold_set)

    def cal_rel_cpg(self, pred_rel_list, pred_ent_list, gold_rel_list, gold_ent_list, ere_cpg_dict, pattern):
        '''
        Accumulate relation/entity counts in place.
        ere_cpg_dict = {
            "rel_cpg": [0, 0, 0],
            "ent_cpg": [0, 0, 0],
        }
        pattern: metric pattern (see get_mark_sets_rel)
        '''
        gold_rel_set, gold_ent_set = self.get_mark_sets_rel(gold_rel_list, gold_ent_list, pattern)
        pred_rel_set, pred_ent_set = self.get_mark_sets_rel(pred_rel_list, pred_ent_list, pattern)
        self._cal_cpg(pred_rel_set, gold_rel_set, ere_cpg_dict["rel_cpg"])
        self._cal_cpg(pred_ent_set, gold_ent_set, ere_cpg_dict["ent_cpg"])

    def cal_event_cpg(self, pred_event_list, gold_event_list, ee_cpg_dict):
        '''
        Accumulate event-extraction counts in place.
        ee_cpg_dict = {
            "trigger_iden_cpg": [0, 0, 0],
            "trigger_class_cpg": [0, 0, 0],
            "arg_iden_cpg": [0, 0, 0],
            "arg_class_cpg": [0, 0, 0],
        }
        '''
        pred_trigger_iden_set, \
        pred_trigger_class_set, \
        pred_arg_iden_set, \
        pred_arg_class_set = self.get_mark_sets_event(pred_event_list)
        gold_trigger_iden_set, \
        gold_trigger_class_set, \
        gold_arg_iden_set, \
        gold_arg_class_set = self.get_mark_sets_event(gold_event_list)
        self._cal_cpg(pred_trigger_iden_set, gold_trigger_iden_set, ee_cpg_dict["trigger_iden_cpg"])
        self._cal_cpg(pred_trigger_class_set, gold_trigger_class_set, ee_cpg_dict["trigger_class_cpg"])
        self._cal_cpg(pred_arg_iden_set, gold_arg_iden_set, ee_cpg_dict["arg_iden_cpg"])
        self._cal_cpg(pred_arg_class_set, gold_arg_class_set, ee_cpg_dict["arg_class_cpg"])

    def get_cpg(self, sample_list,
                tok2char_span_list,
                batch_pred_shaking_tag,
                pattern = "only_head_text"):
        '''
        Decode the batch predictions and return correct/pred/gold counts (cpg).
        Returns the event dict when pattern == "event_extraction", else the
        relation/entity dict.
        '''
        ee_cpg_dict = {
            "trigger_iden_cpg": [0, 0, 0],
            "trigger_class_cpg": [0, 0, 0],
            "arg_iden_cpg": [0, 0, 0],
            "arg_class_cpg": [0, 0, 0],
        }
        ere_cpg_dict = {
            "rel_cpg": [0, 0, 0],
            "ent_cpg": [0, 0, 0],
        }
        # go through all sentences
        for ind in range(len(sample_list)):
            sample = sample_list[ind]
            text = sample["text"]
            tok2char_span = tok2char_span_list[ind]
            pred_shaking_tag = batch_pred_shaking_tag[ind]
            pred_rel_list, pred_ent_list = self.shaking_tagger.decode_rel(text,
                                                                          pred_shaking_tag,
                                                                          tok2char_span) # decoding
            gold_rel_list = sample["relation_list"]
            gold_ent_list = sample["entity_list"]
            if pattern == "event_extraction":
                pred_event_list = self.shaking_tagger.trans2ee(pred_rel_list, pred_ent_list) # transform to event list
                gold_event_list = sample["event_list"]
                self.cal_event_cpg(pred_event_list, gold_event_list, ee_cpg_dict)
            else:
                self.cal_rel_cpg(pred_rel_list, pred_ent_list, gold_rel_list, gold_ent_list, ere_cpg_dict, pattern)
        if pattern == "event_extraction":
            return ee_cpg_dict
        else:
            return ere_cpg_dict

    def get_prf_scores(self, correct_num, pred_num, gold_num):
        """Return (precision, recall, f1); minimini avoids division by zero."""
        minimini = 1e-12
        precision = correct_num / (pred_num + minimini)
        recall = correct_num / (gold_num + minimini)
        f1 = 2 * precision * recall / (precision + recall + minimini)
        return precision, recall, f1
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
import json
import os
from tqdm import tqdm
import re
from IPython.core.debugger import set_trace
from pprint import pprint
import unicodedata
from transformers import BertModel, BertTokenizerFast
import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader, Dataset
import torch.optim as optim
import glob
import time
import logging
from common.utils import Preprocessor, DefaultLogger
from tplinker_plus import (HandshakingTaggingScheme,
DataMaker4Bert,
DataMaker4BiLSTM,
TPLinkerPlusBert,
TPLinkerPlusBiLSTM,
MetricsCalculator)
import wandb
from glove import Glove
import numpy as np
import config
# In[ ]:
# NOTE: rebinds the imported ``config`` module name to its train_config dict;
# the module object is no longer reachable under that name afterwards.
config = config.train_config
hyper_parameters = config["hyper_parameters"]
# In[ ]:
# Pin the visible GPU and pick the compute device.
os.environ["TOKENIZERS_PARALLELISM"] = "true"
os.environ["CUDA_VISIBLE_DEVICES"] = str(config["device_num"])
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# In[ ]:
# for reproducibility
torch.manual_seed(hyper_parameters["seed"]) # pytorch random seed
torch.backends.cudnn.deterministic = True
# In[ ]:
# Resolve dataset/label-map paths under <data_home>/<exp_name>/.
data_home = config["data_home"]
experiment_name = config["exp_name"]
train_data_path = os.path.join(data_home, experiment_name, config["train_data"])
valid_data_path = os.path.join(data_home, experiment_name, config["valid_data"])
rel2id_path = os.path.join(data_home, experiment_name, config["rel2id"])
ent2id_path = os.path.join(data_home, experiment_name, config["ent2id"])
# In[ ]:
# Logger backend: wandb (checkpoints saved in the run dir) or a plain file logger.
if config["logger"] == "wandb":
    # init wandb
    wandb.init(project = experiment_name,
               name = config["run_name"],
               config = hyper_parameters # Initialize config
              )
    wandb.config.note = config["note"]
    model_state_dict_dir = wandb.run.dir
    logger = wandb
else:
    logger = DefaultLogger(config["log_path"], experiment_name, config["run_name"], config["run_id"], hyper_parameters)
    model_state_dict_dir = config["path_to_save_model"]
    if not os.path.exists(model_state_dict_dir):
        os.makedirs(model_state_dict_dir)
# # Load Data
# In[ ]:
train_data = json.load(open(train_data_path, "r", encoding = "utf-8"))
valid_data = json.load(open(valid_data_path, "r", encoding = "utf-8"))
# # Split
# In[ ]:
# @specific
# Choose tokenization and the token -> char-span mapping per encoder type.
if config["encoder"] == "BERT":
    tokenizer = BertTokenizerFast.from_pretrained(config["bert_path"], add_special_tokens = False, do_lower_case = False)
    tokenize = tokenizer.tokenize
    get_tok2char_span_map = lambda text: tokenizer.encode_plus(text, return_offsets_mapping = True, add_special_tokens = False)["offset_mapping"]
elif config["encoder"] in {"BiLSTM", }:
    tokenize = lambda text: text.split(" ")
    def get_tok2char_span_map(text):
        # Whitespace tokenization: span i is (start, end) of token i's
        # characters, end exclusive.
        tokens = text.split(" ")
        tok2char_span = []
        char_num = 0
        for tok in tokens:
            tok2char_span.append((char_num, char_num + len(tok)))
            char_num += len(tok) + 1 # +1: whitespace
        return tok2char_span
# In[ ]:
preprocessor = Preprocessor(tokenize_func = tokenize,
                            get_tok2char_span_map_func = get_tok2char_span_map)
# In[ ]:
# train and valid max token num
max_tok_num = 0
all_data = train_data + valid_data
for sample in all_data:
    tokens = tokenize(sample["text"])
    max_tok_num = max(max_tok_num, len(tokens))
max_tok_num # notebook-style bare expression; has no effect when run as a script
# In[ ]:
# Samples longer than max_seq_len are split into overlapping sliding windows.
if max_tok_num > hyper_parameters["max_seq_len"]:
    train_data = preprocessor.split_into_short_samples(train_data,
                                                      hyper_parameters["max_seq_len"],
                                                      sliding_len = hyper_parameters["sliding_len"],
                                                      encoder = config["encoder"]
                                                     )
    valid_data = preprocessor.split_into_short_samples(valid_data,
                                                      hyper_parameters["max_seq_len"],
                                                      sliding_len = hyper_parameters["sliding_len"],
                                                      encoder = config["encoder"]
                                                     )
# In[ ]:
print("train: {}".format(len(train_data)), "valid: {}".format(len(valid_data)))
# In[ ]:
# count_neg = 0 # 74.8% are neg samples 0.7485367594575303
# for example in train_data + valid_data:
#     if len(example["relation_list"]) == 0 and len(example["entity_list"]) == 0:
#         count_neg += 1
# print(count_neg/len(indexed_train_data + indexed_valid_data))
# # Tagger (Decoder)
# In[ ]:
# Build the handshaking tagging scheme from the relation/entity label maps.
max_seq_len = min(max_tok_num, hyper_parameters["max_seq_len"])
rel2id = json.load(open(rel2id_path, "r", encoding = "utf-8"))
ent2id = json.load(open(ent2id_path, "r", encoding = "utf-8"))
handshaking_tagger = HandshakingTaggingScheme(rel2id, max_seq_len, ent2id)
tag_size = handshaking_tagger.get_tag_size()
# In[ ]:
def sample_equal_to(sample1, sample2):
    """Check that every relation of ``sample1`` also appears in ``sample2``.

    Despite the name this is a one-way (subset) check; callers are expected
    to invoke it in both directions to establish set equality. Samples must
    share the same id and text (asserted).

    BUGFIX: removed a leftover IPython ``set_trace()`` breakpoint that froze
    non-interactive runs on the first mismatching relation.
    """
    assert sample1["id"] == sample2["id"]
    assert sample1["text"] == sample2["text"]

    def rel_key(rel):
        # Uniquely identifies a relation by its texts, predicate and spans.
        return "{}\u2E80{}\u2E80{}\u2E80{}\u2E80{}\u2E80{}\u2E80{}".format(rel["subject"],
                                                                           rel["predicate"],
                                                                           rel["object"],
                                                                           *rel["subj_tok_span"],
                                                                           *rel["obj_tok_span"])

    memory_set = {rel_key(rel) for rel in sample2["relation_list"]}
    for rel in sample1["relation_list"]:
        if rel_key(rel) not in memory_set:
            return False
    return True
# In[ ]:
# # check tagging and decoding
# batch_size = hyper_parameters["batch_size"]
# for idx in tqdm(range(0, len(train_data), batch_size), desc = "check tagging and decoding"):
# batch_matrix_spots = []
# batch_data = train_data[idx:idx + batch_size]
# for sample in batch_data:
# matrix_spots = handshaking_tagger.get_spots(sample)
# # %timeit shaking_tagger.get_spots(sample)
# batch_matrix_spots.append(matrix_spots)
# # tagging
# # batch_shaking_tag: (batch_size, rel_id, seq_len, seq_len)
# batch_shaking_tag = handshaking_tagger.spots2shaking_tag4batch(batch_matrix_spots)
# # %timeit shaking_tagger.spots2shaking_tag4batch(batch_matrix_spots) #0.3s
# for batch_idx in range(len(batch_data)):
# gold_sample = batch_data[batch_idx]
# shaking_tag = batch_shaking_tag[batch_idx]
# # decode
# text = batch_data[batch_idx]["text"]
# tok2char_span = get_tok2char_span_map(text)
# rel_list = handshaking_tagger.decode_rel(text, shaking_tag, tok2char_span)
# pred_sample = {
# "text": text,
# "id": gold_sample["id"],
# "relation_list": rel_list,
# }
# if not sample_equal_to(pred_sample, gold_sample) or not sample_equal_to(gold_sample, pred_sample):
# set_trace()
# # Dataset
# In[ ]:
# Build the per-encoder data maker (indexing + batching).
if config["encoder"] == "BERT":
    tokenizer = BertTokenizerFast.from_pretrained(config["bert_path"], add_special_tokens = False, do_lower_case = False)
    data_maker = DataMaker4Bert(tokenizer, handshaking_tagger)
elif config["encoder"] in {"BiLSTM", }:
    token2idx_path = os.path.join(data_home, experiment_name, config["token2idx"])
    token2idx = json.load(open(token2idx_path, "r", encoding = "utf-8"))
    idx2token = {idx:tok for tok, idx in token2idx.items()}
    def text2indices(text, max_seq_len):
        # Map whitespace tokens to vocabulary ids, <UNK> for OOV tokens,
        # then pad with <PAD> (or truncate) to exactly max_seq_len.
        input_ids = []
        tokens = text.split(" ")
        for tok in tokens:
            if tok not in token2idx:
                input_ids.append(token2idx['<UNK>'])
            else:
                input_ids.append(token2idx[tok])
        if len(input_ids) < max_seq_len:
            input_ids.extend([token2idx['<PAD>']] * (max_seq_len - len(input_ids)))
        input_ids = torch.tensor(input_ids[:max_seq_len])
        return input_ids
    data_maker = DataMaker4BiLSTM(text2indices, get_tok2char_span_map, handshaking_tagger)
# In[ ]:
class MyDataset(Dataset):
    """Thin Dataset wrapper around an already-indexed sample list."""

    def __init__(self, data):
        self.data = data

    def __getitem__(self, index):
        # Delegate straight to the underlying list.
        return self.data[index]

    def __len__(self):
        return len(self.data)
# In[ ]:
# Index both splits once up front so the dataloaders only collate tensors.
indexed_train_data = data_maker.get_indexed_data(train_data, max_seq_len)
indexed_valid_data = data_maker.get_indexed_data(valid_data, max_seq_len)
# In[ ]:
# NOTE(review): shuffle = True on the valid loader only reorders batches;
# the validation metrics computed here are order-independent.
train_dataloader = DataLoader(MyDataset(indexed_train_data),
                                  batch_size = hyper_parameters["batch_size"],
                                  shuffle = True,
                                  num_workers = 6,
                                  drop_last = False,
                                  collate_fn = data_maker.generate_batch,
                                 )
valid_dataloader = DataLoader(MyDataset(indexed_valid_data),
                          batch_size = hyper_parameters["batch_size"],
                          shuffle = True,
                          num_workers = 6,
                          drop_last = False,
                          collate_fn = data_maker.generate_batch,
                         )
# In[ ]:
# # have a look at dataloader
# train_data_iter = iter(train_dataloader)
# batch_data = next(train_data_iter)
# text_id_list, text_list, batch_input_ids, \
# batch_attention_mask, batch_token_type_ids, \
# tok2char_span_list, batch_shaking_tag = batch_data
# print(text_list[0])
# print()
# print(tokenizer.decode(batch_input_ids[0].tolist()))
# print(batch_input_ids.size())
# print(batch_attention_mask.size())
# print(batch_token_type_ids.size())
# print(len(tok2char_span_list))
# print(batch_shaking_tag.size())
# # decode
# idx = 2
# print(text_list[idx])
# shaking_tag = batch_shaking_tag[idx]
# text = text_list[idx]
# tok2char_span = tok2char_span_list[idx]
# handshaking_tagger.decode_rel(text, shaking_tag, tok2char_span)
# # Model
# In[ ]:
# Instantiate the relation extractor for the configured encoder.
if config["encoder"] == "BERT":
    encoder = BertModel.from_pretrained(config["bert_path"])
    hidden_size = encoder.config.hidden_size
    rel_extractor = TPLinkerPlusBert(encoder,
                                     tag_size,
                                     hyper_parameters["shaking_type"],
                                     hyper_parameters["inner_enc_type"],
                                     hyper_parameters["tok_pair_sample_rate"]
                                    )
elif config["encoder"] in {"BiLSTM", }:
    glove = Glove()
    glove = glove.load(config["pretrained_word_embedding_path"])
    # prepare embedding matrix (random init; overwritten below where possible)
    word_embedding_init_matrix = np.random.normal(-1, 1, size=(len(token2idx), hyper_parameters["word_embedding_dim"]))
    count_in = 0
    # tokens found in the pretrained embeddings use the pretrained vector;
    # out-of-vocabulary tokens keep the random initialization above
    for ind, tok in tqdm(idx2token.items(), desc="Embedding matrix initializing..."):
        if tok in glove.dictionary:
            count_in += 1
            word_embedding_init_matrix[ind] = glove.word_vectors[glove.dictionary[tok]]
    print("{:.4f} tokens are in the pretrain word embedding matrix".format(count_in / len(idx2token))) # hit ratio of the pretrained embeddings
    word_embedding_init_matrix = torch.FloatTensor(word_embedding_init_matrix)
    rel_extractor = TPLinkerPlusBiLSTM(word_embedding_init_matrix,
                                       hyper_parameters["emb_dropout"],
                                       hyper_parameters["enc_hidden_size"],
                                       hyper_parameters["dec_hidden_size"],
                                       hyper_parameters["rnn_dropout"],
                                       tag_size,
                                       hyper_parameters["shaking_type"],
                                       hyper_parameters["inner_enc_type"],
                                       hyper_parameters["tok_pair_sample_rate"],
                                      )
rel_extractor = rel_extractor.to(device)
# In[ ]:
# # test outputs
# rel_extractor.train()
# with torch.no_grad():
# outputs, sampled_tok_pair_indices = rel_extractor(batch_input_ids.to(device),
# batch_attention_mask.to(device),
# batch_token_type_ids.to(device),
# )
# print(outputs.size())
# if rel_extractor.training:
# print(sampled_tok_pair_indices.size())
# # Metrics
# In[ ]:
metrics = MetricsCalculator(handshaking_tagger)
# Multilabel CE loss; GHM reweighting is toggled by hyper_parameters["ghm"].
loss_func = lambda y_pred, y_true: metrics.loss_func(y_pred, y_true, ghm = hyper_parameters["ghm"])
# # Train
# In[ ]:
# train step
def train_step(batch_train_data, optimizer):
    """Run one optimization step on a single batch.

    Unpacks the batch according to the configured encoder, moves tensors to
    ``device``, forwards through the global ``rel_extractor``, back-propagates
    the loss, and returns ``(loss, sample_accuracy)`` as Python floats.
    Relies on module-level globals: ``config``, ``device``, ``rel_extractor``,
    ``tag_size``, ``loss_func``, ``metrics``.
    """
    if config["encoder"] == "BERT":
        sample_list, batch_input_ids, batch_attention_mask, batch_token_type_ids, tok2char_span_list, batch_shaking_tag = batch_train_data
        batch_input_ids, batch_attention_mask, batch_token_type_ids, batch_shaking_tag = (batch_input_ids.to(device),
                                                                                          batch_attention_mask.to(device),
                                                                                          batch_token_type_ids.to(device),
                                                                                          batch_shaking_tag.to(device)
                                                                                          )
    elif config["encoder"] in {"BiLSTM", }:
        sample_list, batch_input_ids, tok2char_span_list, batch_shaking_tag = batch_train_data
        batch_input_ids, batch_shaking_tag = (batch_input_ids.to(device),
                                              batch_shaking_tag.to(device)
                                              )
    # zero the parameter gradients
    optimizer.zero_grad()
    if config["encoder"] == "BERT":
        pred_small_shaking_outputs, sampled_tok_pair_indices = rel_extractor(batch_input_ids,
                                                                             batch_attention_mask,
                                                                             batch_token_type_ids
                                                                             )
    elif config["encoder"] in {"BiLSTM", }:
        pred_small_shaking_outputs, sampled_tok_pair_indices = rel_extractor(batch_input_ids)
    # sampled_tok_pair_indices: (batch_size, ~segment_len)
    # batch_small_shaking_tag: (batch_size, ~segment_len, tag_size)
    # Gather the gold tags for exactly the token pairs the model sampled, so
    # the loss is computed on the same subset that was scored.
    batch_small_shaking_tag = batch_shaking_tag.gather(1, sampled_tok_pair_indices[:, :, None].repeat(1, 1, tag_size))
    loss = loss_func(pred_small_shaking_outputs, batch_small_shaking_tag)
    # set_trace()
    # t1 = time.time()
    loss.backward()
    optimizer.step()
    # print("bp: {}".format(time.time() - t1))
    # Logit > 0 corresponds to a positive tag decision (sigmoid > 0.5).
    pred_small_shaking_tag = (pred_small_shaking_outputs > 0.).long()
    sample_acc = metrics.get_sample_accuracy(pred_small_shaking_tag,
                                             batch_small_shaking_tag)
    return loss.item(), sample_acc.item()
# valid step
def valid_step(batch_valid_data):
    """Score one validation batch without gradient tracking.

    Returns ``(sample_accuracy, cpg_dict)`` where ``cpg_dict`` holds
    correct/pred/gold counts produced by ``metrics.get_cpg``.
    Relies on the same module-level globals as ``train_step``.
    """
    if config["encoder"] == "BERT":
        sample_list, batch_input_ids, batch_attention_mask, batch_token_type_ids, tok2char_span_list, batch_shaking_tag = batch_valid_data
        batch_input_ids, batch_attention_mask, batch_token_type_ids, batch_shaking_tag = (batch_input_ids.to(device),
                                                                                          batch_attention_mask.to(device),
                                                                                          batch_token_type_ids.to(device),
                                                                                          batch_shaking_tag.to(device)
                                                                                          )
    elif config["encoder"] in {"BiLSTM", }:
        sample_list, batch_input_ids, tok2char_span_list, batch_shaking_tag = batch_valid_data
        batch_input_ids, batch_shaking_tag = (batch_input_ids.to(device),
                                              batch_shaking_tag.to(device)
                                              )
    with torch.no_grad():
        if config["encoder"] == "BERT":
            pred_shaking_outputs, _ = rel_extractor(batch_input_ids,
                                                    batch_attention_mask,
                                                    batch_token_type_ids,
                                                    )
        elif config["encoder"] in {"BiLSTM", }:
            pred_shaking_outputs, _ = rel_extractor(batch_input_ids)
    # Logit > 0 corresponds to a positive tag decision (sigmoid > 0.5).
    pred_shaking_tag = (pred_shaking_outputs > 0.).long()
    sample_acc = metrics.get_sample_accuracy(pred_shaking_tag,
                                             batch_shaking_tag)
    cpg_dict = metrics.get_cpg(sample_list,
                               tok2char_span_list,
                               pred_shaking_tag,
                               hyper_parameters["match_pattern"])
    return sample_acc.item(), cpg_dict
# In[ ]:
# Best validation F1 observed so far; updated inside train_n_valid.
max_f1 = 0.
def train_n_valid(train_dataloader, dev_dataloader, optimizer, scheduler, num_epoch):
    """Train for ``num_epoch`` epochs, validating after each epoch and
    checkpointing the model whenever validation F1 improves past
    ``config["f1_2_save"]``. Updates the global ``max_f1``.
    """
    def train(dataloader, ep):
        # train
        rel_extractor.train()
        t_ep = time.time()
        total_loss, total_sample_acc = 0., 0.
        for batch_ind, batch_train_data in enumerate(dataloader):
            t_batch = time.time()
            loss, sample_acc = train_step(batch_train_data, optimizer)
            total_loss += loss
            total_sample_acc += sample_acc
            avg_loss = total_loss / (batch_ind + 1)
            # scheduler (stepped per batch; ReduceLROnPlateau needs a metric)
            if hyper_parameters["scheduler"] == "ReduceLROnPlateau":
                scheduler.step(avg_loss)
            else:
                scheduler.step()
            avg_sample_acc = total_sample_acc / (batch_ind + 1)
            batch_print_format = "\rproject: {}, run_name: {}, Epoch: {}/{}, batch: {}/{}, train_loss: {}, " + "t_sample_acc: {}," + "lr: {}, batch_time: {}, total_time: {} -------------"
            print(batch_print_format.format(experiment_name, config["run_name"],
                                            ep + 1, num_epoch,
                                            batch_ind + 1, len(dataloader),
                                            avg_loss,
                                            avg_sample_acc,
                                            optimizer.param_groups[0]['lr'],
                                            time.time() - t_batch,
                                            time.time() - t_ep,
                                            ), end="")
            if config["logger"] == "wandb" and batch_ind % hyper_parameters["log_interval"] == 0:
                logger.log({
                    "train_loss": avg_loss,
                    "train_small_shaking_seq_acc": avg_sample_acc,
                    "learning_rate": optimizer.param_groups[0]['lr'],
                    "time": time.time() - t_ep,
                })
        if config["logger"] != "wandb":  # only log once for training if logger is not wandb
            logger.log({
                "train_loss": avg_loss,
                "train_small_shaking_seq_acc": avg_sample_acc,
                "learning_rate": optimizer.param_groups[0]['lr'],
                "time": time.time() - t_ep,
            })
    def valid(dataloader, ep):
        # valid
        rel_extractor.eval()
        t_ep = time.time()
        total_sample_acc = 0.
        # total_rel_correct_num, total_rel_pred_num, total_rel_gold_num = 0, 0, 0
        # total_ent_correct_num, total_ent_pred_num, total_ent_gold_num = 0, 0, 0
        total_cpg_dict = {}
        for batch_ind, batch_valid_data in enumerate(tqdm(dataloader, desc = "Validating")):
            sample_acc, cpg_dict = valid_step(batch_valid_data)
            total_sample_acc += sample_acc
            # init total_cpg_dict with [correct, pred, gold] counters per key
            for k in cpg_dict.keys():
                if k not in total_cpg_dict:
                    total_cpg_dict[k] = [0, 0, 0]
            for k, cpg in cpg_dict.items():
                for idx, n in enumerate(cpg):
                    total_cpg_dict[k][idx] += cpg[idx]
            # total_rel_correct_num += rel_cpg[0]
            # total_rel_pred_num += rel_cpg[1]
            # total_rel_gold_num += rel_cpg[2]
            # total_ent_correct_num += ent_cpg[0]
            # total_ent_pred_num += ent_cpg[1]
            # total_ent_gold_num += ent_cpg[2]
        avg_sample_acc = total_sample_acc / len(dataloader)
        # Relation-extraction task: report relation and entity P/R/F1.
        if "rel_cpg" in total_cpg_dict:
            rel_prf = metrics.get_prf_scores(total_cpg_dict["rel_cpg"][0], total_cpg_dict["rel_cpg"][1], total_cpg_dict["rel_cpg"][2])
            ent_prf = metrics.get_prf_scores(total_cpg_dict["ent_cpg"][0], total_cpg_dict["ent_cpg"][1], total_cpg_dict["ent_cpg"][2])
            final_score = rel_prf[2]
            log_dict = {
                "val_shaking_tag_acc": avg_sample_acc,
                "val_rel_prec": rel_prf[0],
                "val_rel_recall": rel_prf[1],
                "val_rel_f1": rel_prf[2],
                "val_ent_prec": ent_prf[0],
                "val_ent_recall": ent_prf[1],
                "val_ent_f1": ent_prf[2],
                "time": time.time() - t_ep,
            }
        # Event-extraction task: report trigger/argument identification and
        # classification P/R/F1; the model-selection score is arg-class F1.
        elif "trigger_iden_cpg" in total_cpg_dict:
            trigger_iden_prf = metrics.get_prf_scores(total_cpg_dict["trigger_iden_cpg"][0],
                                                      total_cpg_dict["trigger_iden_cpg"][1],
                                                      total_cpg_dict["trigger_iden_cpg"][2])
            trigger_class_prf = metrics.get_prf_scores(total_cpg_dict["trigger_class_cpg"][0],
                                                       total_cpg_dict["trigger_class_cpg"][1],
                                                       total_cpg_dict["trigger_class_cpg"][2])
            arg_iden_prf = metrics.get_prf_scores(total_cpg_dict["arg_iden_cpg"][0], total_cpg_dict["arg_iden_cpg"][1], total_cpg_dict["arg_iden_cpg"][2])
            arg_class_prf = metrics.get_prf_scores(total_cpg_dict["arg_class_cpg"][0], total_cpg_dict["arg_class_cpg"][1], total_cpg_dict["arg_class_cpg"][2])
            final_score = arg_class_prf[2]
            log_dict = {
                "val_shaking_tag_acc": avg_sample_acc,
                "val_trigger_iden_prec": trigger_iden_prf[0],
                "val_trigger_iden_recall": trigger_iden_prf[1],
                "val_trigger_iden_f1": trigger_iden_prf[2],
                "val_trigger_class_prec": trigger_class_prf[0],
                "val_trigger_class_recall": trigger_class_prf[1],
                "val_trigger_class_f1": trigger_class_prf[2],
                "val_arg_iden_prec": arg_iden_prf[0],
                "val_arg_iden_recall": arg_iden_prf[1],
                "val_arg_iden_f1": arg_iden_prf[2],
                "val_arg_class_prec": arg_class_prf[0],
                "val_arg_class_recall": arg_class_prf[1],
                "val_arg_class_f1": arg_class_prf[2],
                "time": time.time() - t_ep,
            }
        logger.log(log_dict)
        pprint(log_dict)
        return final_score
    for ep in range(num_epoch):
        train(train_dataloader, ep)
        # NOTE(review): this uses the global valid_dataloader rather than the
        # dev_dataloader parameter — same object at the sole call site, but
        # worth confirming before reusing this function elsewhere.
        valid_f1 = valid(valid_dataloader, ep)
        global max_f1
        if valid_f1 >= max_f1:
            max_f1 = valid_f1
            if valid_f1 > config["f1_2_save"]:  # save the best model
                modle_state_num = len(glob.glob(model_state_dict_dir + "/model_state_dict_*.pt"))
                torch.save(rel_extractor.state_dict(), os.path.join(model_state_dict_dir, "model_state_dict_{}.pt".format(modle_state_num)))
                # scheduler_state_num = len(glob.glob(schedule_state_dict_dir + "/scheduler_state_dict_*.pt"))
                # torch.save(scheduler.state_dict(), os.path.join(schedule_state_dict_dir, "scheduler_state_dict_{}.pt".format(scheduler_state_num)))
        print("Current avf_f1: {}, Best f1: {}".format(valid_f1, max_f1))
# In[ ]:
# optimizer
init_learning_rate = float(hyper_parameters["lr"])
optimizer = torch.optim.Adam(rel_extractor.parameters(), lr = init_learning_rate)
# In[ ]:
# Learning-rate schedule selected by config: cosine annealing with warm
# restarts (CAWR), fixed step decay, or plateau-based reduction.
if hyper_parameters["scheduler"] == "CAWR":
    T_mult = hyper_parameters["T_mult"]
    rewarm_epoch_num = hyper_parameters["rewarm_epoch_num"]
    scheduler = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(optimizer, len(train_dataloader) * rewarm_epoch_num, T_mult)
elif hyper_parameters["scheduler"] == "Step":
    decay_rate = hyper_parameters["decay_rate"]
    decay_steps = hyper_parameters["decay_steps"]
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size = decay_steps, gamma = decay_rate)
elif hyper_parameters["scheduler"] == "ReduceLROnPlateau":
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, "min", verbose = True, patience = 6)
# In[ ]:
# Optionally resume from a saved checkpoint instead of training from scratch.
if not config["fr_scratch"]:
    model_state_path = config["model_state_dict_path"]
    rel_extractor.load_state_dict(torch.load(model_state_path))
    print("------------model state {} loaded ----------------".format(model_state_path.split("/")[-1]))
train_n_valid(train_dataloader, valid_dataloader, optimizer, scheduler, hyper_parameters["epochs"])
| 26,441 | 37.266281 | 252 | py |
camel_tools | camel_tools-master/setup.py | # -*- coding: utf-8 -*-
# MIT License
#
# Copyright 2018-2022 New York University Abu Dhabi
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
from setuptools import setup
import sys
# Single source of truth for the package version: read it from the bundled
# VERSION file inside the camel_tools package.
VERSION_FILE = os.path.join(os.path.dirname(__file__),
                            'camel_tools',
                            'VERSION')
with open(VERSION_FILE, encoding='utf-8') as version_fp:
    VERSION = version_fp.read().strip()
# Trove classifiers describing status, audience, and supported Python versions.
CLASSIFIERS = [
    'Development Status :: 5 - Production/Stable',
    'Environment :: Console',
    'Intended Audience :: Developers',
    'Intended Audience :: Education',
    'Intended Audience :: Information Technology',
    'Intended Audience :: Science/Research',
    'License :: OSI Approved :: MIT License',
    'Natural Language :: Arabic',
    'Operating System :: OS Independent',
    'Programming Language :: Python :: 3',
    'Programming Language :: Python :: 3 :: Only',
    'Programming Language :: Python :: 3.7',
    'Programming Language :: Python :: 3.8',
    'Programming Language :: Python :: 3.9',
    'Programming Language :: Python :: 3.10',
    'Topic :: Scientific/Engineering',
    'Topic :: Scientific/Engineering :: Artificial Intelligence',
    'Topic :: Scientific/Engineering :: Information Analysis',
    'Topic :: Text Processing',
    'Topic :: Text Processing :: Linguistic',
]
DESCRIPTION = ('A suite of Arabic natural language processing tools developed '
               'by the CAMeL Lab at New York University Abu Dhabi.')
# The long description shown on PyPI is the repository README.
README_FILE = os.path.join(os.path.dirname(__file__), 'README.rst')
with open(README_FILE, 'r', encoding='utf-8') as version_fp:
    LONG_DESCRIPTION = version_fp.read().strip()
INSTALL_REQUIRES = [
    'future',
    'six',
    'docopt',
    'cachetools',
    'numpy',
    'scipy',
    'pandas',
    'scikit-learn',
    'dill',
    'torch>=1.3',
    'transformers>=3.0.2',
    'editdistance',
    'requests',
    'emoji',
    'pyrsistent',
    'tabulate',
    'tqdm',
    'muddler',
]
# camel-kenlm is not installable on Windows, so it is appended only on other
# platforms; the environment marker also guards sdist/wheel consumers.
INSTALL_REQUIRES_NOT_WINDOWS = [
    'camel-kenlm >= 2023.3.17.2 ; platform_system!="Windows"'
]
if sys.platform != 'win32':
    INSTALL_REQUIRES.extend(INSTALL_REQUIRES_NOT_WINDOWS)
setup(
    name='camel_tools',
    version=VERSION,
    author='Ossama W. Obeid',
    author_email='oobeid@nyu.edu',
    maintainer='Ossama W. Obeid',
    maintainer_email='oobeid@nyu.edu',
    packages=['camel_tools',
              'camel_tools.cli',
              'camel_tools.utils',
              'camel_tools.morphology',
              'camel_tools.disambig',
              'camel_tools.disambig.bert',
              'camel_tools.tokenizers',
              'camel_tools.tagger',
              'camel_tools.data',
              'camel_tools.sentiment',
              'camel_tools.dialectid',
              'camel_tools.ner'],
    package_data={
        'camel_tools.utils': ['charmaps/*.json'],
    },
    include_package_data=True,
    # Each console script maps to a main() in camel_tools.cli.
    entry_points={
        'console_scripts': [
            ('camel_transliterate='
             'camel_tools.cli.camel_transliterate:main'),
            ('camel_arclean='
             'camel_tools.cli.camel_arclean:main'),
            ('camel_morphology='
             'camel_tools.cli.camel_morphology:main'),
            ('camel_dediac='
             'camel_tools.cli.camel_dediac:main'),
            ('camel_word_tokenize='
             'camel_tools.cli.camel_word_tokenize:main'),
            ('camel_diac='
             'camel_tools.cli.camel_diac:main'),
            ('camel_data='
             'camel_tools.cli.camel_data:main'),
        ],
    },
    url='https://github.com/CAMeL-Lab/CAMeL_Tools',
    license='MIT',
    description=DESCRIPTION,
    long_description=LONG_DESCRIPTION,
    classifiers=CLASSIFIERS,
    install_requires=INSTALL_REQUIRES,
    python_requires='>=3.7.0, <3.11'
)
| 4,817 | 32.227586 | 79 | py |
camel_tools | camel_tools-master/camel_tools/sentiment/__init__.py |
# -*- coding: utf-8 -*-
# MIT License
#
# Copyright 2018-2022 New York University Abu Dhabi
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""This module contains the CAMeL Tools sentiment analyzer component.
"""
import torch
import torch.nn.functional as torch_fun
from torch.utils.data import Dataset, DataLoader
from transformers import BertTokenizer, BertForSequenceClassification
from camel_tools.data import CATALOGUE
# Sentiment classes the bundled models predict (see SentimentAnalyzer.labels).
_LABELS = ('positive', 'negative', 'neutral')
class SentimentDataset(Dataset):
    """Sentiment PyTorch Dataset

    Args:
        sentences (:obj:`list` of :obj:`list` of :obj:`str`): The input
            sentences.
        tokenizer (:obj:`PreTrainedTokenizer`): Bert's pretrained tokenizer.
        max_seq_length (:obj:`int`): Maximum sentence length.
    """

    def __init__(self, sentences, tokenizer, max_seq_length):
        # Tokenize everything up front; items are sliced out of the batched
        # encoding on demand.
        self.encoded_sents = tokenizer(
            sentences,
            add_special_tokens=True,
            padding=True,
            max_length=max_seq_length,
            truncation=True,
            return_tensors="pt",
        )

    def __getitem__(self, idx):
        encoding = self.encoded_sents
        return {field: getattr(encoding, field)[idx]
                for field in ('input_ids', 'token_type_ids', 'attention_mask')}

    def __len__(self):
        return self.encoded_sents.input_ids.shape[0]
class SentimentAnalyzer:
    """CAMeL Tools sentiment analysis component.

    Args:
        model_path (:obj:`str`): The path to the fine-tuned model.
        use_gpu (:obj:`bool`, optional): The flag to use a GPU or not.
            Defaults to True.
    """

    def __init__(self, model_path, use_gpu=True):
        self.model = BertForSequenceClassification.from_pretrained(model_path)
        self.tokenizer = BertTokenizer.from_pretrained(model_path)
        # id -> label-string mapping stored in the fine-tuned model's config.
        self.labels_map = self.model.config.id2label
        self.use_gpu = use_gpu

    @staticmethod
    def pretrained(model_name=None, use_gpu=True):
        """Load a pre-trained model provided with camel_tools.

        Args:
            model_name (:obj:`str`, optional): Name of pre-trained model to
                load.
                Two models are available: 'arabert' and 'mbert'.
                If None, the default model ('arabert') will be loaded.
                Defaults to None.
            use_gpu (:obj:`bool`, optional): The flag to use a GPU or not.
                Defaults to True.

        Returns:
            :obj:`SentimentAnalyzer`: Instance with loaded pre-trained model.
        """
        if model_name is None:
            model_name = CATALOGUE.components['SentimentAnalysis'].default
        model_info = (CATALOGUE.components['SentimentAnalysis']
                      .datasets[model_name])
        model_path = str(model_info.path)
        return SentimentAnalyzer(model_path, use_gpu)

    @staticmethod
    def labels():
        """Get the list of possible sentiment labels returned by predictions.

        Returns:
            :obj:`list` of :obj:`str`: List of sentiment labels.
        """
        return list(_LABELS)

    def predict_sentence(self, sentence):
        """Predict the sentiment label of a single sentence.

        Args:
            sentence (:obj:`str`): Input sentence.

        Returns:
            :obj:`str`: The predicted sentiment label for given sentence.
        """
        return self.predict([sentence])[0]

    def predict(self, sentences, batch_size=32):
        """Predict the sentiment labels of a list of sentences.

        Args:
            sentences (:obj:`list` of :obj:`str`): Input sentences.
            batch_size (:obj:`int`): The batch size.

        Returns:
            :obj:`list` of :obj:`str`: The predicted sentiment labels for given
            sentences.
        """
        # Short-circuit on empty input (consistent with NERecognizer.predict);
        # avoids handing an empty batch to the tokenizer/DataLoader.
        if len(sentences) == 0:
            return []
        sentiment_dataset = SentimentDataset(sentences, self.tokenizer,
                                             max_seq_length=512)
        data_loader = DataLoader(sentiment_dataset, batch_size=batch_size,
                                 shuffle=False, drop_last=False)
        device = ('cuda' if self.use_gpu and torch.cuda.is_available() else
                  'cpu')
        self.model.to(device)
        self.model.eval()
        predicted_labels = []
        with torch.no_grad():
            for batch in data_loader:
                batch = {k: v.to(device) for k, v in batch.items()}
                inputs = {'input_ids': batch['input_ids'],
                          'token_type_ids': batch['token_type_ids'],
                          'attention_mask': batch['attention_mask']}
                logits = self.model(**inputs)[0]
                predictions = torch_fun.softmax(logits, dim=-1)
                max_predictions = torch.argmax(predictions, dim=-1)
                # Map predicted class ids back to label strings.
                batch_preds = [self.labels_map[p.item()] for p in max_predictions]
                predicted_labels.extend(batch_preds)
        return predicted_labels
| 6,028 | 34.464706 | 82 | py |
camel_tools | camel_tools-master/camel_tools/ner/__init__.py | # -*- coding: utf-8 -*-
# MIT License
#
# Copyright 2018-2022 New York University Abu Dhabi
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""This module contains the CAMeL Tools Named Entity Recognition component.
"""
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, Dataset
from transformers import BertForTokenClassification, BertTokenizer
from camel_tools.data import CATALOGUE
# BIO tag set the bundled NER models predict (see NERecognizer.labels).
_LABELS = ['B-LOC', 'B-ORG', 'B-PERS', 'B-MISC', 'I-LOC', 'I-ORG', 'I-PERS',
           'I-MISC', 'O']
class _PrepSentence:
"""A single input sentence for token classification.
Args:
guid (:obj:`str`): Unique id for the sentence.
words (:obj:`list` of :obj:`str`): list of words of the sentence.
labels (:obj:`list` of :obj:`str`): The labels for each word
of the sentence.
"""
def __init__(self, guid, words, labels):
self.guid = guid
self.words = words
self.labels = labels
def _prepare_sentences(sentences):
    """
    Encapsulates the input sentences into PrepSentence
    objects.

    Args:
        sentences (:obj:`list` of :obj:`list` of :obj: `str): The input
            sentences.

    Returns:
        :obj:`list` of :obj:`PrepSentence`: The list of PrepSentence objects.
    """
    # Every word starts with the dummy label 'O'; guids are 1-based indices.
    return [_PrepSentence(guid=str(guid),
                          words=words,
                          labels=['O'] * len(words))
            for guid, words in enumerate(sentences, start=1)]
class NERDataset(Dataset):
    """NER PyTorch Dataset

    Args:
        sentences (:obj:`list` of :obj:`list` of :obj:`str`): The input
            sentences.
        tokenizer (:obj:`PreTrainedTokenizer`): Bert's pretrained tokenizer.
        labels (:obj:`list` of :obj:`str`): The labels which the model was
            trained to classify.
        max_seq_length (:obj:`int`): Maximum sentence length.
    """

    def __init__(self, sentences, tokenizer, labels, max_seq_length):
        prepared_sentences = _prepare_sentences(sentences)
        # Use cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later.
        self.pad_token_label_id = nn.CrossEntropyLoss().ignore_index
        self.features = self._featurize_input(
            prepared_sentences,
            labels,
            max_seq_length,
            tokenizer,
            cls_token=tokenizer.cls_token,
            sep_token=tokenizer.sep_token,
            pad_token=tokenizer.pad_token_id,
            pad_token_segment_id=tokenizer.pad_token_type_id,
            pad_token_label_id=self.pad_token_label_id,
        )

    def _featurize_input(self, prepared_sentences, label_list, max_seq_length,
                         tokenizer, cls_token="[CLS]", cls_token_segment_id=0,
                         sep_token="[SEP]", pad_token=0, pad_token_segment_id=0,
                         pad_token_label_id=-100, sequence_a_segment_id=0,
                         mask_padding_with_zero=True):
        """Featurizes the input which will be fed to the fine-tuned BERT model.

        Args:
            prepared_sentences (:obj:`list` of :obj:`PrepSentence`): list of
                PrepSentence objects.
            label_list (:obj:`list` of :obj:`str`): The labels which the model
                was trained to classify.
            max_seq_length (:obj:`int`): Maximum sequence length.
            tokenizer (:obj:`PreTrainedTokenizer`): Bert's pretrained
                tokenizer.
            cls_token (:obj:`str`): BERT's CLS token. Defaults to [CLS].
            cls_token_segment_id (:obj:`int`): BERT's CLS token segment id.
                Defaults to 0.
            sep_token (:obj:`str`): BERT's CLS token. Defaults to [SEP].
            pad_token (:obj:`int`): BERT's pading token. Defaults to 0.
            pad_token_segment_id (:obj:`int`): BERT's pading token segment id.
                Defaults to 0.
            pad_token_label_id (:obj:`int`): BERT's pading token label id.
                Defaults to -100.
            sequence_a_segment_id (:obj:`int`): BERT's segment id.
                Defaults to 0.
            mask_padding_with_zero (:obj:`bool`): Whether to masks the padding
                tokens with zero or not. Defaults to True.

        Returns:
            obj:`list` of :obj:`Dict`: list of dicts of the needed features.
        """
        label_map = {label: i for i, label in enumerate(label_list)}
        features = []
        for sent_id, sentence in enumerate(prepared_sentences):
            tokens = []
            label_ids = []
            # Word-piece tokenize each word, keeping label ids word-aligned.
            for word, label in zip(sentence.words, sentence.labels):
                word_tokens = tokenizer.tokenize(word)
                # bert-base-multilingual-cased sometimes output "nothing ([])
                # when calling tokenize with just a space.
                if len(word_tokens) > 0:
                    tokens.append(word_tokens)
                    # Use the real label id for the first token of the word,
                    # and padding ids for the remaining tokens
                    label_ids.append([label_map[label]] +
                                     [pad_token_label_id] *
                                     (len(word_tokens) - 1))
            token_segments = []
            token_segment = []
            label_ids_segments = []
            label_ids_segment = []
            num_word_pieces = 0
            # Reserve two positions per segment for [CLS] and [SEP].
            seg_seq_length = max_seq_length - 2
            # Dealing with empty sentences
            if len(tokens) == 0:
                data = self._add_special_tokens(token_segment,
                                                label_ids_segment,
                                                tokenizer,
                                                max_seq_length,
                                                cls_token,
                                                sep_token, pad_token,
                                                cls_token_segment_id,
                                                pad_token_segment_id,
                                                pad_token_label_id,
                                                sequence_a_segment_id,
                                                mask_padding_with_zero)
                # Adding sentence id
                data['sent_id'] = sent_id
                features.append(data)
            else:
                # Chunking the tokenized sentence into multiple segments
                # if it's longer than max_seq_length - 2. Words are never
                # split across segments; all segments share the same sent_id.
                for idx, word_pieces in enumerate(tokens):
                    if num_word_pieces + len(word_pieces) > seg_seq_length:
                        data = self._add_special_tokens(token_segment,
                                                        label_ids_segment,
                                                        tokenizer,
                                                        max_seq_length,
                                                        cls_token,
                                                        sep_token, pad_token,
                                                        cls_token_segment_id,
                                                        pad_token_segment_id,
                                                        pad_token_label_id,
                                                        sequence_a_segment_id,
                                                        mask_padding_with_zero)
                        # Adding sentence id
                        data['sent_id'] = sent_id
                        features.append(data)
                        token_segments.append(token_segment)
                        label_ids_segments.append(label_ids_segment)
                        token_segment = list(word_pieces)
                        label_ids_segment = list(label_ids[idx])
                        num_word_pieces = len(word_pieces)
                    else:
                        token_segment.extend(word_pieces)
                        label_ids_segment.extend(label_ids[idx])
                        num_word_pieces += len(word_pieces)
                # Adding the last segment
                if len(token_segment) > 0:
                    data = self._add_special_tokens(token_segment,
                                                    label_ids_segment,
                                                    tokenizer,
                                                    max_seq_length,
                                                    cls_token,
                                                    sep_token, pad_token,
                                                    cls_token_segment_id,
                                                    pad_token_segment_id,
                                                    pad_token_label_id,
                                                    sequence_a_segment_id,
                                                    mask_padding_with_zero)
                    # Adding sentence id
                    data['sent_id'] = sent_id
                    features.append(data)
                    token_segments.append(token_segment)
                    label_ids_segments.append(label_ids_segment)
            # DEBUG: Making sure we got all segments correctly
            # assert sum([len(_) for _ in label_ids_segments]) == \
            # sum([len(_) for _ in label_ids])
            # assert sum([len(_) for _ in token_segments]) == \
            # sum([len(_) for _ in tokens])
        return features

    def _add_special_tokens(self, tokens, label_ids, tokenizer, max_seq_length,
                            cls_token, sep_token, pad_token,
                            cls_token_segment_id, pad_token_segment_id,
                            pad_token_label_id, sequence_a_segment_id,
                            mask_padding_with_zero):
        # Wrap a segment as [CLS] tokens [SEP], then pad everything to
        # max_seq_length and convert to tensors.
        _tokens = list(tokens)
        _label_ids = list(label_ids)
        _tokens += [sep_token]
        _label_ids += [pad_token_label_id]
        segment_ids = [sequence_a_segment_id] * len(_tokens)
        _tokens = [cls_token] + _tokens
        _label_ids = [pad_token_label_id] + _label_ids
        segment_ids = [cls_token_segment_id] + segment_ids
        input_ids = tokenizer.convert_tokens_to_ids(_tokens)
        # The mask has 1 for real tokens and 0 for padding tokens. Only
        # real tokens are attended to.
        input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
        # Zero-pad up to the sequence length.
        padding_length = max_seq_length - len(input_ids)
        input_ids += [pad_token] * padding_length
        input_mask += [0 if mask_padding_with_zero else 1] * padding_length
        segment_ids += [pad_token_segment_id] * padding_length
        _label_ids += [pad_token_label_id] * padding_length
        return {'input_ids': torch.tensor(input_ids),
                'attention_mask': torch.tensor(input_mask),
                'token_type_ids': torch.tensor(segment_ids),
                'label_ids': torch.tensor(_label_ids)}

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i):
        return self.features[i]
class NERecognizer():
    """CAMeL Tools NER component.

    Args:
        model_path (:obj:`str`): The path to the fine-tuned model.
        use_gpu (:obj:`bool`, optional): The flag to use a GPU or not.
            Defaults to True.
    """

    def __init__(self, model_path, use_gpu=True):
        self.model = BertForTokenClassification.from_pretrained(model_path)
        self.tokenizer = BertTokenizer.from_pretrained(model_path)
        # id -> label-string mapping stored in the fine-tuned model's config.
        self.labels_map = self.model.config.id2label
        self.use_gpu = use_gpu

    @staticmethod
    def pretrained(model_name=None, use_gpu=True):
        """Load a pre-trained model provided with camel_tools.

        Args:
            model_name (:obj:`str`, optional): Name of pre-trained model to
                load. One model is available: 'arabert'.
                If None, the default model ('arabert') will be loaded.
                Defaults to None.
            use_gpu (:obj:`bool`, optional): The flag to use a GPU or not.
                Defaults to True.

        Returns:
            :obj:`NERecognizer`: Instance with loaded pre-trained model.
        """
        if model_name is None:
            model_name = CATALOGUE.components['NamedEntityRecognition'].default
        model_info = (CATALOGUE.components['NamedEntityRecognition']
                      .datasets[model_name])
        model_path = str(model_info.path)
        return NERecognizer(model_path, use_gpu)

    @staticmethod
    def labels():
        """Get the list of NER labels returned by predictions.

        Returns:
            :obj:`list` of :obj:`str`: List of NER labels.
        """
        return list(_LABELS)

    def _align_predictions(self, predictions, label_ids, sent_ids):
        """Aligns the predictions of the model with the inputs and it takes
        care of getting rid of the padding token.

        Args:
            predictions (:obj:`np.ndarray`): The predictions of the model
            label_ids (:obj:`np.ndarray`): The label ids of the inputs.
                They will always be the ids of Os since we're dealing with a
                test dataset. Note that label_ids are also padded.
            sent_ids (:obj:`np.ndarray`): The sent ids of the inputs.

        Returns:
            :obj:`list` of :obj:`list` of :obj:`str`: The predicted labels for
            all the sentences in the batch
        """
        preds = np.argmax(predictions, axis=2)
        batch_size, seq_len = preds.shape
        # Hoisted out of the loop: only the ignore_index constant is needed,
        # so don't build a CrossEntropyLoss object per token.
        ignore_index = nn.CrossEntropyLoss().ignore_index
        preds_list = [[] for _ in range(batch_size)]
        for i in range(batch_size):
            for j in range(seq_len):
                if label_ids[i, j] != ignore_index:
                    preds_list[i].append(self.labels_map[preds[i][j]])
        # Collating the predicted labels based on the sentence ids (long
        # sentences were split into multiple segments sharing one sent id).
        final_preds_list = [[] for _ in range(len(set(sent_ids)))]
        for i, sent_id in enumerate(sent_ids):
            final_preds_list[sent_id].extend(preds_list[i])
        return final_preds_list

    def predict(self, sentences, batch_size=32):
        """Predict the named entity labels of a list of sentences.

        Args:
            sentences (:obj:`list` of :obj:`list` of :obj:`str`): The input
                sentences.
            batch_size (:obj:`int`): The batch size.

        Returns:
            :obj:`list` of :obj:`list` of :obj:`str`: The predicted named
            entity labels for the given sentences.
        """
        if len(sentences) == 0:
            return []
        test_dataset = NERDataset(sentences=sentences,
                                  tokenizer=self.tokenizer,
                                  labels=list(self.labels_map.values()),
                                  max_seq_length=256)
        data_loader = DataLoader(test_dataset, batch_size=batch_size,
                                 shuffle=False, drop_last=False)
        label_ids = None
        preds = None
        sent_ids = None
        device = ('cuda' if self.use_gpu and torch.cuda.is_available()
                  else 'cpu')
        self.model.to(device)
        self.model.eval()
        with torch.no_grad():
            for batch in data_loader:
                batch = {k: v.to(device) for k, v in batch.items()}
                inputs = {'input_ids': batch['input_ids'],
                          'token_type_ids': batch['token_type_ids'],
                          'attention_mask': batch['attention_mask']}
                # Accumulate labels/sent ids/logits across batches so the
                # alignment step can run once over the whole input.
                label_ids = (batch['label_ids'] if label_ids is None
                             else torch.cat((label_ids, batch['label_ids'])))
                sent_ids = (batch['sent_id'] if sent_ids is None
                            else torch.cat((sent_ids, batch['sent_id'])))
                logits = self.model(**inputs)[0]
                preds = logits if preds is None else torch.cat((preds, logits),
                                                               dim=0)
        predictions = self._align_predictions(preds.cpu().numpy(),
                                              label_ids.cpu().numpy(),
                                              sent_ids.cpu().numpy())
        return predictions

    def predict_sentence(self, sentence):
        """Predict the named entity labels of a single sentence.

        Args:
            sentence (:obj:`list` of :obj:`str`): The input sentence.

        Returns:
            :obj:`list` of :obj:`str`: The predicted named entity
            labels for the given sentence.
        """
        return self.predict([sentence])[0]
| 17,999 | 40.189931 | 79 | py |
camel_tools | camel_tools-master/camel_tools/disambig/bert/unfactored.py | # -*- coding: utf-8 -*-
# MIT License
#
# Copyright 2018-2022 New York University Abu Dhabi
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import json
from pathlib import Path
import pickle
from cachetools import LFUCache
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from transformers import BertForTokenClassification, BertTokenizer
from camel_tools.data import CATALOGUE
from camel_tools.morphology.database import MorphologyDB
from camel_tools.morphology.analyzer import Analyzer
from camel_tools.disambig.common import Disambiguator, DisambiguatedWord
from camel_tools.disambig.common import ScoredAnalysis
from camel_tools.disambig.bert._bert_morph_dataset import MorphDataset
from camel_tools.disambig.score_function import score_analysis_uniform
from camel_tools.disambig.score_function import FEATURE_SET_MAP
from camel_tools.utils.dediac import dediac_ar
# Maps scorer-name strings (the `scorer` argument accepted by
# BERTUnfactoredDisambiguator) to their scoring-function implementations.
_SCORING_FUNCTION_MAP = {
    'uniform': score_analysis_uniform
}
def _read_json(f_path):
with open(f_path) as f:
return json.load(f)
def _dediac_sentence(sentence):
    """Dediacritize every word in `sentence`.

    Words that become empty after dediacritization are kept in their
    original form.
    """
    result = []
    for token in sentence:
        stripped = dediac_ar(token)
        result.append(stripped if stripped else token)
    return result
class _BERTFeatureTagger:
    """A feature tagger based on the fine-tuned BERT architecture.
    Args:
        model_path (:obj:`str`): The path to the fine-tuned model.
        use_gpu (:obj:`bool`, optional): The flag to use a GPU or not.
            Defaults to True.
    """
    def __init__(self, model_path, use_gpu=True):
        # Load the fine-tuned token-classification model and its matching
        # tokenizer from the same directory.
        self._model = BertForTokenClassification.from_pretrained(model_path)
        self._tokenizer = BertTokenizer.from_pretrained(model_path)
        # Mapping from label id to label string, taken from the model config.
        self._labels_map = self._model.config.id2label
        self._use_gpu = use_gpu
    def labels(self):
        """Get the list of Morph labels returned by predictions.
        Returns:
            :obj:`list` of :obj:`str`: List of Morph labels.
        """
        return list(self._labels_map.values())
    def _align_predictions(self, predictions, label_ids, sent_ids):
        """Aligns the predictions of the model with the inputs and it takes
        care of getting rid of the padding token.
        Args:
            predictions (:obj:`np.ndarray`): The predictions of the model
            label_ids (:obj:`np.ndarray`): The label ids of the inputs.
                They will always be the ids of Os since we're dealing with a
                test dataset. Note that label_ids are also padded.
            sent_ids (:obj:`np.ndarray`): The sent ids of the inputs.
        Returns:
            :obj:`list` of :obj:`list` of :obj:`str`: The predicted labels for
            all the sentences in the batch
        """
        # Take the arg-max label id per token position.
        preds = np.argmax(predictions, axis=2)
        batch_size, seq_len = preds.shape
        preds_list = [[] for _ in range(batch_size)]
        for i in range(batch_size):
            for j in range(seq_len):
                # Positions carrying the CrossEntropyLoss ignore index are
                # padding (or non-initial word pieces); skip them.
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    preds_list[i].append(self._labels_map[preds[i][j]])
        # Collating the predicted labels based on the sentence ids
        # (long sentences are split into several segments that share a
        # sent_id; their predictions are concatenated back here).
        final_preds_list = [[] for _ in range(len(set(sent_ids)))]
        for i, id in enumerate(sent_ids):
            # Offset by the first sent id so ids are 0-based in this batch.
            id = id - sent_ids[0]
            final_preds_list[id].extend(preds_list[i])
        return final_preds_list
    def predict(self, sentences, batch_size=32, max_seq_length=512):
        """Predict the morphosyntactic labels of a list of sentences.
        Args:
            sentences (:obj:`list` of :obj:`list` of :obj:`str`): The input
                sentences.
            batch_size (:obj:`int`): The batch size.
            max_seq_length (:obj:`int`): The max sequence size.
        Returns:
            :obj:`list` of :obj:`list` of :obj:`str`: The predicted
            morphosyntactic labels for the given sentences.
        """
        if len(sentences) == 0:
            return []
        # Sort sentences by length (remembering their original positions) so
        # similarly-sized sentences land in the same batch, letting
        # _collate_fn trim padding per batch.
        sorted_sentences = list(enumerate(sentences))
        sorted_sentences = sorted(sorted_sentences, key=lambda x: len(x[1]))
        sorted_sentences_idx = [i[0] for i in sorted_sentences]
        sorted_sentences_text = [i[1] for i in sorted_sentences]
        test_dataset = MorphDataset(sentences=sorted_sentences_text,
                                    tokenizer=self._tokenizer,
                                    labels=list(self._labels_map.values()),
                                    max_seq_length=max_seq_length)
        data_loader = DataLoader(test_dataset, batch_size=batch_size,
                                 shuffle=False, drop_last=False,
                                 collate_fn=self._collate_fn)
        predictions = []
        device = ('cuda' if self._use_gpu and torch.cuda.is_available()
                  else 'cpu')
        self._model.to(device)
        self._model.eval()
        # Pure inference: no gradients needed.
        with torch.no_grad():
            for batch in data_loader:
                batch = {k: v.to(device) for k, v in batch.items()}
                inputs = {'input_ids': batch['input_ids'],
                          'token_type_ids': batch['token_type_ids'],
                          'attention_mask': batch['attention_mask']}
                label_ids = batch['label_ids']
                sent_ids = batch['sent_id']
                logits = self._model(**inputs)[0]
                preds = logits
                prediction = self._align_predictions(preds.cpu().numpy(),
                                                     label_ids.cpu().numpy(),
                                                     sent_ids.cpu().numpy())
                predictions.extend(prediction)
        # Restore the caller's original sentence order.
        sorted_predictions_pair = zip(sorted_sentences_idx, predictions)
        sorted_predictions = sorted(sorted_predictions_pair,
                                    key=lambda x: x[0])
        return [i[1] for i in sorted_predictions]
    def _collate_fn(self, batch):
        """Collate dataset items into batched tensors, truncating the
        shared padding down to the longest real sequence in the batch.
        """
        input_ids = []
        token_type_ids = []
        attention_mask = []
        label_ids = []
        sent_id = []
        # Find max length within the batch
        # (length of the non-zero, i.e. non-pad, portion of input_ids).
        max_seq_length = 0
        for sent in batch:
            l = len(sent['input_ids'][sent['input_ids'].nonzero()].squeeze())
            max_seq_length = max(max_seq_length, l)
        # Truncate the unnecessary paddings
        for sent in batch:
            for _, t in sent.items():
                if _ != 'sent_id':
                    sent[_] = t[:max_seq_length]
        for sent in batch:
            input_ids.append(sent['input_ids'])
            token_type_ids.append(sent['token_type_ids'])
            attention_mask.append(sent['attention_mask'])
            label_ids.append(sent['label_ids'])
            sent_id.append(sent['sent_id'])
        return {
            'input_ids': torch.stack(input_ids),
            'token_type_ids': torch.stack(token_type_ids),
            'attention_mask': torch.stack(attention_mask),
            'label_ids': torch.stack(label_ids),
            'sent_id': torch.tensor(sent_id, dtype=torch.int32),
        }
class BERTUnfactoredDisambiguator(Disambiguator):
    """A disambiguator using an unfactored BERT model. This model is based on
    *Morphosyntactic Tagging with Pre-trained Language Models for Arabic and
    its Dialects* by Inoue, Khalifa, and Habash. Findings of ACL 2022.
    (https://arxiv.org/abs/2110.06852)
    Args:
        model_path (:obj:`str`): The path to the fine-tuned model.
        analyzer (:obj:`~camel_tools.morphology.analyzer.Analyzer`): Analyzer
            to use for providing full morphological analysis of a word.
        features: :obj:`list`, optional): A list of morphological features
            used in the model. Defaults to 14 features.
        top (:obj:`int`, optional): The maximum number of top analyses to
            return. Defaults to 1.
        scorer (:obj:`str`, optional): The scoring function that computes
            matches between the predicted features from the model and the
            output from the analyzer. If `uniform`, the scoring based on the
            uniform weight is used. Defaults to `uniform`.
        tie_breaker (:obj:`str`, optional): The tie breaker used in the feature
            match function. If `tag`, tie breaking based on the unfactored tag
            MLE and factored tag MLE is used. Defaults to `tag`.
        use_gpu (:obj:`bool`, optional): The flag to use a GPU or not.
            Defaults to True.
        batch_size (:obj:`int`, optional): The batch size. Defaults to 32.
        ranking_cache (:obj:`LFUCache`, optional): The cache of pre-computed
            scored analyses. Defaults to `None`.
        ranking_cache_size (:obj:`int`, optional): The number of unique word
            disambiguations to cache. If 0, no ranked analyses will be cached.
            The cache uses a least-frequently-used eviction policy.
            Defaults to 100000.
    """
    def __init__(self, model_path, analyzer,
                 features=FEATURE_SET_MAP['feats_14'], top=1,
                 scorer='uniform', tie_breaker='tag', use_gpu=True,
                 batch_size=32, ranking_cache=None, ranking_cache_size=100000):
        self._model = {
            'unfactored': _BERTFeatureTagger(model_path, use_gpu=use_gpu)
        }
        self._analyzer = analyzer
        self._features = features
        # Always return at least one analysis per word.
        self._top = max(top, 1)
        self._scorer = _SCORING_FUNCTION_MAP.get(scorer, None)
        self._tie_breaker = tie_breaker
        self._use_gpu = use_gpu
        self._batch_size = batch_size
        # MLE model shipped alongside the fine-tuned BERT model; passed to
        # the scoring function (used for tie breaking, see `tie_breaker`).
        self._mle = _read_json(f'{model_path}/mle_model.json')
        # Select the cached or uncached per-word disambiguation path.
        if ranking_cache is None:
            if ranking_cache_size <= 0:
                self._ranking_cache = None
                self._disambiguate_word_fn = self._disambiguate_word
            else:
                self._ranking_cache = LFUCache(ranking_cache_size)
                self._disambiguate_word_fn = self._disambiguate_word_cached
        else:
            self._ranking_cache = ranking_cache
            self._disambiguate_word_fn = self._disambiguate_word_cached
    @staticmethod
    def pretrained(model_name='msa', top=1, use_gpu=True, batch_size=32,
                   cache_size=10000, pretrained_cache=True,
                   ranking_cache_size=100000):
        """Load a pre-trained model provided with camel_tools.
        Args:
            model_name (:obj:`str`, optional): Name of pre-trained model to
                load. Three models are available: 'msa', 'egy', and 'glf'.
                Defaults to `msa`.
            top (:obj:`int`, optional): The maximum number of top analyses to
                return. Defaults to 1.
            use_gpu (:obj:`bool`, optional): The flag to use a GPU or not.
                Defaults to True.
            batch_size (:obj:`int`, optional): The batch size. Defaults to 32.
            cache_size (:obj:`int`, optional): If greater than zero, then
                the analyzer will cache the analyses for the cache_size most
                frequent words, otherwise no analyses will be cached.
                Defaults to 10000.
            pretrained_cache (:obj:`bool`, optional): The flag to use a
                pretrained cache that stores ranked analyses.
                Defaults to True.
            ranking_cache_size (:obj:`int`, optional): The number of unique
                word disambiguations to cache. If 0, no ranked analyses will be
                cached. The cache uses a least-frequently-used eviction policy.
                This argument is ignored if pretrained_cache is True.
                Defaults to 100000.
        Returns:
            :obj:`BERTUnfactoredDisambiguator`: Instance with loaded
            pre-trained model.
        """
        # Resolve model files and configuration from the camel_tools catalogue.
        model_info = CATALOGUE.get_dataset('DisambigBertUnfactored',
                                           model_name)
        model_config = _read_json(Path(model_info.path, 'default_config.json'))
        model_path = str(model_info.path)
        features = FEATURE_SET_MAP[model_config['feature']]
        db = MorphologyDB.builtin_db(model_config['db_name'], 'a')
        analyzer = Analyzer(db, backoff=model_config['backoff'],
                            cache_size=cache_size)
        scorer = model_config['scorer']
        tie_breaker = model_config['tie_breaker']
        if pretrained_cache:
            # Load a pre-computed ranking cache distributed with the model.
            cache_info = CATALOGUE.get_dataset('DisambigRankingCache',
                                               model_config['ranking_cache'])
            cache_path = Path(cache_info.path, 'default_cache.pickle')
            with open(cache_path, 'rb') as f:
                ranking_cache = pickle.load(f)
        else:
            ranking_cache = None
        return BERTUnfactoredDisambiguator(
            model_path,
            analyzer,
            top=top,
            features=features,
            scorer=scorer,
            tie_breaker=tie_breaker,
            use_gpu=use_gpu,
            batch_size=batch_size,
            ranking_cache=ranking_cache,
            ranking_cache_size=ranking_cache_size)
    @staticmethod
    def _pretrained_from_config(config, top=1, use_gpu=True, batch_size=32,
                                cache_size=10000, pretrained_cache=True,
                                ranking_cache_size=100000):
        """Load a pre-trained model from a config file.
        Args:
            config (:obj:`str`): Config file that defines the model details.
                Defaults to `None`.
            top (:obj:`int`, optional): The maximum number of top analyses
                to return. Defaults to 1.
            use_gpu (:obj:`bool`, optional): The flag to use a GPU or not.
                Defaults to True.
            batch_size (:obj:`int`, optional): The batch size. Defaults to 32.
            cache_size (:obj:`int`, optional): If greater than zero, then
                the analyzer will cache the analyses for the cache_size
                most frequent words, otherwise no analyses will be cached.
                Defaults to 10000.
            pretrained_cache (:obj:`bool`, optional): The flag to use a
                pretrained cache that stores ranked analyses.
                Defaults to True.
            ranking_cache_size (:obj:`int`, optional): The number of unique
                word disambiguations to cache. If 0, no ranked analyses will be
                cached. The cache uses a least-frequently-used eviction policy.
                This argument is ignored if pretrained_cache is True.
                Defaults to 100000.
        Returns:
            :obj:`BERTUnfactoredDisambiguator`: Instance with loaded
            pre-trained model.
        """
        # Same wiring as pretrained(), but all paths come from the user's
        # config file instead of the camel_tools catalogue.
        model_config = _read_json(config)
        model_path = model_config['model_path']
        features = FEATURE_SET_MAP[model_config['feature']]
        db = MorphologyDB(model_config['db_path'], 'a')
        analyzer = Analyzer(db,
                            backoff=model_config['backoff'],
                            cache_size=cache_size)
        scorer = model_config['scorer']
        tie_breaker = model_config['tie_breaker']
        if pretrained_cache:
            cache_path = model_config['ranking_cache']
            with open(cache_path, 'rb') as f:
                ranking_cache = pickle.load(f)
        else:
            ranking_cache = None
        return BERTUnfactoredDisambiguator(
            model_path,
            analyzer,
            top=top,
            features=features,
            scorer=scorer,
            tie_breaker=tie_breaker,
            use_gpu=use_gpu,
            batch_size=batch_size,
            ranking_cache=ranking_cache,
            ranking_cache_size=ranking_cache_size)
    def _predict_sentences(self, sentences):
        """Predict the morphosyntactic labels of a list of sentences.
        Args:
            sentences (:obj:`list` of :obj:`list` of :obj:`str`): The input
                sentences.
        Returns:
            :obj:`list` of :obj:`list` of :obj:`dict`: The predicted
            morphosyntactic labels for the given sentences.
        """
        preds = self._model['unfactored'].predict(sentences, self._batch_size)
        parsed_predictions = []
        for sent, pred in zip(sentences, preds):
            parsed_prediction = []
            # NOTE: the inner loop rebinds `pred` to the per-word tag string.
            for word, pred in zip(sent, pred):
                d = {}
                # Each tag is a '__'-joined list of 'feature:value' pairs.
                for feat in pred.split('__'):
                    f, v = feat.split(':')
                    d[f] = v
                d['lex'] = word  # Copy the word when analyzer is not used
                d['diac'] = word  # Copy the word when analyzer is not used
                parsed_prediction.append(d)
            parsed_predictions.append(parsed_prediction)
        return parsed_predictions
    def _predict_sentence(self, sentence):
        """Predict the morphosyntactic labels of a single sentence.
        Args:
            sentence (:obj:`list` of :obj:`str`): The input sentence.
        Returns:
            :obj:`list` of :obj:`dict`: The predicted morphosyntactic labels
            for the given sentence.
        """
        parsed_predictions = []
        model = self._model['unfactored']
        preds = model.predict([sentence], self._batch_size)[0]
        for word, pred in zip(sentence, preds):
            d = {}
            # Each tag is a '__'-joined list of 'feature:value' pairs.
            for feat in pred.split('__'):
                f, v = feat.split(':')
                d[f] = v
            d['lex'] = word  # Copy the word when analyzer is not used
            d['diac'] = word  # Copy the word when analyzer is not used
            parsed_predictions.append(d)
        return parsed_predictions
    def _scored_analyses(self, word_dd, prediction):
        """Score the analyzer's analyses of `word_dd` against the BERT
        prediction and return them sorted, with scores normalized by the
        maximum match score.
        """
        bert_analysis = prediction
        analyses = self._analyzer.analyze(word_dd)
        if len(analyses) == 0:
            # If the word is not found in the analyzer,
            # return the predictions from BERT
            return [ScoredAnalysis(0,  # score
                                   bert_analysis,  # analysis
                                   word_dd,  # diac
                                   -99,  # pos_lex_logprob
                                   -99,  # lex_logprob
                                   )]
        # Score every analysis against the BERT-predicted features.
        scored = [(self._scorer(a,
                                bert_analysis,
                                self._mle,
                                tie_breaker=self._tie_breaker,
                                features=self._features), a)
                  for a in analyses]
        max_score = max(s[0] for s in scored)
        # Guard against division by zero when nothing matches.
        if max_score == 0:
            max_score = 1
        scored_analyses = [
            ScoredAnalysis(
                s / max_score,  # score
                a,  # analysis
                a['diac'],  # diac
                a.get('pos_lex_logprob', -99),  # pos_lex_logprob
                a.get('lex_logprob', -99),  # lex_logprob
            ) for s, a in scored]
        # Uses ScoredAnalysis's own ordering (best analyses first).
        scored_analyses.sort()
        return scored_analyses
    def _disambiguate_word(self, word, pred):
        """Disambiguate a single word (uncached path), keeping the top
        `self._top` scored analyses.
        """
        scored_analyses = self._scored_analyses(word, pred)
        return DisambiguatedWord(word, scored_analyses[:self._top])
    def _disambiguate_word_cached(self, word, pred):
        """Disambiguate a single word, memoizing the scored analyses in
        `self._ranking_cache`.
        """
        # Create a key for caching scored analysis given word and bert
        # predictions
        key = (word, tuple(pred[feat] for feat in self._features))
        if key in self._ranking_cache:
            scored_analyses = self._ranking_cache[key]
        else:
            scored_analyses = self._scored_analyses(word, pred)
            self._ranking_cache[key] = scored_analyses
        return DisambiguatedWord(word, scored_analyses[:self._top])
    def disambiguate_word(self, sentence, word_ndx):
        """Disambiguates a single word of a sentence.
        Args:
            sentence (:obj:`list` of :obj:`str`): The input sentence.
            word_ndx (:obj:`int`): The index of the word token in `sentence` to
                disambiguate.
        Returns:
            :obj:`~camel_tools.disambig.common.DisambiguatedWord`: The
            disambiguation of the word token in `sentence` at `word_ndx`.
        """
        # The whole sentence is disambiguated since the model needs full
        # sentence context; the requested word is then picked out.
        return self.disambiguate(sentence)[word_ndx]
    def disambiguate(self, sentence):
        """Disambiguate all words of a single sentence.
        Args:
            sentence (:obj:`list` of :obj:`str`): The input sentence.
        Returns:
            :obj:`list` of :obj:`~camel_tools.disambig.common.DisambiguatedWord`: The
            disambiguated analyses for the given sentence.
        """
        # Predict on the dediacritized text but report the original words.
        dediaced_sentence = _dediac_sentence(sentence)
        predictions = self._predict_sentence(dediaced_sentence)
        return [self._disambiguate_word_fn(w, p)
                for (w, p) in zip(sentence, predictions)]
    def disambiguate_sentences(self, sentences):
        """Disambiguate all words of a list of sentences.
        Args:
            sentences (:obj:`list` of :obj:`list` of :obj:`str`): The input
                sentences.
        Returns:
            :obj:`list` of :obj:`list` of :obj:`~camel_tools.disambig.common.DisambiguatedWord`: The
            disambiguated analyses for the given sentences.
        """
        # Predict on the dediacritized text but report the original words.
        dediaced_sentences = [_dediac_sentence(s) for s in sentences]
        predictions = self._predict_sentences(dediaced_sentences)
        disambiguated_sentences = []
        for sentence, prediction in zip(sentences, predictions):
            disambiguated_sentence = [
                self._disambiguate_word_fn(w, p)
                for (w, p) in zip(sentence, prediction)
            ]
            disambiguated_sentences.append(disambiguated_sentence)
        return disambiguated_sentences
    def tag_sentences(self, sentences, use_analyzer=True):
        """Predict the morphosyntactic labels of a list of sentences.
        Args:
            sentences (:obj:`list` of :obj:`list` of :obj:`str`): The input
                sentences.
            use_analyzer (:obj:`bool`): The flag to use an analyzer or not.
                If set to False, we return the original input as diac and lex.
                Defaults to True.
        Returns:
            :obj:`list` of :obj:`list` of :obj:`dict`: The predicted The list
            of feature tags for each word in the given sentences
        """
        if use_analyzer:
            tagged_sentences = []
            for prediction in self.disambiguate_sentences(sentences):
                # Keep only the single best analysis per word.
                tagged_sentence = [a.analyses[0].analysis for a in prediction]
                tagged_sentences.append(tagged_sentence)
            return tagged_sentences
        return self._predict_sentences(sentences)
    def tag_sentence(self, sentence, use_analyzer=True):
        """Predict the morphosyntactic labels of a single sentence.
        Args:
            sentence (:obj:`list` of :obj:`str`): The list of space and
                punctuation seperated list of tokens comprising a given
                sentence.
            use_analyzer (:obj:`bool`): The flag to use an analyzer or not.
                If set to False, we return the original input as diac and lex.
                Defaults to True.
        Returns:
            :obj:`list` of :obj:`dict`: The list of feature tags for each word
            in the given sentence
        """
        if use_analyzer:
            # Keep only the single best analysis per word.
            return [a.analyses[0].analysis
                    for a in self.disambiguate(sentence)]
        return self._predict_sentence(sentence)
    def all_feats(self):
        """Return a set of all features produced by this disambiguator.
        Returns:
            :obj:`frozenset` of :obj:`str`: The set all features produced by
            this disambiguator.
        """
        return self._analyzer.all_feats()
    def tok_feats(self):
        """Return a set of tokenization features produced by this
        disambiguator.
        Returns:
            :obj:`frozenset` of :obj:`str`: The set tokenization features
            produced by this disambiguator.
        """
        return self._analyzer.tok_feats()
| 25,500 | 38.536434 | 100 | py |
camel_tools | camel_tools-master/camel_tools/disambig/bert/_bert_morph_dataset.py | # -*- coding: utf-8 -*-
# MIT License
#
# Copyright 2018-2022 New York University Abu Dhabi
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import torch
import torch.nn as nn
from torch.utils.data import Dataset
def _prepare_sentences(sentences, placeholder=''):
    """Wrap raw input sentences in :obj:`_PrepSentence` objects.

    Each sentence receives a 1-based string guid and one placeholder
    label per word.

    Args:
        sentences (:obj:`list` of :obj:`list` of :obj:`str`): The input
            sentences.
        placeholder (:obj:`str`, optional): Dummy label assigned to every
            word. Defaults to ''.

    Returns:
        :obj:`list` of :obj:`_PrepSentence`: The wrapped sentences.
    """
    prepared = []
    for guid, words in enumerate(sentences, start=1):
        prepared.append(_PrepSentence(guid=f"{guid}",
                                      words=words,
                                      labels=[placeholder] * len(words)))
    return prepared
class _PrepSentence:
"""A single input sentence for token classification.
Args:
guid (:obj:`str`): Unique id for the sentence.
words (:obj:`list` of :obj:`str`): list of words of the sentence.
labels (:obj:`list` of :obj:`str`): The labels for each word
of the sentence.
"""
def __init__(self, guid, words, labels):
self.guid = guid
self.words = words
self.labels = labels
class MorphDataset(Dataset):
    """Morph PyTorch Dataset
    Args:
        sentences (:obj:`list` of :obj:`list` of :obj:`str`): The input
            sentences.
        tokenizer (:obj:`PreTrainedTokenizer`): Bert's pretrained tokenizer.
        labels (:obj:`list` of :obj:`str`): The labels which the model was
            trained to classify.
        max_seq_length (:obj:`int`): Maximum sentence length.
    """
    def __init__(self, sentences, tokenizer, labels, max_seq_length):
        # Wrap the raw sentences with placeholder labels (the first known
        # label) so featurization can run on unlabeled input.
        prepared_sentences = _prepare_sentences(sentences,
                                                placeholder=labels[0])
        # Use cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later.
        self.pad_token_label_id = nn.CrossEntropyLoss().ignore_index
        self.features = self._featurize_input(
            prepared_sentences,
            labels,
            max_seq_length,
            tokenizer,
            cls_token=tokenizer.cls_token,
            sep_token=tokenizer.sep_token,
            pad_token=tokenizer.pad_token_id,
            pad_token_segment_id=tokenizer.pad_token_type_id,
            pad_token_label_id=self.pad_token_label_id,
        )
    def _featurize_input(self, prepared_sentences, label_list, max_seq_length,
                         tokenizer, cls_token="[CLS]", cls_token_segment_id=0,
                         sep_token="[SEP]", pad_token=0, pad_token_segment_id=0,
                         pad_token_label_id=-100, sequence_a_segment_id=0,
                         mask_padding_with_zero=True):
        """Featurizes the input which will be fed to the fine-tuned BERT model.
        Args:
            prepared_sentences (:obj:`list` of :obj:`PrepSentence`): list of
                PrepSentence objects.
            label_list (:obj:`list` of :obj:`str`): The labels which the model
                was trained to classify.
            max_seq_length (:obj:`int`): Maximum sequence length.
            tokenizer (:obj:`PreTrainedTokenizer`): Bert's pretrained
                tokenizer.
            cls_token (:obj:`str`): BERT's CLS token. Defaults to [CLS].
            cls_token_segment_id (:obj:`int`): BERT's CLS token segment id.
                Defaults to 0.
            sep_token (:obj:`str`): BERT's CLS token. Defaults to [SEP].
            pad_token (:obj:`int`): BERT's pading token. Defaults to 0.
            pad_token_segment_id (:obj:`int`): BERT's pading token segment id.
                Defaults to 0.
            pad_token_label_id (:obj:`int`): BERT's pading token label id.
                Defaults to -100.
            sequence_a_segment_id (:obj:`int`): BERT's segment id.
                Defaults to 0.
            mask_padding_with_zero (:obj:`bool`): Whether to masks the padding
                tokens with zero or not. Defaults to True.
        Returns:
            obj:`list` of :obj:`Dict`: list of dicts of the needed features.
        """
        label_map = {label: i for i, label in enumerate(label_list)}
        features = []
        for sent_id, sentence in enumerate(prepared_sentences):
            tokens = []
            label_ids = []
            for word, label in zip(sentence.words, sentence.labels):
                word_tokens = tokenizer.tokenize(word)
                # bert-base-multilingual-cased sometimes output "nothing ([])
                # when calling tokenize with just a space.
                if len(word_tokens) > 0:
                    tokens.append(word_tokens)
                    # Use the real label id for the first token of the word,
                    # and padding ids for the remaining tokens
                    label_ids.append([label_map[label]] +
                                     [pad_token_label_id] *
                                     (len(word_tokens) - 1))
            token_segments = []
            token_segment = []
            label_ids_segments = []
            label_ids_segment = []
            num_word_pieces = 0
            # Reserve two positions for the [CLS] and [SEP] special tokens.
            seg_seq_length = max_seq_length - 2
            # Dealing with empty sentences
            if len(tokens) == 0:
                data = self._add_special_tokens(token_segment,
                                                label_ids_segment,
                                                tokenizer,
                                                max_seq_length,
                                                cls_token,
                                                sep_token, pad_token,
                                                cls_token_segment_id,
                                                pad_token_segment_id,
                                                pad_token_label_id,
                                                sequence_a_segment_id,
                                                mask_padding_with_zero)
                # Adding sentence id
                data['sent_id'] = sent_id
                features.append(data)
            else:
                # Chunking the tokenized sentence into multiple segments
                # if it's longer than max_seq_length - 2
                # (segments never split a word's word pieces apart).
                for idx, word_pieces in enumerate(tokens):
                    if num_word_pieces + len(word_pieces) > seg_seq_length:
                        # Current segment is full: emit it and start a new
                        # one with this word's pieces.
                        data = self._add_special_tokens(token_segment,
                                                        label_ids_segment,
                                                        tokenizer,
                                                        max_seq_length,
                                                        cls_token,
                                                        sep_token, pad_token,
                                                        cls_token_segment_id,
                                                        pad_token_segment_id,
                                                        pad_token_label_id,
                                                        sequence_a_segment_id,
                                                        mask_padding_with_zero)
                        # Adding sentence id
                        data['sent_id'] = sent_id
                        features.append(data)
                        token_segments.append(token_segment)
                        label_ids_segments.append(label_ids_segment)
                        token_segment = list(word_pieces)
                        label_ids_segment = list(label_ids[idx])
                        num_word_pieces = len(word_pieces)
                    else:
                        token_segment.extend(word_pieces)
                        label_ids_segment.extend(label_ids[idx])
                        num_word_pieces += len(word_pieces)
                # Adding the last segment
                if len(token_segment) > 0:
                    data = self._add_special_tokens(token_segment,
                                                    label_ids_segment,
                                                    tokenizer,
                                                    max_seq_length,
                                                    cls_token,
                                                    sep_token, pad_token,
                                                    cls_token_segment_id,
                                                    pad_token_segment_id,
                                                    pad_token_label_id,
                                                    sequence_a_segment_id,
                                                    mask_padding_with_zero)
                    # Adding sentence id
                    data['sent_id'] = sent_id
                    features.append(data)
                    token_segments.append(token_segment)
                    label_ids_segments.append(label_ids_segment)
            # DEBUG: Making sure we got all segments correctly
            # assert sum([len(_) for _ in label_ids_segments]) == \
            #        sum([len(_) for _ in label_ids])
            # assert sum([len(_) for _ in token_segments]) == \
            #        sum([len(_) for _ in tokens])
        return features
    def _add_special_tokens(self, tokens, label_ids, tokenizer, max_seq_length,
                            cls_token, sep_token, pad_token,
                            cls_token_segment_id, pad_token_segment_id,
                            pad_token_label_id, sequence_a_segment_id,
                            mask_padding_with_zero):
        """Wrap one token segment with the [CLS]/[SEP] special tokens, pad
        everything to `max_seq_length`, and return the tensors BERT expects.
        """
        _tokens = list(tokens)
        _label_ids = list(label_ids)
        _tokens += [sep_token]
        _label_ids += [pad_token_label_id]
        segment_ids = [sequence_a_segment_id] * len(_tokens)
        _tokens = [cls_token] + _tokens
        _label_ids = [pad_token_label_id] + _label_ids
        segment_ids = [cls_token_segment_id] + segment_ids
        input_ids = tokenizer.convert_tokens_to_ids(_tokens)
        # The mask has 1 for real tokens and 0 for padding tokens. Only
        # real tokens are attended to.
        input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
        # Zero-pad up to the sequence length.
        padding_length = max_seq_length - len(input_ids)
        input_ids += [pad_token] * padding_length
        input_mask += [0 if mask_padding_with_zero else 1] * padding_length
        segment_ids += [pad_token_segment_id] * padding_length
        _label_ids += [pad_token_label_id] * padding_length
        return {'input_ids': torch.tensor(input_ids),
                'attention_mask': torch.tensor(input_mask),
                'token_type_ids': torch.tensor(segment_ids),
                'label_ids': torch.tensor(_label_ids)}
    def __len__(self):
        """Return the number of featurized segments in the dataset."""
        return len(self.features)
    def __getitem__(self, i):
        """Return the featurized segment at index `i`."""
        return self.features[i]
| 12,226 | 43.300725 | 79 | py |
cqr | cqr-master/cqr/torch_models.py |
import sys
import copy
import torch
import numpy as np
import torch.nn as nn
from cqr import helper
from sklearn.model_selection import train_test_split
# Module-level default compute device: prefer the first CUDA GPU when one
# is available, otherwise fall back to the CPU.
if torch.cuda.is_available():
    device = "cuda:0"
else:
    device = "cpu"
###############################################################################
# Helper functions
###############################################################################
def epoch_internal_train(model, loss_func, x_train, y_train, batch_size, optimizer, cnt=0, best_cnt=np.inf):
    """ Sweep over the data and update the model's parameters

    Parameters
    ----------
    model : class of neural net model
    loss_func : class of loss function
    x_train : pytorch tensor n training features, each of dimension p (nXp)
    y_train : pytorch tensor of n training labels (n)
    batch_size : integer, size of the mini-batch
    optimizer : class of SGD solver
    cnt : integer, counting the gradient steps
    best_cnt: integer, stop the training if current cnt > best_cnt

    Returns
    -------
    epoch_loss : mean loss value
    cnt : integer, cumulative number of gradient steps

    """
    model.train()
    # Shuffle features and labels together once per epoch.
    shuffle_idx = np.arange(x_train.shape[0])
    np.random.shuffle(shuffle_idx)
    x_train = x_train[shuffle_idx]
    y_train = y_train[shuffle_idx]

    n_samples = x_train.shape[0]
    epoch_losses = []
    for idx in range(0, n_samples, batch_size):
        cnt = cnt + 1
        optimizer.zero_grad()
        # Tensor slicing clamps at the end of the tensor, so the last
        # (possibly short) batch needs no explicit bounds check.
        batch_x = x_train[idx : idx + batch_size, :]
        batch_y = y_train[idx : idx + batch_size]
        preds = model(batch_x)
        loss = loss_func(preds, batch_y)
        loss.backward()
        optimizer.step()
        epoch_losses.append(loss.cpu().detach().numpy())
        # Early stop once the budget of gradient steps is exhausted.
        if cnt >= best_cnt:
            break
    epoch_loss = np.mean(epoch_losses)
    return epoch_loss, cnt
def rearrange(all_quantiles, quantile_low, quantile_high, test_preds):
    """ Produce monotonic quantiles

    Parameters
    ----------
    all_quantiles : numpy array (q), grid of quantile levels in the range (0,1)
    quantile_low : float, desired low quantile in the range (0,1)
    quantile_high : float, desired high quantile in the range (0,1)
    test_preds : numpy array of predicted quantile (nXq)

    Returns
    -------
    q_fixed : numpy array (nX2), containing the rearranged estimates of the
              desired low and high quantile

    References
    ----------
    .. [1] Chernozhukov, Victor, Iván Fernández‐Val, and Alfred Galichon.
           "Quantile and probability curves without crossing."
           Econometrica 78.3 (2010): 1093-1125.

    """
    # Map the desired quantile levels onto the [0, 1] range spanned by the
    # estimated quantile grid.
    scaling = all_quantiles[-1] - all_quantiles[0]
    low_val = (quantile_low - all_quantiles[0]) / scaling
    high_val = (quantile_high - all_quantiles[0]) / scaling
    # Taking empirical quantiles of the per-row predictions enforces
    # monotonicity (low <= high). 'linear' is np.quantile's default
    # interpolation; the deprecated `interpolation=` keyword (removed in
    # NumPy 2.0) is intentionally not passed.
    q_fixed = np.quantile(test_preds, (low_val, high_val), axis=1)
    return q_fixed.T
###############################################################################
# Deep conditional mean regression
# Minimizing MSE loss
###############################################################################
# Define the network
class mse_model(nn.Module):
    """ Conditional mean estimator, formulated as neural net
    """
    def __init__(self,
                 in_shape=1,
                 hidden_size=64,
                 dropout=0.5):
        """ Initialization

        Parameters
        ----------
        in_shape : integer, input signal dimension (p)
        hidden_size : integer, hidden layer dimension
        dropout : float, dropout rate

        """
        super().__init__()
        self.in_shape = in_shape
        self.out_shape = 1
        self.hidden_size = hidden_size
        self.dropout = dropout
        self.build_model()
        self.init_weights()

    def build_model(self):
        """ Construct the network
        """
        # Two ReLU hidden layers with dropout, followed by a scalar head.
        layers = [
            nn.Linear(self.in_shape, self.hidden_size),
            nn.ReLU(),
            nn.Dropout(self.dropout),
            nn.Linear(self.hidden_size, self.hidden_size),
            nn.ReLU(),
            nn.Dropout(self.dropout),
            nn.Linear(self.hidden_size, 1),
        ]
        self.base_model = nn.Sequential(*layers)

    def init_weights(self):
        """ Initialize the network parameters
        """
        # Orthogonal weight matrices and zero biases for each linear layer.
        for layer in self.base_model:
            if isinstance(layer, nn.Linear):
                nn.init.orthogonal_(layer.weight)
                nn.init.constant_(layer.bias, 0)

    def forward(self, x):
        """ Run forward pass
        """
        out = self.base_model(x)
        return torch.squeeze(out)
# Define the training procedure
class LearnerOptimized:
""" Fit a neural network (conditional mean) to training data
"""
    def __init__(self, model, optimizer_class, loss_func, device='cpu', test_ratio=0.2, random_state=0):
        """ Initialization
        Parameters
        ----------
        model : class of neural network model
        optimizer_class : class of SGD optimizer (e.g. Adam)
        loss_func : loss to minimize
        device : string, "cuda:0" or "cpu"
        test_ratio : float, test size used in cross-validation (CV)
        random_state : int, seed to be used in CV when splitting to train-test
        """
        self.model = model.to(device)
        # Keep the optimizer class so fresh optimizers can be instantiated
        # later (e.g. for the CV model copy in fit()).
        self.optimizer_class = optimizer_class
        self.optimizer = optimizer_class(self.model.parameters())
        self.loss_func = loss_func.to(device)
        self.device = device
        self.test_ratio = test_ratio
        self.random_state = random_state
        # Loss traces recorded during training.
        self.loss_history = []
        self.test_loss_history = []
        self.full_loss_history = []
def fit(self, x, y, epochs, batch_size, verbose=False):
""" Fit the model to data
Parameters
----------
x : numpy array, containing the training features (nXp)
y : numpy array, containing the training labels (n)
epochs : integer, maximal number of epochs
batch_size : integer, mini-batch size for SGD
"""
sys.stdout.flush()
model = copy.deepcopy(self.model)
model = model.to(device)
optimizer = self.optimizer_class(model.parameters())
best_epoch = epochs
x_train, xx, y_train, yy = train_test_split(x, y, test_size=self.test_ratio,random_state=self.random_state)
x_train = torch.from_numpy(x_train).float().to(self.device).requires_grad_(False)
xx = torch.from_numpy(xx).float().to(self.device).requires_grad_(False)
y_train = torch.from_numpy(y_train).float().to(self.device).requires_grad_(False)
yy = torch.from_numpy(yy).float().to(self.device).requires_grad_(False)
best_cnt = 1e10
best_test_epoch_loss = 1e10
cnt = 0
for e in range(epochs):
epoch_loss, cnt = epoch_internal_train(model, self.loss_func, x_train, y_train, batch_size, optimizer, cnt)
self.loss_history.append(epoch_loss)
# test
model.eval()
preds = model(xx)
test_preds = preds.cpu().detach().numpy()
test_preds = np.squeeze(test_preds)
test_epoch_loss = self.loss_func(preds, yy).cpu().detach().numpy()
self.test_loss_history.append(test_epoch_loss)
if (test_epoch_loss <= best_test_epoch_loss):
best_test_epoch_loss = test_epoch_loss
best_epoch = e
best_cnt = cnt
if (e+1) % 100 == 0 and verbose:
print("CV: Epoch {}: Train {}, Test {}, Best epoch {}, Best loss {}".format(e+1, epoch_loss, test_epoch_loss, best_epoch, best_test_epoch_loss))
sys.stdout.flush()
# use all the data to train the model, for best_cnt steps
x = torch.from_numpy(x).float().to(self.device).requires_grad_(False)
y = torch.from_numpy(y).float().to(self.device).requires_grad_(False)
cnt = 0
for e in range(best_epoch+1):
if cnt > best_cnt:
break
epoch_loss, cnt = epoch_internal_train(self.model, self.loss_func, x, y, batch_size, self.optimizer, cnt, best_cnt)
self.full_loss_history.append(epoch_loss)
if (e+1) % 100 == 0 and verbose:
print("Full: Epoch {}: {}, cnt {}".format(e+1, epoch_loss, cnt))
sys.stdout.flush()
def predict(self, x):
""" Estimate the label given the features
Parameters
----------
x : numpy array of training features (nXp)
Returns
-------
ret_val : numpy array of predicted labels (n)
"""
self.model.eval()
ret_val = self.model(torch.from_numpy(x).to(self.device).requires_grad_(False)).cpu().detach().numpy()
return ret_val
##############################################################################
# Quantile regression
# Implementation inspired by:
# https://github.com/ceshine/quantile-regression-tensorflow
##############################################################################
class AllQuantileLoss(nn.Module):
    """Pinball loss, averaged over samples and summed over quantile levels."""

    def __init__(self, quantiles):
        """
        Parameters
        ----------
        quantiles : pytorch vector of quantile levels, each in the range (0,1)
        """
        super().__init__()
        self.quantiles = quantiles

    def forward(self, preds, target):
        """Compute the pinball loss.

        Parameters
        ----------
        preds : pytorch tensor of estimated labels (n)
        target : pytorch tensor of true labels (n)

        Returns
        -------
        loss : cost function value
        """
        assert not target.requires_grad
        assert preds.size(0) == target.size(0)
        per_level = []
        for col, q in enumerate(self.quantiles):
            diff = target - preds[:, col]
            # pinball: q*diff when under-predicting, (q-1)*diff otherwise
            per_level.append(torch.max((q - 1) * diff, q * diff).unsqueeze(1))
        return torch.mean(torch.sum(torch.cat(per_level, dim=1), dim=1))
class all_q_model(nn.Module):
    """Conditional quantile estimator, formulated as a fully-connected neural net."""

    def __init__(self, quantiles, in_shape=1, hidden_size=64, dropout=0.5):
        """Store hyper-parameters, then build and initialize the layers.

        Parameters
        ----------
        quantiles : numpy array of quantile levels (q), each in the range (0,1)
        in_shape : integer, input signal dimension (p)
        hidden_size : integer, hidden layer dimension
        dropout : float, dropout rate
        """
        super().__init__()
        self.quantiles = quantiles
        self.num_quantiles = len(quantiles)
        self.hidden_size = hidden_size
        self.in_shape = in_shape
        self.out_shape = len(quantiles)
        self.dropout = dropout
        self.build_model()
        self.init_weights()

    def build_model(self):
        """Construct the network: two ReLU hidden layers, one output per quantile."""
        layers = [
            nn.Linear(self.in_shape, self.hidden_size),
            nn.ReLU(),
            nn.Dropout(self.dropout),
            nn.Linear(self.hidden_size, self.hidden_size),
            nn.ReLU(),
            nn.Dropout(self.dropout),
            nn.Linear(self.hidden_size, self.num_quantiles),
        ]
        self.base_model = nn.Sequential(*layers)

    def init_weights(self):
        """Initialize every linear layer with orthogonal weights and zero bias."""
        for layer in self.base_model:
            if isinstance(layer, nn.Linear):
                nn.init.orthogonal_(layer.weight)
                nn.init.constant_(layer.bias, 0)

    def forward(self, x):
        """Run a forward pass; returns one column per quantile level."""
        return self.base_model(x)
class LearnerOptimizedCrossing:
    """Fit a neural network (conditional quantile) to training data.

    A deep copy of the model is first trained against a held-out split to pick
    the epoch / mini-batch budget that attains the target coverage with the
    shortest intervals; the stored model is then re-trained on all the data
    for that budget.
    """

    def __init__(self, model, optimizer_class, loss_func, device='cpu',
                 test_ratio=0.2, random_state=0,
                 qlow=0.05, qhigh=0.95, use_rearrangement=False):
        """Initialization.

        Parameters
        ----------
        model : class of neural network model
        optimizer_class : class of SGD optimizer (e.g. pytorch's Adam)
        loss_func : loss to minimize
        device : string, "cuda:0" or "cpu"
        test_ratio : float, test size used in cross-validation (CV)
        random_state : integer, seed used in CV when splitting to train-test
        qlow : float, low quantile level in the range (0,1)
        qhigh : float, high quantile level in the range (0,1)
        use_rearrangement : boolean, use the rearrangement algorithm (True)
                            or not (False)
        """
        self.model = model.to(device)
        self.use_rearrangement = use_rearrangement
        self.compute_coverage = True
        self.quantile_low = qlow
        self.quantile_high = qhigh
        self.target_coverage = 100.0 * (self.quantile_high - self.quantile_low)
        self.all_quantiles = loss_func.quantiles
        self.optimizer_class = optimizer_class
        self.optimizer = optimizer_class(self.model.parameters())
        self.loss_func = loss_func.to(device)
        self.device = device
        self.test_ratio = test_ratio
        self.random_state = random_state
        self.loss_history = []
        self.test_loss_history = []
        self.full_loss_history = []

    def fit(self, x, y, epochs, batch_size, verbose=False):
        """Fit the model to data.

        Parameters
        ----------
        x : numpy array of training features (nXp)
        y : numpy array of training labels (n)
        epochs : integer, maximal number of epochs
        batch_size : integer, mini-batch size used in SGD solver
        verbose : boolean, print progress every 100 epochs when True
        """
        sys.stdout.flush()
        model = copy.deepcopy(self.model)
        # Bug fix: the clone used to be moved to the module-level ``device``
        # instead of the device this learner was configured with, which
        # breaks when the two differ.
        model = model.to(self.device)
        optimizer = self.optimizer_class(model.parameters())
        best_epoch = epochs
        x_train, xx, y_train, yy = train_test_split(x,
                                                    y,
                                                    test_size=self.test_ratio,
                                                    random_state=self.random_state)
        x_train = torch.from_numpy(x_train).float().to(self.device).requires_grad_(False)
        xx = torch.from_numpy(xx).float().to(self.device).requires_grad_(False)
        y_train = torch.from_numpy(y_train).float().to(self.device).requires_grad_(False)
        yy_cpu = yy  # keep the numpy copy for the coverage computation below
        yy = torch.from_numpy(yy).float().to(self.device).requires_grad_(False)
        best_avg_length = 1e10
        best_coverage = 0
        best_cnt = 1e10
        cnt = 0
        for e in range(epochs):
            model.train()
            epoch_loss, cnt = epoch_internal_train(model, self.loss_func, x_train,
                                                   y_train, batch_size, optimizer, cnt)
            self.loss_history.append(epoch_loss)
            # evaluate interval quality on the held-out split
            model.eval()
            preds = model(xx)
            test_epoch_loss = self.loss_func(preds, yy).cpu().detach().numpy()
            self.test_loss_history.append(test_epoch_loss)
            test_preds = preds.cpu().detach().numpy()
            test_preds = np.squeeze(test_preds)
            if self.use_rearrangement:
                test_preds = rearrange(self.all_quantiles, self.quantile_low,
                                       self.quantile_high, test_preds)
            y_lower = test_preds[:, 0]
            y_upper = test_preds[:, 1]
            coverage, avg_length = helper.compute_coverage_len(yy_cpu, y_lower, y_upper)
            # keep the shortest intervals among those that reach target coverage
            if (coverage >= self.target_coverage) and (avg_length < best_avg_length):
                best_avg_length = avg_length
                best_coverage = coverage
                best_epoch = e
                best_cnt = cnt
            if (e + 1) % 100 == 0 and verbose:
                print("CV: Epoch {}: Train {}, Test {}, Best epoch {}, Best Coverage {} Best Length {} Cur Coverage {}".format(
                    e + 1, epoch_loss, test_epoch_loss, best_epoch, best_coverage, best_avg_length, coverage))
                sys.stdout.flush()
        # use all the data to train the stored model, for best_cnt steps
        x = torch.from_numpy(x).float().to(self.device).requires_grad_(False)
        y = torch.from_numpy(y).float().to(self.device).requires_grad_(False)
        cnt = 0
        for e in range(best_epoch + 1):
            if cnt > best_cnt:
                break
            epoch_loss, cnt = epoch_internal_train(self.model, self.loss_func, x, y,
                                                   batch_size, self.optimizer, cnt, best_cnt)
            self.full_loss_history.append(epoch_loss)
            if (e + 1) % 100 == 0 and verbose:
                print("Full: Epoch {}: {}, cnt {}".format(e + 1, epoch_loss, cnt))
                sys.stdout.flush()

    def predict(self, x):
        """Estimate the conditional low and high quantile given the features.

        Parameters
        ----------
        x : numpy array of training features (nXp)

        Returns
        -------
        test_preds : numpy array of predicted low and high quantiles (nX2)
        """
        self.model.eval()
        test_preds = self.model(
            torch.from_numpy(x).to(self.device).requires_grad_(False)
        ).cpu().detach().numpy()
        if self.use_rearrangement:
            test_preds = rearrange(self.all_quantiles, self.quantile_low,
                                   self.quantile_high, test_preds)
        else:
            # enforce non-crossing quantiles by sorting the two columns
            test_preds[:, 0] = np.min(test_preds, axis=1)
            test_preds[:, 1] = np.max(test_preds, axis=1)
        return test_preds
| 17,313 | 33.217391 | 215 | py |
cqr | cqr-master/cqr/helper.py |
import sys
import torch
import numpy as np
from cqr import torch_models
from functools import partial
from cqr import tune_params_cv
from nonconformist.cp import IcpRegressor
from nonconformist.base import RegressorAdapter
from skgarden import RandomForestQuantileRegressor
# Run the neural-network learners on the GPU when one is available,
# otherwise fall back to the CPU. Used as the default device module-wide.
if torch.cuda.is_available():
    device = "cuda:0"
else:
    device = "cpu"
def compute_coverage_len(y_test, y_lower, y_upper):
    """ Compute average coverage and length of prediction intervals

    Parameters
    ----------
    y_test : numpy array, true labels (n)
    y_lower : numpy array, estimated lower bound for the labels (n)
    y_upper : numpy array, estimated upper bound for the labels (n)

    Returns
    -------
    coverage : float, average coverage
    avg_length : float, average length
    """
    covered = (y_test >= y_lower) & (y_test <= y_upper)
    coverage = np.sum(covered) / len(y_test) * 100
    avg_length = np.mean(abs(y_upper - y_lower))
    return coverage, avg_length
def run_icp(nc, X_train, y_train, X_test, idx_train, idx_cal, significance, condition=None):
    """ Run split conformal method

    Parameters
    ----------
    nc : class of nonconformist object
    X_train : numpy array, training features (n1Xp)
    y_train : numpy array, training labels (n1)
    X_test : numpy array, testing features (n2Xp)
    idx_train : numpy array, indices of proper training set examples
    idx_cal : numpy array, indices of calibration set examples
    significance : float, significance level (e.g. 0.1)
    condition : function, mapping feature vector to group id

    Returns
    -------
    y_lower : numpy array, estimated lower bound for the labels (n2)
    y_upper : numpy array, estimated upper bound for the labels (n2)
    """
    regressor = IcpRegressor(nc, condition=condition)
    # fit on the proper training set, then calibrate on the calibration set
    regressor.fit(X_train[idx_train, :], y_train[idx_train])
    regressor.calibrate(X_train[idx_cal, :], y_train[idx_cal])
    # prediction intervals for the test set at the requested significance
    intervals = regressor.predict(X_test, significance=significance)
    return intervals[:, 0], intervals[:, 1]
def run_icp_sep(nc, X_train, y_train, X_test, idx_train, idx_cal, significance, condition):
    """ Run split conformal method, train a seperate regressor for each group

    Parameters
    ----------
    nc : class of nonconformist object
    X_train : numpy array, training features (n1Xp)
    y_train : numpy array, training labels (n1)
    X_test : numpy array, testing features (n2Xp)
    idx_train : numpy array, indices of proper training set examples
    idx_cal : numpy array, indices of calibration set examples
    significance : float, significance level (e.g. 0.1)
    condition : function, mapping a feature vector to group id

    Returns
    -------
    y_lower : numpy array, estimated lower bound for the labels (n2)
    y_upper : numpy array, estimated upper bound for the labels (n2)
    """
    X_proper = X_train[idx_train, :]
    y_proper = y_train[idx_train]
    X_cal = X_train[idx_cal, :]
    y_cal = y_train[idx_cal]

    # map every example (train / calibration / test) to its group id
    groups_proper = np.array([condition((X_proper[i, :], y_proper[i]))
                              for i in range(y_proper.size)])
    groups_cal = np.array([condition((X_cal[i, :], y_cal[i]))
                           for i in range(y_cal.size)])
    groups_test = np.array([condition((X_test[i, :], None))
                            for i in range(X_test.shape[0])])

    y_lower = np.zeros(X_test.shape[0])
    y_upper = np.zeros(X_test.shape[0])
    # one independent split-conformal regressor per group
    for k, group in enumerate(np.unique(groups_proper)):
        icp = IcpRegressor(nc[k])
        mask = groups_proper == group
        icp.fit(X_proper[mask, :], y_proper[mask])
        mask = groups_cal == group
        icp.calibrate(X_cal[mask, :], y_cal[mask])
        mask = groups_test == group
        intervals = icp.predict(X_test[mask, :], significance=significance)
        y_lower[mask] = intervals[:, 0]
        y_upper[mask] = intervals[:, 1]
    return y_lower, y_upper
def compute_coverage(y_test,y_lower,y_upper,significance,name=""):
""" Compute average coverage and length, and print results
Parameters
----------
y_test : numpy array, true labels (n)
y_lower : numpy array, estimated lower bound for the labels (n)
y_upper : numpy array, estimated upper bound for the labels (n)
significance : float, desired significance level
name : string, optional output string (e.g. the method name)
Returns
-------
coverage : float, average coverage
avg_length : float, average length
"""
in_the_range = np.sum((y_test >= y_lower) & (y_test <= y_upper))
coverage = in_the_range / len(y_test) * 100
print("%s: Percentage in the range (expecting %.2f): %f" % (name, 100 - significance*100, coverage))
sys.stdout.flush()
avg_length = abs(np.mean(y_lower - y_upper))
print("%s: Average length: %f" % (name, avg_length))
sys.stdout.flush()
return coverage, avg_length
def compute_coverage_per_sample(y_test,y_lower,y_upper,significance,name="",x_test=None,condition=None):
""" Compute average coverage and length, and print results
Parameters
----------
y_test : numpy array, true labels (n)
y_lower : numpy array, estimated lower bound for the labels (n)
y_upper : numpy array, estimated upper bound for the labels (n)
significance : float, desired significance level
name : string, optional output string (e.g. the method name)
x_test : numpy array, test features
condition : function, mapping a feature vector to group id
Returns
-------
coverage : float, average coverage
avg_length : float, average length
"""
if condition is not None:
category_map = np.array([condition((x_test[i, :], y_test[i])) for i in range(y_test.size)])
categories = np.unique(category_map)
coverage = np.empty(len(categories), dtype=np.object)
length = np.empty(len(categories), dtype=np.object)
cnt = 0
for cond in categories:
idx = category_map == cond
coverage[cnt] = (y_test[idx] >= y_lower[idx]) & (y_test[idx] <= y_upper[idx])
coverage_avg = np.sum( coverage[cnt] ) / len(y_test[idx]) * 100
print("%s: Group %d : Percentage in the range (expecting %.2f): %f" % (name, cond, 100 - significance*100, coverage_avg))
sys.stdout.flush()
length[cnt] = abs(y_upper[idx] - y_lower[idx])
print("%s: Group %d : Average length: %f" % (name, cond, np.mean(length[cnt])))
sys.stdout.flush()
cnt = cnt + 1
else:
coverage = (y_test >= y_lower) & (y_test <= y_upper)
coverage_avg = np.sum(coverage) / len(y_test) * 100
print("%s: Percentage in the range (expecting %.2f): %f" % (name, 100 - significance*100, coverage_avg))
sys.stdout.flush()
length = abs(y_upper - y_lower)
print("%s: Average length: %f" % (name, np.mean(length)))
sys.stdout.flush()
return coverage, length
def plot_func_data(y_test,y_lower,y_upper,name=""):
""" Plot the test labels along with the constructed prediction band
Parameters
----------
y_test : numpy array, true labels (n)
y_lower : numpy array, estimated lower bound for the labels (n)
y_upper : numpy array, estimated upper bound for the labels (n)
name : string, optional output string (e.g. the method name)
"""
# allowed to import graphics
import matplotlib.pyplot as plt
interval = y_upper - y_lower
sort_ind = np.argsort(interval)
y_test_sorted = y_test[sort_ind]
upper_sorted = y_upper[sort_ind]
lower_sorted = y_lower[sort_ind]
mean = (upper_sorted + lower_sorted) / 2
# Center such that the mean of the prediction interval is at 0.0
y_test_sorted -= mean
upper_sorted -= mean
lower_sorted -= mean
plt.plot(y_test_sorted, "ro")
plt.fill_between(
np.arange(len(upper_sorted)), lower_sorted, upper_sorted, alpha=0.2, color="r",
label="Pred. interval")
plt.xlabel("Ordered samples")
plt.ylabel("Values and prediction intervals")
plt.title(name)
plt.show()
interval = y_upper - y_lower
sort_ind = np.argsort(y_test)
y_test_sorted = y_test[sort_ind]
upper_sorted = y_upper[sort_ind]
lower_sorted = y_lower[sort_ind]
plt.plot(y_test_sorted, "ro")
plt.fill_between(
np.arange(len(upper_sorted)), lower_sorted, upper_sorted, alpha=0.2, color="r",
label="Pred. interval")
plt.xlabel("Ordered samples by response")
plt.ylabel("Values and prediction intervals")
plt.title(name)
plt.show()
###############################################################################
# Deep conditional mean regression
# Minimizing MSE loss
###############################################################################
class MSENet_RegressorAdapter(RegressorAdapter):
    """Conditional mean estimator, formulated as neural net."""

    def __init__(self,
                 model,
                 fit_params=None,
                 in_shape=1,
                 hidden_size=1,
                 learn_func=torch.optim.Adam,
                 epochs=1000,
                 batch_size=10,
                 dropout=0.1,
                 lr=0.01,
                 wd=1e-6,
                 test_ratio=0.2,
                 random_state=0):
        """ Initialization

        Parameters
        ----------
        model : unused parameter (for compatibility with nc class)
        fit_params : unused parameter (for compatibility with nc class)
        in_shape : integer, input signal dimension
        hidden_size : integer, hidden layer dimension
        learn_func : class of Pytorch's SGD optimizer
        epochs : integer, maximal number of epochs
        batch_size : integer, mini-batch size for SGD
        dropout : float, dropout rate
        lr : float, learning rate for SGD
        wd : float, weight decay
        test_ratio : float, ratio of held-out data, used in cross-validation
        random_state : integer, seed for splitting the data in cross-validation
        """
        super(MSENet_RegressorAdapter, self).__init__(model, fit_params)
        # record the hyper-parameters
        self.epochs = epochs
        self.batch_size = batch_size
        self.dropout = dropout
        self.lr = lr
        self.wd = wd
        self.test_ratio = test_ratio
        self.random_state = random_state
        # build the underlying network, loss and two-phase learner
        self.model = torch_models.mse_model(in_shape=in_shape,
                                            hidden_size=hidden_size,
                                            dropout=dropout)
        self.loss_func = torch.nn.MSELoss()
        optimizer_factory = partial(learn_func, lr=lr, weight_decay=wd)
        self.learner = torch_models.LearnerOptimized(self.model,
                                                     optimizer_factory,
                                                     self.loss_func,
                                                     device=device,
                                                     test_ratio=self.test_ratio,
                                                     random_state=self.random_state)

    def fit(self, x, y):
        """ Fit the model to data

        Parameters
        ----------
        x : numpy array of training features (nXp)
        y : numpy array of training labels (n)
        """
        self.learner.fit(x, y, self.epochs, batch_size=self.batch_size)

    def predict(self, x):
        """ Estimate the label given the features

        Parameters
        ----------
        x : numpy array of training features (nXp)

        Returns
        -------
        ret_val : numpy array of predicted labels (n)
        """
        return self.learner.predict(x)
###############################################################################
# Deep neural network for conditional quantile regression
# Minimizing pinball loss
###############################################################################
class AllQNet_RegressorAdapter(RegressorAdapter):
    """ Conditional quantile estimator, formulated as neural net
    """
    def __init__(self,
                 model,
                 fit_params=None,
                 in_shape=1,
                 hidden_size=1,
                 quantiles=None,
                 learn_func=torch.optim.Adam,
                 epochs=1000,
                 batch_size=10,
                 dropout=0.1,
                 lr=0.01,
                 wd=1e-6,
                 test_ratio=0.2,
                 random_state=0,
                 use_rearrangement=False):
        """ Initialization

        Parameters
        ----------
        model : None, unused parameter (for compatibility with nc class)
        fit_params : None, unused parameter (for compatibility with nc class)
        in_shape : integer, input signal dimension
        hidden_size : integer, hidden layer dimension
        quantiles : numpy array, low and high quantile levels in range (0,1);
                    defaults to [0.05, 0.95] when omitted
        learn_func : class of Pytorch's SGD optimizer
        epochs : integer, maximal number of epochs
        batch_size : integer, mini-batch size for SGD
        dropout : float, dropout rate
        lr : float, learning rate for SGD
        wd : float, weight decay
        test_ratio : float, ratio of held-out data, used in cross-validation
        random_state : integer, seed for splitting the data in cross-validation
        use_rearrangement : boolean, use the rearrangement algorithm (True)
                            of not (False). See reference [1].

        References
        ----------
        .. [1] Chernozhukov, Victor, Iván Fernández‐Val, and Alfred Galichon.
               "Quantile and probability curves without crossing."
               Econometrica 78.3 (2010): 1093-1125.
        """
        super(AllQNet_RegressorAdapter, self).__init__(model, fit_params)
        # Bug fix: use a None sentinel instead of a mutable default argument,
        # so the default list is never shared between instances.
        if quantiles is None:
            quantiles = [.05, .95]
        self.quantiles = quantiles
        if use_rearrangement:
            # estimate a dense grid of quantiles, then rearrange (see [1])
            self.all_quantiles = torch.from_numpy(np.linspace(0.01, 0.99, 99)).float()
        else:
            self.all_quantiles = self.quantiles
        self.epochs = epochs
        self.batch_size = batch_size
        self.dropout = dropout
        self.lr = lr
        self.wd = wd
        self.test_ratio = test_ratio
        self.random_state = random_state
        self.model = torch_models.all_q_model(quantiles=self.all_quantiles,
                                              in_shape=in_shape,
                                              hidden_size=hidden_size,
                                              dropout=dropout)
        self.loss_func = torch_models.AllQuantileLoss(self.all_quantiles)
        self.learner = torch_models.LearnerOptimizedCrossing(self.model,
                                                             partial(learn_func, lr=lr, weight_decay=wd),
                                                             self.loss_func,
                                                             device=device,
                                                             test_ratio=self.test_ratio,
                                                             random_state=self.random_state,
                                                             qlow=self.quantiles[0],
                                                             qhigh=self.quantiles[1],
                                                             use_rearrangement=use_rearrangement)

    def fit(self, x, y):
        """ Fit the model to data

        Parameters
        ----------
        x : numpy array of training features (nXp)
        y : numpy array of training labels (n)
        """
        self.learner.fit(x, y, self.epochs, self.batch_size)

    def predict(self, x):
        """ Estimate the conditional low and high quantiles given the features

        Parameters
        ----------
        x : numpy array of training features (nXp)

        Returns
        -------
        ret_val : numpy array of estimated conditional quantiles (nX2)
        """
        return self.learner.predict(x)
###############################################################################
# Quantile random forests model
###############################################################################
class QuantileForestRegressorAdapter(RegressorAdapter):
    """ Conditional quantile estimator, defined as quantile random forests (QRF)

    References
    ----------
    .. [1] Meinshausen, Nicolai. "Quantile regression forests."
           Journal of Machine Learning Research 7.Jun (2006): 983-999.
    """
    def __init__(self,
                 model,
                 fit_params=None,
                 quantiles=None,
                 params=None):
        """ Initialization

        Parameters
        ----------
        model : None, unused parameter (for compatibility with nc class)
        fit_params : None, unused parameter (for compatibility with nc class)
        quantiles : numpy array, low and high quantile levels in range (0,100);
                    defaults to [5, 95] when omitted
        params : dictionary of parameters
                 params["random_state"] : integer, seed for splitting the data
                                          in cross-validation. Also used as the
                                          seed in quantile random forests (QRF)
                 params["min_samples_leaf"] : integer, parameter of QRF
                 params["n_estimators"] : integer, parameter of QRF
                 params["max_features"] : integer, parameter of QRF
                 params["CV"] : boolean, use cross-validation (True) or
                                not (False) to tune the two QRF quantile levels
                                to obtain the desired coverage
                 params["test_ratio"] : float, ratio of held-out data, used
                                        in cross-validation
                 params["coverage_factor"] : float, to avoid too conservative
                                             estimation of the prediction band,
                                             when tuning the two QRF quantile
                                             levels in cross-validation one may
                                             ask for prediction intervals with
                                             reduced average coverage, equal to
                                             coverage_factor*(q_high - q_low).
                 params["range_vals"] : float, determines the lowest and highest
                                        quantile level parameters when tuning
                                        the quanitle levels by cross-validation.
                                        The smallest value is equal to
                                        quantiles[0] - range_vals.
                                        Similarly, the largest is equal to
                                        quantiles[1] + range_vals.
                 params["num_vals"] : integer, when tuning QRF's quantile
                                      parameters, sweep over a grid of length
                                      num_vals.
        """
        super(QuantileForestRegressorAdapter, self).__init__(model, fit_params)
        # Bug fix: use a None sentinel instead of a mutable default argument,
        # so the default list is never shared between instances.
        if quantiles is None:
            quantiles = [5, 95]
        self.quantiles = quantiles
        self.cv_quantiles = self.quantiles
        self.params = params
        self.rfqr = RandomForestQuantileRegressor(random_state=params["random_state"],
                                                  min_samples_leaf=params["min_samples_leaf"],
                                                  n_estimators=params["n_estimators"],
                                                  max_features=params["max_features"])

    def fit(self, x, y):
        """ Fit the model to data; optionally tune the two quantile levels
        by cross-validation first (when params["CV"] is True).

        Parameters
        ----------
        x : numpy array of training features (nXp)
        y : numpy array of training labels (n)
        """
        if self.params["CV"]:
            target_coverage = self.quantiles[1] - self.quantiles[0]
            coverage_factor = self.params["coverage_factor"]
            range_vals = self.params["range_vals"]
            num_vals = self.params["num_vals"]
            # sweep a grid of candidate (low, high) quantile level pairs
            grid_q_low = np.linspace(self.quantiles[0], self.quantiles[0]+range_vals, num_vals).reshape(-1, 1)
            grid_q_high = np.linspace(self.quantiles[1], self.quantiles[1]-range_vals, num_vals).reshape(-1, 1)
            grid_q = np.concatenate((grid_q_low, grid_q_high), 1)
            self.cv_quantiles = tune_params_cv.CV_quntiles_rf(self.params,
                                                              x,
                                                              y,
                                                              target_coverage,
                                                              grid_q,
                                                              self.params["test_ratio"],
                                                              self.params["random_state"],
                                                              coverage_factor)
        self.rfqr.fit(x, y)

    def predict(self, x):
        """ Estimate the conditional low and high quantiles given the features

        Parameters
        ----------
        x : numpy array of training features (nXp)

        Returns
        -------
        ret_val : numpy array of estimated conditional quantiles (nX2)
        """
        lower = self.rfqr.predict(x, quantile=self.cv_quantiles[0])
        upper = self.rfqr.predict(x, quantile=self.cv_quantiles[1])
        ret_val = np.zeros((len(lower), 2))
        ret_val[:, 0] = lower
        ret_val[:, 1] = upper
        return ret_val
| 22,414 | 36.927242 | 133 | py |
cqr | cqr-master/reproducible_experiments/run_equalized_coverage_experiment.py | #!/usr/bin/env python
# coding: utf-8
import os
import torch
import random
import numpy as np
# Bug fix: the ``np.warnings`` alias was removed in NumPy 1.25; use the
# stdlib ``warnings`` module directly (identical behavior).
import warnings
warnings.filterwarnings('ignore')
from datasets import datasets
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
import pandas as pd
# for MEPS
def condition(x, y=None):
    """Group id for the MEPS equalized-coverage split: 1 when the last
    feature of the sample (the race indicator) is positive, else 0.
    ``x`` is a (features, label) pair; ``y`` is unused."""
    return 1 if x[0][-1] > 0 else 0
from cqr import helper
from nonconformist.nc import RegressorNc
from nonconformist.nc import SignErrorErrFunc
from nonconformist.nc import QuantileRegAsymmetricErrFunc
def append_statistics(coverage_sample,
                      length_sample,
                      method_name,
                      dataset_name_vec,
                      method_vec,
                      coverage_vec,
                      length_vec,
                      seed_vec,
                      test_ratio_vec,
                      seed,
                      test_ratio,
                      dataset_name_group_0,
                      dataset_name_group_1):
    """Append per-sample coverage/length statistics of the two groups to the
    accumulator lists (one row per test sample), extending them in place.

    Parameters
    ----------
    coverage_sample : sequence of two boolean numpy arrays, per-sample
                      coverage indicators for group 0 and group 1
    length_sample : sequence of two numpy arrays, per-sample interval lengths
    method_name : string, name of the method that produced the intervals
    dataset_name_vec, method_vec, coverage_vec, length_vec, seed_vec,
    test_ratio_vec : lists, extended in place with one entry per sample
    seed : integer, random seed used in the experiment
    test_ratio : float, test-set ratio used in the experiment
    dataset_name_group_0 : string, dataset label for group 0
    dataset_name_group_1 : string, dataset label for group 1
    """
    dataset_name_group = [dataset_name_group_0, dataset_name_group_1]
    for group_id in range(len(dataset_name_group)):
        # Bug fix: the ``np.float`` alias was removed in NumPy 1.24; the
        # builtin ``float`` gives the same (float64) dtype.
        coverage = (coverage_sample[group_id]).astype(float)
        length = length_sample[group_id]
        for i in range(len(coverage)):
            dataset_name_vec.append(dataset_name_group[group_id])
            method_vec.append(method_name)
            coverage_vec.append(coverage[i])
            length_vec.append(length[i])
            seed_vec.append(seed)
            test_ratio_vec.append(test_ratio)
def run_equalized_coverage_experiment(dataset_name, method, seed, save_to_csv=True, test_ratio = 0.2):
    """Run one equalized-coverage experiment for a dataset/method/seed triple.

    For method == "net" it fits conformal mean-regression neural nets, for
    method == "qnet" conformalized quantile regression (CQR) nets; each in
    three flavors (marginal, conditional-joint, conditional-groupwise).
    Per-sample coverage and interval length are recorded separately for the
    two groups defined by `condition` (non-white / white) and optionally
    appended to ./results/results.csv.

    Parameters
    ----------
    dataset_name : string, base name of the dataset to load
    method : string, "net" or "qnet" (anything else runs no model)
    seed : integer, seed for all RNGs and for the train/test split
    save_to_csv : boolean, append the per-sample rows to the csv (True)
    test_ratio : float, fraction of the data held out as the test set
    """
    random_state_train_test = seed
    # Seed every RNG involved (python, numpy, torch, cuda) for reproducibility.
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)
    # Crude environment detection: the cluster mounts /scratch.
    if os.path.isdir('/scratch'):
        local_machine = 0
    else:
        local_machine = 1
    if local_machine:
        dataset_base_path = '/Users/romano/mydata/regression_data/'
    else:
        dataset_base_path = '/scratch/users/yromano/data/regression_data/'
    # desired miscoverage error
    alpha = 0.1
    # desired quanitile levels
    quantiles = [0.05, 0.95]
    # name of dataset
    dataset_name_group_0 = dataset_name + "_non_white"
    dataset_name_group_1 = dataset_name + "_white"
    # load the dataset
    X, y = datasets.GetDataset(dataset_name, dataset_base_path)
    # divide the dataset into test and train based on the test_ratio parameter
    x_train, x_test, y_train, y_test = train_test_split(X,
                                                        y,
                                                        test_size=test_ratio,
                                                        random_state=random_state_train_test)
    # In[2]:
    # compute input dimensions
    n_train = x_train.shape[0]
    in_shape = x_train.shape[1]
    # divide the data into proper training set and calibration set
    idx = np.random.permutation(n_train)
    n_half = int(np.floor(n_train/2))
    idx_train, idx_cal = idx[:n_half], idx[n_half:2*n_half]
    # zero mean and unit variance scaling (fitted on the proper training part only)
    scalerX = StandardScaler()
    scalerX = scalerX.fit(x_train[idx_train])
    # scale
    x_train = scalerX.transform(x_train)
    x_test = scalerX.transform(x_test)
    # log-transform the response (counts-like data)
    y_train = np.log(1.0 + y_train)
    y_test = np.log(1.0 + y_test)
    # reshape the data
    x_train = np.asarray(x_train)
    y_train = np.squeeze(np.asarray(y_train))
    x_test = np.asarray(x_test)
    y_test = np.squeeze(np.asarray(y_test))
    # display basic information
    print("Dataset: %s" % (dataset_name))
    print("Dimensions: train set (n=%d, p=%d) ; test set (n=%d, p=%d)" %
          (x_train.shape[0], x_train.shape[1], x_test.shape[0], x_test.shape[1]))
    # In[3]:
    # accumulator lists, filled by append_statistics (one row per test sample)
    dataset_name_vec = []
    method_vec = []
    coverage_vec = []
    length_vec = []
    seed_vec = []
    test_ratio_vec = []
    if method == "net":
        # pytorch's optimizer object
        nn_learn_func = torch.optim.Adam
        # number of epochs
        epochs = 1000
        # learning rate
        lr = 0.0005
        # mini-batch size
        batch_size = 64
        # hidden dimension of the network
        hidden_size = 64
        # dropout regularization rate
        dropout = 0.1
        # weight decay regularization
        wd = 1e-6
        # ratio of held-out data, used in cross-validation
        cv_test_ratio = 0.1
        # seed for splitting the data in cross-validation.
        # Also used as the seed in quantile random forests function
        cv_random_state = 1
        # In[4]:
        # --- marginal conformal prediction (one net, no group conditioning) ---
        model = helper.MSENet_RegressorAdapter(model=None,
                                               fit_params=None,
                                               in_shape = in_shape,
                                               hidden_size = hidden_size,
                                               learn_func = nn_learn_func,
                                               epochs = epochs,
                                               batch_size=batch_size,
                                               dropout=dropout,
                                               lr=lr,
                                               wd=wd,
                                               test_ratio=cv_test_ratio,
                                               random_state=cv_random_state)
        nc = RegressorNc(model, SignErrorErrFunc())
        y_lower, y_upper = helper.run_icp(nc, x_train, y_train, x_test, idx_train, idx_cal, alpha)
        method_name = "Marginal Conformal Neural Network"
        # compute and print average coverage and average length
        coverage_sample, length_sample = helper.compute_coverage_per_sample(y_test,
                                                                            y_lower,
                                                                            y_upper,
                                                                            alpha,
                                                                            method_name,
                                                                            x_test,
                                                                            condition)
        append_statistics(coverage_sample,
                          length_sample,
                          method_name,
                          dataset_name_vec,
                          method_vec,
                          coverage_vec,
                          length_vec,
                          seed_vec,
                          test_ratio_vec,
                          seed,
                          test_ratio,
                          dataset_name_group_0,
                          dataset_name_group_1)
        # In[]
        # --- conditional (joint) conformal prediction: one net, calibration
        #     performed per group via the `condition` function ---
        model = helper.MSENet_RegressorAdapter(model=None,
                                               fit_params=None,
                                               in_shape = in_shape,
                                               hidden_size = hidden_size,
                                               learn_func = nn_learn_func,
                                               epochs = epochs,
                                               batch_size=batch_size,
                                               dropout=dropout,
                                               lr=lr,
                                               wd=wd,
                                               test_ratio=cv_test_ratio,
                                               random_state=cv_random_state)
        nc = RegressorNc(model, SignErrorErrFunc())
        y_lower, y_upper = helper.run_icp(nc, x_train, y_train, x_test, idx_train, idx_cal, alpha, condition)
        method_name = "Conditional Conformal Neural Network (joint)"
        # compute and print average coverage and average length
        coverage_sample, length_sample = helper.compute_coverage_per_sample(y_test,
                                                                            y_lower,
                                                                            y_upper,
                                                                            alpha,
                                                                            method_name,
                                                                            x_test,
                                                                            condition)
        append_statistics(coverage_sample,
                          length_sample,
                          method_name,
                          dataset_name_vec,
                          method_vec,
                          coverage_vec,
                          length_vec,
                          seed_vec,
                          test_ratio_vec,
                          seed,
                          test_ratio,
                          dataset_name_group_0,
                          dataset_name_group_1)
        # In[6]
        # --- conditional (groupwise) conformal prediction: a separate net
        #     fitted and calibrated per group ---
        category_map = np.array([condition((x_train[i, :], None)) for i in range(x_train.shape[0])])
        categories = np.unique(category_map)
        estimator_list = []
        nc_list = []
        for i in range(len(categories)):
            # define a QRF model per group
            estimator_list.append(helper.MSENet_RegressorAdapter(model=None,
                                                                 fit_params=None,
                                                                 in_shape = in_shape,
                                                                 hidden_size = hidden_size,
                                                                 learn_func = nn_learn_func,
                                                                 epochs = epochs,
                                                                 batch_size=batch_size,
                                                                 dropout=dropout,
                                                                 lr=lr,
                                                                 wd=wd,
                                                                 test_ratio=cv_test_ratio,
                                                                 random_state=cv_random_state))
            # define the CQR object
            nc_list.append(RegressorNc(estimator_list[i], SignErrorErrFunc()))
        # run CQR procedure
        y_lower, y_upper = helper.run_icp_sep(nc_list, x_train, y_train, x_test, idx_train, idx_cal, alpha, condition)
        method_name = "Conditional Conformal Neural Network (groupwise)"
        # compute and print average coverage and average length
        coverage_sample, length_sample = helper.compute_coverage_per_sample(y_test,
                                                                            y_lower,
                                                                            y_upper,
                                                                            alpha,
                                                                            method_name,
                                                                            x_test,
                                                                            condition)
        append_statistics(coverage_sample,
                          length_sample,
                          method_name,
                          dataset_name_vec,
                          method_vec,
                          coverage_vec,
                          length_vec,
                          seed_vec,
                          test_ratio_vec,
                          seed,
                          test_ratio,
                          dataset_name_group_0,
                          dataset_name_group_1)
    # In[]
    if method == "qnet":
        # pytorch's optimizer object
        nn_learn_func = torch.optim.Adam
        # number of epochs
        epochs = 1000
        # learning rate
        lr = 0.0005
        # mini-batch size
        batch_size = 64
        # hidden dimension of the network
        hidden_size = 64
        # dropout regularization rate
        dropout = 0.1
        # weight decay regularization
        wd = 1e-6
        # desired quantiles
        quantiles_net = [0.05, 0.95]
        # ratio of held-out data, used in cross-validation
        cv_test_ratio = 0.1
        # seed for splitting the data in cross-validation.
        # Also used as the seed in quantile random forests function
        cv_random_state = 1
        # In[7]:
        # define quantile neural network model (marginal CQR)
        quantile_estimator = helper.AllQNet_RegressorAdapter(model=None,
                                                             fit_params=None,
                                                             in_shape=in_shape,
                                                             hidden_size=hidden_size,
                                                             quantiles=quantiles_net,
                                                             learn_func=nn_learn_func,
                                                             epochs=epochs,
                                                             batch_size=batch_size,
                                                             dropout=dropout,
                                                             lr=lr,
                                                             wd=wd,
                                                             test_ratio=cv_test_ratio,
                                                             random_state=cv_random_state,
                                                             use_rearrangement=False)
        # define the CQR object, computing the absolute residual error of points
        # located outside the estimated quantile neural network band
        nc = RegressorNc(quantile_estimator, QuantileRegAsymmetricErrFunc())
        # run CQR procedure
        y_lower, y_upper = helper.run_icp(nc, x_train, y_train, x_test, idx_train, idx_cal, alpha)
        method_name = "Marginal CQR Neural Network"
        # compute and print average coverage and average length
        coverage_sample, length_sample = helper.compute_coverage_per_sample(y_test,
                                                                            y_lower,
                                                                            y_upper,
                                                                            alpha,
                                                                            method_name,
                                                                            x_test,
                                                                            condition)
        append_statistics(coverage_sample,
                          length_sample,
                          method_name,
                          dataset_name_vec,
                          method_vec,
                          coverage_vec,
                          length_vec,
                          seed_vec,
                          test_ratio_vec,
                          seed,
                          test_ratio,
                          dataset_name_group_0,
                          dataset_name_group_1)
        # In[]
        # define qnet model (conditional-joint CQR: shared net, per-group calibration)
        quantile_estimator = helper.AllQNet_RegressorAdapter(model=None,
                                                             fit_params=None,
                                                             in_shape=in_shape,
                                                             hidden_size=hidden_size,
                                                             quantiles=quantiles_net,
                                                             learn_func=nn_learn_func,
                                                             epochs=epochs,
                                                             batch_size=batch_size,
                                                             dropout=dropout,
                                                             lr=lr,
                                                             wd=wd,
                                                             test_ratio=cv_test_ratio,
                                                             random_state=cv_random_state,
                                                             use_rearrangement=False)
        # define the CQR object
        nc = RegressorNc(quantile_estimator, QuantileRegAsymmetricErrFunc())
        # run CQR procedure
        y_lower, y_upper = helper.run_icp(nc, x_train, y_train, x_test, idx_train, idx_cal, alpha, condition)
        method_name = "Conditional CQR Neural Network (joint)"
        # compute and print average coverage and average length
        coverage_sample, length_sample = helper.compute_coverage_per_sample(y_test,
                                                                            y_lower,
                                                                            y_upper,
                                                                            alpha,
                                                                            method_name,
                                                                            x_test,
                                                                            condition)
        append_statistics(coverage_sample,
                          length_sample,
                          method_name,
                          dataset_name_vec,
                          method_vec,
                          coverage_vec,
                          length_vec,
                          seed_vec,
                          test_ratio_vec,
                          seed,
                          test_ratio,
                          dataset_name_group_0,
                          dataset_name_group_1)
        # In[6]
        # conditional-groupwise CQR: one quantile net fitted per group
        category_map = np.array([condition((x_train[i, :], None)) for i in range(x_train.shape[0])])
        categories = np.unique(category_map)
        quantile_estimator_list = []
        nc_list = []
        for i in range(len(categories)):
            # define a QRF model per group
            quantile_estimator_list.append(helper.AllQNet_RegressorAdapter(model=None,
                                                                           fit_params=None,
                                                                           in_shape=in_shape,
                                                                           hidden_size=hidden_size,
                                                                           quantiles=quantiles_net,
                                                                           learn_func=nn_learn_func,
                                                                           epochs=epochs,
                                                                           batch_size=batch_size,
                                                                           dropout=dropout,
                                                                           lr=lr,
                                                                           wd=wd,
                                                                           test_ratio=cv_test_ratio,
                                                                           random_state=cv_random_state,
                                                                           use_rearrangement=False))
            # append a CQR object
            nc_list.append(RegressorNc(quantile_estimator_list[i], QuantileRegAsymmetricErrFunc()))
        # run CQR procedure
        y_lower, y_upper = helper.run_icp_sep(nc_list, x_train, y_train, x_test, idx_train, idx_cal, alpha, condition)
        method_name = "Conditional CQR Neural Network (groupwise)"
        # compute and print average coverage and average length
        coverage_sample, length_sample = helper.compute_coverage_per_sample(y_test,
                                                                            y_lower,
                                                                            y_upper,
                                                                            alpha,
                                                                            method_name,
                                                                            x_test,
                                                                            condition)
        append_statistics(coverage_sample,
                          length_sample,
                          method_name,
                          dataset_name_vec,
                          method_vec,
                          coverage_vec,
                          length_vec,
                          seed_vec,
                          test_ratio_vec,
                          seed,
                          test_ratio,
                          dataset_name_group_0,
                          dataset_name_group_1)
    # In[]
    ############### Summary
    coverage_str = 'Coverage (expected ' + str(100 - alpha*100) + '%)'
    if save_to_csv:
        # append (not overwrite) the per-sample rows to the shared results file
        outdir = './results/'
        if not os.path.exists(outdir):
            os.mkdir(outdir)
        out_name = outdir + 'results.csv'
        df = pd.DataFrame({'name': dataset_name_vec,
                           'method': method_vec,
                           coverage_str : coverage_vec,
                           'Avg. Length' : length_vec,
                           'seed' : seed_vec,
                           'train test ratio' : test_ratio_vec})
        if os.path.isfile(out_name):
            df2 = pd.read_csv(out_name)
            df = pd.concat([df2, df], ignore_index=True)
        df.to_csv(out_name, index=False)
cqr | cqr-master/reproducible_experiments/run_cqr_experiment.py | import os
import sys
import torch
import random
import numpy as np
import pandas as pd
from cqr import helper
from datasets import datasets
from sklearn import linear_model
from nonconformist.nc import NcFactory
from nonconformist.nc import RegressorNc
from nonconformist.nc import AbsErrorErrFunc
from nonconformist.nc import QuantileRegErrFunc
from nonconformist.nc import RegressorNormalizer
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import KNeighborsRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
from nonconformist.nc import QuantileRegAsymmetricErrFunc
# Use the fully-qualified option key: the bare 'precision' alias was
# deprecated in pandas 1.x and removed in pandas 2.0.
pd.set_option('display.precision', 3)

# Default dataset location; overridden below depending on the machine.
base_dataset_path = './datasets/'

# Crude environment detection: the cluster mounts /scratch.
if os.path.isdir('/scratch'):
    local_machine = 0
else:
    local_machine = 1

if local_machine:
    base_dataset_path = '/Users/romano/mydata/regression_data/'
else:
    base_dataset_path = '/scratch/users/yromano/data/regression_data/'

# When True, plot the estimated intervals for every tested method.
plot_results = False
def run_experiment(dataset_name,
                   test_method,
                   random_state_train_test,
                   save_to_csv=True):
    """ Estimate 90% prediction intervals with one method on one dataset and
    print/record the average interval length and empirical coverage.

    Parameters
    ----------
    dataset_name : string, name of the dataset to load
    test_method : string, method to be tested, estimating
                  the 90% prediction interval; one of
                  'linear', 'neural_net', 'random_forest', 'quantile_net',
                  'cqr_quantile_net', 'cqr_asymmetric_quantile_net',
                  'rearrangement', 'cqr_rearrangement',
                  'cqr_asymmetric_rearrangement', 'quantile_forest',
                  'cqr_quantile_forest', 'cqr_asymmetric_quantile_forest'
    random_state_train_test : integer, random seed to be used
    save_to_csv : boolean, save average length and coverage to csv (True)
                  or not (False)
    """
    dataset_name_vec = []
    method_vec = []
    coverage_vec = []
    length_vec = []
    seed_vec = []
    seed = random_state_train_test
    # seed every RNG involved for reproducibility
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)
    # results default to 0 for the methods that are not run in this call;
    # the summary table below always prints every method
    coverage_linear=0
    length_linear=0
    coverage_linear_local=0
    length_linear_local=0
    coverage_net=0
    length_net=0
    coverage_net_local=0
    length_net_local=0
    coverage_forest=0
    length_forest=0
    coverage_forest_local=0
    length_forest_local=0
    coverage_cp_qnet=0
    length_cp_qnet=0
    coverage_qnet=0
    length_qnet=0
    coverage_cp_sign_qnet=0
    length_cp_sign_qnet=0
    coverage_cp_re_qnet=0
    length_cp_re_qnet=0
    coverage_re_qnet=0
    length_re_qnet=0
    coverage_cp_sign_re_qnet=0
    length_cp_sign_re_qnet=0
    coverage_cp_qforest=0
    length_cp_qforest=0
    coverage_qforest=0
    length_qforest=0
    coverage_cp_sign_qforest=0
    length_cp_sign_qforest=0
    # determines the size of test set
    test_ratio = 0.2
    # conformal prediction miscoverage level
    significance = 0.1
    # desired quantile levels, used by the quantile regression methods
    quantiles = [0.05, 0.95]
    # Random forests parameters (shared by conditional quantile random forests
    # and conditional mean random forests regression).
    n_estimators = 1000 # usual random forests n_estimators parameter
    min_samples_leaf = 1 # default parameter of sklearn
    # Quantile random forests parameters.
    # See QuantileForestRegressorAdapter class for more details
    quantiles_forest = [5, 95]
    CV_qforest = True
    coverage_factor = 0.85
    cv_test_ratio = 0.05
    cv_random_state = 1
    cv_range_vals = 30
    cv_num_vals = 10
    # Neural network parameters (shared by conditional quantile neural network
    # and conditional mean neural network regression)
    # See AllQNet_RegressorAdapter and MSENet_RegressorAdapter in helper.py
    nn_learn_func = torch.optim.Adam
    epochs = 1000
    lr = 0.0005
    hidden_size = 64
    batch_size = 64
    dropout = 0.1
    wd = 1e-6
    # Ask for a reduced coverage when tuning the network parameters by
    # cross-validation to avoid too conservative initial estimation of the
    # prediction interval. This estimation will be conformalized by CQR.
    quantiles_net = [0.1, 0.9]
    # local conformal prediction parameter.
    # See RegressorNc class for more details.
    beta = 1
    beta_net = 1
    # local conformal prediction parameter. The local ridge regression method
    # uses nearest neighbor regression as the MAD estimator.
    # Number of neighbors used by nearest neighbor regression.
    n_neighbors = 11
    print(dataset_name)
    sys.stdout.flush()
    try:
        # load the dataset
        X, y = datasets.GetDataset(dataset_name, base_dataset_path)
    except:
        # best-effort: skip silently-missing datasets instead of crashing the sweep
        print("CANNOT LOAD DATASET!")
        return
    # Dataset is divided into test and train data based on test_ratio parameter
    X_train, X_test, y_train, y_test = train_test_split(X,
                                                        y,
                                                        test_size=test_ratio,
                                                        random_state=random_state_train_test)
    # fit a simple ridge regression model (sanity check)
    model = linear_model.RidgeCV()
    model = model.fit(X_train, np.squeeze(y_train))
    predicted_data = model.predict(X_test).astype(np.float32)
    # calculate the normalized mean squared error
    print("Ridge relative error: %f" % (np.sum((np.squeeze(y_test)-predicted_data)**2)/np.sum(np.squeeze(y_test)**2)))
    sys.stdout.flush()
    # reshape the data
    X_train = np.asarray(X_train)
    y_train = np.asarray(y_train)
    X_test = np.asarray(X_test)
    y_test = np.asarray(y_test)
    # input dimensions
    n_train = X_train.shape[0]
    in_shape = X_train.shape[1]
    print("Size: train (%d, %d), test (%d, %d)" % (X_train.shape[0], X_train.shape[1], X_test.shape[0], X_test.shape[1]))
    sys.stdout.flush()
    # set seed for splitting the data into proper train and calibration
    np.random.seed(seed)
    idx = np.random.permutation(n_train)
    # divide the data into proper training set and calibration set
    n_half = int(np.floor(n_train/2))
    idx_train, idx_cal = idx[:n_half], idx[n_half:2*n_half]
    # zero mean and unit variance scaling of the train and test features
    scalerX = StandardScaler()
    scalerX = scalerX.fit(X_train[idx_train])
    X_train = scalerX.transform(X_train)
    X_test = scalerX.transform(X_test)
    # scale the labels by dividing each by the mean absolute response
    mean_ytrain = np.mean(np.abs(y_train[idx_train]))
    y_train = np.squeeze(y_train)/mean_ytrain
    y_test = np.squeeze(y_test)/mean_ytrain
    ######################## Linear
    if 'linear' == test_method:
        # split conformal prediction on top of ridge regression
        model = linear_model.RidgeCV()
        nc = RegressorNc(model)
        y_lower, y_upper = helper.run_icp(nc, X_train, y_train, X_test, idx_train, idx_cal, significance)
        if plot_results:
            helper.plot_func_data(y_test,y_lower,y_upper,"Ridge")
        coverage_linear, length_linear = helper.compute_coverage(y_test,y_lower,y_upper,significance,"Ridge")
        dataset_name_vec.append(dataset_name)
        method_vec.append('Ridge')
        coverage_vec.append(coverage_linear)
        length_vec.append(length_linear)
        seed_vec.append(seed)
        # locally-adaptive variant: residuals normalized by a k-NN MAD estimate
        nc = NcFactory.create_nc(
            linear_model.RidgeCV(),
            normalizer_model=KNeighborsRegressor(n_neighbors=n_neighbors)
        )
        y_lower, y_upper = helper.run_icp(nc, X_train, y_train, X_test, idx_train, idx_cal, significance)
        if plot_results:
            helper.plot_func_data(y_test,y_lower,y_upper,"Ridge-L")
        coverage_linear_local, length_linear_local = helper.compute_coverage(y_test,y_lower,y_upper,significance,"Ridge-L")
        dataset_name_vec.append(dataset_name)
        method_vec.append('Ridge-L')
        coverage_vec.append(coverage_linear_local)
        length_vec.append(length_linear_local)
        seed_vec.append(seed)
    ######################### Neural net
    if 'neural_net' == test_method:
        # split conformal prediction on top of a mean-regression neural net
        model = helper.MSENet_RegressorAdapter(model=None,
                                               fit_params=None,
                                               in_shape = in_shape,
                                               hidden_size = hidden_size,
                                               learn_func = nn_learn_func,
                                               epochs = epochs,
                                               batch_size=batch_size,
                                               dropout=dropout,
                                               lr=lr,
                                               wd=wd,
                                               test_ratio=cv_test_ratio,
                                               random_state=cv_random_state)
        nc = RegressorNc(model)
        y_lower, y_upper = helper.run_icp(nc, X_train, y_train, X_test, idx_train, idx_cal, significance)
        if plot_results:
            helper.plot_func_data(y_test,y_lower,y_upper,"Net")
        coverage_net, length_net = helper.compute_coverage(y_test,y_lower,y_upper,significance,"Net")
        dataset_name_vec.append(dataset_name)
        method_vec.append('Net')
        coverage_vec.append(coverage_net)
        length_vec.append(length_net)
        seed_vec.append(seed)
        # locally-adaptive variant: a second net estimates the residual magnitude
        normalizer_adapter = helper.MSENet_RegressorAdapter(model=None,
                                                            fit_params=None,
                                                            in_shape = in_shape,
                                                            hidden_size = hidden_size,
                                                            learn_func = nn_learn_func,
                                                            epochs = epochs,
                                                            batch_size=batch_size,
                                                            dropout=dropout,
                                                            lr=lr,
                                                            wd=wd,
                                                            test_ratio=cv_test_ratio,
                                                            random_state=cv_random_state)
        adapter = helper.MSENet_RegressorAdapter(model=None,
                                                 fit_params=None,
                                                 in_shape = in_shape,
                                                 hidden_size = hidden_size,
                                                 learn_func = nn_learn_func,
                                                 epochs = epochs,
                                                 batch_size=batch_size,
                                                 dropout=dropout,
                                                 lr=lr,
                                                 wd=wd,
                                                 test_ratio=cv_test_ratio,
                                                 random_state=cv_random_state)
        normalizer = RegressorNormalizer(adapter,
                                         normalizer_adapter,
                                         AbsErrorErrFunc())
        nc = RegressorNc(adapter, AbsErrorErrFunc(), normalizer, beta=beta_net)
        y_lower, y_upper = helper.run_icp(nc, X_train, y_train, X_test, idx_train, idx_cal, significance)
        if plot_results:
            helper.plot_func_data(y_test,y_lower,y_upper,"Net-L")
        coverage_net_local, length_net_local = helper.compute_coverage(y_test,y_lower,y_upper,significance,"Net-L")
        dataset_name_vec.append(dataset_name)
        method_vec.append('Net-L')
        coverage_vec.append(coverage_net_local)
        length_vec.append(length_net_local)
        seed_vec.append(seed)
    ################## Random Forest
    if 'random_forest' == test_method:
        # split conformal prediction on top of a mean-regression random forest
        model = RandomForestRegressor(n_estimators=n_estimators,min_samples_leaf=min_samples_leaf, random_state=0)
        nc = RegressorNc(model, AbsErrorErrFunc())
        y_lower, y_upper = helper.run_icp(nc, X_train, y_train, X_test, idx_train, idx_cal, significance)
        if plot_results:
            helper.plot_func_data(y_test,y_lower,y_upper,"RF")
        coverage_forest, length_forest = helper.compute_coverage(y_test,y_lower,y_upper,significance,"RF")
        dataset_name_vec.append(dataset_name)
        method_vec.append('RF')
        coverage_vec.append(coverage_forest)
        length_vec.append(length_forest)
        seed_vec.append(seed)
        # locally-adaptive variant: a second forest estimates the residual magnitude
        normalizer_adapter = RandomForestRegressor(n_estimators=n_estimators, min_samples_leaf=min_samples_leaf, random_state=0)
        adapter = RandomForestRegressor(n_estimators=n_estimators, min_samples_leaf=min_samples_leaf, random_state=0)
        normalizer = RegressorNormalizer(adapter,
                                         normalizer_adapter,
                                         AbsErrorErrFunc())
        nc = RegressorNc(adapter, AbsErrorErrFunc(), normalizer, beta=beta)
        y_lower, y_upper = helper.run_icp(nc, X_train, y_train, X_test, idx_train, idx_cal, significance)
        if plot_results:
            helper.plot_func_data(y_test,y_lower,y_upper,"RF-L")
        coverage_forest_local, length_forest_local = helper.compute_coverage(y_test,y_lower,y_upper,significance,"RF-L")
        dataset_name_vec.append(dataset_name)
        method_vec.append('RF-L')
        coverage_vec.append(coverage_forest_local)
        length_vec.append(length_forest_local)
        seed_vec.append(seed)
    ################## Quantile Net
    if 'quantile_net' == test_method:
        # plain quantile regression net, no conformalization
        model_full = helper.AllQNet_RegressorAdapter(model=None,
                                                     fit_params=None,
                                                     in_shape = in_shape,
                                                     hidden_size = hidden_size,
                                                     quantiles = quantiles,
                                                     learn_func = nn_learn_func,
                                                     epochs = epochs,
                                                     batch_size=batch_size,
                                                     dropout=dropout,
                                                     lr=lr,
                                                     wd=wd,
                                                     test_ratio=cv_test_ratio,
                                                     random_state=cv_random_state,
                                                     use_rearrangement=False)
        model_full.fit(X_train, y_train)
        tmp = model_full.predict(X_test)
        y_lower = tmp[:,0]
        y_upper = tmp[:,1]
        if plot_results:
            helper.plot_func_data(y_test,y_lower,y_upper,"QNet")
        coverage_qnet, length_qnet = helper.compute_coverage(y_test,y_lower,y_upper,significance,"QNet")
        dataset_name_vec.append(dataset_name)
        method_vec.append('QNet')
        coverage_vec.append(coverage_qnet)
        length_vec.append(length_qnet)
        seed_vec.append(seed)
    if 'cqr_quantile_net' == test_method:
        # CQR with symmetric conformalization of the quantile net band
        model = helper.AllQNet_RegressorAdapter(model=None,
                                                fit_params=None,
                                                in_shape = in_shape,
                                                hidden_size = hidden_size,
                                                quantiles = quantiles_net,
                                                learn_func = nn_learn_func,
                                                epochs = epochs,
                                                batch_size=batch_size,
                                                dropout=dropout,
                                                lr=lr,
                                                wd=wd,
                                                test_ratio=cv_test_ratio,
                                                random_state=cv_random_state,
                                                use_rearrangement=False)
        nc = RegressorNc(model, QuantileRegErrFunc())
        y_lower, y_upper = helper.run_icp(nc, X_train, y_train, X_test, idx_train, idx_cal, significance)
        if plot_results:
            helper.plot_func_data(y_test,y_lower,y_upper,"CQR Net")
        coverage_cp_qnet, length_cp_qnet = helper.compute_coverage(y_test,y_lower,y_upper,significance,"CQR Net")
        dataset_name_vec.append(dataset_name)
        method_vec.append('CQR Net')
        coverage_vec.append(coverage_cp_qnet)
        length_vec.append(length_cp_qnet)
        seed_vec.append(seed)
    if 'cqr_asymmetric_quantile_net' == test_method:
        # CQR with asymmetric (per-side) conformalization of the band
        model = helper.AllQNet_RegressorAdapter(model=None,
                                                fit_params=None,
                                                in_shape = in_shape,
                                                hidden_size = hidden_size,
                                                quantiles = quantiles_net,
                                                learn_func = nn_learn_func,
                                                epochs = epochs,
                                                batch_size=batch_size,
                                                dropout=dropout,
                                                lr=lr,
                                                wd=wd,
                                                test_ratio=cv_test_ratio,
                                                random_state=cv_random_state,
                                                use_rearrangement=False)
        nc = RegressorNc(model, QuantileRegAsymmetricErrFunc())
        y_lower, y_upper = helper.run_icp(nc, X_train, y_train, X_test, idx_train, idx_cal, significance)
        if plot_results:
            helper.plot_func_data(y_test,y_lower,y_upper,"CQR Sign Net")
        coverage_cp_sign_qnet, length_cp_sign_qnet = helper.compute_coverage(y_test,y_lower,y_upper,significance,"CQR Sign Net")
        dataset_name_vec.append(dataset_name)
        method_vec.append('CQR Sign Net')
        coverage_vec.append(coverage_cp_sign_qnet)
        length_vec.append(length_cp_sign_qnet)
        seed_vec.append(seed)
    ################### Rearrangement Quantile Net
    if 'rearrangement' == test_method:
        # quantile net with monotone rearrangement, no conformalization
        model_full = helper.AllQNet_RegressorAdapter(model=None,
                                                     fit_params=None,
                                                     in_shape = in_shape,
                                                     hidden_size = hidden_size,
                                                     quantiles = quantiles,
                                                     learn_func = nn_learn_func,
                                                     epochs = epochs,
                                                     batch_size=batch_size,
                                                     dropout=dropout,
                                                     lr=lr,
                                                     wd=wd,
                                                     test_ratio=cv_test_ratio,
                                                     random_state=cv_random_state,
                                                     use_rearrangement=True)
        model_full.fit(X_train, y_train)
        tmp = model_full.predict(X_test)
        y_lower = tmp[:,0]
        y_upper = tmp[:,1]
        if plot_results:
            helper.plot_func_data(y_test,y_lower,y_upper,"Rearrange QNet")
        coverage_re_qnet, length_re_qnet = helper.compute_coverage(y_test,y_lower,y_upper,significance,"Rearrange QNet")
        dataset_name_vec.append(dataset_name)
        method_vec.append('Rearrange QNet')
        coverage_vec.append(coverage_re_qnet)
        length_vec.append(length_re_qnet)
        seed_vec.append(seed)
    if 'cqr_rearrangement' == test_method:
        # CQR on top of the rearranged quantile net (symmetric error)
        model = helper.AllQNet_RegressorAdapter(model=None,
                                                fit_params=None,
                                                in_shape = in_shape,
                                                hidden_size = hidden_size,
                                                quantiles = quantiles_net,
                                                learn_func = nn_learn_func,
                                                epochs = epochs,
                                                batch_size=batch_size,
                                                dropout=dropout,
                                                lr=lr,
                                                wd=wd,
                                                test_ratio=cv_test_ratio,
                                                random_state=cv_random_state,
                                                use_rearrangement=True)
        nc = RegressorNc(model, QuantileRegErrFunc())
        y_lower, y_upper = helper.run_icp(nc, X_train, y_train, X_test, idx_train, idx_cal, significance)
        if plot_results:
            helper.plot_func_data(y_test,y_lower,y_upper,"Rearrange CQR Net")
        coverage_cp_re_qnet, length_cp_re_qnet = helper.compute_coverage(y_test,y_lower,y_upper,significance,"Rearrange CQR Net")
        dataset_name_vec.append(dataset_name)
        method_vec.append('Rearrange CQR Net')
        coverage_vec.append(coverage_cp_re_qnet)
        length_vec.append(length_cp_re_qnet)
        seed_vec.append(seed)
    if 'cqr_asymmetric_rearrangement' == test_method:
        # CQR on top of the rearranged quantile net (asymmetric error)
        model = helper.AllQNet_RegressorAdapter(model=None,
                                                fit_params=None,
                                                in_shape = in_shape,
                                                hidden_size = hidden_size,
                                                quantiles = quantiles_net,
                                                learn_func = nn_learn_func,
                                                epochs = epochs,
                                                batch_size=batch_size,
                                                dropout=dropout,
                                                lr=lr,
                                                wd=wd,
                                                test_ratio=cv_test_ratio,
                                                random_state=cv_random_state,
                                                use_rearrangement=True)
        nc = RegressorNc(model, QuantileRegAsymmetricErrFunc())
        y_lower, y_upper = helper.run_icp(nc, X_train, y_train, X_test, idx_train, idx_cal, significance)
        if plot_results:
            helper.plot_func_data(y_test,y_lower,y_upper,"Rearrange CQR Sign Net")
        # NOTE(review): the label below says "Rearrange CQR Net" while the plot
        # above says "Rearrange CQR Sign Net" — likely a copy-paste slip; the
        # label only affects the printed text.
        coverage_cp_sign_re_qnet, length_cp_sign_re_qnet = helper.compute_coverage(y_test,y_lower,y_upper,significance,"Rearrange CQR Net")
        dataset_name_vec.append(dataset_name)
        method_vec.append('Rearrange CQR Sign Net')
        coverage_vec.append(coverage_cp_sign_re_qnet)
        length_vec.append(length_cp_sign_re_qnet)
        seed_vec.append(seed)
    ################### Quantile Random Forest
    if 'quantile_forest' == test_method:
        # plain quantile random forest, no conformalization and no CV tuning
        params_qforest = dict()
        params_qforest["random_state"] = 0
        params_qforest["min_samples_leaf"] = min_samples_leaf
        params_qforest["n_estimators"] = n_estimators
        params_qforest["max_features"] = X_train.shape[1]
        params_qforest["CV"]=False
        params_qforest["coverage_factor"] = coverage_factor
        params_qforest["test_ratio"]=cv_test_ratio
        params_qforest["random_state"]=cv_random_state
        params_qforest["range_vals"] = cv_range_vals
        params_qforest["num_vals"] = cv_num_vals
        model_full = helper.QuantileForestRegressorAdapter(model = None,
                                                           fit_params=None,
                                                           quantiles=np.dot(100,quantiles),
                                                           params = params_qforest)
        model_full.fit(X_train, y_train)
        tmp = model_full.predict(X_test)
        y_lower = tmp[:,0]
        y_upper = tmp[:,1]
        if plot_results:
            helper.plot_func_data(y_test,y_lower,y_upper,"QRF")
        coverage_qforest, length_qforest = helper.compute_coverage(y_test,y_lower,y_upper,significance,"QRF")
        dataset_name_vec.append(dataset_name)
        method_vec.append('QRF')
        coverage_vec.append(coverage_qforest)
        length_vec.append(length_qforest)
        seed_vec.append(seed)
    if 'cqr_quantile_forest' == test_method:
        # CQR on top of a CV-tuned quantile random forest (symmetric error)
        params_qforest = dict()
        params_qforest["random_state"] = 0
        params_qforest["min_samples_leaf"] = min_samples_leaf
        params_qforest["n_estimators"] = n_estimators
        params_qforest["max_features"] = X_train.shape[1]
        params_qforest["CV"]=CV_qforest
        params_qforest["coverage_factor"] = coverage_factor
        params_qforest["test_ratio"]=cv_test_ratio
        params_qforest["random_state"]=cv_random_state
        params_qforest["range_vals"] = cv_range_vals
        params_qforest["num_vals"] = cv_num_vals
        model = helper.QuantileForestRegressorAdapter(model = None,
                                                      fit_params=None,
                                                      quantiles=quantiles_forest,
                                                      params = params_qforest)
        nc = RegressorNc(model, QuantileRegErrFunc())
        y_lower, y_upper = helper.run_icp(nc, X_train, y_train, X_test, idx_train, idx_cal, significance)
        if plot_results:
            helper.plot_func_data(y_test,y_lower,y_upper,"CQR RF")
        coverage_cp_qforest, length_cp_qforest = helper.compute_coverage(y_test,y_lower,y_upper,significance,"CQR RF")
        dataset_name_vec.append(dataset_name)
        method_vec.append('CQR RF')
        coverage_vec.append(coverage_cp_qforest)
        length_vec.append(length_cp_qforest)
        seed_vec.append(seed)
    if 'cqr_asymmetric_quantile_forest' == test_method:
        # CQR on top of a CV-tuned quantile random forest (asymmetric error)
        params_qforest = dict()
        params_qforest["random_state"] = 0
        params_qforest["min_samples_leaf"] = min_samples_leaf
        params_qforest["n_estimators"] = n_estimators
        params_qforest["max_features"] = X_train.shape[1]
        params_qforest["CV"]=CV_qforest
        params_qforest["coverage_factor"] = coverage_factor
        params_qforest["test_ratio"]=cv_test_ratio
        params_qforest["random_state"]=cv_random_state
        params_qforest["range_vals"] = cv_range_vals
        params_qforest["num_vals"] = cv_num_vals
        model = helper.QuantileForestRegressorAdapter(model = None,
                                                      fit_params=None,
                                                      quantiles=quantiles_forest,
                                                      params = params_qforest)
        nc = RegressorNc(model, QuantileRegAsymmetricErrFunc())
        y_lower, y_upper = helper.run_icp(nc, X_train, y_train, X_test, idx_train, idx_cal, significance)
        if plot_results:
            helper.plot_func_data(y_test,y_lower,y_upper,"CQR Sign RF")
        coverage_cp_sign_qforest, length_cp_sign_qforest = helper.compute_coverage(y_test,y_lower,y_upper,significance,"CQR Sign RF")
        dataset_name_vec.append(dataset_name)
        method_vec.append('CQR Sign RF')
        coverage_vec.append(coverage_cp_sign_qforest)
        length_vec.append(length_cp_sign_qforest)
        seed_vec.append(seed)
#        tmp = model.predict(X_test)
#        y_lower = tmp[:,0]
#        y_upper = tmp[:,1]
#        if plot_results:
#            helper.plot_func_data(y_test,y_lower,y_upper,"QRF")
#        coverage_qforest, length_qforest = helper.compute_coverage(y_test,y_lower,y_upper,significance,"QRF")
#
#        dataset_name_vec.append(dataset_name)
#        method_vec.append('QRF')
#        coverage_vec.append(coverage_qforest)
#        length_vec.append(length_qforest)
#        seed_vec.append(seed)
    ############### Summary
    coverage_str = 'Coverage (expected ' + str(100 - significance*100) + '%)'
    # full summary table (methods not run in this call show 0)
    results = np.array([[dataset_name, coverage_str, 'Avg. Length', 'Seed'],
                        ['CP Linear', coverage_linear, length_linear, seed],
                        ['CP Linear Local', coverage_linear_local, length_linear_local, seed],
                        ['CP Neural Net', coverage_net, length_net, seed],
                        ['CP Neural Net Local', coverage_net_local, length_net_local, seed],
                        ['CP Random Forest', coverage_forest, length_forest, seed],
                        ['CP Random Forest Local', coverage_forest_local, length_forest_local, seed],
                        ['CP Quantile Net', coverage_cp_qnet, length_cp_qnet, seed],
                        ['CP Asymmetric Quantile Net', coverage_cp_sign_qnet, length_cp_sign_qnet, seed],
                        ['Quantile Net', coverage_qnet, length_qnet, seed],
                        ['CP Rearrange Quantile Net', coverage_cp_re_qnet, length_cp_re_qnet, seed],
                        ['CP Asymmetric Rearrange Quantile Net', coverage_cp_sign_re_qnet, length_cp_sign_re_qnet, seed],
                        ['Rearrange Quantile Net', coverage_re_qnet, length_re_qnet, seed],
                        ['CP Quantile Random Forest', coverage_cp_qforest, length_cp_qforest, seed],
                        ['CP Asymmetric Quantile Random Forest', coverage_cp_sign_qforest, length_cp_sign_qforest, seed],
                        ['Quantile Random Forest', coverage_qforest, length_qforest, seed]])
    results_ = pd.DataFrame(data=results[1:,1:],
                            index=results[1:,0],
                            columns=results[0,1:])
    print("== SUMMARY == ")
    print("dataset name: " + dataset_name)
    print(results_)
    sys.stdout.flush()
    if save_to_csv:
        # append (not overwrite) the rows for this run to the shared csv
        results = pd.DataFrame(results)
        outdir = './results/'
        if not os.path.exists(outdir):
            os.mkdir(outdir)
        out_name = outdir + 'results.csv'
        df = pd.DataFrame({'name': dataset_name_vec,
                           'method': method_vec,
                           coverage_str : coverage_vec,
                           'Avg. Length' : length_vec,
                           'seed': seed_vec})
        if os.path.isfile(out_name):
            df2 = pd.read_csv(out_name)
            df = pd.concat([df2, df], ignore_index=True)
        df.to_csv(out_name, index=False)
| 30,512 | 41.915612 | 139 | py |
GXN | GXN-main/main.py | import sys
import os
import torch
import random
import datetime
import numpy as np
from tqdm import tqdm
import torch.nn as nn
import torch.optim as optim
import math
from network import GXN
from mlp_dropout import MLPClassifier
from sklearn import metrics
from util import cmd_args, load_data, sep_data
sys.path.append('%s/pytorch_structure2vec-master/s2v_lib' % os.path.dirname(os.path.realpath(__file__)))
class Classifier(nn.Module):
    """Graph classifier: a GXN embedding network followed by an MLP head.

    All hyperparameters are read from the global ``cmd_args`` produced by
    ``util.load_data``/argument parsing.
    """
    def __init__(self):
        """Build the GXN graph embedder and the MLP classification head."""
        super(Classifier, self).__init__()
        model = GXN
        print("latent dim is ", cmd_args.latent_dim)
        # graph embedding network; node features are one-hot tags
        # concatenated with continuous attributes (feat_dim + attr_dim)
        self.s2v = model(latent_dim=cmd_args.latent_dim,
                         output_dim=cmd_args.out_dim,
                         num_node_feats=cmd_args.feat_dim+cmd_args.attr_dim,
                         num_edge_feats=0,
                         k=cmd_args.sortpooling_k,
                         ks=[cmd_args.k1, cmd_args.k2],
                         cross_weight=cmd_args.cross_weight,
                         fuse_weight=cmd_args.fuse_weight,
                         R=cmd_args.Rhop)
        print("num_node_feats: ", cmd_args.feat_dim+cmd_args.attr_dim)
        out_dim = cmd_args.out_dim
        # out_dim == 0 means "use the embedder's own dense output size"
        if out_dim == 0:
            out_dim = self.s2v.dense_dim
        self.mlp = MLPClassifier(input_size=out_dim,
                                 hidden_size=cmd_args.hidden,
                                 num_class=cmd_args.num_class,
                                 with_dropout=cmd_args.dropout)
    def PrepareFeatureLabel(self, batch_graph):
        """Concatenate node tags/features of a graph batch into one tensor.

        Returns a ``(node_feat, labels)`` pair where ``node_feat`` stacks the
        nodes of all graphs in the batch and ``labels`` holds one class label
        per graph. Presence of tags/features is probed on the first graph and
        assumed uniform across the batch.
        """
        labels = torch.LongTensor(len(batch_graph))
        n_nodes = 0
        if batch_graph[0].node_tags is not None:
            node_tag_flag = True
            concat_tag = []
        else:
            node_tag_flag = False
        if batch_graph[0].node_features is not None:
            node_feat_flag = True
            concat_feat = []
        else:
            node_feat_flag = False
        # gather labels and per-node data across the whole batch
        for i in range(len(batch_graph)):
            labels[i] = batch_graph[i].label
            n_nodes += batch_graph[i].num_nodes
            if node_tag_flag:
                concat_tag += batch_graph[i].node_tags
            if node_feat_flag:
                tmp = batch_graph[i].node_features.type('torch.FloatTensor')
                concat_feat.append(tmp)
        if node_tag_flag:
            # one-hot encode the integer node tags
            concat_tag = torch.LongTensor(concat_tag).view(-1, 1)
            node_tag = torch.zeros(n_nodes, cmd_args.feat_dim)
            node_tag.scatter_(1, concat_tag, 1)
        if node_feat_flag:
            node_feat = torch.cat(concat_feat, 0)
        if node_feat_flag and node_tag_flag:
            # concatenate one-hot tags with continuous node features
            node_feat = torch.cat([node_tag.type_as(node_feat), node_feat], 1)
        elif node_feat_flag is False and node_tag_flag:
            node_feat = node_tag
        elif node_feat_flag and node_tag_flag is False:
            pass
        else:
            # no tags and no features: constant scalar feature per node
            node_feat = torch.ones(n_nodes, 1)
        # NOTE(review): `device` here is a module-level global, not the
        # parameter of forward() — confirm it is defined before this is called.
        node_feat = node_feat.to(device)
        labels = labels.to(device)
        return node_feat, labels
    def forward(self, batch_graph, device=torch.device('cpu')):
        """Run the batch through GXN + MLP.

        Returns logits, classification loss, accuracy, and the two
        mutual-information score tensors with their 1/0 target labels
        (first half positives, second half negatives).
        """
        node_feat, labels = self.PrepareFeatureLabel(batch_graph) # node_feat has shape [N, D] (for DD: n*82)
        N, D = node_feat.shape
        labels = labels.to(device)
        embed, ret_s1, ret_s2 = self.s2v(batch_graph, node_feat, None)
        # MI targets: ones for the positive pairs, zeros for the negatives
        lbl_t_s1 = torch.ones(N)
        lbl_f_s1 = torch.zeros(N)
        lbl_t_s2 = torch.ones(ret_s2.shape[0]//2)
        lbl_f_s2 = torch.zeros(ret_s2.shape[0]//2)
        milbl_s1 = torch.cat((lbl_t_s1, lbl_f_s1), 0).to(device)
        milbl_s2 = torch.cat((lbl_t_s2, lbl_f_s2), 0).to(device)
        logits, cls_loss, acc = self.mlp(embed, labels)
        return logits, cls_loss, acc, ret_s1, milbl_s1, ret_s2, milbl_s2
def loop_dataset(g_list, classifier, mi_loss, sample_idxes, epoch, optimizer=None,
                 bsize=cmd_args.batch_size, device=torch.device('cpu')):
    """Run one pass (train if ``optimizer`` is given, else eval) over the
    graphs indexed by ``sample_idxes``.

    The loss is ``cls_loss + mi_loss * (2 - epoch / num_epochs)`` — the
    mutual-information term is annealed from weight 2 down towards 1 over
    training. Returns ``[avg_cls_loss, avg_miloss, avg_loss, avg_acc, auc]``
    as a numpy array (AUC computed treating label 1 as positive, so it is
    only meaningful for binary tasks).
    """
    total_loss = []
    # During eval, round the iteration count up so a trailing partial batch
    # is still visited; during training the partial batch is dropped.
    total_iters = (len(sample_idxes) + (bsize - 1) * (optimizer is None)) // bsize
    pbar = tqdm(range(total_iters), unit='batch')
    all_targets = []
    all_scores = []
    n_samples = 0
    for pos in pbar:
        selected_idx = sample_idxes[pos * bsize: (pos + 1) * bsize]
        batch_graph = [g_list[idx] for idx in selected_idx]
        targets = [g_list[idx].label for idx in selected_idx]
        all_targets += targets
        logits, cls_loss, acc, ret_s1, milbl_s1, ret_s2, milbl_s2 = classifier(batch_graph, device)
        all_scores.append(logits[:, 1].detach())  # for binary classification
        # Auxiliary MI losses at the two scales, averaged.
        miloss_s1 = mi_loss[0](ret_s1, milbl_s1)
        miloss_s2 = mi_loss[1](ret_s2, milbl_s2)
        miloss = (miloss_s1 + miloss_s2)/2
        loss = cls_loss + miloss*(2-epoch/cmd_args.num_epochs)
        if optimizer is not None:
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        cls_loss = cls_loss.data.cpu().numpy()
        miloss = miloss.data.cpu().numpy()
        loss = loss.data.cpu().numpy()
        pbar.set_description('cls_loss: %0.5f miloss: %0.5f loss: %0.5f acc: %0.5f' % (cls_loss, miloss, loss, acc))
        # Weight per-batch stats by batch size so the final mean is exact.
        total_loss.append(np.array([cls_loss, miloss, loss, acc]) * len(selected_idx))
        n_samples += len(selected_idx)
    # Debug leftover: prints only the LAST batch's accuracy when evaluating.
    if optimizer is None:
        print(acc)
    # Eval must have covered every requested sample (no dropped partial batch).
    if optimizer is None:
        assert n_samples == len(sample_idxes)
    total_loss = np.array(total_loss)
    avg_loss = np.sum(total_loss, 0) / n_samples
    all_scores = torch.cat(all_scores).cpu().numpy()
    all_targets = np.array(all_targets)
    fpr, tpr, _ = metrics.roc_curve(all_targets, all_scores, pos_label=1)
    auc = metrics.auc(fpr, tpr)
    avg_loss = np.concatenate((avg_loss, [auc]))
    return avg_loss
def count_parameters(model):
    """Print every trainable parameter of *model* (name and shape/size) and
    return the total number of trainable scalar parameters."""
    total = 0
    for pname, tensor in model.named_parameters():
        if not tensor.requires_grad:
            continue
        count = np.prod(tensor.size())
        if tensor.dim() > 1:
            # Multi-dimensional tensors: show the full shape, e.g. "2x3 = 6".
            shape_str = 'x'.join(str(d) for d in tensor.size())
            print(pname, ':', shape_str, '=', count)
        else:
            print(pname, ':', count)
        total += count
    return total
def set_randomseed(seed):
    """Seed Python's `random`, NumPy and PyTorch RNGs for reproducibility."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(seed)
def model_run(cmd_args, g_list, device, foldidx, first_timstr):
    """Train (or, if ``cmd_args.weight`` is set, only evaluate) one fold.

    Side effects: creates ``./log_*``, ``./result_*`` and ``./checkpoint_*``
    directories, appends per-epoch metrics to a fold log file, saves a
    checkpoint whenever test accuracy improves, and appends the fold's best
    accuracy to a shared results file. Returns the best test accuracy.

    Raises ``ValueError('Stop Testing')`` on purpose after evaluation in
    weight-loading mode — it is used as an early-exit, not an error.
    """
    train_graphs, test_graphs = sep_data(cmd_args.data_root, g_list, foldidx)
    if cmd_args.sortpooling_k <= 1:
        # A fractional k is interpreted as a percentile of graph sizes:
        # pick k so that sortpooling_k-fraction of graphs have >= k nodes.
        num_nodes_list = sorted([g.num_nodes for g in train_graphs + test_graphs])
        cmd_args.sortpooling_k = num_nodes_list[int(math.ceil(cmd_args.sortpooling_k * len(num_nodes_list))) - 1]
        cmd_args.sortpooling_k = max(10, cmd_args.sortpooling_k)
        print('k used in SortPooling is: ' + str(cmd_args.sortpooling_k))
    classifier = Classifier().to(device)
    print("Number of Model Parameters: ", count_parameters(classifier))
    optimizer = optim.Adam(classifier.parameters(),
                           lr=cmd_args.learning_rate,
                           amsgrad=True,
                           weight_decay=0.001)
    train_idxes = list(range(len(train_graphs)))
    best_loss = None
    max_acc = 0.0
    # Two independent BCE criteria, one per MI scale (s1: node-level, s2: pooled).
    mi_loss = [nn.BCEWithLogitsLoss(), nn.BCEWithLogitsLoss()]
    timstr = datetime.datetime.now().strftime("%m%d-%H%M%S")
    logfile = './log_%s/log_%s/testlog_%s_%s.txt' % (cmd_args.data, first_timstr, cmd_args.data, timstr)
    if not os.path.exists('./log_%s/log_%s' % (cmd_args.data, first_timstr)):
        os.makedirs('./log_%s/log_%s' % (cmd_args.data, first_timstr))
    if not os.path.exists('./result_%s/result_%s' % (cmd_args.data, first_timstr)):
        os.makedirs('./result_%s/result_%s' % (cmd_args.data, first_timstr))
    with open('./result_%s/result_%s/acc_result_%s_%s.txt' % (cmd_args.data, first_timstr, cmd_args.data, first_timstr), 'a+') as f:
        f.write(str(cmd_args) + '\n')
    if not os.path.exists('./checkpoint_%s/time_%s/FOLD%s' % (cmd_args.data, first_timstr, foldidx)):
        os.makedirs('./checkpoint_%s/time_%s/FOLD%s' % (cmd_args.data, first_timstr, foldidx))
    if cmd_args.weight is not None:
        # Evaluation-only mode: load a checkpoint, score the test split once,
        # log the result, then bail out via the sentinel exception below.
        classifier.load_state_dict(torch.load(cmd_args.weight))
        classifier.eval()
        test_loss = loop_dataset(test_graphs, classifier, mi_loss, list(range(len(test_graphs))), epoch=0, device=device)
        with open(logfile, 'a+') as log:
            log.write('clsloss: %.5f miloss: %.5f loss %.5f acc %.5f auc %.5f'
                      % (test_loss[0], test_loss[1], test_loss[2], test_loss[3], test_loss[4]) + '\n')
        print('Best Acc:', test_loss[3])
        raise ValueError('Stop Testing')
    with open(logfile, 'a+') as log:
        log.write(str(cmd_args) + '\n')
        log.write('Fold index: ' + str(foldidx) + '\n')
    for epoch in range(cmd_args.num_epochs):
        random.shuffle(train_idxes)
        classifier.train()
        avg_loss = loop_dataset(train_graphs, classifier, mi_loss, train_idxes, epoch, optimizer=optimizer, device=device)
        # AUC slot is zeroed: it is not tracked per-epoch in this script.
        avg_loss[4] = 0.0
        print('\033[92maverage training of epoch %d: clsloss: %.5f miloss: %.5f loss %.5f acc %.5f auc %.5f\033[0m'
              % (epoch, avg_loss[0], avg_loss[1], avg_loss[2], avg_loss[3], avg_loss[4]))  # noqa
        classifier.eval()
        test_loss = loop_dataset(test_graphs, classifier, mi_loss, list(range(len(test_graphs))), epoch, device=device)
        test_loss[4] = 0.0
        print('\033[93maverage test of epoch %d: clsloss: %.5f miloss: %.5f loss %.5f acc %.5f auc %.5f\033[0m'
              % (epoch, test_loss[0], test_loss[1], test_loss[2], test_loss[3], test_loss[4]))  # noqa
        with open(logfile, 'a+') as log:
            log.write('test of epoch %d: clsloss: %.5f miloss: %.5f loss %.5f acc %.5f auc %.5f'
                      % (epoch, test_loss[0], test_loss[1], test_loss[2], test_loss[3], test_loss[4]) + '\n')
        if test_loss[3] > max_acc:
            # New best test accuracy: checkpoint this epoch's weights.
            max_acc = test_loss[3]
            fname = './checkpoint_%s/time_%s/FOLD%s/model_epoch%s.pt' % (cmd_args.data, first_timstr, foldidx, str(epoch))
            torch.save(classifier.state_dict(), fname)
    with open('./result_%s/result_%s/acc_result_%s_%s.txt' % (cmd_args.data, first_timstr, cmd_args.data, first_timstr), 'a+') as f:
        f.write('\n')
        f.write('Fold index: ' + str(foldidx) + '\t')
        f.write(str(max_acc) + '\n')
    if cmd_args.extract_features:
        # NOTE(review): Classifier does not define output_features() in this
        # file — confirm it exists before enabling extract_features.
        features, labels = classifier.output_features(train_graphs)
        labels = labels.type('torch.FloatTensor')
        np.savetxt('extracted_features_train.txt', torch.cat([labels.unsqueeze(1), features.cpu()], dim=1).detach().numpy(), '%.4f')
        features, labels = classifier.output_features(test_graphs)
        labels = labels.type('torch.FloatTensor')
        np.savetxt('extracted_features_test.txt', torch.cat([labels.unsqueeze(1), features.cpu()], dim=1).detach().numpy(), '%.4f')
    return max_acc
if __name__ == '__main__':
    # Entry point: seed all RNGs, load the requested dataset, and run a
    # single-fold train/eval cycle for the fold given on the command line.
    set_randomseed(cmd_args.seed)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    first_timstr = datetime.datetime.now().strftime("%m%d-%H%M%S")
    # Datasets with real node labels keep them; the social/graph-only
    # datasets use node degree as a surrogate tag.
    if cmd_args.data in ['DD', 'PROTEINS']:
        g_list = load_data(cmd_args.data_root, degree_as_tag=False)
    elif cmd_args.data in ['COLLAB', 'IMDBBINARY', 'IMDBMULTI', 'ENZYMES']:
        g_list = load_data(cmd_args.data_root, degree_as_tag=True)
    else:
        raise ValueError('No such dataset')
    # print('# train: %d, # test: %d' % (len(train_graphs), len(test_graphs)))
    print('# num of classes: ', cmd_args.num_class)
    print('Lets start a single-fold validation')
    print('start training ------> fold', cmd_args.fold)
    model_run(cmd_args, g_list, device, cmd_args.fold, first_timstr)
| 12,080 | 39.676768 | 136 | py |
GXN | GXN-main/network.py | from __future__ import print_function
import os
import ops
import sys
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
sys.path.append('%s/pytorch_structure2vec-master/s2v_lib' % os.path.dirname(os.path.realpath(__file__)))
from s2v_lib import S2VLIB # noqa
from pytorch_util import weights_init, gnn_spmm # noqa
class GXN(nn.Module):
    """Graph Cross Network backbone with a DGCNN-style SortPooling readout.

    Node features are embedded by ``ops.GraphCrossnet``, the top-``k`` nodes
    (ranked by the last embedding channel) are kept per graph, and two 1-D
    convolutions over the flattened pooled features produce a fixed-size
    graph representation. ``forward`` returns ``(graph_repr, ret_s1, ret_s2)``
    where the ``ret_*`` tensors are the backbone's MI-discriminator logits.

    Fixes vs. the original:
    - ``conv1d_kws`` had a mutable default list that ``__init__`` wrote into
      (``conv1d_kws[0] = ...``), leaking state across instantiations; it is
      now copied before mutation.
    - stripped non-Python residue that had been fused onto the final line.
    """

    def __init__(self, output_dim, num_node_feats, num_edge_feats,
                 latent_dim=[32, 32, 32, 1], k=30, ks=[0.9,0.7],
                 conv1d_channels=[16, 32],
                 conv1d_kws=[0, 5],
                 cross_weight=1.0, fuse_weight=1.0, R=1):
        print('Initializing GXN')
        super(GXN, self).__init__()
        self.latent_dim = latent_dim
        self.output_dim = output_dim
        self.num_node_feats = num_node_feats
        self.num_edge_feats = num_edge_feats
        self.k = k
        self.total_latent_dim = sum(latent_dim)
        # Copy before mutating: the default argument is a shared list, and
        # the in-place write below would otherwise persist across instances.
        conv1d_kws = list(conv1d_kws)
        # First 1-D conv reads one node's full feature vector per step.
        conv1d_kws[0] = self.total_latent_dim
        self.conv_params = nn.ModuleList()
        self.conv_params.append(nn.Linear(num_node_feats, latent_dim[0]))
        for i in range(1, len(latent_dim)):
            self.conv_params.append(nn.Linear(latent_dim[i-1], latent_dim[i]))
        self.conv1d_params1 = nn.Conv1d(1, conv1d_channels[0], conv1d_kws[0], conv1d_kws[0])
        self.maxpool1d = nn.MaxPool1d(2, 2)
        self.conv1d_params2 = nn.Conv1d(conv1d_channels[0], conv1d_channels[1], conv1d_kws[1], 1)
        # Flattened size after conv1 -> maxpool -> conv2 over k nodes.
        dense_dim = int((k-2)/2+1)
        self.dense_dim = (dense_dim-conv1d_kws[1]+1)*conv1d_channels[1]
        if num_edge_feats > 0:
            # NOTE(review): latent_dim is a list here, so this nn.Linear call
            # would fail if ever reached (likely intended latent_dim[0]).
            # Unreached in this project: Classifier passes num_edge_feats=0.
            self.w_e2l = nn.Linear(num_edge_feats, latent_dim)
        if output_dim > 0:
            self.out_params = nn.Linear(self.dense_dim, output_dim)
        self.ks = ks
        self.gxn = ops.GraphCrossnet(ks, num_node_feats, 97, cross_weight=cross_weight, fuse_weight=fuse_weight, R=R)
        weights_init(self)

    def forward(self, graph_list, node_feat, edge_feat):
        """Build the batched sparse operators for ``graph_list`` and run the
        SortPooling embedding. ``edge_feat`` may be None."""
        device = torch.device(node_feat.device)
        graph_sizes = [graph_list[i].num_nodes for i in range(len(graph_list))]
        # Degree + 1 accounts for the added self-loop in the propagation.
        node_degs = [torch.Tensor(graph_list[i].degs)+1 for i in range(len(graph_list))]
        node_degs = torch.cat(node_degs).unsqueeze(1)
        n2n_sp, e2n_sp, subg_sp = S2VLIB.PrepareMeanField(graph_list)
        n2n_sp = n2n_sp.to(device)
        e2n_sp = e2n_sp.to(device)
        subg_sp = subg_sp.to(device)
        node_degs = node_degs.to(device)
        node_feat = Variable(node_feat)
        if edge_feat is not None:
            edge_feat = Variable(edge_feat)
        n2n_sp = Variable(n2n_sp)
        e2n_sp = Variable(e2n_sp)
        subg_sp = Variable(subg_sp)
        node_degs = Variable(node_degs)
        h = self.sortpooling_embedding(node_feat, edge_feat, n2n_sp, e2n_sp, subg_sp, graph_sizes, node_degs)
        return h

    def sortpooling_embedding(self, node_feat, edge_feat, n2n_sp, e2n_sp, subg_sp, graph_sizes, node_degs):
        """Embed nodes (GXN path), SortPool the top-k nodes of each graph,
        and reduce with two 1-D convolutions to a dense graph vector.

        Returns ``(graph_repr, ret_s1, ret_s2)``.
        """
        device = torch.device(node_feat.device)
        ''' if exists edge feature, concatenate to node feature vector '''
        if edge_feat is not None:
            input_edge_linear = self.w_e2l(edge_feat)
            e2npool_input = gnn_spmm(e2n_sp, input_edge_linear)
            node_feat = torch.cat([node_feat, e2npool_input], 1)
        ''' graph convolution layers '''
        A = ops.spec_normalize_adj(n2n_sp).to(device)
        # ver == 2 is hard-wired: the GXN backbone replaces the legacy DGCNN
        # message-passing stack kept (dead) in the else branch below.
        ver = 2
        if ver == 2:
            cur_message_layer, ret_s1, ret_s2 = self.gxn(A, node_feat)
        else:
            lv = 0
            cur_message_layer = node_feat
            cat_message_layers = []
            while lv < len(self.latent_dim):
                n2npool = gnn_spmm(n2n_sp, cur_message_layer) + cur_message_layer  # noqa
                node_linear = self.conv_params[lv](n2npool)  # Y = Y * W
                normalized_linear = node_linear.div(node_degs)  # Y = D^-1 * Y
                cur_message_layer = F.tanh(normalized_linear)
                cat_message_layers.append(cur_message_layer)
                lv += 1
            cur_message_layer = torch.cat(cat_message_layers, 1)
        ''' sortpooling layer '''
        # Rank nodes within each graph by their last embedding channel.
        sort_channel = cur_message_layer[:, -1]
        batch_sortpooling_graphs = torch.zeros(len(graph_sizes), self.k, self.total_latent_dim).to(device)
        batch_sortpooling_graphs = Variable(batch_sortpooling_graphs)
        accum_count = 0
        for i in range(subg_sp.size()[0]):
            to_sort = sort_channel[accum_count: accum_count + graph_sizes[i]]
            # Graphs smaller than k are zero-padded up to k rows.
            k = self.k if self.k <= graph_sizes[i] else graph_sizes[i]
            _, topk_indices = to_sort.topk(k)
            topk_indices += accum_count
            sortpooling_graph = cur_message_layer.index_select(0, topk_indices)
            if k < self.k:
                to_pad = torch.zeros(self.k-k, self.total_latent_dim).to(device)
                to_pad = Variable(to_pad)
                sortpooling_graph = torch.cat((sortpooling_graph, to_pad), 0)
            batch_sortpooling_graphs[i] = sortpooling_graph
            accum_count += graph_sizes[i]
        ''' traditional 1d convlution and dense layers '''
        to_conv1d = batch_sortpooling_graphs.view((-1, 1, self.k * self.total_latent_dim))
        conv1d_res = self.conv1d_params1(to_conv1d)
        conv1d_res = F.relu(conv1d_res)
        conv1d_res = self.maxpool1d(conv1d_res)
        conv1d_res = self.conv1d_params2(conv1d_res)
        conv1d_res = F.relu(conv1d_res)
        to_dense = conv1d_res.view(len(graph_sizes), -1)
        if self.output_dim > 0:
            out_linear = self.out_params(to_dense)
            reluact_fp = F.relu(out_linear)
        else:
            reluact_fp = to_dense
        return F.relu(reluact_fp), ret_s1, ret_s2
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.