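  // Output buffers for the two external ops: an ExternalCall writes its
  // result into a buffer that is declared ahead of time.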
  BufHandle ConvResultBuf("ConvResult", {1, 16, 32, 32}, kFloat);
  BufHandle MatmulResultBuf("MatmulResult", {1, 16, 32, 32}, kFloat);

  Tensor Input = Compute(
      "Input",
      {1, 16, 32, 32},
      [&](const VarHandle& n,
          const VarHandle& c,
          const VarHandle& h,
          const VarHandle& w) { return FloatImm::make(5.0f); });
  Tensor Weight = Compute(
      "Weight",
      {16, 16, 1, 1},
      [&](const VarHandle& n,
          const VarHandle& c,
          const VarHandle& h,
          const VarHandle& w) { return FloatImm::make(6.0f); });
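  // ExternalCall nodes call a runtime function registered under the given
  // name instead of emitting loops; "nnc_aten_conv2d" and "nnc_aten_matmul"
  // dispatch to the corresponding ATen kernels when the generated code runs.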
  Tensor ConvResult = Tensor(
      ConvResultBuf.node(),
      ExternalCall::make(
          ConvResultBuf,
          "nnc_aten_conv2d",
          {BufHandle(Input.buf()), BufHandle(Weight.buf())},
          {}));
  Tensor MatmulResult = Tensor(
      MatmulResultBuf.node(),
      ExternalCall::make(
          MatmulResultBuf,
          "nnc_aten_matmul",
          {BufHandle(ConvResult.buf()), BufHandle(ConvResult.buf())},
          {}));
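  // A regular Compute tensor can consume the ExternalCall outputs directly
  // via load().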
  Tensor Result = Compute(
      "Result",
      {1, 16, 32, 32},
      [&](const VarHandle& n,
          const VarHandle& c,
          const VarHandle& h,
          const VarHandle& w) {
        return ConvResult.load(n, c, h, w) + MatmulResult.load(n, c, h, w);
      });
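  // Lower the program: build a loop nest over all five tensors and prepare it
  // for code generation.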
  LoopNest l({Input, Weight, ConvResult, MatmulResult, Result});
  // Inlining should not inline anything here, since every Buf is either
  // defined by or used in an ExternalCall; we run the pass just to exercise it.
  l.inlineIntermediateBufs(true);
  l.prepareForCodegen();
  l.simplify();
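  // Compute a reference result with eager ATen ops to validate against.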
  auto options = at::TensorOptions()
                     .dtype(at::kFloat)
                     .layout(at::kStrided)
                     .device(at::kCPU)
                     .requires_grad(false);
  at::Tensor input = at::ones({1, 16, 32, 32}, options) * 5.f;
  at::Tensor weight = at::ones({16, 16, 1, 1}, options) * 6.f;
  at::Tensor t = at::conv2d(input, weight);
  at::Tensor t2 = at::matmul(t, t);
  at::Tensor ref = t + t2;
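  // Flat buffers bound to the codegen parameters; the output buffers are
  // poisoned with -1.f so stale values would be detected.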
  at::Tensor nnc_result;
  std::vector<float> input_buf(1 * 16 * 32 * 32, 5.f);
  std::vector<float> weight_buf(16 * 16 * 1 * 1, 6.f);
  std::vector<float> conv_result_buf(1 * 16 * 32 * 32, -1.f);
  std::vector<float> matmul_result_buf(1 * 16 * 32 * 32, -1.f);
  std::vector<float> result_buf(1 * 16 * 32 * 32, -1.f);
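  // Check the LLVM backend when it is compiled in.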
#ifdef TORCH_ENABLE_LLVM
  LLVMCodeGen llvm_codegen(
      l.root_stmt(), {Input, Weight, ConvResult, MatmulResult, Result});

  llvm_codegen.call(
      {input_buf, weight_buf, conv_result_buf, matmul_result_buf, result_buf});
  nnc_result = at::from_blob(result_buf.data(), {1, 16, 32, 32}, options);
  ASSERT_TRUE(at::allclose(nnc_result, ref));
#endif
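  // The SimpleIREvaluator interprets the same IR directly, so this path runs
  // regardless of the build configuration.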
  SimpleIREvaluator ir_eval(
      l.root_stmt(), {Input, Weight, ConvResult, MatmulResult, Result});
  ir_eval.call(
      {input_buf, weight_buf, conv_result_buf, matmul_result_buf, result_buf});
  nnc_result = at::from_blob(result_buf.data(), {1, 16, 32, 32}, options);
  ASSERT_TRUE(at::allclose(nnc_result, ref));
}

TEST(ExternalCall, Inlining) {
  // This test verifies that Tensors using external calls can be used by, and
  // can use, Tensors built with the Compute API.

  BufHandle MatmulResultBuf("MatmulResult", {8, 8}, kFloat);

  Tensor A = Compute("A", {8, 8}, [&](const VarHandle& i, const VarHandle& j) {
    return FloatImm::make(5.0f);
  });
  Tensor B = Compute("B", {8, 8}, [&](const VarHandle& i, const VarHandle& j) {
    return FloatImm::make(4.0f);
  });