// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#include "convolution1d_arm.h"
#if __ARM_NEON
#include <arm_neon.h>
#endif // __ARM_NEON
#include "arm_activation.h"
#include "arm_usability.h"
#include "cpu.h"
namespace ncnn {
#if __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
#include "convolution1d_packed_fp16s.h"
int Convolution1D_arm::create_pipeline_fp16s(const Option& opt)
{
    const int num_input = weight_data_size / kernel_w / num_output;

    convolution1d_transform_kernel_packed_fp16s(weight_data, weight_data_tm, num_input, num_output, kernel_w);

    ncnn::cast_float32_to_float16(bias_data, bias_data_fp16, opt);

    return 0;
}
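
// fp16 storage path: activations and weights are held in fp16, while
// accumulation and bias addition stay in fp32 (note the fp32 bias_data
// passed below).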
int Convolution1D_arm::forward_fp16s(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const
{
    int w = bottom_blob.w;
    size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;
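
    // effective kernel footprint once dilation is applied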
    const int kernel_extent_w = dilation_w * (kernel_w - 1) + 1;

    Mat bottom_blob_bordered;
    make_padding(bottom_blob, bottom_blob_bordered, opt);
    if (bottom_blob_bordered.empty())
        return -100;

    w = bottom_blob_bordered.w;
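
    // pack 4 output channels per element when the packing layout is enabled
    // and num_output is a multiple of 4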
    int out_elempack = (opt.use_packing_layout && num_output % 4 == 0) ? 4 : 1;
    size_t out_elemsize = elemsize / elempack * out_elempack;
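
    // sliding-window output width for the dilated, strided kernel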
    const int outw = (w - kernel_extent_w) / stride_w + 1;
    const int outh = num_output / out_elempack;

    top_blob.create(outw, outh, out_elemsize, out_elempack, opt.blob_allocator);
    if (top_blob.empty())
        return -100;

    convolution1d_packed_fp16s(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, kernel_w, dilation_w, stride_w, activation_type, activation_params, opt);

    return 0;
}
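
// fp16 arithmetic path: both storage and accumulation are fp16, so the
// fp16 copy of the bias prepared in create_pipeline_fp16s is used.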
int Convolution1D_arm::forward_fp16sa(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const
{
    int w = bottom_blob.w;
    size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;

    const int kernel_extent_w = dilation_w * (kernel_w - 1) + 1;

    Mat bottom_blob_bordered;
    make_padding(bottom_blob, bottom_blob_bordered, opt);
    if (bottom_blob_bordered.empty())
        return -100;

    w = bottom_blob_bordered.w;
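
    // prefer packing 8 output channels per element when fp16 arithmetic is
    // available, otherwise fall back to 4 or 1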
    int out_elempack = 1;
    if (opt.use_packing_layout)
    {
        out_elempack = opt.use_fp16_arithmetic && num_output % 8 == 0 ? 8 : num_output % 4 == 0 ? 4 : 1;
    }
    size_t out_elemsize = elemsize / elempack * out_elempack;

    const int outw = (w - kernel_extent_w) / stride_w + 1;
    const int outh = num_output / out_elempack;

    top_blob.create(outw, outh, out_elemsize, out_elempack, opt.blob_allocator);
    if (top_blob.empty())
        return -100;

    convolution1d_packed_fp16sa(bottom_blob_bordered, top_blob, weight_data_tm, bias_data_fp16, kernel_w, dilation_w, stride_w, activation_type, activation_params, opt);

    return 0;
}

#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC

} // namespace ncnn