#include "convolution1d_arm.h"

#if __ARM_NEON
#include <arm_neon.h>
#endif

#include "arm_activation.h"
#include "arm_usability.h"

#include "cpu.h"

namespace ncnn {

#if __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
#include "convolution1d_packed_fp16s.h"

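// Prepare the fp16 weights once at load time so that every forward call can
// use the packed fp16 kernels directly.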
int Convolution1D_arm::create_pipeline_fp16s(const Option& opt)
{
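    // weight_data holds num_output * num_input * kernel_w values, so the
    // input channel count can be recovered from the total element count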
    const int num_input = weight_data_size / kernel_w / num_output;

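    // repack the fp32 weights into the fp16 layout expected by the packed kernels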
    convolution1d_transform_kernel_packed_fp16s(weight_data, weight_data_tm, num_input, num_output, kernel_w);

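    // keep an fp16 copy of the bias for the fp16 arithmetic path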
    ncnn::cast_float32_to_float16(bias_data, bias_data_fp16, opt);

    return 0;
}

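// fp16 storage path: activations and weights are held as fp16 in memory while
// the fp32 bias is passed through unchanged.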
int Convolution1D_arm::forward_fp16s(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const
{
    int w = bottom_blob.w;
    size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;

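    // span of the kernel once dilation is applied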
    const int kernel_extent_w = dilation_w * (kernel_w - 1) + 1;

    Mat bottom_blob_bordered;
    make_padding(bottom_blob, bottom_blob_bordered, opt);
    if (bottom_blob_bordered.empty())
        return -100;

    w = bottom_blob_bordered.w;

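    // pack four output channels per element when packing is enabled and the
    // channel count allows it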
    int out_elempack = (opt.use_packing_layout && num_output % 4 == 0) ? 4 : 1;
    size_t out_elemsize = elemsize / elempack * out_elempack;

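    // standard 1-D convolution output width, e.g. w = 32, kernel_extent_w = 3,
    // stride_w = 1 gives outw = 30; outh is one row per packed group of output channels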
    const int outw = (w - kernel_extent_w) / stride_w + 1;
    const int outh = num_output / out_elempack;

    top_blob.create(outw, outh, out_elemsize, out_elempack, opt.blob_allocator);
    if (top_blob.empty())
        return -100;

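    // the packed kernel also applies the activation described by
    // activation_type / activation_params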
    convolution1d_packed_fp16s(bottom_blob_bordered, top_blob, weight_data_tm, bias_data, kernel_w, dilation_w, stride_w, activation_type, activation_params, opt);

    return 0;
}

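// fp16 arithmetic path: computation runs in fp16 as well, using the
// pre-converted fp16 bias from create_pipeline_fp16s.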
int Convolution1D_arm::forward_fp16sa(const Mat& bottom_blob, Mat& top_blob, const Option& opt) const
{
    int w = bottom_blob.w;
    size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;

    const int kernel_extent_w = dilation_w * (kernel_w - 1) + 1;

    Mat bottom_blob_bordered;
    make_padding(bottom_blob, bottom_blob_bordered, opt);
    if (bottom_blob_bordered.empty())
        return -100;

    w = bottom_blob_bordered.w;

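    // fp16 arithmetic allows packing eight output channels per element,
    // e.g. num_output = 16 gives out_elempack = 8; otherwise fall back to 4 or 1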
    int out_elempack = 1;
    if (opt.use_packing_layout)
    {
        out_elempack = opt.use_fp16_arithmetic && num_output % 8 == 0 ? 8 : num_output % 4 == 0 ? 4 : 1;
    }
    size_t out_elemsize = elemsize / elempack * out_elempack;

    const int outw = (w - kernel_extent_w) / stride_w + 1;
    const int outh = num_output / out_elempack;

    top_blob.create(outw, outh, out_elemsize, out_elempack, opt.blob_allocator);
    if (top_blob.empty())
        return -100;

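    // the packed kernel consumes the fp16 bias and applies the activation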
    convolution1d_packed_fp16sa(bottom_blob_bordered, top_blob, weight_data_tm, bias_data_fp16, kernel_w, dilation_w, stride_w, activation_type, activation_params, opt);

    return 0;
}
#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC

} // namespace ncnn