Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes. See raw diff
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/aarch64/acl_gemm_convolution.hpp +156 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/binary_injector_utils.hpp +75 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/cpu_batch_normalization_pd.hpp +41 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/cpu_batch_normalization_utils.hpp +44 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/cpu_binary_pd.hpp +39 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/cpu_concat_pd.hpp +42 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/cpu_convolution_pd.hpp +83 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/cpu_deconvolution_pd.hpp +48 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/cpu_eltwise_pd.hpp +46 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/cpu_engine.hpp +183 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/cpu_group_normalization_pd.hpp +39 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/cpu_inner_product_pd.hpp +323 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/cpu_layer_normalization_pd.hpp +41 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/cpu_lrn_pd.hpp +43 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/cpu_memory_storage.hpp +108 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/cpu_pooling_pd.hpp +41 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/cpu_prelu_pd.hpp +43 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/cpu_primitive.hpp +133 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/cpu_reduction_pd.hpp +34 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/cpu_resampling_pd.hpp +41 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/cpu_shuffle_pd.hpp +42 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/cpu_softmax_pd.hpp +46 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/cpu_stream.hpp +64 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/cpu_sum_pd.hpp +40 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/dw_convolution_utils.hpp +131 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/gemm_convolution.hpp +229 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/gemm_convolution_utils.hpp +139 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/gemm_inner_product.hpp +200 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/gemm_inner_product_utils.hpp +115 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/gemm_x8s8s32x_conv_zp_src_pad_comp.hpp +42 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/gemm_x8s8s32x_convolution.hpp +181 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/gemm_x8s8s32x_convolution_utils.hpp +62 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/gemm_x8s8s32x_inner_product.hpp +127 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/nchw_pooling.hpp +201 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/ncsp_batch_normalization.hpp +211 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/ncsp_group_normalization.hpp +106 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/nhwc_pooling.hpp +204 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/nspc_batch_normalization.hpp +206 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/platform.hpp +197 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/primitive_attr_postops.hpp +96 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/ref_batch_normalization.hpp +141 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/ref_binary.hpp +94 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/ref_concat.hpp +189 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/ref_convolution.hpp +203 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/ref_convolution_int8.hpp +163 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/ref_convolution_utils.hpp +67 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/ref_deconvolution.hpp +554 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/ref_eltwise.hpp +181 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/ref_group_normalization.hpp +143 -0
- videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/ref_inner_product.hpp +175 -0
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/aarch64/acl_gemm_convolution.hpp
ADDED
|
@@ -0,0 +1,156 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*******************************************************************************
|
| 2 |
+
* Copyright 2020-2023 Arm Ltd. and affiliates
|
| 3 |
+
*
|
| 4 |
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
* you may not use this file except in compliance with the License.
|
| 6 |
+
* You may obtain a copy of the License at
|
| 7 |
+
*
|
| 8 |
+
* http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
*
|
| 10 |
+
* Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
* See the License for the specific language governing permissions and
|
| 14 |
+
* limitations under the License.
|
| 15 |
+
*******************************************************************************/
|
| 16 |
+
|
| 17 |
+
#ifndef CPU_AARCH64_ACL_GEMM_CONVOLUTION_HPP
|
| 18 |
+
#define CPU_AARCH64_ACL_GEMM_CONVOLUTION_HPP
|
| 19 |
+
|
| 20 |
+
#include "cpu/cpu_convolution_pd.hpp"
|
| 21 |
+
|
| 22 |
+
#include "cpu/aarch64/acl_convolution_utils.hpp"
|
| 23 |
+
#include "cpu/aarch64/acl_post_ops.hpp"
|
| 24 |
+
|
| 25 |
+
namespace dnnl {
|
| 26 |
+
namespace impl {
|
| 27 |
+
namespace cpu {
|
| 28 |
+
namespace aarch64 {
|
| 29 |
+
|
| 30 |
+
// Per-thread resource holding the Arm Compute Library (ACL) GEMM convolution
// object and its I/O tensors. Created once per primitive via the resource
// mapper; not copyable (see DNNL_DISALLOW_COPY_AND_ASSIGN below).
struct acl_resource_t : public resource_t {
    // Eagerly allocates the wrapped ACL object; allocation failure is
    // detected later in configure() (acl_obj_ would be null).
    acl_resource_t()
        : acl_obj_(utils::make_unique<
                acl_obj_t<arm_compute::NEGEMMConvolutionLayer>>()) {}

    // Binds tensor metadata from the primitive-descriptor config `acp` to the
    // ACL tensors and configures the NEGEMMConvolutionLayer.
    // Returns status::out_of_memory if the ACL object failed to allocate.
    status_t configure(const acl_conv_conf_t &acp) {
        if (!acl_obj_) return status::out_of_memory;

        // Init Compute Library tensors based on info from descriptor
        acl_obj_->src_tensor.allocator()->init(acp.src_tensor_info);
        acl_obj_->wei_tensor.allocator()->init(acp.wei_tensor_info);
        acl_obj_->dst_tensor.allocator()->init(acp.dst_tensor_info);
        acl_obj_->bia_tensor.allocator()->init(acp.bia_tensor_info);

        // Bias tensor is passed only when the descriptor has a bias;
        // ACL accepts nullptr for the no-bias case.
        acl_obj_->conv.configure(&acl_obj_->src_tensor, &acl_obj_->wei_tensor,
                acp.with_bias ? &acl_obj_->bia_tensor : nullptr,
                &acl_obj_->dst_tensor, acp.padstride_info, acp.weights_info,
                acp.dilation_info, acp.act_info, acp.fast_math);
        return status::success;
    }

    // Accessor used at execution time to reach the configured ACL object.
    acl_obj_t<arm_compute::NEGEMMConvolutionLayer> &get_acl_obj() const {
        return *acl_obj_;
    }

    DNNL_DISALLOW_COPY_AND_ASSIGN(acl_resource_t);

private:
    // Sole owner of the ACL convolution object and its tensors.
    std::unique_ptr<acl_obj_t<arm_compute::NEGEMMConvolutionLayer>> acl_obj_;

}; // acl_resource_t
|
| 61 |
+
|
| 62 |
+
// Forward convolution primitive backed by Arm Compute Library's GEMM-based
// NEGEMMConvolutionLayer. The four data types (src/wei/dst/bias) are template
// parameters; by default they all follow src_type.
template <data_type_t src_type, data_type_t wei_type = src_type,
        data_type_t dst_type = src_type, data_type_t bia_type = dst_type>
struct acl_gemm_convolution_fwd_t : public primitive_t {
    // Primitive descriptor: validates the problem and builds the ACL config.
    struct pd_t : public cpu_convolution_fwd_pd_t {
        pd_t(const convolution_desc_t *adesc, const primitive_attr_t *attr,
                const typename pd_t::base_class *hint_fwd_pd)
            : cpu_convolution_fwd_pd_t(adesc, attr, hint_fwd_pd), acp_() {}

        DECLARE_COMMON_PD_T(
                "gemm:acl", acl_gemm_convolution_fwd_t, USE_GLOBAL_SCRATCHPAD);

        // Rejects unsupported cases, then fills acp_ and the post-op handler.
        // Returns status::unimplemented when this impl cannot serve the desc.
        status_t init(engine_t *engine) {
            using namespace data_type;
            using smask_t = primitive_attr_t::skip_mask_t;

            // Supported: forward direct conv with the exact template data
            // types, non-empty tensors, and only post-op attributes (plus
            // the quantization checks below).
            bool ok = is_fwd()
                    && set_default_alg_kind(alg_kind::convolution_direct)
                    && expect_data_types(
                            src_type, wei_type, bia_type, dst_type, undef)
                    && !has_zero_dim_memory()
                    && attr()->has_default_values(smask_t::post_ops, dst_type)
                    && output_scales_mask_ok() && zero_points_ok();
            if (!ok) return status::unimplemented;

            // Translate oneDNN descriptors into the ACL configuration.
            CHECK(acl_convolution_utils::init_conf_gemm(acp_, src_md_,
                    weights_md_, dst_md_, bias_md_, *desc(), *attr()));

            // Post-ops may be fused into the ACL activation (acp_.act_info).
            CHECK(post_ops.init(
                    engine, attr_.post_ops_, dst_md_, acp_.act_info));
            // A sum post-op requires accumulating into a separate dst buffer.
            acp_.use_dst_acc = post_ops.has_sum();

            return status::success;
        }

        // ACL configuration assembled by init().
        acl_conv_conf_t acp_;

        // Handler for the attribute post-ops chain.
        acl_post_ops_t post_ops;

    protected:
        // Output scales supported only for int8 src (s8/u8) and only with
        // a common (mask == 0) scale.
        bool output_scales_mask_ok() const {
            using namespace data_type;
            const auto &mask = attr()->output_scales_.mask_;
            return IMPLICATION(!utils::one_of(src_type, s8, u8),
                           attr()->output_scales_.has_default_values())
                    // TODO: add support for per_channel quantization
                    && mask == 0;
        }

        // Only symmetric quantization (no zero points) is supported.
        bool zero_points_ok() const {
            using namespace data_type;
            // TODO: add support for asymmetric quantization
            return attr()->zero_points_.has_default_values();
        }
    };

    acl_gemm_convolution_fwd_t(const pd_t *apd) : primitive_t(apd) {}

    // Creates (once) and registers the acl_resource_t for this primitive,
    // then lets the post-ops create their own resources.
    status_t create_resource(
            engine_t *engine, resource_mapper_t &mapper) const override {
        if (mapper.has_resource(this)) return status::success;

        auto r = utils::make_unique<acl_resource_t>();
        if (!r) return status::out_of_memory;

        // Configure the resource based on information from primitive descriptor
        CHECK(r->configure(pd()->acp_));
        mapper.add(this, std::move(r));

        CHECK(pd()->post_ops.create_resource(engine, mapper));

        return status::success;
    }

    // Concrete C++ types corresponding to the template data_type_t params.
    typedef typename prec_traits<src_type>::type src_data_t;
    typedef typename prec_traits<wei_type>::type wei_data_t;
    typedef typename prec_traits<dst_type>::type dst_data_t;
    typedef typename prec_traits<bia_type>::type bia_data_t;

    status_t execute(const exec_ctx_t &ctx) const override {
        return execute_forward(ctx);
    }

private:
    // To guard the const execute_forward(), the mutex must be 'mutable'
    mutable std::mutex mtx;
    // Defined out-of-line (not visible in this header).
    status_t execute_forward(const exec_ctx_t &ctx) const;
    const pd_t *pd() const { return (const pd_t *)primitive_t::pd().get(); }
}; // acl_gemm_convolution_fwd_t
|
| 150 |
+
|
| 151 |
+
} // namespace aarch64
|
| 152 |
+
} // namespace cpu
|
| 153 |
+
} // namespace impl
|
| 154 |
+
} // namespace dnnl
|
| 155 |
+
|
| 156 |
+
#endif
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/binary_injector_utils.hpp
ADDED
|
@@ -0,0 +1,75 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*******************************************************************************
|
| 2 |
+
* Copyright 2020-2023 Intel Corporation
|
| 3 |
+
*
|
| 4 |
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
* you may not use this file except in compliance with the License.
|
| 6 |
+
* You may obtain a copy of the License at
|
| 7 |
+
*
|
| 8 |
+
* http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
*
|
| 10 |
+
* Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
* See the License for the specific language governing permissions and
|
| 14 |
+
* limitations under the License.
|
| 15 |
+
*******************************************************************************/
|
| 16 |
+
|
| 17 |
+
#ifndef CPU_BINARY_INJECTOR_UTILS_HPP
|
| 18 |
+
#define CPU_BINARY_INJECTOR_UTILS_HPP
|
| 19 |
+
|
| 20 |
+
#include <tuple>
|
| 21 |
+
#include <vector>
|
| 22 |
+
|
| 23 |
+
#include "common/broadcast_strategy.hpp"
|
| 24 |
+
#include "common/c_types_map.hpp"
|
| 25 |
+
#include "common/primitive_attr.hpp"
|
| 26 |
+
#include "common/primitive_exec_types.hpp"
|
| 27 |
+
|
| 28 |
+
namespace dnnl {
|
| 29 |
+
namespace impl {
|
| 30 |
+
namespace cpu {
|
| 31 |
+
namespace binary_injector_utils {
|
| 32 |
+
/*
|
| 33 |
+
* Extracts pointers to tensors passed by user as binary postops rhs (right-hand-side)
|
| 34 |
+
* arguments (arg1 from binary postop) from execution context. Those pointers are placed
|
| 35 |
+
* in vector in order of binary post-op appearance inside post_ops_t structure. Returned vector
|
| 36 |
+
* usually is passed to kernel during execution phase in runtime params.
|
| 37 |
+
* @param first_arg_idx_offset - offset for indexation of binary postop arguments
|
| 38 |
+
* (used for fusions with dw convolutions)
|
| 39 |
+
*/
|
| 40 |
+
std::vector<const void *> prepare_binary_args(const post_ops_t &post_ops,
|
| 41 |
+
const dnnl::impl::exec_ctx_t &ctx,
|
| 42 |
+
const unsigned first_arg_idx_offset = 0);
|
| 43 |
+
|
| 44 |
+
bool bcast_strategy_present(
|
| 45 |
+
const std::vector<broadcasting_strategy_t> &post_ops_bcasts,
|
| 46 |
+
const broadcasting_strategy_t bcast_strategy);
|
| 47 |
+
|
| 48 |
+
std::vector<broadcasting_strategy_t> extract_bcast_strategies(
|
| 49 |
+
const std::vector<dnnl_post_ops::entry_t> &post_ops,
|
| 50 |
+
const memory_desc_wrapper &dst_md);
|
| 51 |
+
|
| 52 |
+
memory_desc_t get_src1_desc(
|
| 53 |
+
const post_ops_t::entry_t &post_op, const memory_desc_wrapper &dst_d);
|
| 54 |
+
|
| 55 |
+
/*
|
| 56 |
+
* Returns a tuple of bools, which size is equal to number of bcast
|
| 57 |
+
* strategies passed in. Values at consecutive positions indicate existence of
|
| 58 |
+
* binary postop with a particular bcast strategy in post_ops vector.
|
| 59 |
+
*/
|
| 60 |
+
// Builds one bool per requested strategy via pack expansion. The trailing
// return type uses the comma-operator trick `(bcast_strategies, false)` so
// the result is a tuple of N bools regardless of the strategies' type.
template <typename... Str>
auto bcast_strategies_present_tup(
        const std::vector<dnnl_post_ops::entry_t> &post_ops,
        const memory_desc_wrapper &dst_md, Str... bcast_strategies)
        -> decltype(std::make_tuple((bcast_strategies, false)...)) {
    // Extract strategies once, then test each requested one against the list.
    const auto post_ops_bcasts = extract_bcast_strategies(post_ops, dst_md);
    return std::make_tuple(
            bcast_strategy_present(post_ops_bcasts, bcast_strategies)...);
}
|
| 69 |
+
|
| 70 |
+
} // namespace binary_injector_utils
|
| 71 |
+
} // namespace cpu
|
| 72 |
+
} // namespace impl
|
| 73 |
+
} // namespace dnnl
|
| 74 |
+
|
| 75 |
+
#endif
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/cpu_batch_normalization_pd.hpp
ADDED
|
@@ -0,0 +1,41 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*******************************************************************************
|
| 2 |
+
* Copyright 2016-2020 Intel Corporation
|
| 3 |
+
*
|
| 4 |
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
* you may not use this file except in compliance with the License.
|
| 6 |
+
* You may obtain a copy of the License at
|
| 7 |
+
*
|
| 8 |
+
* http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
*
|
| 10 |
+
* Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
* See the License for the specific language governing permissions and
|
| 14 |
+
* limitations under the License.
|
| 15 |
+
*******************************************************************************/
|
| 16 |
+
|
| 17 |
+
#ifndef CPU_CPU_BATCH_NORMALIZATION_PD_HPP
|
| 18 |
+
#define CPU_CPU_BATCH_NORMALIZATION_PD_HPP
|
| 19 |
+
|
| 20 |
+
#include "common/batch_normalization_pd.hpp"
|
| 21 |
+
#include "cpu/cpu_engine.hpp"
|
| 22 |
+
|
| 23 |
+
namespace dnnl {
|
| 24 |
+
namespace impl {
|
| 25 |
+
namespace cpu {
|
| 26 |
+
|
| 27 |
+
// CPU-side aliases of the common batch-normalization primitive descriptors.
// They add no state; constructors are inherited so CPU implementations can
// derive from a CPU-specific base.
struct cpu_batch_normalization_fwd_pd_t : public batch_normalization_fwd_pd_t {
    using batch_normalization_fwd_pd_t::batch_normalization_fwd_pd_t;
};

struct cpu_batch_normalization_bwd_pd_t : public batch_normalization_bwd_pd_t {
    using batch_normalization_bwd_pd_t::batch_normalization_bwd_pd_t;
};
|
| 34 |
+
|
| 35 |
+
} // namespace cpu
|
| 36 |
+
} // namespace impl
|
| 37 |
+
} // namespace dnnl
|
| 38 |
+
|
| 39 |
+
#endif
|
| 40 |
+
|
| 41 |
+
// vim: et ts=4 sw=4 cindent cino+=l0,\:4,N-s
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/cpu_batch_normalization_utils.hpp
ADDED
|
@@ -0,0 +1,44 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*******************************************************************************
|
| 2 |
+
* Copyright 2018-2020 Intel Corporation
|
| 3 |
+
*
|
| 4 |
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
* you may not use this file except in compliance with the License.
|
| 6 |
+
* You may obtain a copy of the License at
|
| 7 |
+
*
|
| 8 |
+
* http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
*
|
| 10 |
+
* Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
* See the License for the specific language governing permissions and
|
| 14 |
+
* limitations under the License.
|
| 15 |
+
*******************************************************************************/
|
| 16 |
+
|
| 17 |
+
#ifndef CPU_CPU_BATCH_NORMALIZATION_UTILS_HPP
|
| 18 |
+
#define CPU_CPU_BATCH_NORMALIZATION_UTILS_HPP
|
| 19 |
+
|
| 20 |
+
#include "common/batch_normalization_pd.hpp"
|
| 21 |
+
|
| 22 |
+
namespace dnnl {
|
| 23 |
+
namespace impl {
|
| 24 |
+
namespace cpu {
|
| 25 |
+
namespace bnorm_utils {
|
| 26 |
+
|
| 27 |
+
void cache_balance(size_t working_set_size, dim_t C_blks, dim_t N, int nthr,
|
| 28 |
+
dim_t &C_blks_per_iter, int64_t &iters);
|
| 29 |
+
|
| 30 |
+
bool thread_balance(bool do_blocking, bool spatial_thr_allowed, bool is_nhwc,
|
| 31 |
+
int ithr, int nthr, dim_t N, dim_t C_blks, dim_t SP, int &C_ithr,
|
| 32 |
+
int &C_nthr, dim_t &C_blk_s, dim_t &C_blk_e, int &N_ithr, int &N_nthr,
|
| 33 |
+
dim_t &N_s, dim_t &N_e, int &S_ithr, int &S_nthr, dim_t &S_s,
|
| 34 |
+
dim_t &S_e);
|
| 35 |
+
|
| 36 |
+
bool is_spatial_thr(const batch_normalization_pd_t *bdesc, bool is_nhwc,
|
| 37 |
+
int simd_w, int data_size);
|
| 38 |
+
|
| 39 |
+
} // namespace bnorm_utils
|
| 40 |
+
} // namespace cpu
|
| 41 |
+
} // namespace impl
|
| 42 |
+
} // namespace dnnl
|
| 43 |
+
|
| 44 |
+
#endif
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/cpu_binary_pd.hpp
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*******************************************************************************
|
| 2 |
+
* Copyright 2019-2020 Intel Corporation
|
| 3 |
+
*
|
| 4 |
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
* you may not use this file except in compliance with the License.
|
| 6 |
+
* You may obtain a copy of the License at
|
| 7 |
+
*
|
| 8 |
+
* http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
*
|
| 10 |
+
* Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
* See the License for the specific language governing permissions and
|
| 14 |
+
* limitations under the License.
|
| 15 |
+
*******************************************************************************/
|
| 16 |
+
|
| 17 |
+
#ifndef CPU_CPU_BINARY_PD_HPP
|
| 18 |
+
#define CPU_CPU_BINARY_PD_HPP
|
| 19 |
+
|
| 20 |
+
#include "common/binary_pd.hpp"
|
| 21 |
+
#include "common/c_types_map.hpp"
|
| 22 |
+
#include "common/type_helpers.hpp"
|
| 23 |
+
#include "common/utils.hpp"
|
| 24 |
+
#include "cpu/cpu_engine.hpp"
|
| 25 |
+
|
| 26 |
+
namespace dnnl {
|
| 27 |
+
namespace impl {
|
| 28 |
+
namespace cpu {
|
| 29 |
+
|
| 30 |
+
// CPU-side alias of the common binary primitive descriptor; inherits all
// constructors and adds nothing.
struct cpu_binary_pd_t : public binary_pd_t {
    using binary_pd_t::binary_pd_t;
};
|
| 33 |
+
} // namespace cpu
|
| 34 |
+
} // namespace impl
|
| 35 |
+
} // namespace dnnl
|
| 36 |
+
|
| 37 |
+
#endif
|
| 38 |
+
|
| 39 |
+
// vim: et ts=4 sw=4 cindent cino^=l0,\:0,N-s
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/cpu_concat_pd.hpp
ADDED
|
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*******************************************************************************
|
| 2 |
+
* Copyright 2016-2020 Intel Corporation
|
| 3 |
+
*
|
| 4 |
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
* you may not use this file except in compliance with the License.
|
| 6 |
+
* You may obtain a copy of the License at
|
| 7 |
+
*
|
| 8 |
+
* http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
*
|
| 10 |
+
* Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
* See the License for the specific language governing permissions and
|
| 14 |
+
* limitations under the License.
|
| 15 |
+
*******************************************************************************/
|
| 16 |
+
|
| 17 |
+
#ifndef CPU_CPU_CONCAT_PD_HPP
|
| 18 |
+
#define CPU_CPU_CONCAT_PD_HPP
|
| 19 |
+
|
| 20 |
+
#include <assert.h>
|
| 21 |
+
|
| 22 |
+
#include "common/c_types_map.hpp"
|
| 23 |
+
#include "common/concat_pd.hpp"
|
| 24 |
+
#include "common/type_helpers.hpp"
|
| 25 |
+
#include "common/utils.hpp"
|
| 26 |
+
#include "cpu/cpu_engine.hpp"
|
| 27 |
+
|
| 28 |
+
namespace dnnl {
|
| 29 |
+
namespace impl {
|
| 30 |
+
namespace cpu {
|
| 31 |
+
|
| 32 |
+
// CPU-side alias of the common concat primitive descriptor; inherits all
// constructors and adds nothing.
struct cpu_concat_pd_t : public concat_pd_t {
    using concat_pd_t::concat_pd_t;
};
|
| 35 |
+
|
| 36 |
+
} // namespace cpu
|
| 37 |
+
} // namespace impl
|
| 38 |
+
} // namespace dnnl
|
| 39 |
+
|
| 40 |
+
#endif
|
| 41 |
+
|
| 42 |
+
// vim: et ts=4 sw=4 cindent cino+=l0,\:4,N-s
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/cpu_convolution_pd.hpp
ADDED
|
@@ -0,0 +1,83 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*******************************************************************************
|
| 2 |
+
* Copyright 2016-2021 Intel Corporation
|
| 3 |
+
*
|
| 4 |
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
* you may not use this file except in compliance with the License.
|
| 6 |
+
* You may obtain a copy of the License at
|
| 7 |
+
*
|
| 8 |
+
* http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
*
|
| 10 |
+
* Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
* See the License for the specific language governing permissions and
|
| 14 |
+
* limitations under the License.
|
| 15 |
+
*******************************************************************************/
|
| 16 |
+
|
| 17 |
+
#ifndef CPU_CPU_CONVOLUTION_PD_HPP
|
| 18 |
+
#define CPU_CPU_CONVOLUTION_PD_HPP
|
| 19 |
+
|
| 20 |
+
#include <assert.h>
|
| 21 |
+
|
| 22 |
+
#include "common/c_types_map.hpp"
|
| 23 |
+
#include "common/convolution_pd.hpp"
|
| 24 |
+
#include "common/type_helpers.hpp"
|
| 25 |
+
#include "common/utils.hpp"
|
| 26 |
+
#include "cpu/cpu_eltwise_pd.hpp"
|
| 27 |
+
#include "cpu/cpu_engine.hpp"
|
| 28 |
+
|
| 29 |
+
namespace dnnl {
|
| 30 |
+
namespace impl {
|
| 31 |
+
namespace cpu {
|
| 32 |
+
|
| 33 |
+
struct cpu_convolution_fwd_pd_t : public convolution_fwd_pd_t {
|
| 34 |
+
using convolution_fwd_pd_t::convolution_fwd_pd_t;
|
| 35 |
+
|
| 36 |
+
bool has_padded_dst() const {
|
| 37 |
+
memory_desc_wrapper dst_d(&dst_md_);
|
| 38 |
+
return OC() != dst_d.padded_dims()[1];
|
| 39 |
+
}
|
| 40 |
+
|
| 41 |
+
bool wants_padded_bias() const {
|
| 42 |
+
if (!with_bias()) return false;
|
| 43 |
+
return has_padded_dst();
|
| 44 |
+
}
|
| 45 |
+
|
| 46 |
+
bool wants_zero_pad_dst() const {
|
| 47 |
+
if (!has_padded_dst()) return false;
|
| 48 |
+
bool is_zero_preserved = true;
|
| 49 |
+
const auto &po = attr()->post_ops_;
|
| 50 |
+
for (int i = 0; i < po.len(); i++) {
|
| 51 |
+
const auto &entry = po.entry_[i];
|
| 52 |
+
if (entry.is_eltwise()) {
|
| 53 |
+
const auto &ee = entry.eltwise;
|
| 54 |
+
is_zero_preserved = is_zero_preserved
|
| 55 |
+
&& cpu_eltwise_fwd_pd_t::eltwise_preserves_zero(
|
| 56 |
+
ee.alg, ee.alpha, ee.beta);
|
| 57 |
+
}
|
| 58 |
+
}
|
| 59 |
+
return !is_zero_preserved;
|
| 60 |
+
}
|
| 61 |
+
};
|
| 62 |
+
|
| 63 |
+
// CPU-side alias of the common backward-data convolution descriptor;
// inherits all constructors and adds nothing.
struct cpu_convolution_bwd_data_pd_t : public convolution_bwd_data_pd_t {
    using convolution_bwd_data_pd_t::convolution_bwd_data_pd_t;
};

// CPU-side base for backward-weights convolution descriptors; adds a helper
// telling whether the bias gradient needs padding.
struct cpu_convolution_bwd_weights_pd_t : public convolution_bwd_weights_pd_t {
    using convolution_bwd_weights_pd_t::convolution_bwd_weights_pd_t;

    // Bias needs padding when a bias exists and diff_dst's padded channel
    // dimension differs from the logical number of output channels OC().
    bool wants_padded_bias() const {
        if (!with_bias()) return false;
        memory_desc_wrapper diff_dst_d(&diff_dst_md_);
        return OC() != diff_dst_d.padded_dims()[1];
    }
};
|
| 76 |
+
|
| 77 |
+
} // namespace cpu
|
| 78 |
+
} // namespace impl
|
| 79 |
+
} // namespace dnnl
|
| 80 |
+
|
| 81 |
+
#endif
|
| 82 |
+
|
| 83 |
+
// vim: et ts=4 sw=4 cindent cino+=l0,\:4,N-s
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/cpu_deconvolution_pd.hpp
ADDED
|
@@ -0,0 +1,48 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*******************************************************************************
|
| 2 |
+
* Copyright 2018-2020 Intel Corporation
|
| 3 |
+
*
|
| 4 |
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
* you may not use this file except in compliance with the License.
|
| 6 |
+
* You may obtain a copy of the License at
|
| 7 |
+
*
|
| 8 |
+
* http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
*
|
| 10 |
+
* Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
* See the License for the specific language governing permissions and
|
| 14 |
+
* limitations under the License.
|
| 15 |
+
*******************************************************************************/
|
| 16 |
+
|
| 17 |
+
#ifndef CPU_CPU_DECONVOLUTION_PD_HPP
|
| 18 |
+
#define CPU_CPU_DECONVOLUTION_PD_HPP
|
| 19 |
+
|
| 20 |
+
#include <assert.h>
|
| 21 |
+
|
| 22 |
+
#include "common/deconvolution_pd.hpp"
|
| 23 |
+
#include "cpu/cpu_engine.hpp"
|
| 24 |
+
|
| 25 |
+
namespace dnnl {
|
| 26 |
+
namespace impl {
|
| 27 |
+
namespace cpu {
|
| 28 |
+
|
| 29 |
+
// CPU base class for forward deconvolution primitive descriptors.
// Pure pass-through: inherits all constructors from the common pd.
struct cpu_deconvolution_fwd_pd_t : public deconvolution_fwd_pd_t {
    using deconvolution_fwd_pd_t::deconvolution_fwd_pd_t;
};
|
| 32 |
+
|
| 33 |
+
// CPU base class for backward-by-data deconvolution primitive descriptors.
// Pure pass-through: inherits all constructors from the common pd.
struct cpu_deconvolution_bwd_data_pd_t : public deconvolution_bwd_data_pd_t {
    using deconvolution_bwd_data_pd_t::deconvolution_bwd_data_pd_t;
};
|
| 36 |
+
|
| 37 |
+
// CPU base class for backward-by-weights deconvolution primitive descriptors.
// Pure pass-through: inherits all constructors from the common pd.
struct cpu_deconvolution_bwd_weights_pd_t
    : public deconvolution_bwd_weights_pd_t {
    using deconvolution_bwd_weights_pd_t::deconvolution_bwd_weights_pd_t;
};
|
| 41 |
+
|
| 42 |
+
} // namespace cpu
|
| 43 |
+
} // namespace impl
|
| 44 |
+
} // namespace dnnl
|
| 45 |
+
|
| 46 |
+
#endif
|
| 47 |
+
|
| 48 |
+
// vim: et ts=4 sw=4 cindent cino+=l0,\:4,N-s
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/cpu_eltwise_pd.hpp
ADDED
|
@@ -0,0 +1,46 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*******************************************************************************
|
| 2 |
+
* Copyright 2016-2020 Intel Corporation
|
| 3 |
+
*
|
| 4 |
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
* you may not use this file except in compliance with the License.
|
| 6 |
+
* You may obtain a copy of the License at
|
| 7 |
+
*
|
| 8 |
+
* http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
*
|
| 10 |
+
* Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
* See the License for the specific language governing permissions and
|
| 14 |
+
* limitations under the License.
|
| 15 |
+
*******************************************************************************/
|
| 16 |
+
|
| 17 |
+
#ifndef CPU_CPU_ELTWISE_PD_HPP
|
| 18 |
+
#define CPU_CPU_ELTWISE_PD_HPP
|
| 19 |
+
|
| 20 |
+
#include <assert.h>
|
| 21 |
+
|
| 22 |
+
#include "common/c_types_map.hpp"
|
| 23 |
+
#include "common/eltwise_pd.hpp"
|
| 24 |
+
#include "common/type_helpers.hpp"
|
| 25 |
+
#include "common/utils.hpp"
|
| 26 |
+
#include "cpu/cpu_engine.hpp"
|
| 27 |
+
|
| 28 |
+
namespace dnnl {
|
| 29 |
+
namespace impl {
|
| 30 |
+
namespace cpu {
|
| 31 |
+
|
| 32 |
+
// CPU base class for forward eltwise primitive descriptors.
// Pure pass-through: inherits all constructors from the common pd.
struct cpu_eltwise_fwd_pd_t : public eltwise_fwd_pd_t {
    using eltwise_fwd_pd_t::eltwise_fwd_pd_t;
};
|
| 35 |
+
|
| 36 |
+
// CPU base class for backward eltwise primitive descriptors.
// Pure pass-through: inherits all constructors from the common pd.
struct cpu_eltwise_bwd_pd_t : public eltwise_bwd_pd_t {
    using eltwise_bwd_pd_t::eltwise_bwd_pd_t;
};
|
| 39 |
+
|
| 40 |
+
} // namespace cpu
|
| 41 |
+
} // namespace impl
|
| 42 |
+
} // namespace dnnl
|
| 43 |
+
|
| 44 |
+
#endif
|
| 45 |
+
|
| 46 |
+
// vim: et ts=4 sw=4 cindent cino+=l0,\:4,N-s
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/cpu_engine.hpp
ADDED
|
@@ -0,0 +1,183 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*******************************************************************************
|
| 2 |
+
* Copyright 2016-2023 Intel Corporation
|
| 3 |
+
* Copyright 2020-2023 Arm Ltd. and affiliates
|
| 4 |
+
*
|
| 5 |
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
| 6 |
+
* you may not use this file except in compliance with the License.
|
| 7 |
+
* You may obtain a copy of the License at
|
| 8 |
+
*
|
| 9 |
+
* http://www.apache.org/licenses/LICENSE-2.0
|
| 10 |
+
*
|
| 11 |
+
* Unless required by applicable law or agreed to in writing, software
|
| 12 |
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
| 13 |
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 14 |
+
* See the License for the specific language governing permissions and
|
| 15 |
+
* limitations under the License.
|
| 16 |
+
*******************************************************************************/
|
| 17 |
+
|
| 18 |
+
#ifndef CPU_CPU_ENGINE_HPP
|
| 19 |
+
#define CPU_CPU_ENGINE_HPP
|
| 20 |
+
|
| 21 |
+
#include <assert.h>
|
| 22 |
+
|
| 23 |
+
#include "oneapi/dnnl/dnnl.h"
|
| 24 |
+
|
| 25 |
+
#include "common/c_types_map.hpp"
|
| 26 |
+
#include "common/engine.hpp"
|
| 27 |
+
#include "common/engine_id.hpp"
|
| 28 |
+
#include "common/impl_list_item.hpp"
|
| 29 |
+
|
| 30 |
+
#include "cpu/platform.hpp"
|
| 31 |
+
|
| 32 |
+
#if DNNL_AARCH64 && DNNL_AARCH64_USE_ACL
|
| 33 |
+
#include "cpu/aarch64/acl_thread.hpp"
|
| 34 |
+
#endif
|
| 35 |
+
|
| 36 |
+
#define CPU_INSTANCE(...) \
|
| 37 |
+
impl_list_item_t( \
|
| 38 |
+
impl_list_item_t::type_deduction_helper_t<__VA_ARGS__::pd_t>()),
|
| 39 |
+
#define CPU_INSTANCE_X64(...) DNNL_X64_ONLY(CPU_INSTANCE(__VA_ARGS__))
|
| 40 |
+
#define CPU_INSTANCE_SSE41(...) REG_SSE41_ISA(CPU_INSTANCE(__VA_ARGS__))
|
| 41 |
+
#define CPU_INSTANCE_AVX2(...) REG_AVX2_ISA(CPU_INSTANCE(__VA_ARGS__))
|
| 42 |
+
#define CPU_INSTANCE_AVX512(...) REG_AVX512_ISA(CPU_INSTANCE(__VA_ARGS__))
|
| 43 |
+
#define CPU_INSTANCE_AMX(...) REG_AMX_ISA(CPU_INSTANCE(__VA_ARGS__))
|
| 44 |
+
#define CPU_INSTANCE_AARCH64(...) DNNL_AARCH64_ONLY(CPU_INSTANCE(__VA_ARGS__))
|
| 45 |
+
#define CPU_INSTANCE_AARCH64_ACL(...) \
|
| 46 |
+
DNNL_AARCH64_ACL_ONLY(CPU_INSTANCE(__VA_ARGS__))
|
| 47 |
+
#define CPU_INSTANCE_RV64GCV(...) DNNL_RV64GCV_ONLY(CPU_INSTANCE(__VA_ARGS__))
|
| 48 |
+
|
| 49 |
+
namespace dnnl {
|
| 50 |
+
namespace impl {
|
| 51 |
+
namespace cpu {
|
| 52 |
+
|
| 53 |
+
#define DECLARE_IMPL_LIST(kind) \
|
| 54 |
+
const impl_list_item_t *get_##kind##_impl_list(const kind##_desc_t *desc);
|
| 55 |
+
|
| 56 |
+
DECLARE_IMPL_LIST(batch_normalization);
|
| 57 |
+
DECLARE_IMPL_LIST(binary);
|
| 58 |
+
DECLARE_IMPL_LIST(convolution);
|
| 59 |
+
DECLARE_IMPL_LIST(deconvolution);
|
| 60 |
+
DECLARE_IMPL_LIST(eltwise);
|
| 61 |
+
DECLARE_IMPL_LIST(group_normalization);
|
| 62 |
+
DECLARE_IMPL_LIST(inner_product);
|
| 63 |
+
DECLARE_IMPL_LIST(layer_normalization);
|
| 64 |
+
DECLARE_IMPL_LIST(lrn);
|
| 65 |
+
DECLARE_IMPL_LIST(matmul);
|
| 66 |
+
DECLARE_IMPL_LIST(pooling);
|
| 67 |
+
DECLARE_IMPL_LIST(prelu);
|
| 68 |
+
DECLARE_IMPL_LIST(reduction);
|
| 69 |
+
DECLARE_IMPL_LIST(resampling);
|
| 70 |
+
DECLARE_IMPL_LIST(rnn);
|
| 71 |
+
DECLARE_IMPL_LIST(shuffle);
|
| 72 |
+
DECLARE_IMPL_LIST(softmax);
|
| 73 |
+
|
| 74 |
+
#undef DECLARE_IMPL_LIST
|
| 75 |
+
|
| 76 |
+
// Static registry of CPU implementation lists. Maps each primitive kind to
// its nullptr-terminated array of impl_list_item_t candidates; the per-kind
// lists are the get_<kind>_impl_list() functions declared above via
// DECLARE_IMPL_LIST.
class cpu_engine_impl_list_t {
public:
    static const impl_list_item_t *get_concat_implementation_list();
    static const impl_list_item_t *get_reorder_implementation_list(
            const memory_desc_t *src_md, const memory_desc_t *dst_md);
    static const impl_list_item_t *get_sum_implementation_list();

    // Dispatches on desc->kind; the cast to the concrete <kind>_desc_t is
    // safe because desc->kind identifies the actual descriptor type.
    static const impl_list_item_t *get_implementation_list(
            const op_desc_t *desc) {
        // Fallback for unknown kinds: an empty (immediately-terminated) list.
        static const impl_list_item_t empty_list[] = {nullptr};

// clang-format off
#define CASE(kind) \
    case primitive_kind::kind: \
        return get_##kind##_impl_list((const kind##_desc_t *)desc);
        switch (desc->kind) {
            CASE(batch_normalization);
            CASE(binary);
            CASE(convolution);
            CASE(deconvolution);
            CASE(eltwise);
            CASE(group_normalization);
            CASE(inner_product);
            CASE(layer_normalization);
            CASE(lrn);
            CASE(matmul);
            CASE(pooling);
            CASE(prelu);
            CASE(reduction);
            CASE(resampling);
            CASE(rnn);
            CASE(shuffle);
            CASE(softmax);
            // Debug builds trap; release builds return the empty list.
            default: assert(!"unknown primitive kind"); return empty_list;
        }
#undef CASE
    }
    // clang-format on
};
|
| 115 |
+
|
| 116 |
+
// Native (non-SYCL) CPU engine. There is a single CPU device (index 0);
// all implementation-list queries delegate to cpu_engine_impl_list_t.
class cpu_engine_t : public engine_t {
public:
    cpu_engine_t() : engine_t(engine_kind::cpu, get_cpu_native_runtime(), 0) {}

    /* implementation part */

    status_t create_memory_storage(memory_storage_t **storage, unsigned flags,
            size_t size, void *handle) override;

    status_t create_stream(stream_t **stream, unsigned flags) override;

#if DNNL_CPU_RUNTIME == DNNL_RUNTIME_THREADPOOL
    // Threadpool runtime only: streams may carry a user-provided threadpool.
    status_t create_stream(stream_t **stream,
            dnnl::threadpool_interop::threadpool_iface *threadpool) override;
#endif

    const impl_list_item_t *get_concat_implementation_list() const override {
        return cpu_engine_impl_list_t::get_concat_implementation_list();
    }

    const impl_list_item_t *get_reorder_implementation_list(
            const memory_desc_t *src_md,
            const memory_desc_t *dst_md) const override {
        return cpu_engine_impl_list_t::get_reorder_implementation_list(
                src_md, dst_md);
    }
    const impl_list_item_t *get_sum_implementation_list() const override {
        return cpu_engine_impl_list_t::get_sum_implementation_list();
    }
    const impl_list_item_t *get_implementation_list(
            const op_desc_t *desc) const override {
        return cpu_engine_impl_list_t::get_implementation_list(desc);
    }

    // Single fixed device identity for the native CPU.
    device_id_t device_id() const override { return std::make_tuple(0, 0, 0); }

    engine_id_t engine_id() const override {
        // Non-sycl CPU engine doesn't have device and context.
        return {};
    }

protected:
    // Destroyed only through the engine's reference counting machinery.
    ~cpu_engine_t() override = default;
};
|
| 160 |
+
|
| 161 |
+
class cpu_engine_factory_t : public engine_factory_t {
|
| 162 |
+
public:
|
| 163 |
+
size_t count() const override { return 1; }
|
| 164 |
+
status_t engine_create(engine_t **engine, size_t index) const override {
|
| 165 |
+
assert(index == 0);
|
| 166 |
+
*engine = new cpu_engine_t();
|
| 167 |
+
|
| 168 |
+
#if DNNL_AARCH64 && DNNL_AARCH64_USE_ACL
|
| 169 |
+
dnnl::impl::cpu::aarch64::acl_thread_utils::set_acl_threading();
|
| 170 |
+
#endif
|
| 171 |
+
return status::success;
|
| 172 |
+
};
|
| 173 |
+
};
|
| 174 |
+
|
| 175 |
+
engine_t *get_service_engine();
|
| 176 |
+
|
| 177 |
+
} // namespace cpu
|
| 178 |
+
} // namespace impl
|
| 179 |
+
} // namespace dnnl
|
| 180 |
+
|
| 181 |
+
#endif
|
| 182 |
+
|
| 183 |
+
// vim: et ts=4 sw=4 cindent cino+=l0,\:4,N-s
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/cpu_group_normalization_pd.hpp
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*******************************************************************************
|
| 2 |
+
* Copyright 2023 Intel Corporation
|
| 3 |
+
*
|
| 4 |
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
* you may not use this file except in compliance with the License.
|
| 6 |
+
* You may obtain a copy of the License at
|
| 7 |
+
*
|
| 8 |
+
* http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
*
|
| 10 |
+
* Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
* See the License for the specific language governing permissions and
|
| 14 |
+
* limitations under the License.
|
| 15 |
+
*******************************************************************************/
|
| 16 |
+
|
| 17 |
+
#ifndef CPU_CPU_GROUP_NORMALIZATION_PD_HPP
|
| 18 |
+
#define CPU_CPU_GROUP_NORMALIZATION_PD_HPP
|
| 19 |
+
|
| 20 |
+
#include "common/group_normalization_pd.hpp"
|
| 21 |
+
#include "cpu/cpu_engine.hpp"
|
| 22 |
+
|
| 23 |
+
namespace dnnl {
|
| 24 |
+
namespace impl {
|
| 25 |
+
namespace cpu {
|
| 26 |
+
|
| 27 |
+
// CPU base class for forward group-normalization primitive descriptors.
// Pure pass-through: inherits all constructors from the common pd.
struct cpu_group_normalization_fwd_pd_t : public group_normalization_fwd_pd_t {
    using group_normalization_fwd_pd_t::group_normalization_fwd_pd_t;
};
|
| 30 |
+
|
| 31 |
+
// CPU base class for backward group-normalization primitive descriptors.
// Pure pass-through: inherits all constructors from the common pd.
struct cpu_group_normalization_bwd_pd_t : public group_normalization_bwd_pd_t {
    using group_normalization_bwd_pd_t::group_normalization_bwd_pd_t;
};
|
| 34 |
+
|
| 35 |
+
} // namespace cpu
|
| 36 |
+
} // namespace impl
|
| 37 |
+
} // namespace dnnl
|
| 38 |
+
|
| 39 |
+
#endif
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/cpu_inner_product_pd.hpp
ADDED
|
@@ -0,0 +1,323 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*******************************************************************************
|
| 2 |
+
* Copyright 2016-2021 Intel Corporation
|
| 3 |
+
*
|
| 4 |
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
* you may not use this file except in compliance with the License.
|
| 6 |
+
* You may obtain a copy of the License at
|
| 7 |
+
*
|
| 8 |
+
* http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
*
|
| 10 |
+
* Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
* See the License for the specific language governing permissions and
|
| 14 |
+
* limitations under the License.
|
| 15 |
+
*******************************************************************************/
|
| 16 |
+
|
| 17 |
+
#ifndef CPU_CPU_INNER_PRODUCT_PD_HPP
|
| 18 |
+
#define CPU_CPU_INNER_PRODUCT_PD_HPP
|
| 19 |
+
|
| 20 |
+
#include <assert.h>
|
| 21 |
+
|
| 22 |
+
#include "common/c_types_map.hpp"
|
| 23 |
+
#include "common/inner_product_pd.hpp"
|
| 24 |
+
#include "common/utils.hpp"
|
| 25 |
+
#include "cpu/cpu_engine.hpp"
|
| 26 |
+
|
| 27 |
+
namespace dnnl {
|
| 28 |
+
namespace impl {
|
| 29 |
+
namespace cpu {
|
| 30 |
+
|
| 31 |
+
namespace {
|
| 32 |
+
// Checks whether the src/weights/dst memory layouts allow computing the
// inner product as one dense GeMM without any copy or re-layout.
// NOTE(review): the misspelling "consitency" is kept — the name is part of
// the interface used by callers elsewhere in the project.
inline bool dense_gemm_consitency_check(const memory_desc_wrapper &src_d,
        const memory_desc_wrapper &wei_d, const memory_desc_wrapper &dst_d) {
    using namespace utils;

    // Weights strides must be a constant multiple of the source strides
    // across dims 1..ndims-1, and that multiple must be either 1 or the
    // padded output-channel count (wei_d.padded_dims()[0]).
    auto strides_compatible = [&]() {
        bool ok = true;
        auto w_str = wei_d.blocking_desc().strides;
        auto d_str = src_d.blocking_desc().strides;
        for (int i = 1; i < src_d.ndims() - 1; i++) {
            ok = ok && w_str[i] / d_str[i] == w_str[i + 1] / d_str[i + 1];
        }
        return ok && one_of(w_str[1] / d_str[1], 1, wei_d.padded_dims()[0]);
    };

    // Source and weights must use matching inner-block structure. A trailing
    // weights block over dim 0 (output channels) is tolerated when dim 0 has
    // stride 1 and fits entirely inside that one block.
    auto inner_blk_compatible = [&]() {
        auto d_inner_blks = src_d.blocking_desc().inner_blks;
        auto w_inner_blks = wei_d.blocking_desc().inner_blks;
        auto d_inner_idxs = src_d.blocking_desc().inner_idxs;
        auto w_inner_idxs = wei_d.blocking_desc().inner_idxs;

        int d_inner_nblks = src_d.blocking_desc().inner_nblks;
        int w_inner_nblks = wei_d.blocking_desc().inner_nblks;

        bool ok = true;

        if ((wei_d.blocking_desc().strides[0] == 1) && (w_inner_nblks > 0)) {
            // Dim 0 must be covered by the innermost block alone.
            ok = ok && wei_d.dims()[0] / w_inner_blks[w_inner_nblks - 1] == 1
                    && w_inner_idxs[w_inner_nblks - 1] == 0;
            w_inner_nblks--;
        }
        ok = ok && d_inner_nblks == w_inner_nblks;

        // Remaining blocks must match pairwise in both size and dimension.
        for (int d = 0; d < w_inner_nblks; d++)
            ok = ok && (d_inner_blks[d] == w_inner_blks[d])
                    && (d_inner_idxs[d] == w_inner_idxs[d]);

        return ok;
    };

    // Overall: plain blocking descriptors, matching ranks and channel
    // padding, plain `nc` destination, and dense storage (src/wei may be
    // dense-with-padding, dst must be fully dense).
    return true && src_d.is_blocking_desc() && wei_d.is_blocking_desc()
            && src_d.ndims() == wei_d.ndims() && inner_blk_compatible()
            && strides_compatible() && dst_d.matches_tag(format_tag::nc)
            && src_d.only_padded_dim(1) && wei_d.only_padded_dim(1)
            && src_d.padded_dims()[1] == wei_d.padded_dims()[1]
            && src_d.is_dense(true) && dst_d.is_dense() && wei_d.is_dense(true);
}
|
| 78 |
+
|
| 79 |
+
// Moves dimension `a` (dim 0) of `md` between the outermost and innermost
// stride positions, so GeMM-based kernels get a better leading dimension.
// If dim 0 is neither clearly first nor clearly last, `md` is left as-is.
void transpose_md(memory_desc_t &md) {
    // Note: we cannot directly use good leading dimension for a
    // in padded_dims. This is because inner_blks does not
    // account for padding, and should divide the corresponding
    // padded_dim.

    // Make dim 0 the innermost: give it stride 1, scale all other strides
    // by its (now unpadded) extent, and append it as an inner block when a
    // blocked layout is already present.
    auto put_a_last = [](memory_desc_t &md) {
        auto &md_blk = md.format_desc.blocking;
        md.padded_dims[0] = md.dims[0];
        md_blk.strides[0] = 1;
        for (int d = 1; d < md.ndims; d++)
            md_blk.strides[d] *= md.padded_dims[0];
        if (md_blk.inner_nblks > 0) {
            md_blk.inner_idxs[md_blk.inner_nblks] = 0;
            md_blk.inner_blks[md_blk.inner_nblks] = md.padded_dims[0];
            md_blk.inner_nblks++;
        }
    };

    auto put_a_first = [](memory_desc_t &md) {
        blocking_desc_t blk = md.format_desc.blocking;
        // make the stride for `a` bigger than any other stride and
        // use the fact that memory_desc_init_by_blocking_desc
        // preserves the strides order but actually changes them to
        // densify the descriptor
        blk.strides[0] = memory_desc_wrapper(md).size();
        memory_desc_init_by_blocking_desc(md, blk);
    };

    auto is_a_last = [](memory_desc_t &md) {
        auto &md_blk = md.format_desc.blocking;
        // The inner_blks condition makes sure that a is a non blocked dimension
        return (md_blk.strides[0] == 1) && (md_blk.inner_nblks == 0);
    };

    // Dim 0 is "first" when its stride dominates every other dim's stride.
    auto is_a_first = [&](memory_desc_t &md) {
        auto &md_blk = md.format_desc.blocking;
        for (int d = 1; d < md.ndims; d++)
            if (md_blk.strides[0] < md_blk.strides[d]) return false;
        return true;
    };

    if (is_a_last(md))
        put_a_first(md);
    else if (is_a_first(md))
        put_a_last(md);

    // here, by default we do not transpose md if it is not
}
|
| 127 |
+
|
| 128 |
+
// Matches `md` against the layout tags the GeMM-based inner product
// understands; yields format_tag::undef when nothing matches.
format_tag_t get_tag(memory_desc_t &md) {
    using namespace format_tag;
    return memory_desc_matches_one_of_tag(md, ab, abc, abcd,
            abcde, // NCHW derivatives
            ba, bca, bcda, bcdea, cba, cdba,
            cdeba, // IO and spatial derivatives
            acb, acdb, acdeb, // NHWC derivatives
            aBcd16b, aBcde16b, aBcd8b, aBcde8b, aBcd4b,
            aBcde4b); // blocked layouts
}
|
| 139 |
+
|
| 140 |
+
inline bool is_ineff_lead_dim(const dim_t dim) {
|
| 141 |
+
return dim % 1024 == 0; // check cache aliasing
|
| 142 |
+
}
|
| 143 |
+
|
| 144 |
+
/* Pick between M and K for the most efficient leading
|
| 145 |
+
* dimension to compute GeMM. */
|
| 146 |
+
bool transpose_leading_dim(const dim_t M, const dim_t K) {
|
| 147 |
+
return IMPLICATION(is_ineff_lead_dim(M), is_ineff_lead_dim(K) && M <= K);
|
| 148 |
+
}
|
| 149 |
+
} // namespace
|
| 150 |
+
|
| 151 |
+
#define INIT_MEM_BY_TAG(tag_init_f, md) \
|
| 152 |
+
do { \
|
| 153 |
+
auto tag = tag_init_f; \
|
| 154 |
+
if (tag == format_tag::undef) return status::unimplemented; \
|
| 155 |
+
CHECK(memory_desc_init_by_tag(md, tag)); \
|
| 156 |
+
} while (0)
|
| 157 |
+
|
| 158 |
+
// CPU base class for forward inner-product primitive descriptors.
struct cpu_inner_product_fwd_pd_t : public inner_product_fwd_pd_t {
    using inner_product_fwd_pd_t::inner_product_fwd_pd_t;

protected:
    // Fills every format_kind::any memory descriptor with a default layout
    // suited to the GeMM-based implementations. With allow_all_tags, falls
    // back to a plain NCHW-style layout when the partner md carries an
    // unrecognized tag, instead of returning unimplemented.
    status_t set_default_params(bool allow_all_tags = false) {
        using namespace format_tag;

        // src follows the weights layout (or plain NCHW-style when weights
        // are also `any` / unrecognized).
        auto set_default_src = [&]() {
            if (weights_md_.format_kind == format_kind::any) {
                INIT_MEM_BY_TAG(utils::pick(ndims() - 2, ab, abc, abcd, abcde),
                        src_md_);
            } else {
                format_tag_t weights_tag = get_tag(weights_md_);
                if (allow_all_tags && weights_tag == undef) {
                    INIT_MEM_BY_TAG(
                            utils::pick(ndims() - 2, ab, abc, abcd, abcde),
                            src_md_);
                } else {
                    INIT_MEM_BY_TAG(weights_tag, src_md_);
                }
                // transpose weights to improve efficiency of non-copy kernels
                if (src_md_.format_desc.blocking.strides[0] == 1)
                    transpose_md(src_md_);
            }
            return status::success;
        };

        // weights follow the src layout (or plain when src is unrecognized).
        auto set_default_weights = [&]() {
            format_tag_t src_tag = get_tag(src_md_);
            if (allow_all_tags && src_tag == undef) {
                INIT_MEM_BY_TAG(utils::pick(ndims() - 2, ab, abc, abcd, abcde),
                        weights_md_);
            } else {
                INIT_MEM_BY_TAG(src_tag, weights_md_);
            }
            /* with batch = 1, no transpose to use the faster gemv kernels */
            /* otherwise, we transpose the weights to improve efficiency of
             * no-copy kernels */
            if (MB() > 1 && transpose_leading_dim(OC(), IC_total()))
                transpose_md(weights_md_);
            return status::success;
        };

        // Order matters: src defaults may depend on weights and vice versa;
        // each lambda only runs when its md is still format_kind::any.
        if (src_md_.format_kind == format_kind::any) CHECK(set_default_src());
        if (weights_md_.format_kind == format_kind::any)
            CHECK(set_default_weights());
        if (dst_md_.format_kind == format_kind::any)
            CHECK(memory_desc_init_by_tag(dst_md_, nc));
        if (bias_md_.format_kind == format_kind::any)
            CHECK(memory_desc_init_by_tag(bias_md_, x));
        return status::success;
    }
};
|
| 211 |
+
|
| 212 |
+
// CPU base class for backward-by-data inner-product primitive descriptors.
struct cpu_inner_product_bwd_data_pd_t : public inner_product_bwd_data_pd_t {
    using inner_product_bwd_data_pd_t::inner_product_bwd_data_pd_t;

protected:
    // Fills every format_kind::any memory descriptor with a default layout
    // suited to the GeMM-based implementations (see the fwd counterpart).
    status_t set_default_params(bool allow_all_tags = false) {
        using namespace format_tag;

        // diff_src follows the weights layout (or plain NCHW-style when
        // weights are also `any` / unrecognized).
        auto set_default_diff_src = [&]() {
            if (weights_md_.format_kind == format_kind::any) {
                INIT_MEM_BY_TAG(utils::pick(ndims() - 2, ab, abc, abcd, abcde),
                        diff_src_md_);
            } else {
                format_tag_t weights_tag = get_tag(weights_md_);
                if (allow_all_tags && weights_tag == undef) {
                    INIT_MEM_BY_TAG(
                            utils::pick(ndims() - 2, ab, abc, abcd, abcde),
                            diff_src_md_);
                } else {
                    INIT_MEM_BY_TAG(weights_tag, diff_src_md_);
                }
                if (diff_src_md_.format_desc.blocking.strides[0] == 1)
                    transpose_md(diff_src_md_);
            }
            return status::success;
        };

        auto set_default_weights = [&]() {
            format_tag_t diff_src_tag = get_tag(diff_src_md_);
            if (allow_all_tags && diff_src_tag == undef) {
                INIT_MEM_BY_TAG(utils::pick(ndims() - 2, ab, abc, abcd, abcde),
                        weights_md_);
            } else {
                INIT_MEM_BY_TAG(diff_src_tag, weights_md_);
            }
            /* with batch = 1, no transpose to use the faster gemv kernels */
            /* otherwise, we transpose the weights to improve efficiency of
             * no-copy kernels */
            // NOTE(review): the comment above contradicts the condition —
            // here the weights ARE transposed exactly when MB() == 1.
            // Confirm intent against the fwd variant before changing.
            if (MB() == 1) transpose_md(weights_md_);

            return status::success;
        };

        // Each default is applied only when its md is still format_kind::any.
        if (diff_src_md_.format_kind == format_kind::any)
            CHECK(set_default_diff_src());
        if (weights_md_.format_kind == format_kind::any)
            CHECK(set_default_weights());
        if (diff_dst_md_.format_kind == format_kind::any)
            CHECK(memory_desc_init_by_tag(diff_dst_md_, nc));
        return status::success;
    }
};
|
| 263 |
+
|
| 264 |
+
// CPU base class for backward-by-weights inner-product primitive descriptors.
struct cpu_inner_product_bwd_weights_pd_t
    : public inner_product_bwd_weights_pd_t {
    using inner_product_bwd_weights_pd_t::inner_product_bwd_weights_pd_t;

protected:
    // Fills every format_kind::any memory descriptor with a default layout
    // suited to the GeMM-based implementations (see the fwd counterpart).
    status_t set_default_params(bool allow_all_tags = false) {
        using namespace format_tag;

        // src follows the diff_weights layout (or plain NCHW-style when
        // diff_weights are also `any` / unrecognized).
        auto set_default_src = [&]() {
            if (diff_weights_md_.format_kind == format_kind::any) {
                INIT_MEM_BY_TAG(utils::pick(ndims() - 2, ab, abc, abcd, abcde),
                        src_md_);
            } else {
                format_tag_t diff_weights_tag = get_tag(diff_weights_md_);
                if (allow_all_tags && diff_weights_tag == undef) {
                    INIT_MEM_BY_TAG(
                            utils::pick(ndims() - 2, ab, abc, abcd, abcde),
                            src_md_);
                } else {
                    INIT_MEM_BY_TAG(diff_weights_tag, src_md_);
                }
                if (src_md_.format_desc.blocking.strides[0] == 1)
                    transpose_md(src_md_);
            }
            return status::success;
        };

        auto set_default_diff_weights = [&]() {
            format_tag_t src_tag = get_tag(src_md_);
            if (allow_all_tags && src_tag == undef) {
                INIT_MEM_BY_TAG(utils::pick(ndims() - 2, ab, abc, abcd, abcde),
                        diff_weights_md_);
            } else {
                INIT_MEM_BY_TAG(src_tag, diff_weights_md_);
            }
            // Here, we want diff_weights layout to match the fwd weights layout
            if (MB() > 1 && transpose_leading_dim(OC(), MB()))
                transpose_md(diff_weights_md_);
            return status::success;
        };

        // Each default is applied only when its md is still format_kind::any.
        if (src_md_.format_kind == format_kind::any) CHECK(set_default_src());
        if (diff_weights_md_.format_kind == format_kind::any)
            CHECK(set_default_diff_weights());
        if (diff_dst_md_.format_kind == format_kind::any)
            CHECK(memory_desc_init_by_tag(diff_dst_md_, nc));
        if (diff_bias_md_.format_kind == format_kind::any)
            CHECK(memory_desc_init_by_tag(diff_bias_md_, x));
        return status::success;
    }
};
|
| 315 |
+
#undef INIT_MEM_BY_TAG
|
| 316 |
+
|
| 317 |
+
} // namespace cpu
|
| 318 |
+
} // namespace impl
|
| 319 |
+
} // namespace dnnl
|
| 320 |
+
|
| 321 |
+
#endif
|
| 322 |
+
|
| 323 |
+
// vim: et ts=4 sw=4 cindent cino+=l0,\:4,N-s
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/cpu_layer_normalization_pd.hpp
ADDED
|
@@ -0,0 +1,41 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*******************************************************************************
|
| 2 |
+
* Copyright 2019-2020 Intel Corporation
|
| 3 |
+
*
|
| 4 |
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
* you may not use this file except in compliance with the License.
|
| 6 |
+
* You may obtain a copy of the License at
|
| 7 |
+
*
|
| 8 |
+
* http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
*
|
| 10 |
+
* Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
* See the License for the specific language governing permissions and
|
| 14 |
+
* limitations under the License.
|
| 15 |
+
*******************************************************************************/
|
| 16 |
+
|
| 17 |
+
#ifndef CPU_CPU_LAYER_NORMALIZATION_PD_HPP
|
| 18 |
+
#define CPU_CPU_LAYER_NORMALIZATION_PD_HPP
|
| 19 |
+
|
| 20 |
+
#include "common/layer_normalization_pd.hpp"
|
| 21 |
+
#include "cpu/cpu_engine.hpp"
|
| 22 |
+
|
| 23 |
+
namespace dnnl {
|
| 24 |
+
namespace impl {
|
| 25 |
+
namespace cpu {
|
| 26 |
+
|
| 27 |
+
// CPU base class for forward layer-normalization primitive descriptors.
// Pure pass-through: inherits all constructors from the common pd.
struct cpu_layer_normalization_fwd_pd_t : public layer_normalization_fwd_pd_t {
    using layer_normalization_fwd_pd_t::layer_normalization_fwd_pd_t;
};
|
| 30 |
+
|
| 31 |
+
// CPU base class for backward layer-normalization primitive descriptors.
// Pure pass-through: inherits all constructors from the common pd.
struct cpu_layer_normalization_bwd_pd_t : public layer_normalization_bwd_pd_t {
    using layer_normalization_bwd_pd_t::layer_normalization_bwd_pd_t;
};
|
| 34 |
+
|
| 35 |
+
} // namespace cpu
|
| 36 |
+
} // namespace impl
|
| 37 |
+
} // namespace dnnl
|
| 38 |
+
|
| 39 |
+
#endif
|
| 40 |
+
|
| 41 |
+
// vim: et ts=4 sw=4 cindent cino^=l0,\:0,N-s
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/cpu_lrn_pd.hpp
ADDED
|
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*******************************************************************************
|
| 2 |
+
* Copyright 2016-2020 Intel Corporation
|
| 3 |
+
*
|
| 4 |
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
* you may not use this file except in compliance with the License.
|
| 6 |
+
* You may obtain a copy of the License at
|
| 7 |
+
*
|
| 8 |
+
* http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
*
|
| 10 |
+
* Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
* See the License for the specific language governing permissions and
|
| 14 |
+
* limitations under the License.
|
| 15 |
+
*******************************************************************************/
|
| 16 |
+
|
| 17 |
+
#ifndef CPU_CPU_LRN_PD_HPP
|
| 18 |
+
#define CPU_CPU_LRN_PD_HPP
|
| 19 |
+
|
| 20 |
+
#include <assert.h>
|
| 21 |
+
|
| 22 |
+
#include "common/lrn_pd.hpp"
|
| 23 |
+
#include "cpu/cpu_engine.hpp"
|
| 24 |
+
|
| 25 |
+
namespace dnnl {
|
| 26 |
+
namespace impl {
|
| 27 |
+
namespace cpu {
|
| 28 |
+
|
| 29 |
+
struct cpu_lrn_fwd_pd_t : public lrn_fwd_pd_t {
|
| 30 |
+
using lrn_fwd_pd_t::lrn_fwd_pd_t;
|
| 31 |
+
};
|
| 32 |
+
|
| 33 |
+
struct cpu_lrn_bwd_pd_t : public lrn_bwd_pd_t {
|
| 34 |
+
using lrn_bwd_pd_t::lrn_bwd_pd_t;
|
| 35 |
+
};
|
| 36 |
+
|
| 37 |
+
} // namespace cpu
|
| 38 |
+
} // namespace impl
|
| 39 |
+
} // namespace dnnl
|
| 40 |
+
|
| 41 |
+
#endif
|
| 42 |
+
|
| 43 |
+
// vim: et ts=4 sw=4 cindent cino+=l0,\:4,N-s
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/cpu_memory_storage.hpp
ADDED
|
@@ -0,0 +1,108 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*******************************************************************************
|
| 2 |
+
* Copyright 2019-2021 Intel Corporation
|
| 3 |
+
*
|
| 4 |
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
* you may not use this file except in compliance with the License.
|
| 6 |
+
* You may obtain a copy of the License at
|
| 7 |
+
*
|
| 8 |
+
* http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
*
|
| 10 |
+
* Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
* See the License for the specific language governing permissions and
|
| 14 |
+
* limitations under the License.
|
| 15 |
+
*******************************************************************************/
|
| 16 |
+
|
| 17 |
+
#ifndef CPU_CPU_MEMORY_STORAGE_HPP
|
| 18 |
+
#define CPU_CPU_MEMORY_STORAGE_HPP
|
| 19 |
+
|
| 20 |
+
#include <memory>
|
| 21 |
+
|
| 22 |
+
#include "common/c_types_map.hpp"
|
| 23 |
+
#include "common/memory.hpp"
|
| 24 |
+
#include "common/memory_storage.hpp"
|
| 25 |
+
#include "common/stream.hpp"
|
| 26 |
+
#include "common/utils.hpp"
|
| 27 |
+
|
| 28 |
+
#include "cpu/platform.hpp"
|
| 29 |
+
|
| 30 |
+
namespace dnnl {
|
| 31 |
+
namespace impl {
|
| 32 |
+
namespace cpu {
|
| 33 |
+
|
| 34 |
+
class cpu_memory_storage_t : public memory_storage_t {
|
| 35 |
+
public:
|
| 36 |
+
cpu_memory_storage_t(engine_t *engine)
|
| 37 |
+
: memory_storage_t(engine), data_(nullptr, release) {}
|
| 38 |
+
|
| 39 |
+
status_t get_data_handle(void **handle) const override {
|
| 40 |
+
*handle = data_.get();
|
| 41 |
+
return status::success;
|
| 42 |
+
}
|
| 43 |
+
|
| 44 |
+
status_t set_data_handle(void *handle) override {
|
| 45 |
+
data_ = decltype(data_)(handle, release);
|
| 46 |
+
return status::success;
|
| 47 |
+
}
|
| 48 |
+
|
| 49 |
+
status_t map_data(
|
| 50 |
+
void **mapped_ptr, stream_t *stream, size_t size) const override {
|
| 51 |
+
UNUSED(size);
|
| 52 |
+
// This function is called for non-SYCL CPU engines only, where the
|
| 53 |
+
// runtime_kind is constant for a specific build, and engine_kind is
|
| 54 |
+
// only cpu. However, at the same time, the stream engine and memory
|
| 55 |
+
// object engine may have different memory locations. Therefore, at
|
| 56 |
+
// most, we need to ensure that the indexes of these engines are
|
| 57 |
+
// identical.
|
| 58 |
+
if (stream != nullptr && stream->engine()->index() != engine()->index())
|
| 59 |
+
return status::invalid_arguments;
|
| 60 |
+
return get_data_handle(mapped_ptr);
|
| 61 |
+
}
|
| 62 |
+
|
| 63 |
+
status_t unmap_data(void *mapped_ptr, stream_t *stream) const override {
|
| 64 |
+
UNUSED(mapped_ptr);
|
| 65 |
+
if (stream != nullptr && stream->engine()->index() != engine()->index())
|
| 66 |
+
return status::invalid_arguments;
|
| 67 |
+
return status::success;
|
| 68 |
+
}
|
| 69 |
+
|
| 70 |
+
bool is_host_accessible() const override { return true; }
|
| 71 |
+
|
| 72 |
+
std::unique_ptr<memory_storage_t> get_sub_storage(
|
| 73 |
+
size_t offset, size_t size) const override {
|
| 74 |
+
void *sub_ptr = reinterpret_cast<uint8_t *>(data_.get()) + offset;
|
| 75 |
+
auto sub_storage = new cpu_memory_storage_t(this->engine());
|
| 76 |
+
sub_storage->init(memory_flags_t::use_runtime_ptr, size, sub_ptr);
|
| 77 |
+
return std::unique_ptr<memory_storage_t>(sub_storage);
|
| 78 |
+
}
|
| 79 |
+
|
| 80 |
+
std::unique_ptr<memory_storage_t> clone() const override {
|
| 81 |
+
auto storage = new cpu_memory_storage_t(engine());
|
| 82 |
+
if (storage)
|
| 83 |
+
storage->init(memory_flags_t::use_runtime_ptr, 0, data_.get());
|
| 84 |
+
return std::unique_ptr<memory_storage_t>(storage);
|
| 85 |
+
}
|
| 86 |
+
|
| 87 |
+
protected:
|
| 88 |
+
status_t init_allocate(size_t size) override {
|
| 89 |
+
void *ptr = malloc(size, platform::get_cache_line_size());
|
| 90 |
+
if (!ptr) return status::out_of_memory;
|
| 91 |
+
data_ = decltype(data_)(ptr, destroy);
|
| 92 |
+
return status::success;
|
| 93 |
+
}
|
| 94 |
+
|
| 95 |
+
private:
|
| 96 |
+
std::unique_ptr<void, void (*)(void *)> data_;
|
| 97 |
+
|
| 98 |
+
DNNL_DISALLOW_COPY_AND_ASSIGN(cpu_memory_storage_t);
|
| 99 |
+
|
| 100 |
+
static void release(void *ptr) {}
|
| 101 |
+
static void destroy(void *ptr) { free(ptr); }
|
| 102 |
+
};
|
| 103 |
+
|
| 104 |
+
} // namespace cpu
|
| 105 |
+
} // namespace impl
|
| 106 |
+
} // namespace dnnl
|
| 107 |
+
|
| 108 |
+
#endif
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/cpu_pooling_pd.hpp
ADDED
|
@@ -0,0 +1,41 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*******************************************************************************
|
| 2 |
+
* Copyright 2016-2020 Intel Corporation
|
| 3 |
+
*
|
| 4 |
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
* you may not use this file except in compliance with the License.
|
| 6 |
+
* You may obtain a copy of the License at
|
| 7 |
+
*
|
| 8 |
+
* http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
*
|
| 10 |
+
* Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
* See the License for the specific language governing permissions and
|
| 14 |
+
* limitations under the License.
|
| 15 |
+
*******************************************************************************/
|
| 16 |
+
|
| 17 |
+
#ifndef CPU_CPU_POOLING_PD_HPP
|
| 18 |
+
#define CPU_CPU_POOLING_PD_HPP
|
| 19 |
+
|
| 20 |
+
#include "common/pooling_pd.hpp"
|
| 21 |
+
#include "cpu/cpu_engine.hpp"
|
| 22 |
+
|
| 23 |
+
namespace dnnl {
|
| 24 |
+
namespace impl {
|
| 25 |
+
namespace cpu {
|
| 26 |
+
|
| 27 |
+
struct cpu_pooling_fwd_pd_t : public pooling_fwd_pd_t {
|
| 28 |
+
using pooling_fwd_pd_t::pooling_fwd_pd_t;
|
| 29 |
+
};
|
| 30 |
+
|
| 31 |
+
struct cpu_pooling_bwd_pd_t : public pooling_bwd_pd_t {
|
| 32 |
+
using pooling_bwd_pd_t::pooling_bwd_pd_t;
|
| 33 |
+
};
|
| 34 |
+
|
| 35 |
+
} // namespace cpu
|
| 36 |
+
} // namespace impl
|
| 37 |
+
} // namespace dnnl
|
| 38 |
+
|
| 39 |
+
#endif
|
| 40 |
+
|
| 41 |
+
// vim: et ts=4 sw=4 cindent cino+=l0,\:4,N-s
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/cpu_prelu_pd.hpp
ADDED
|
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*******************************************************************************
|
| 2 |
+
* Copyright 2020 Intel Corporation
|
| 3 |
+
*
|
| 4 |
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
* you may not use this file except in compliance with the License.
|
| 6 |
+
* You may obtain a copy of the License at
|
| 7 |
+
*
|
| 8 |
+
* http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
*
|
| 10 |
+
* Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
* See the License for the specific language governing permissions and
|
| 14 |
+
* limitations under the License.
|
| 15 |
+
*******************************************************************************/
|
| 16 |
+
|
| 17 |
+
#ifndef CPU_CPU_PRELU_PD_HPP
|
| 18 |
+
#define CPU_CPU_PRELU_PD_HPP
|
| 19 |
+
|
| 20 |
+
#include <assert.h>
|
| 21 |
+
|
| 22 |
+
#include "common/prelu_pd.hpp"
|
| 23 |
+
#include "cpu/cpu_engine.hpp"
|
| 24 |
+
|
| 25 |
+
namespace dnnl {
|
| 26 |
+
namespace impl {
|
| 27 |
+
namespace cpu {
|
| 28 |
+
|
| 29 |
+
struct cpu_prelu_fwd_pd_t : public prelu_fwd_pd_t {
|
| 30 |
+
using prelu_fwd_pd_t::prelu_fwd_pd_t;
|
| 31 |
+
};
|
| 32 |
+
|
| 33 |
+
struct cpu_prelu_bwd_pd_t : public prelu_bwd_pd_t {
|
| 34 |
+
using prelu_bwd_pd_t::prelu_bwd_pd_t;
|
| 35 |
+
};
|
| 36 |
+
|
| 37 |
+
} // namespace cpu
|
| 38 |
+
} // namespace impl
|
| 39 |
+
} // namespace dnnl
|
| 40 |
+
|
| 41 |
+
#endif
|
| 42 |
+
|
| 43 |
+
// vim: et ts=4 sw=4 cindent cino+=l0,\:4,N-s
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/cpu_primitive.hpp
ADDED
|
@@ -0,0 +1,133 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*******************************************************************************
|
| 2 |
+
* Copyright 2019-2022 Intel Corporation
|
| 3 |
+
*
|
| 4 |
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
* you may not use this file except in compliance with the License.
|
| 6 |
+
* You may obtain a copy of the License at
|
| 7 |
+
*
|
| 8 |
+
* http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
*
|
| 10 |
+
* Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
* See the License for the specific language governing permissions and
|
| 14 |
+
* limitations under the License.
|
| 15 |
+
*******************************************************************************/
|
| 16 |
+
|
| 17 |
+
#ifndef CPU_CPU_PRIMITIVE_HPP
|
| 18 |
+
#define CPU_CPU_PRIMITIVE_HPP
|
| 19 |
+
|
| 20 |
+
#include <assert.h>
|
| 21 |
+
|
| 22 |
+
#include "oneapi/dnnl/dnnl_types.h"
|
| 23 |
+
|
| 24 |
+
#include "common/c_types_map.hpp"
|
| 25 |
+
#include "common/primitive_attr.hpp"
|
| 26 |
+
#include "common/primitive_exec_types.hpp"
|
| 27 |
+
#include "common/utils.hpp"
|
| 28 |
+
#include "common/z_magic.hpp"
|
| 29 |
+
|
| 30 |
+
#define DEFINE_SCALES_BUFFER_ATTR_ARG(attr, scales, arg) \
|
| 31 |
+
alignas(16) float CONCAT2(scales, _buf16)[16] = {0}; \
|
| 32 |
+
const float *scales {nullptr}; \
|
| 33 |
+
if ((attr)) { \
|
| 34 |
+
if ((attr)->output_scales_.has_default_values()) { \
|
| 35 |
+
utils::array_set(CONCAT2(scales, _buf16), 1.0f, 16); \
|
| 36 |
+
scales = CONCAT2(scales, _buf16); \
|
| 37 |
+
} else { \
|
| 38 |
+
scales = CTX_IN_MEM(const float *, arg); \
|
| 39 |
+
if (scales == nullptr) return status::invalid_arguments; \
|
| 40 |
+
const auto scales_d = ctx.memory_mdw(arg); \
|
| 41 |
+
bool ok = scales_d.data_type() == data_type::f32 \
|
| 42 |
+
&& scales_d.ndims() == 1; \
|
| 43 |
+
if (!ok) return status::invalid_arguments; \
|
| 44 |
+
if (scales_d.dims()[0] == 1) { \
|
| 45 |
+
utils::array_set(CONCAT2(scales, _buf16), scales[0], 16); \
|
| 46 |
+
scales = CONCAT2(scales, _buf16); \
|
| 47 |
+
} \
|
| 48 |
+
} \
|
| 49 |
+
} \
|
| 50 |
+
MAYBE_UNUSED(scales);
|
| 51 |
+
|
| 52 |
+
#define DEFINE_SCALES_BUFFER_ATTR(attr, scales) \
|
| 53 |
+
DEFINE_SCALES_BUFFER_ATTR_ARG(attr, scales, DNNL_ARG_ATTR_OUTPUT_SCALES);
|
| 54 |
+
|
| 55 |
+
#define DEFINE_SCALES_BUFFER(scales) \
|
| 56 |
+
DEFINE_SCALES_BUFFER_ATTR(pd()->attr(), scales)
|
| 57 |
+
|
| 58 |
+
#define DEFINE_ARG_SCALES_BUFFER_ATTR(attr, scales, arg) \
|
| 59 |
+
alignas(16) float CONCAT2(scales, _buf16)[16] = {0}; \
|
| 60 |
+
const float *scales {nullptr}; \
|
| 61 |
+
if ((attr)) { \
|
| 62 |
+
if ((attr)->scales_.get(arg).has_default_values()) { \
|
| 63 |
+
utils::array_set(CONCAT2(scales, _buf16), 1.0f, 16); \
|
| 64 |
+
scales = CONCAT2(scales, _buf16); \
|
| 65 |
+
} else { \
|
| 66 |
+
scales = CTX_IN_MEM(const float *, DNNL_ARG_ATTR_SCALES | arg); \
|
| 67 |
+
if (scales == nullptr) return status::invalid_arguments; \
|
| 68 |
+
const auto scales_d = ctx.memory_mdw(DNNL_ARG_ATTR_SCALES | arg); \
|
| 69 |
+
bool ok = scales_d.data_type() == data_type::f32 \
|
| 70 |
+
&& scales_d.ndims() == 1; \
|
| 71 |
+
if (!ok) return status::invalid_arguments; \
|
| 72 |
+
if (scales_d.dims()[0] == 1) { \
|
| 73 |
+
if (utils::one_of(arg, DNNL_ARG_DST, \
|
| 74 |
+
DNNL_ARG_ATTR_POST_OP_DW | DNNL_ARG_DST)) { \
|
| 75 |
+
utils::array_set( \
|
| 76 |
+
CONCAT2(scales, _buf16), 1.f / scales[0], 16); \
|
| 77 |
+
} else { \
|
| 78 |
+
utils::array_set(CONCAT2(scales, _buf16), scales[0], 16); \
|
| 79 |
+
} \
|
| 80 |
+
scales = CONCAT2(scales, _buf16); \
|
| 81 |
+
} \
|
| 82 |
+
} \
|
| 83 |
+
} \
|
| 84 |
+
MAYBE_UNUSED(scales);
|
| 85 |
+
|
| 86 |
+
#define DEFINE_ARG_SCALES_BUFFER(scales, arg) \
|
| 87 |
+
DEFINE_ARG_SCALES_BUFFER_ATTR(pd()->attr(), scales, arg)
|
| 88 |
+
|
| 89 |
+
#define DEFINE_ZERO_POINTS_BUFFER(zero_points_ptr, mem_arg) \
|
| 90 |
+
int32_t CONCAT2(default_zero_point_, mem_arg) = 0; \
|
| 91 |
+
const int32_t *zero_points_ptr \
|
| 92 |
+
= pd()->attr()->zero_points_.defined(mem_arg) \
|
| 93 |
+
? &CONCAT2(default_zero_point_, mem_arg) \
|
| 94 |
+
: CTX_IN_MEM( \
|
| 95 |
+
const int32_t *, DNNL_ARG_ATTR_ZERO_POINTS | mem_arg); \
|
| 96 |
+
if (zero_points_ptr == nullptr) return status::invalid_arguments; \
|
| 97 |
+
MAYBE_UNUSED(zero_points_ptr);
|
| 98 |
+
|
| 99 |
+
#define ASSIGN_ARG_SCALE_VALUE(scale, mem_arg) \
|
| 100 |
+
alignas(16) float CONCAT2(CONCAT2(scales, _buf16), mem_arg)[16] = {0}; \
|
| 101 |
+
if (pd()->attr()->scales_.get(mem_arg).has_default_values()) { \
|
| 102 |
+
utils::array_set(CONCAT2(CONCAT2(scales, _buf16), mem_arg), 1.0f, 16); \
|
| 103 |
+
scale = CONCAT2(CONCAT2(scales, _buf16), mem_arg); \
|
| 104 |
+
} else { \
|
| 105 |
+
const auto scale_d = ctx.memory_mdw(DNNL_ARG_ATTR_SCALES | mem_arg); \
|
| 106 |
+
bool ok = scale_d.data_type() == data_type::f32 \
|
| 107 |
+
&& scale_d.ndims() == 1 && scale_d.dims()[0] == 1; \
|
| 108 |
+
if (!ok) return status::invalid_arguments; \
|
| 109 |
+
const float *scale_p \
|
| 110 |
+
= CTX_IN_MEM(const float *, DNNL_ARG_ATTR_SCALES | mem_arg); \
|
| 111 |
+
if (scale_p == nullptr) return status::invalid_arguments; \
|
| 112 |
+
scale = scale_p; \
|
| 113 |
+
}
|
| 114 |
+
|
| 115 |
+
#define DEFINE_ZERO_POINT_VALUE_ATTR(attr, zero_point, mem_arg) \
|
| 116 |
+
int32_t zero_point = 0; \
|
| 117 |
+
if (!attr->zero_points_.has_default_values(mem_arg)) { \
|
| 118 |
+
const auto zero_points_d \
|
| 119 |
+
= ctx.memory_mdw(DNNL_ARG_ATTR_ZERO_POINTS | mem_arg); \
|
| 120 |
+
bool ok = zero_points_d.data_type() == data_type::s32 \
|
| 121 |
+
&& zero_points_d.ndims() == 1 && zero_points_d.dims()[0] == 1; \
|
| 122 |
+
if (!ok) return status::invalid_arguments; \
|
| 123 |
+
const int32_t *zero_points_ptr = CTX_IN_MEM( \
|
| 124 |
+
const int32_t *, DNNL_ARG_ATTR_ZERO_POINTS | mem_arg); \
|
| 125 |
+
if (zero_points_ptr == nullptr) return status::invalid_arguments; \
|
| 126 |
+
zero_point = *zero_points_ptr; \
|
| 127 |
+
} \
|
| 128 |
+
MAYBE_UNUSED(zero_point);
|
| 129 |
+
|
| 130 |
+
#define DEFINE_ZERO_POINT_VALUE(zero_point, mem_arg) \
|
| 131 |
+
DEFINE_ZERO_POINT_VALUE_ATTR(pd()->attr(), zero_point, mem_arg)
|
| 132 |
+
|
| 133 |
+
#endif // CPU_CPU_PRIMITIVE_HPP
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/cpu_reduction_pd.hpp
ADDED
|
@@ -0,0 +1,34 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*******************************************************************************
|
| 2 |
+
* Copyright 2020 Intel Corporation
|
| 3 |
+
*
|
| 4 |
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
* you may not use this file except in compliance with the License.
|
| 6 |
+
* You may obtain a copy of the License at
|
| 7 |
+
*
|
| 8 |
+
* http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
*
|
| 10 |
+
* Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
* See the License for the specific language governing permissions and
|
| 14 |
+
* limitations under the License.
|
| 15 |
+
*******************************************************************************/
|
| 16 |
+
|
| 17 |
+
#ifndef CPU_REDUCTION_PD_HPP
|
| 18 |
+
#define CPU_REDUCTION_PD_HPP
|
| 19 |
+
|
| 20 |
+
#include "common/reduction_pd.hpp"
|
| 21 |
+
|
| 22 |
+
namespace dnnl {
|
| 23 |
+
namespace impl {
|
| 24 |
+
namespace cpu {
|
| 25 |
+
|
| 26 |
+
struct cpu_reduction_pd_t : public reduction_pd_t {
|
| 27 |
+
using reduction_pd_t::reduction_pd_t;
|
| 28 |
+
};
|
| 29 |
+
|
| 30 |
+
} // namespace cpu
|
| 31 |
+
} // namespace impl
|
| 32 |
+
} // namespace dnnl
|
| 33 |
+
|
| 34 |
+
#endif
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/cpu_resampling_pd.hpp
ADDED
|
@@ -0,0 +1,41 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*******************************************************************************
|
| 2 |
+
* Copyright 2019-2020 Intel Corporation
|
| 3 |
+
*
|
| 4 |
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
* you may not use this file except in compliance with the License.
|
| 6 |
+
* You may obtain a copy of the License at
|
| 7 |
+
*
|
| 8 |
+
* http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
*
|
| 10 |
+
* Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
* See the License for the specific language governing permissions and
|
| 14 |
+
* limitations under the License.
|
| 15 |
+
*******************************************************************************/
|
| 16 |
+
|
| 17 |
+
#ifndef CPU_CPU_RESAMPLING_PD_HPP
|
| 18 |
+
#define CPU_CPU_RESAMPLING_PD_HPP
|
| 19 |
+
|
| 20 |
+
#include "common/resampling_pd.hpp"
|
| 21 |
+
|
| 22 |
+
#include "cpu/cpu_engine.hpp"
|
| 23 |
+
|
| 24 |
+
namespace dnnl {
|
| 25 |
+
namespace impl {
|
| 26 |
+
namespace cpu {
|
| 27 |
+
|
| 28 |
+
struct cpu_resampling_fwd_pd_t : public resampling_fwd_pd_t {
|
| 29 |
+
using resampling_fwd_pd_t::resampling_fwd_pd_t;
|
| 30 |
+
};
|
| 31 |
+
|
| 32 |
+
struct cpu_resampling_bwd_pd_t : public resampling_bwd_pd_t {
|
| 33 |
+
using resampling_bwd_pd_t::resampling_bwd_pd_t;
|
| 34 |
+
};
|
| 35 |
+
} // namespace cpu
|
| 36 |
+
} // namespace impl
|
| 37 |
+
} // namespace dnnl
|
| 38 |
+
|
| 39 |
+
#endif
|
| 40 |
+
|
| 41 |
+
// vim: et ts=4 sw=4 cindent cino^=l0,\:0,N-s
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/cpu_shuffle_pd.hpp
ADDED
|
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*******************************************************************************
|
| 2 |
+
* Copyright 2018-2020 Intel Corporation
|
| 3 |
+
*
|
| 4 |
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
* you may not use this file except in compliance with the License.
|
| 6 |
+
* You may obtain a copy of the License at
|
| 7 |
+
*
|
| 8 |
+
* http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
*
|
| 10 |
+
* Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
* See the License for the specific language governing permissions and
|
| 14 |
+
* limitations under the License.
|
| 15 |
+
*******************************************************************************/
|
| 16 |
+
|
| 17 |
+
#ifndef CPU_CPU_SHUFFLE_PD_HPP
|
| 18 |
+
#define CPU_CPU_SHUFFLE_PD_HPP
|
| 19 |
+
|
| 20 |
+
#include <assert.h>
|
| 21 |
+
|
| 22 |
+
#include "common/c_types_map.hpp"
|
| 23 |
+
#include "common/shuffle_pd.hpp"
|
| 24 |
+
#include "common/type_helpers.hpp"
|
| 25 |
+
#include "common/utils.hpp"
|
| 26 |
+
#include "cpu/cpu_engine.hpp"
|
| 27 |
+
|
| 28 |
+
namespace dnnl {
|
| 29 |
+
namespace impl {
|
| 30 |
+
namespace cpu {
|
| 31 |
+
|
| 32 |
+
struct cpu_shuffle_pd_t : public shuffle_pd_t {
|
| 33 |
+
using shuffle_pd_t::shuffle_pd_t;
|
| 34 |
+
};
|
| 35 |
+
|
| 36 |
+
} // namespace cpu
|
| 37 |
+
} // namespace impl
|
| 38 |
+
} // namespace dnnl
|
| 39 |
+
|
| 40 |
+
#endif
|
| 41 |
+
|
| 42 |
+
// vim: et ts=4 sw=4 cindent cino+=l0,\:4,N-s
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/cpu_softmax_pd.hpp
ADDED
|
@@ -0,0 +1,46 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*******************************************************************************
|
| 2 |
+
* Copyright 2016-2020 Intel Corporation
|
| 3 |
+
*
|
| 4 |
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
* you may not use this file except in compliance with the License.
|
| 6 |
+
* You may obtain a copy of the License at
|
| 7 |
+
*
|
| 8 |
+
* http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
*
|
| 10 |
+
* Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
* See the License for the specific language governing permissions and
|
| 14 |
+
* limitations under the License.
|
| 15 |
+
*******************************************************************************/
|
| 16 |
+
|
| 17 |
+
#ifndef CPU_CPU_SOFTMAX_PD_HPP
|
| 18 |
+
#define CPU_CPU_SOFTMAX_PD_HPP
|
| 19 |
+
|
| 20 |
+
#include <assert.h>
|
| 21 |
+
|
| 22 |
+
#include "common/c_types_map.hpp"
|
| 23 |
+
#include "common/softmax_pd.hpp"
|
| 24 |
+
#include "common/type_helpers.hpp"
|
| 25 |
+
#include "common/utils.hpp"
|
| 26 |
+
#include "cpu/cpu_engine.hpp"
|
| 27 |
+
|
| 28 |
+
namespace dnnl {
|
| 29 |
+
namespace impl {
|
| 30 |
+
namespace cpu {
|
| 31 |
+
|
| 32 |
+
struct cpu_softmax_fwd_pd_t : public softmax_fwd_pd_t {
|
| 33 |
+
using softmax_fwd_pd_t::softmax_fwd_pd_t;
|
| 34 |
+
};
|
| 35 |
+
|
| 36 |
+
struct cpu_softmax_bwd_pd_t : public softmax_bwd_pd_t {
|
| 37 |
+
using softmax_bwd_pd_t::softmax_bwd_pd_t;
|
| 38 |
+
};
|
| 39 |
+
|
| 40 |
+
} // namespace cpu
|
| 41 |
+
} // namespace impl
|
| 42 |
+
} // namespace dnnl
|
| 43 |
+
|
| 44 |
+
#endif
|
| 45 |
+
|
| 46 |
+
// vim: et ts=4 sw=4 cindent cino+=l0,\:4,N-s
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/cpu_stream.hpp
ADDED
|
@@ -0,0 +1,64 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*******************************************************************************
|
| 2 |
+
* Copyright 2019-2020 Intel Corporation
|
| 3 |
+
*
|
| 4 |
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
* you may not use this file except in compliance with the License.
|
| 6 |
+
* You may obtain a copy of the License at
|
| 7 |
+
*
|
| 8 |
+
* http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
*
|
| 10 |
+
* Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
* See the License for the specific language governing permissions and
|
| 14 |
+
* limitations under the License.
|
| 15 |
+
*******************************************************************************/
|
| 16 |
+
|
| 17 |
+
#ifndef CPU_CPU_STREAM_HPP
|
| 18 |
+
#define CPU_CPU_STREAM_HPP
|
| 19 |
+
|
| 20 |
+
#include "oneapi/dnnl/dnnl_config.h"
|
| 21 |
+
|
| 22 |
+
#if DNNL_CPU_RUNTIME == DNNL_RUNTIME_THREADPOOL
|
| 23 |
+
#include "oneapi/dnnl/dnnl_threadpool_iface.hpp"
|
| 24 |
+
#endif
|
| 25 |
+
|
| 26 |
+
#include "common/c_types_map.hpp"
|
| 27 |
+
#include "common/dnnl_thread.hpp"
|
| 28 |
+
#include "common/stream.hpp"
|
| 29 |
+
|
| 30 |
+
namespace dnnl {
|
| 31 |
+
namespace impl {
|
| 32 |
+
namespace cpu {
|
| 33 |
+
|
| 34 |
+
struct cpu_stream_t : public stream_t {
|
| 35 |
+
cpu_stream_t(engine_t *engine, unsigned flags) : stream_t(engine, flags) {}
|
| 36 |
+
virtual ~cpu_stream_t() = default;
|
| 37 |
+
|
| 38 |
+
dnnl::impl::status_t wait() override {
|
| 39 |
+
// CPU execution is synchronous so return immediately
|
| 40 |
+
return dnnl::impl::status::success;
|
| 41 |
+
}
|
| 42 |
+
|
| 43 |
+
#if DNNL_CPU_RUNTIME == DNNL_RUNTIME_THREADPOOL
|
| 44 |
+
cpu_stream_t(engine_t *engine,
|
| 45 |
+
dnnl::threadpool_interop::threadpool_iface *threadpool)
|
| 46 |
+
: stream_t(engine, threadpool) {}
|
| 47 |
+
|
| 48 |
+
void before_exec_hook() override {
|
| 49 |
+
dnnl::threadpool_interop::threadpool_iface *tp;
|
| 50 |
+
auto rc = this->get_threadpool(&tp);
|
| 51 |
+
if (rc == status::success) threadpool_utils::activate_threadpool(tp);
|
| 52 |
+
}
|
| 53 |
+
|
| 54 |
+
void after_exec_hook() override {
|
| 55 |
+
threadpool_utils::deactivate_threadpool();
|
| 56 |
+
}
|
| 57 |
+
#endif
|
| 58 |
+
};
|
| 59 |
+
|
| 60 |
+
} // namespace cpu
|
| 61 |
+
} // namespace impl
|
| 62 |
+
} // namespace dnnl
|
| 63 |
+
|
| 64 |
+
#endif
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/cpu_sum_pd.hpp
ADDED
|
@@ -0,0 +1,40 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*******************************************************************************
|
| 2 |
+
* Copyright 2016-2020 Intel Corporation
|
| 3 |
+
*
|
| 4 |
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
* you may not use this file except in compliance with the License.
|
| 6 |
+
* You may obtain a copy of the License at
|
| 7 |
+
*
|
| 8 |
+
* http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
*
|
| 10 |
+
* Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
* See the License for the specific language governing permissions and
|
| 14 |
+
* limitations under the License.
|
| 15 |
+
*******************************************************************************/
|
| 16 |
+
|
| 17 |
+
#ifndef CPU_CPU_SUM_PD_HPP
|
| 18 |
+
#define CPU_CPU_SUM_PD_HPP
|
| 19 |
+
|
| 20 |
+
#include "common/c_types_map.hpp"
|
| 21 |
+
#include "common/sum_pd.hpp"
|
| 22 |
+
#include "common/type_helpers.hpp"
|
| 23 |
+
#include "common/utils.hpp"
|
| 24 |
+
#include "cpu/cpu_engine.hpp"
|
| 25 |
+
|
| 26 |
+
namespace dnnl {
|
| 27 |
+
namespace impl {
|
| 28 |
+
namespace cpu {
|
| 29 |
+
|
| 30 |
+
struct cpu_sum_pd_t : public sum_pd_t {
|
| 31 |
+
using sum_pd_t::sum_pd_t;
|
| 32 |
+
};
|
| 33 |
+
|
| 34 |
+
} // namespace cpu
|
| 35 |
+
} // namespace impl
|
| 36 |
+
} // namespace dnnl
|
| 37 |
+
|
| 38 |
+
#endif
|
| 39 |
+
|
| 40 |
+
// vim: et ts=4 sw=4 cindent cino+=l0,\:4,N-s
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/dw_convolution_utils.hpp
ADDED
|
@@ -0,0 +1,131 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*******************************************************************************
|
| 2 |
+
* Copyright 2020-2023 Intel Corporation
|
| 3 |
+
*
|
| 4 |
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
* you may not use this file except in compliance with the License.
|
| 6 |
+
* You may obtain a copy of the License at
|
| 7 |
+
*
|
| 8 |
+
* http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
*
|
| 10 |
+
* Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
* See the License for the specific language governing permissions and
|
| 14 |
+
* limitations under the License.
|
| 15 |
+
*******************************************************************************/
|
| 16 |
+
|
| 17 |
+
#ifndef CPU_DW_CONVOLUTION_UTILS_HPP
|
| 18 |
+
#define CPU_DW_CONVOLUTION_UTILS_HPP
|
| 19 |
+
|
| 20 |
+
#include "common/c_types_map.hpp"
|
| 21 |
+
#include "common/convolution_pd.hpp"
|
| 22 |
+
#include "common/primitive_desc_iterator.hpp"
|
| 23 |
+
#include "common/type_helpers.hpp"
|
| 24 |
+
#include "common/utils.hpp"
|
| 25 |
+
|
| 26 |
+
namespace dnnl {
|
| 27 |
+
namespace impl {
|
| 28 |
+
namespace cpu {
|
| 29 |
+
|
| 30 |
+
inline status_t get_depthwise_conv_desc(convolution_desc_t &cd_dw,
|
| 31 |
+
const memory_desc_t &src_dw_md, const primitive_attr_t &attr_1x1,
|
| 32 |
+
primitive_attr_t &attr_dw, int dw_po_index) {
|
| 33 |
+
|
| 34 |
+
const memory_desc_wrapper src_dw_d(src_dw_md);
|
| 35 |
+
const int ndims = src_dw_d.ndims();
|
| 36 |
+
if (ndims != 4) return status::unimplemented;
|
| 37 |
+
|
| 38 |
+
if (dw_po_index == -1 || dw_po_index >= attr_1x1.post_ops_.len()
|
| 39 |
+
|| !attr_1x1.post_ops_.entry_[dw_po_index].is_convolution())
|
| 40 |
+
return status::invalid_arguments;
|
| 41 |
+
|
| 42 |
+
// Create new attributes with scales from depthwise post-op and copy
|
| 43 |
+
// post-ops after depthwise post-op.
|
| 44 |
+
auto &dw_po = attr_1x1.post_ops_.entry_[dw_po_index].depthwise_conv;
|
| 45 |
+
|
| 46 |
+
// erase 1x1 conv scales
|
| 47 |
+
for (auto arg : {DNNL_ARG_SRC, DNNL_ARG_WEIGHTS, DNNL_ARG_DST}) {
|
| 48 |
+
auto &scale = attr_dw.scales_.get(arg);
|
| 49 |
+
if (!scale.has_default_values()) attr_dw.scales_.reset(arg);
|
| 50 |
+
}
|
| 51 |
+
|
| 52 |
+
const auto &dw_src_scales = attr_1x1.scales_.get(DNNL_ARG_DST);
|
| 53 |
+
const auto &dw_wei_scales
|
| 54 |
+
= attr_1x1.scales_.get(DNNL_ARG_ATTR_POST_OP_DW | DNNL_ARG_WEIGHTS);
|
| 55 |
+
const auto &dw_dst_scales
|
| 56 |
+
= attr_1x1.scales_.get(DNNL_ARG_ATTR_POST_OP_DW | DNNL_ARG_DST);
|
| 57 |
+
if (!dw_src_scales.has_default_values())
|
| 58 |
+
attr_dw.scales_.set(DNNL_ARG_SRC, dw_src_scales.mask_);
|
| 59 |
+
if (!dw_wei_scales.has_default_values())
|
| 60 |
+
attr_dw.scales_.set(DNNL_ARG_WEIGHTS, dw_wei_scales.mask_);
|
| 61 |
+
if (!dw_dst_scales.has_default_values())
|
| 62 |
+
attr_dw.scales_.set(DNNL_ARG_DST, dw_dst_scales.mask_);
|
| 63 |
+
|
| 64 |
+
auto dw_po_len = attr_1x1.post_ops_.len() - (dw_po_index + 1);
|
| 65 |
+
attr_dw.post_ops_.entry_.resize(dw_po_len);
|
| 66 |
+
for (int i = 0; i < dw_po_len; ++i) {
|
| 67 |
+
attr_dw.post_ops_.entry_[i].copy_from(
|
| 68 |
+
attr_1x1.post_ops_.entry_[i + dw_po_index + 1]);
|
| 69 |
+
}
|
| 70 |
+
|
| 71 |
+
attr_dw.scratchpad_mode_ = attr_1x1.scratchpad_mode_;
|
| 72 |
+
|
| 73 |
+
const bool with_bias = dw_po.bias_dt != data_type::undef;
|
| 74 |
+
|
| 75 |
+
const auto n = src_dw_d.dims()[0];
|
| 76 |
+
const auto oc = src_dw_d.dims()[1];
|
| 77 |
+
const auto g = src_dw_d.dims()[1];
|
| 78 |
+
const auto ih = src_dw_d.dims()[ndims - 2];
|
| 79 |
+
const auto iw = src_dw_d.dims()[ndims - 1];
|
| 80 |
+
const auto kernel = dw_po.kernel;
|
| 81 |
+
const auto stride = dw_po.stride;
|
| 82 |
+
const auto padding = dw_po.padding;
|
| 83 |
+
|
| 84 |
+
const dims_t weights_tz = {g, 1, 1, kernel, kernel};
|
| 85 |
+
|
| 86 |
+
// Not following standard convolution formula for output shapes since
|
| 87 |
+
// right/top padding might be greated than left/top one.
|
| 88 |
+
const dim_t oh = utils::div_up(ih, stride);
|
| 89 |
+
const dim_t ow = utils::div_up(iw, stride);
|
| 90 |
+
const dims_t dst_tz = {n, oc, oh, ow};
|
| 91 |
+
|
| 92 |
+
const dims_t bias_tz = {oc};
|
| 93 |
+
const dims_t pad_tz = {padding, padding};
|
| 94 |
+
const dims_t stride_tz = {stride, stride};
|
| 95 |
+
|
| 96 |
+
const dim_t pad_h_r = (oh - 1) * stride - ih + kernel - padding;
|
| 97 |
+
const dim_t pad_w_r = (ow - 1) * stride - iw + kernel - padding;
|
| 98 |
+
const dims_t pad_r_tz = {pad_h_r, pad_w_r};
|
| 99 |
+
|
| 100 |
+
memory_desc_t src_md, weights_md, bias_md, dst_md;
|
| 101 |
+
|
| 102 |
+
const auto src_dw_tag = src_dw_d.matches_one_of_tag(
|
| 103 |
+
format_tag::nChw16c, format_tag::nChw8c, format_tag::nhwc);
|
| 104 |
+
const auto data_tag
|
| 105 |
+
= (src_dw_tag == format_tag::undef) ? format_tag::any : src_dw_tag;
|
| 106 |
+
|
| 107 |
+
memory_desc_init_by_tag(
|
| 108 |
+
src_md, ndims, src_dw_md.dims, src_dw_md.data_type, data_tag);
|
| 109 |
+
|
| 110 |
+
memory_desc_init_by_tag(
|
| 111 |
+
weights_md, ndims + 1, weights_tz, dw_po.wei_dt, format_tag::any);
|
| 112 |
+
|
| 113 |
+
if (with_bias)
|
| 114 |
+
memory_desc_init_by_tag(
|
| 115 |
+
bias_md, 1, bias_tz, dw_po.bias_dt, format_tag::a);
|
| 116 |
+
|
| 117 |
+
memory_desc_init_by_tag(dst_md, ndims, dst_tz, dw_po.dst_dt, data_tag);
|
| 118 |
+
|
| 119 |
+
CHECK(conv_desc_init(&cd_dw, prop_kind::forward_inference,
|
| 120 |
+
alg_kind::convolution_auto, &src_md, &weights_md,
|
| 121 |
+
with_bias ? &bias_md : nullptr, &dst_md, stride_tz, nullptr, pad_tz,
|
| 122 |
+
pad_r_tz));
|
| 123 |
+
|
| 124 |
+
return status::success;
|
| 125 |
+
}
|
| 126 |
+
|
| 127 |
+
} // namespace cpu
|
| 128 |
+
} // namespace impl
|
| 129 |
+
} // namespace dnnl
|
| 130 |
+
|
| 131 |
+
#endif
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/gemm_convolution.hpp
ADDED
|
@@ -0,0 +1,229 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*******************************************************************************
|
| 2 |
+
* Copyright 2016-2023 Intel Corporation
|
| 3 |
+
*
|
| 4 |
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
* you may not use this file except in compliance with the License.
|
| 6 |
+
* You may obtain a copy of the License at
|
| 7 |
+
*
|
| 8 |
+
* http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
*
|
| 10 |
+
* Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
* See the License for the specific language governing permissions and
|
| 14 |
+
* limitations under the License.
|
| 15 |
+
*******************************************************************************/
|
| 16 |
+
|
| 17 |
+
#ifndef CPU_GEMM_CONVOLUTION_HPP
|
| 18 |
+
#define CPU_GEMM_CONVOLUTION_HPP
|
| 19 |
+
|
| 20 |
+
#include "common/broadcast_strategy.hpp"
|
| 21 |
+
#include "common/c_types_map.hpp"
|
| 22 |
+
#include "common/memory_tracking.hpp"
|
| 23 |
+
#include "common/primitive.hpp"
|
| 24 |
+
|
| 25 |
+
#include "cpu/binary_injector_utils.hpp"
|
| 26 |
+
#include "cpu/cpu_convolution_pd.hpp"
|
| 27 |
+
#include "cpu/gemm/gemm.hpp"
|
| 28 |
+
#include "cpu/gemm_convolution_utils.hpp"
|
| 29 |
+
#include "cpu/primitive_attr_postops.hpp"
|
| 30 |
+
|
| 31 |
+
namespace dnnl {
|
| 32 |
+
namespace impl {
|
| 33 |
+
namespace cpu {
|
| 34 |
+
|
| 35 |
+
struct gemm_convolution_fwd_t : public primitive_t {
|
| 36 |
+
struct pd_t : public cpu_convolution_fwd_pd_t {
|
| 37 |
+
pd_t(const convolution_desc_t *adesc, const primitive_attr_t *attr,
|
| 38 |
+
const typename pd_t::base_class *hint_fwd_pd)
|
| 39 |
+
: cpu_convolution_fwd_pd_t(adesc, attr, hint_fwd_pd), jcp_() {}
|
| 40 |
+
|
| 41 |
+
DECLARE_COMMON_PD_T(
|
| 42 |
+
GEMM_IMPL_STR, gemm_convolution_fwd_t, USE_GLOBAL_SCRATCHPAD);
|
| 43 |
+
|
| 44 |
+
status_t init(engine_t *engine) {
|
| 45 |
+
using namespace data_type;
|
| 46 |
+
|
| 47 |
+
bool ok = is_fwd()
|
| 48 |
+
&& set_default_alg_kind(alg_kind::convolution_direct)
|
| 49 |
+
&& expect_data_types(f32, f32, f32, f32, f32)
|
| 50 |
+
&& !has_zero_dim_memory()
|
| 51 |
+
&& attr()->has_default_values(
|
| 52 |
+
primitive_attr_t::skip_mask_t::post_ops, f32)
|
| 53 |
+
&& post_ops_ok();
|
| 54 |
+
if (!ok) return status::unimplemented;
|
| 55 |
+
|
| 56 |
+
auto scratchpad = scratchpad_registry().registrar();
|
| 57 |
+
return jit_gemm_convolution_utils::init_conf(jcp_, scratchpad,
|
| 58 |
+
*desc(), src_md_, weights_md_, dst_md_, bias_md_, attr_,
|
| 59 |
+
dnnl_get_max_threads());
|
| 60 |
+
}
|
| 61 |
+
|
| 62 |
+
conv_gemm_conf_t jcp_;
|
| 63 |
+
|
| 64 |
+
protected:
|
| 65 |
+
bool post_ops_ok() const {
|
| 66 |
+
auto const &po = attr()->post_ops_;
|
| 67 |
+
auto is_sum_ok = [&](int idx) {
|
| 68 |
+
return IMPLICATION(po.entry_[idx].kind == primitive_kind::sum,
|
| 69 |
+
idx == 0 && po.entry_[idx].is_sum());
|
| 70 |
+
};
|
| 71 |
+
auto is_binary
|
| 72 |
+
= [&](int idx) { return po.entry_[idx].is_binary(); };
|
| 73 |
+
auto is_prelu = [&](int idx) { return po.entry_[idx].is_prelu(); };
|
| 74 |
+
auto is_binary_or_prelu_supported = [&](int idx) {
|
| 75 |
+
bool ok = dnnl::impl::get_rhs_arg_broadcasting_strategy(
|
| 76 |
+
binary_injector_utils::get_src1_desc(
|
| 77 |
+
po.entry_[idx], dst_md_),
|
| 78 |
+
dst_md_,
|
| 79 |
+
{broadcasting_strategy_t::scalar,
|
| 80 |
+
broadcasting_strategy_t::per_oc})
|
| 81 |
+
!= broadcasting_strategy_t::unsupported;
|
| 82 |
+
return ok;
|
| 83 |
+
};
|
| 84 |
+
|
| 85 |
+
if (!ref_post_ops_t::primitive_kind_ok(attr()->post_ops_))
|
| 86 |
+
return false;
|
| 87 |
+
|
| 88 |
+
for (int idx = 0; idx < po.len(); idx++) {
|
| 89 |
+
bool ok = is_sum_ok(idx)
|
| 90 |
+
&& IMPLICATION(is_binary(idx) || is_prelu(idx),
|
| 91 |
+
is_binary_or_prelu_supported(idx));
|
| 92 |
+
if (!ok) return false;
|
| 93 |
+
}
|
| 94 |
+
|
| 95 |
+
return true;
|
| 96 |
+
}
|
| 97 |
+
};
|
| 98 |
+
|
| 99 |
+
gemm_convolution_fwd_t(const pd_t *apd)
|
| 100 |
+
: primitive_t(apd), post_ops_(nullptr) {}
|
| 101 |
+
|
| 102 |
+
status_t init(engine_t *engine) override {
|
| 103 |
+
const data_t one = 1.0, zero = 0.0;
|
| 104 |
+
const auto &jcp = pd()->jcp_;
|
| 105 |
+
beta_ = jcp.with_sum ? one : zero;
|
| 106 |
+
|
| 107 |
+
if (jcp.with_eltwise || jcp.with_binary) {
|
| 108 |
+
CHECK(safe_ptr_assign(post_ops_, new ref_post_ops_t(jcp.post_ops)));
|
| 109 |
+
CHECK(post_ops_->init(pd()->dst_md()));
|
| 110 |
+
}
|
| 111 |
+
return status::success;
|
| 112 |
+
}
|
| 113 |
+
|
| 114 |
+
typedef typename prec_traits<data_type::f32>::type data_t;
|
| 115 |
+
|
| 116 |
+
status_t execute(const exec_ctx_t &ctx) const override {
|
| 117 |
+
bool is_nspc = pd()->jcp_.is_nspc;
|
| 118 |
+
return is_nspc ? execute_forward_nspc(ctx) : execute_forward_ncsp(ctx);
|
| 119 |
+
}
|
| 120 |
+
|
| 121 |
+
private:
|
| 122 |
+
status_t execute_forward_ncsp(const exec_ctx_t &ctx) const;
|
| 123 |
+
status_t execute_forward_nspc(const exec_ctx_t &ctx) const;
|
| 124 |
+
status_t execute_forward_thr_nspc(const exec_ctx_t &ctx, const int ithr,
|
| 125 |
+
const int nthr, const data_t *src_base, const data_t *wei_base,
|
| 126 |
+
const data_t *bia_base, data_t *dst_base,
|
| 127 |
+
const memory_tracking::grantor_t &scratchpad) const;
|
| 128 |
+
const pd_t *pd() const { return (const pd_t *)primitive_t::pd().get(); }
|
| 129 |
+
|
| 130 |
+
data_t beta_;
|
| 131 |
+
|
| 132 |
+
std::unique_ptr<ref_post_ops_t> post_ops_;
|
| 133 |
+
};
|
| 134 |
+
|
| 135 |
+
struct gemm_convolution_bwd_data_t : public primitive_t {
|
| 136 |
+
struct pd_t : public cpu_convolution_bwd_data_pd_t {
|
| 137 |
+
pd_t(const convolution_desc_t *adesc, const primitive_attr_t *attr,
|
| 138 |
+
const convolution_fwd_pd_t *hint_fwd_pd)
|
| 139 |
+
: cpu_convolution_bwd_data_pd_t(adesc, attr, hint_fwd_pd), jcp_() {}
|
| 140 |
+
|
| 141 |
+
DECLARE_COMMON_PD_T(GEMM_IMPL_STR, gemm_convolution_bwd_data_t,
|
| 142 |
+
USE_GLOBAL_SCRATCHPAD);
|
| 143 |
+
|
| 144 |
+
status_t init(engine_t *engine) {
|
| 145 |
+
bool ok = true && desc()->prop_kind == prop_kind::backward_data
|
| 146 |
+
&& set_default_alg_kind(alg_kind::convolution_direct)
|
| 147 |
+
&& expect_data_types(data_type::f32, data_type::f32,
|
| 148 |
+
data_type::undef, data_type::f32, data_type::f32)
|
| 149 |
+
&& !has_zero_dim_memory() && attr()->has_default_values();
|
| 150 |
+
if (!ok) return status::unimplemented;
|
| 151 |
+
|
| 152 |
+
auto scratchpad = scratchpad_registry().registrar();
|
| 153 |
+
return jit_gemm_convolution_utils::init_conf(jcp_, scratchpad,
|
| 154 |
+
*desc(), diff_src_md_, weights_md_, diff_dst_md_, bias_md_,
|
| 155 |
+
attr_, dnnl_get_max_threads());
|
| 156 |
+
}
|
| 157 |
+
|
| 158 |
+
conv_gemm_conf_t jcp_;
|
| 159 |
+
};
|
| 160 |
+
|
| 161 |
+
gemm_convolution_bwd_data_t(const pd_t *apd) : primitive_t(apd) {}
|
| 162 |
+
|
| 163 |
+
typedef typename prec_traits<data_type::f32>::type data_t;
|
| 164 |
+
|
| 165 |
+
status_t execute(const exec_ctx_t &ctx) const override {
|
| 166 |
+
bool is_nspc = pd()->jcp_.is_nspc;
|
| 167 |
+
return is_nspc ? execute_backward_data_nspc(ctx)
|
| 168 |
+
: execute_backward_data_ncsp(ctx);
|
| 169 |
+
}
|
| 170 |
+
|
| 171 |
+
private:
|
| 172 |
+
status_t execute_backward_data_nspc(const exec_ctx_t &ctx) const;
|
| 173 |
+
status_t execute_backward_data_ncsp(const exec_ctx_t &ctx) const;
|
| 174 |
+
status_t execute_backward_data_thr_nspc(const int ithr, const int nthr,
|
| 175 |
+
const data_t *diff_dst_base, const data_t *wei_base,
|
| 176 |
+
const data_t *bia_base, data_t *diff_src_base,
|
| 177 |
+
const memory_tracking::grantor_t &scratchpad) const;
|
| 178 |
+
|
| 179 |
+
const pd_t *pd() const { return (const pd_t *)primitive_t::pd().get(); }
|
| 180 |
+
};
|
| 181 |
+
|
| 182 |
+
struct gemm_convolution_bwd_weights_t : public primitive_t {
|
| 183 |
+
struct pd_t : public cpu_convolution_bwd_weights_pd_t {
|
| 184 |
+
pd_t(const convolution_desc_t *adesc, const primitive_attr_t *attr,
|
| 185 |
+
const convolution_fwd_pd_t *hint_fwd_pd)
|
| 186 |
+
: cpu_convolution_bwd_weights_pd_t(adesc, attr, hint_fwd_pd)
|
| 187 |
+
, jcp_() {}
|
| 188 |
+
|
| 189 |
+
DECLARE_COMMON_PD_T(GEMM_IMPL_STR, gemm_convolution_bwd_weights_t,
|
| 190 |
+
USE_GLOBAL_SCRATCHPAD);
|
| 191 |
+
|
| 192 |
+
status_t init(engine_t *engine) {
|
| 193 |
+
bool ok = true && desc()->prop_kind == prop_kind::backward_weights
|
| 194 |
+
&& set_default_alg_kind(alg_kind::convolution_direct)
|
| 195 |
+
&& expect_data_types(data_type::f32, data_type::f32,
|
| 196 |
+
data_type::f32, data_type::f32, data_type::f32)
|
| 197 |
+
&& !has_zero_dim_memory() && attr()->has_default_values();
|
| 198 |
+
if (!ok) return status::unimplemented;
|
| 199 |
+
|
| 200 |
+
auto scratchpad = scratchpad_registry().registrar();
|
| 201 |
+
return jit_gemm_convolution_utils::init_conf(jcp_, scratchpad,
|
| 202 |
+
*desc(), src_md_, diff_weights_md_, diff_dst_md_,
|
| 203 |
+
diff_bias_md_, attr_, dnnl_get_max_threads());
|
| 204 |
+
}
|
| 205 |
+
|
| 206 |
+
conv_gemm_conf_t jcp_;
|
| 207 |
+
};
|
| 208 |
+
|
| 209 |
+
gemm_convolution_bwd_weights_t(const pd_t *apd) : primitive_t(apd) {}
|
| 210 |
+
|
| 211 |
+
typedef typename prec_traits<data_type::f32>::type data_t;
|
| 212 |
+
|
| 213 |
+
status_t execute(const exec_ctx_t &ctx) const override {
|
| 214 |
+
const bool is_nspc = pd()->jcp_.is_nspc;
|
| 215 |
+
return is_nspc ? execute_backward_weights_nspc(ctx)
|
| 216 |
+
: execute_backward_weights_ncsp(ctx);
|
| 217 |
+
}
|
| 218 |
+
|
| 219 |
+
private:
|
| 220 |
+
status_t execute_backward_weights_ncsp(const exec_ctx_t &ctx) const;
|
| 221 |
+
status_t execute_backward_weights_nspc(const exec_ctx_t &ctx) const;
|
| 222 |
+
const pd_t *pd() const { return (const pd_t *)primitive_t::pd().get(); }
|
| 223 |
+
};
|
| 224 |
+
|
| 225 |
+
} // namespace cpu
|
| 226 |
+
} // namespace impl
|
| 227 |
+
} // namespace dnnl
|
| 228 |
+
|
| 229 |
+
#endif
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/gemm_convolution_utils.hpp
ADDED
|
@@ -0,0 +1,139 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*******************************************************************************
|
| 2 |
+
* Copyright 2016-2022 Intel Corporation
|
| 3 |
+
*
|
| 4 |
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
* you may not use this file except in compliance with the License.
|
| 6 |
+
* You may obtain a copy of the License at
|
| 7 |
+
*
|
| 8 |
+
* http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
*
|
| 10 |
+
* Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
* See the License for the specific language governing permissions and
|
| 14 |
+
* limitations under the License.
|
| 15 |
+
*******************************************************************************/
|
| 16 |
+
|
| 17 |
+
#ifndef CPU_GEMM_CONVOLUTION_UTILS_HPP
|
| 18 |
+
#define CPU_GEMM_CONVOLUTION_UTILS_HPP
|
| 19 |
+
|
| 20 |
+
#include "common/c_types_map.hpp"
|
| 21 |
+
#include "common/dnnl_thread.hpp"
|
| 22 |
+
#include "common/memory_tracking.hpp"
|
| 23 |
+
|
| 24 |
+
#include "cpu/cpu_convolution_pd.hpp"
|
| 25 |
+
#include "cpu/cpu_engine.hpp"
|
| 26 |
+
#include "cpu/zero_point_utils.hpp"
|
| 27 |
+
|
| 28 |
+
namespace dnnl {
|
| 29 |
+
namespace impl {
|
| 30 |
+
namespace cpu {
|
| 31 |
+
|
| 32 |
+
enum conv_gemm_loop_order_t { gemm_loop_rlb, gemm_loop_lrb, gemm_loop_lbr };
|
| 33 |
+
struct conv_gemm_conf_t {
|
| 34 |
+
prop_kind_t prop_kind;
|
| 35 |
+
|
| 36 |
+
dim_t mb;
|
| 37 |
+
dim_t ngroups, ic, oc;
|
| 38 |
+
dim_t iw, ih, id, ow, oh, od;
|
| 39 |
+
dim_t l_pad, t_pad, f_pad, e_pad, b_pad, r_pad;
|
| 40 |
+
dim_t kh, kw, kd;
|
| 41 |
+
dim_t stride_h, stride_w, stride_d;
|
| 42 |
+
dim_t dilate_h, dilate_w, dilate_d;
|
| 43 |
+
bool with_bias;
|
| 44 |
+
bool with_eltwise;
|
| 45 |
+
bool with_binary;
|
| 46 |
+
bool with_sum;
|
| 47 |
+
post_ops_t post_ops;
|
| 48 |
+
bool is_nspc;
|
| 49 |
+
|
| 50 |
+
dim_t is, os, ks;
|
| 51 |
+
dim_t ic_block, oc_block;
|
| 52 |
+
|
| 53 |
+
int nthr;
|
| 54 |
+
ptrdiff_t im2col_sz;
|
| 55 |
+
bool need_wei_reduction;
|
| 56 |
+
bool signed_input;
|
| 57 |
+
dim_t oh_block;
|
| 58 |
+
dim_t ow_block;
|
| 59 |
+
dim_t os_block, os_nb_block;
|
| 60 |
+
bool outer_threading;
|
| 61 |
+
conv_gemm_loop_order_t loop_order;
|
| 62 |
+
int nthr_oc;
|
| 63 |
+
|
| 64 |
+
zero_point_config_t zp;
|
| 65 |
+
|
| 66 |
+
data_type_t bias_data_type;
|
| 67 |
+
data_type_t dst_data_type;
|
| 68 |
+
data_type_t sum_data_type;
|
| 69 |
+
size_t dst_os_stride;
|
| 70 |
+
size_t scale_idx_mult;
|
| 71 |
+
bool with_dst_scale;
|
| 72 |
+
};
|
| 73 |
+
|
| 74 |
+
struct single_gemm_conv_chunk_desc_t {
|
| 75 |
+
single_gemm_conv_chunk_desc_t() = default;
|
| 76 |
+
single_gemm_conv_chunk_desc_t(dim_t d_off, dim_t d_size, dim_t h_off,
|
| 77 |
+
dim_t h_size, dim_t w_off, dim_t w_size);
|
| 78 |
+
|
| 79 |
+
dim_t d_off_ = 0;
|
| 80 |
+
dim_t d_size_ = 0;
|
| 81 |
+
dim_t h_off_ = 0;
|
| 82 |
+
dim_t h_size_ = 0;
|
| 83 |
+
dim_t w_off_ = 0;
|
| 84 |
+
dim_t w_size_ = 0;
|
| 85 |
+
};
|
| 86 |
+
|
| 87 |
+
namespace jit_gemm_convolution_utils {
|
| 88 |
+
template <typename data_type_t>
|
| 89 |
+
void im2col_3d(const conv_gemm_conf_t &jcp, const data_type_t *im,
|
| 90 |
+
data_type_t *col, dim_t od, int spatial_step, int spatial_block);
|
| 91 |
+
|
| 92 |
+
template <typename T>
|
| 93 |
+
void transpose_dt(const conv_gemm_conf_t &jcp, const T *__restrict im,
|
| 94 |
+
T *__restrict imtr);
|
| 95 |
+
|
| 96 |
+
template <typename im_dt, typename col_dt>
|
| 97 |
+
void im2col_dt_3d(const conv_gemm_conf_t &jcp, const void *__restrict im,
|
| 98 |
+
col_dt *__restrict col, dim_t od);
|
| 99 |
+
|
| 100 |
+
template <typename data_type_t>
|
| 101 |
+
void im2col(const conv_gemm_conf_t &jcp, const data_type_t *__restrict im,
|
| 102 |
+
data_type_t *__restrict col, dim_t ss, dim_t sb, dim_t cs, dim_t cb);
|
| 103 |
+
|
| 104 |
+
template <typename im_dt, typename col_dt>
|
| 105 |
+
void im2col_dt(const conv_gemm_conf_t &jcp, const void *__restrict im,
|
| 106 |
+
void *__restrict imtr, col_dt *__restrict col, dim_t hs, dim_t hb,
|
| 107 |
+
dim_t ws, dim_t wb);
|
| 108 |
+
|
| 109 |
+
template <typename T>
|
| 110 |
+
void col2im_dt(
|
| 111 |
+
const conv_gemm_conf_t &jcp, const T *__restrict col, T *__restrict im);
|
| 112 |
+
void col2im_3d(const conv_gemm_conf_t &jcp, const float *col, float *im,
|
| 113 |
+
dim_t od, int spatial_step, int spatial_block);
|
| 114 |
+
void col2im(const conv_gemm_conf_t &jcp, const float *col, float *im,
|
| 115 |
+
int spatial_step, int spatial_block);
|
| 116 |
+
|
| 117 |
+
status_t init_conf(conv_gemm_conf_t &jcp,
|
| 118 |
+
memory_tracking::registrar_t &scratchpad, const convolution_desc_t &cd,
|
| 119 |
+
memory_desc_t &src_md, memory_desc_t &weights_md, memory_desc_t &dst_md,
|
| 120 |
+
memory_desc_t &bias_md, primitive_attr_t &attr, int max_threads);
|
| 121 |
+
|
| 122 |
+
void bwd_weights_balance(int ithr, int nthr, int ngroups, int mb, int &ithr_g,
|
| 123 |
+
int &nthr_g, int &ithr_mb, int &nthr_mb);
|
| 124 |
+
void bwd_weights_reduction_par_ncsp(int ithr, int nthr,
|
| 125 |
+
const conv_gemm_conf_t &jcp, const float *weights_reduce_ws,
|
| 126 |
+
float *weights);
|
| 127 |
+
void bwd_weights_reduction_par_nspc(int ithr, int nthr, size_t g_start,
|
| 128 |
+
size_t g_end, const conv_gemm_conf_t &jcp,
|
| 129 |
+
const float *weights_reduce_base, float *diff_weights);
|
| 130 |
+
|
| 131 |
+
bool padding_exists(const conv_gemm_conf_t &jcp) noexcept;
|
| 132 |
+
|
| 133 |
+
} // namespace jit_gemm_convolution_utils
|
| 134 |
+
|
| 135 |
+
} // namespace cpu
|
| 136 |
+
} // namespace impl
|
| 137 |
+
} // namespace dnnl
|
| 138 |
+
|
| 139 |
+
#endif
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/gemm_inner_product.hpp
ADDED
|
@@ -0,0 +1,200 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*******************************************************************************
|
| 2 |
+
* Copyright 2016-2023 Intel Corporation
|
| 3 |
+
*
|
| 4 |
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
* you may not use this file except in compliance with the License.
|
| 6 |
+
* You may obtain a copy of the License at
|
| 7 |
+
*
|
| 8 |
+
* http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
*
|
| 10 |
+
* Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
* See the License for the specific language governing permissions and
|
| 14 |
+
* limitations under the License.
|
| 15 |
+
*******************************************************************************/
|
| 16 |
+
|
| 17 |
+
#ifndef CPU_GEMM_INNER_PRODUCT_HPP
|
| 18 |
+
#define CPU_GEMM_INNER_PRODUCT_HPP
|
| 19 |
+
|
| 20 |
+
#include <assert.h>
|
| 21 |
+
|
| 22 |
+
#include <memory>
|
| 23 |
+
|
| 24 |
+
#include "common/c_types_map.hpp"
|
| 25 |
+
#include "common/primitive.hpp"
|
| 26 |
+
#include "common/type_helpers.hpp"
|
| 27 |
+
#include "common/utils.hpp"
|
| 28 |
+
|
| 29 |
+
#include "cpu/gemm/gemm.hpp"
|
| 30 |
+
#include "cpu/gemm_inner_product_utils.hpp"
|
| 31 |
+
|
| 32 |
+
#include "cpu/cpu_inner_product_pd.hpp"
|
| 33 |
+
|
| 34 |
+
namespace dnnl {
|
| 35 |
+
namespace impl {
|
| 36 |
+
namespace cpu {
|
| 37 |
+
|
| 38 |
+
template <impl::data_type_t data_type>
|
| 39 |
+
struct gemm_inner_product_fwd_t : public primitive_t {
|
| 40 |
+
struct pd_t : public cpu_inner_product_fwd_pd_t {
|
| 41 |
+
using cpu_inner_product_fwd_pd_t::cpu_inner_product_fwd_pd_t;
|
| 42 |
+
|
| 43 |
+
DECLARE_COMMON_PD_T(GEMM_IMPL_STR, gemm_inner_product_fwd_t);
|
| 44 |
+
|
| 45 |
+
status_t init(engine_t *engine) {
|
| 46 |
+
using namespace utils;
|
| 47 |
+
|
| 48 |
+
const bool ok = true && is_fwd() && !has_zero_dim_memory()
|
| 49 |
+
&& everyone_is(data_type, src_md()->data_type,
|
| 50 |
+
weights_md()->data_type, dst_md()->data_type,
|
| 51 |
+
with_bias() ? weights_md(1)->data_type : data_type)
|
| 52 |
+
&& attr()->has_default_values(
|
| 53 |
+
primitive_attr_t::skip_mask_t::post_ops
|
| 54 |
+
| primitive_attr_t::skip_mask_t::sum_dt)
|
| 55 |
+
&& attr()->post_ops_.check_sum_consistency(
|
| 56 |
+
dst_md()->data_type, /* is_int8 */ false)
|
| 57 |
+
&& set_default_params() == status::success
|
| 58 |
+
&& dense_gemm_consitency_check(
|
| 59 |
+
src_md(), weights_md(), dst_md())
|
| 60 |
+
&& inner_product_utils::post_ops_ok(
|
| 61 |
+
attr()->post_ops_, &dst_md_)
|
| 62 |
+
&& attr_.set_default_formats(dst_md(0)) == status::success;
|
| 63 |
+
if (!ok) return status::unimplemented;
|
| 64 |
+
|
| 65 |
+
const auto sum_idx = attr()->post_ops_.find(primitive_kind::sum);
|
| 66 |
+
// Native GeMM doesn't support sum with a dt other than dst_dt.
|
| 67 |
+
sum_through_pp_kernel_ = sum_idx >= 0
|
| 68 |
+
&& !utils::one_of(attr()->post_ops_.entry_[sum_idx].sum.dt,
|
| 69 |
+
data_type::undef, dst_md()->data_type);
|
| 70 |
+
init_scratchpad();
|
| 71 |
+
|
| 72 |
+
return status::success;
|
| 73 |
+
}
|
| 74 |
+
|
| 75 |
+
bool sum_through_pp_kernel_ = false;
|
| 76 |
+
|
| 77 |
+
private:
|
| 78 |
+
void init_scratchpad() {
|
| 79 |
+
using namespace memory_tracking::names;
|
| 80 |
+
if (sum_through_pp_kernel_) {
|
| 81 |
+
auto scratchpad = scratchpad_registry().registrar();
|
| 82 |
+
const memory_desc_wrapper dst_d(dst_md());
|
| 83 |
+
scratchpad.template book<char>(
|
| 84 |
+
key_gemm_tmp_buffer, dst_d.size());
|
| 85 |
+
}
|
| 86 |
+
}
|
| 87 |
+
};
|
| 88 |
+
|
| 89 |
+
gemm_inner_product_fwd_t(const pd_t *apd)
|
| 90 |
+
: primitive_t(apd), postops_in_ip_(false) {}
|
| 91 |
+
|
| 92 |
+
status_t init(engine_t *engine) override {
|
| 93 |
+
const bool has_bias = pd()->with_bias();
|
| 94 |
+
const bool has_eltwise
|
| 95 |
+
= pd()->attr()->post_ops_.find(primitive_kind::eltwise) >= 0;
|
| 96 |
+
const bool has_binary
|
| 97 |
+
= pd()->attr()->post_ops_.find(primitive_kind::binary) >= 0;
|
| 98 |
+
const bool has_prelu
|
| 99 |
+
= pd()->attr()->post_ops_.find(primitive_kind::prelu) >= 0;
|
| 100 |
+
|
| 101 |
+
const bool has_sum = pd()->sum_through_pp_kernel_;
|
| 102 |
+
postops_in_ip_
|
| 103 |
+
= has_bias || has_eltwise || has_binary || has_prelu || has_sum;
|
| 104 |
+
|
| 105 |
+
CHECK(safe_ptr_assign(pp_kernel_,
|
| 106 |
+
inner_product_utils::pp_kernel_t::create(pd(), !has_sum)));
|
| 107 |
+
|
| 108 |
+
return pp_kernel_->create_kernel();
|
| 109 |
+
}
|
| 110 |
+
|
| 111 |
+
typedef typename prec_traits<data_type>::type data_t;
|
| 112 |
+
|
| 113 |
+
status_t execute(const exec_ctx_t &ctx) const override {
|
| 114 |
+
return execute_forward(ctx);
|
| 115 |
+
}
|
| 116 |
+
|
| 117 |
+
private:
|
| 118 |
+
status_t execute_forward(const exec_ctx_t &ctx) const;
|
| 119 |
+
const pd_t *pd() const { return (const pd_t *)primitive_t::pd().get(); }
|
| 120 |
+
|
| 121 |
+
std::unique_ptr<inner_product_utils::pp_kernel_t> pp_kernel_;
|
| 122 |
+
bool postops_in_ip_;
|
| 123 |
+
};
|
| 124 |
+
|
| 125 |
+
template <impl::data_type_t data_type>
|
| 126 |
+
struct gemm_inner_product_bwd_data_t : public primitive_t {
|
| 127 |
+
struct pd_t : public cpu_inner_product_bwd_data_pd_t {
|
| 128 |
+
using cpu_inner_product_bwd_data_pd_t::cpu_inner_product_bwd_data_pd_t;
|
| 129 |
+
|
| 130 |
+
DECLARE_COMMON_PD_T(GEMM_IMPL_STR, gemm_inner_product_bwd_data_t);
|
| 131 |
+
|
| 132 |
+
status_t init(engine_t *engine) {
|
| 133 |
+
bool ok = true && desc()->prop_kind == prop_kind::backward_data
|
| 134 |
+
&& !has_zero_dim_memory()
|
| 135 |
+
&& utils::everyone_is(data_type, diff_src_md()->data_type,
|
| 136 |
+
weights_md()->data_type, diff_dst_md()->data_type)
|
| 137 |
+
&& attr()->has_default_values()
|
| 138 |
+
&& set_default_params() == status::success
|
| 139 |
+
&& dense_gemm_consitency_check(
|
| 140 |
+
diff_src_md(), weights_md(), diff_dst_md());
|
| 141 |
+
return ok ? status::success : status::unimplemented;
|
| 142 |
+
}
|
| 143 |
+
};
|
| 144 |
+
|
| 145 |
+
gemm_inner_product_bwd_data_t(const pd_t *apd) : primitive_t(apd) {}
|
| 146 |
+
typedef typename prec_traits<data_type>::type data_t;
|
| 147 |
+
|
| 148 |
+
status_t execute(const exec_ctx_t &ctx) const override {
|
| 149 |
+
return execute_backward_data(ctx);
|
| 150 |
+
}
|
| 151 |
+
|
| 152 |
+
private:
|
| 153 |
+
status_t execute_backward_data(const exec_ctx_t &ctx) const;
|
| 154 |
+
const pd_t *pd() const { return (const pd_t *)primitive_t::pd().get(); }
|
| 155 |
+
};
|
| 156 |
+
|
| 157 |
+
template <impl::data_type_t data_type>
|
| 158 |
+
struct gemm_inner_product_bwd_weights_t : public primitive_t {
|
| 159 |
+
struct pd_t : public cpu_inner_product_bwd_weights_pd_t {
|
| 160 |
+
using cpu_inner_product_bwd_weights_pd_t::
|
| 161 |
+
cpu_inner_product_bwd_weights_pd_t;
|
| 162 |
+
|
| 163 |
+
DECLARE_COMMON_PD_T(GEMM_IMPL_STR, gemm_inner_product_bwd_weights_t);
|
| 164 |
+
|
| 165 |
+
status_t init(engine_t *engine) {
|
| 166 |
+
bool ok = true && desc()->prop_kind == prop_kind::backward_weights
|
| 167 |
+
&& !has_zero_dim_memory()
|
| 168 |
+
&& utils::everyone_is(data_type, src_md()->data_type,
|
| 169 |
+
diff_weights_md()->data_type,
|
| 170 |
+
diff_dst_md()->data_type,
|
| 171 |
+
with_bias() ? diff_weights_md(1)->data_type
|
| 172 |
+
: data_type)
|
| 173 |
+
&& attr()->has_default_values()
|
| 174 |
+
&& set_default_params() == status::success
|
| 175 |
+
&& dense_gemm_consitency_check(
|
| 176 |
+
src_md(), diff_weights_md(), diff_dst_md());
|
| 177 |
+
|
| 178 |
+
return ok ? status::success : status::unimplemented;
|
| 179 |
+
}
|
| 180 |
+
};
|
| 181 |
+
|
| 182 |
+
gemm_inner_product_bwd_weights_t(const pd_t *apd) : primitive_t(apd) {}
|
| 183 |
+
typedef typename prec_traits<data_type>::type data_t;
|
| 184 |
+
|
| 185 |
+
status_t execute(const exec_ctx_t &ctx) const override {
|
| 186 |
+
return execute_backward_weights(ctx);
|
| 187 |
+
}
|
| 188 |
+
|
| 189 |
+
private:
|
| 190 |
+
status_t execute_backward_weights(const exec_ctx_t &ctx) const;
|
| 191 |
+
const pd_t *pd() const { return (const pd_t *)primitive_t::pd().get(); }
|
| 192 |
+
};
|
| 193 |
+
|
| 194 |
+
} // namespace cpu
|
| 195 |
+
} // namespace impl
|
| 196 |
+
} // namespace dnnl
|
| 197 |
+
|
| 198 |
+
#endif
|
| 199 |
+
|
| 200 |
+
// vim: et ts=4 sw=4 cindent cino+=l0,\:4,N-s
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/gemm_inner_product_utils.hpp
ADDED
|
@@ -0,0 +1,115 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*******************************************************************************
|
| 2 |
+
* Copyright 2019-2023 Intel Corporation
|
| 3 |
+
*
|
| 4 |
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
* you may not use this file except in compliance with the License.
|
| 6 |
+
* You may obtain a copy of the License at
|
| 7 |
+
*
|
| 8 |
+
* http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
*
|
| 10 |
+
* Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
* See the License for the specific language governing permissions and
|
| 14 |
+
* limitations under the License.
|
| 15 |
+
*******************************************************************************/
|
| 16 |
+
|
| 17 |
+
#ifndef CPU_GEMM_INNER_PRODUCT_UTILS_HPP
|
| 18 |
+
#define CPU_GEMM_INNER_PRODUCT_UTILS_HPP
|
| 19 |
+
|
| 20 |
+
#include "common/broadcast_strategy.hpp"
|
| 21 |
+
#include "common/c_types_map.hpp"
|
| 22 |
+
#include "common/type_helpers.hpp"
|
| 23 |
+
#include "common/utils.hpp"
|
| 24 |
+
|
| 25 |
+
#include "cpu/cpu_inner_product_pd.hpp"
|
| 26 |
+
|
| 27 |
+
namespace dnnl {
|
| 28 |
+
namespace impl {
|
| 29 |
+
namespace cpu {
|
| 30 |
+
namespace inner_product_utils {
|
| 31 |
+
|
| 32 |
+
struct pp_kernel_t {
|
| 33 |
+
static pp_kernel_t *create(size_t OC, size_t MB, dim_t dst_mb_stride,
|
| 34 |
+
const primitive_attr_t *attr, data_type_t bias_dt,
|
| 35 |
+
data_type_t acc_dt, const memory_desc_t *dst_md, bool skip_sum);
|
| 36 |
+
static pp_kernel_t *create(
|
| 37 |
+
const cpu_inner_product_fwd_pd_t *pd, bool skip_sum) {
|
| 38 |
+
return create(pd->OC(), pd->MB(), pd->OC(), pd->attr(),
|
| 39 |
+
pd->desc()->bias_desc.data_type, pd->desc()->accum_data_type,
|
| 40 |
+
pd->dst_md(), skip_sum);
|
| 41 |
+
}
|
| 42 |
+
|
| 43 |
+
virtual ~pp_kernel_t() = default;
|
| 44 |
+
|
| 45 |
+
// mb kernel only supports single-threaded execution where performance
|
| 46 |
+
// degradation is larger
|
| 47 |
+
bool sequential_kernel() const { return mb_blk_kernel_; }
|
| 48 |
+
|
| 49 |
+
virtual void operator()(void *dst, const void *acc, const char *bias,
|
| 50 |
+
const float *scales, float dst_scale, size_t start,
|
| 51 |
+
size_t dst_logical_off, size_t dim1_off, size_t end,
|
| 52 |
+
size_t runtime_oc, dim_t dst_mb_stride,
|
| 53 |
+
const float *dst_zero_points,
|
| 54 |
+
const void *post_ops_binary_rhs_arg_vec, const void *dst_orig,
|
| 55 |
+
size_t first_mb_matrix_addr_off, const exec_ctx_t &ctx,
|
| 56 |
+
const memory_desc_t &dst_md) const = 0;
|
| 57 |
+
|
| 58 |
+
virtual status_t create_kernel() { return status::success; }
|
| 59 |
+
|
| 60 |
+
protected:
|
| 61 |
+
pp_kernel_t(size_t OC, size_t MB, dim_t dst_mb_stride,
|
| 62 |
+
const primitive_attr_t *attr, data_type_t bias_dt,
|
| 63 |
+
data_type_t acc_dt, const memory_desc_t *dst_md, bool skip_sum);
|
| 64 |
+
|
| 65 |
+
size_t OC_;
|
| 66 |
+
size_t MB_;
|
| 67 |
+
dim_t dst_mb_stride_;
|
| 68 |
+
data_type_t bias_data_type_;
|
| 69 |
+
data_type_t acc_data_type_;
|
| 70 |
+
data_type_t dst_data_type_;
|
| 71 |
+
size_t bias_data_type_size_ = 0;
|
| 72 |
+
size_t acc_data_type_size_ = 4;
|
| 73 |
+
size_t dst_data_type_size_ = 0;
|
| 74 |
+
bool do_scale_ = false;
|
| 75 |
+
size_t scale_idx_mult_ = 0;
|
| 76 |
+
bool do_eltwise_ = false;
|
| 77 |
+
bool do_binary_ = false;
|
| 78 |
+
bool do_prelu_ = false;
|
| 79 |
+
bool do_sum_ = false;
|
| 80 |
+
bool do_dst_scale_ = false;
|
| 81 |
+
bool do_dst_zero_points_ = false;
|
| 82 |
+
float sum_scale_ = 0.f;
|
| 83 |
+
int32_t sum_zp_ = 0;
|
| 84 |
+
data_type_t sum_data_type_;
|
| 85 |
+
bool mb_blk_kernel_ = false;
|
| 86 |
+
post_ops_t post_ops_;
|
| 87 |
+
int ndims_;
|
| 88 |
+
|
| 89 |
+
bool has_trivial_mb_stride() const {
|
| 90 |
+
return (!runtime_oc()) && (OC_ == (size_t)dst_mb_stride_);
|
| 91 |
+
}
|
| 92 |
+
bool do_bias() const { return bias_data_type_ != data_type::undef; }
|
| 93 |
+
bool runtime_oc() const { return OC_ == (size_t)DNNL_RUNTIME_DIM_VAL; }
|
| 94 |
+
bool runtime_mb() const { return MB_ == (size_t)DNNL_RUNTIME_DIM_VAL; }
|
| 95 |
+
};
|
| 96 |
+
|
| 97 |
+
inline const bcast_set_t &gemm_default_strategies() {
|
| 98 |
+
static const bcast_set_t s
|
| 99 |
+
= {broadcasting_strategy_t::scalar, broadcasting_strategy_t::per_oc,
|
| 100 |
+
broadcasting_strategy_t::per_oc_spatial,
|
| 101 |
+
broadcasting_strategy_t::no_broadcast};
|
| 102 |
+
return s;
|
| 103 |
+
}
|
| 104 |
+
|
| 105 |
+
bool post_ops_ok(const post_ops_t &post_ops, const memory_desc_wrapper *dst_d,
|
| 106 |
+
const bcast_set_t &enabled_bcast_strategy = gemm_default_strategies());
|
| 107 |
+
bool post_ops_ok(const post_ops_t &post_ops, const memory_desc_t *dst_d,
|
| 108 |
+
const bcast_set_t &enabled_bcast_strategy = gemm_default_strategies());
|
| 109 |
+
|
| 110 |
+
} // namespace inner_product_utils
|
| 111 |
+
} // namespace cpu
|
| 112 |
+
} // namespace impl
|
| 113 |
+
} // namespace dnnl
|
| 114 |
+
|
| 115 |
+
#endif
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/gemm_x8s8s32x_conv_zp_src_pad_comp.hpp
ADDED
|
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*******************************************************************************
|
| 2 |
+
* Copyright 2020 Intel Corporation
|
| 3 |
+
*
|
| 4 |
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
* you may not use this file except in compliance with the License.
|
| 6 |
+
* You may obtain a copy of the License at
|
| 7 |
+
*
|
| 8 |
+
* http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
*
|
| 10 |
+
* Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
* See the License for the specific language governing permissions and
|
| 14 |
+
* limitations under the License.
|
| 15 |
+
*******************************************************************************/
|
| 16 |
+
|
| 17 |
+
#ifndef CPU_GEMM_X8S8S32X_CONV_ZP_SRC_PAD_COMP_HPP_
|
| 18 |
+
#define CPU_GEMM_X8S8S32X_CONV_ZP_SRC_PAD_COMP_HPP_
|
| 19 |
+
|
| 20 |
+
#include "common/c_types_map.hpp"
|
| 21 |
+
#include "cpu/gemm_convolution_utils.hpp"
|
| 22 |
+
|
| 23 |
+
namespace dnnl {
|
| 24 |
+
namespace impl {
|
| 25 |
+
namespace cpu {
|
| 26 |
+
|
| 27 |
+
void compute_zp_src_comp_pad(const conv_gemm_conf_t &jcp,
|
| 28 |
+
int32_t *const zp_src_pad_buf, const int32_t *const zp_src,
|
| 29 |
+
const int8_t *weights, const memory_desc_wrapper &weights_md,
|
| 30 |
+
const bool with_groups);
|
| 31 |
+
|
| 32 |
+
void apply_zp_src_comp_pad(const conv_gemm_conf_t &jcp, const dim_t g,
|
| 33 |
+
const dim_t d_offset, const dim_t h_offset, const dim_t w_offset,
|
| 34 |
+
const dim_t h_size, const dim_t w_size,
|
| 35 |
+
int32_t *__restrict gemm_conv_result,
|
| 36 |
+
const int32_t *__restrict zp_src_pad_buf);
|
| 37 |
+
|
| 38 |
+
} // namespace cpu
|
| 39 |
+
} // namespace impl
|
| 40 |
+
} // namespace dnnl
|
| 41 |
+
|
| 42 |
+
#endif
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/gemm_x8s8s32x_convolution.hpp
ADDED
|
@@ -0,0 +1,181 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*******************************************************************************
|
| 2 |
+
* Copyright 2017-2023 Intel Corporation
|
| 3 |
+
*
|
| 4 |
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
* you may not use this file except in compliance with the License.
|
| 6 |
+
* You may obtain a copy of the License at
|
| 7 |
+
*
|
| 8 |
+
* http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
*
|
| 10 |
+
* Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
* See the License for the specific language governing permissions and
|
| 14 |
+
* limitations under the License.
|
| 15 |
+
*******************************************************************************/
|
| 16 |
+
|
| 17 |
+
#ifndef CPU_GEMM_X8S8S32X_CONVOLUTION_HPP
|
| 18 |
+
#define CPU_GEMM_X8S8S32X_CONVOLUTION_HPP
|
| 19 |
+
|
| 20 |
+
#include <memory>
|
| 21 |
+
|
| 22 |
+
#include "common/c_types_map.hpp"
|
| 23 |
+
#include "common/memory_tracking.hpp"
|
| 24 |
+
#include "common/primitive.hpp"
|
| 25 |
+
|
| 26 |
+
#include "cpu/platform.hpp"
|
| 27 |
+
|
| 28 |
+
#include "cpu/cpu_convolution_pd.hpp"
|
| 29 |
+
|
| 30 |
+
#include "cpu/gemm_convolution_utils.hpp"
|
| 31 |
+
#include "cpu/gemm_x8s8s32x_convolution_utils.hpp"
|
| 32 |
+
|
| 33 |
+
#include "cpu/gemm/gemm.hpp"
|
| 34 |
+
#include "cpu/zero_point_utils.hpp"
|
| 35 |
+
|
| 36 |
+
namespace dnnl {
|
| 37 |
+
namespace impl {
|
| 38 |
+
namespace cpu {
|
| 39 |
+
|
| 40 |
+
struct gemm_x8s8s32x_convolution_fwd_t : public primitive_t {
|
| 41 |
+
struct pd_t : public cpu_convolution_fwd_pd_t {
|
| 42 |
+
pd_t(const convolution_desc_t *adesc, const primitive_attr_t *attr,
|
| 43 |
+
const typename pd_t::base_class *hint_fwd_pd)
|
| 44 |
+
: cpu_convolution_fwd_pd_t(adesc, attr, hint_fwd_pd), jcp_() {}
|
| 45 |
+
|
| 46 |
+
DECLARE_COMMON_PD_T(src_md()->data_type == data_type::u8
|
| 47 |
+
? IGEMM_S8U8S32_IMPL_STR
|
| 48 |
+
: IGEMM_S8S8S32_IMPL_STR,
|
| 49 |
+
gemm_x8s8s32x_convolution_fwd_t, USE_GLOBAL_SCRATCHPAD);
|
| 50 |
+
|
| 51 |
+
status_t init(engine_t *engine) {
|
| 52 |
+
using namespace data_type;
|
| 53 |
+
using skip_mask_t = primitive_attr_t::skip_mask_t;
|
| 54 |
+
const auto dst_type = dst_md(0)->data_type;
|
| 55 |
+
|
| 56 |
+
bool ok = is_fwd()
|
| 57 |
+
&& set_default_alg_kind(alg_kind::convolution_direct)
|
| 58 |
+
&& utils::one_of(src_md()->data_type, s8, u8)
|
| 59 |
+
&& weights_md()->data_type == s8
|
| 60 |
+
&& utils::one_of(
|
| 61 |
+
dst_md()->data_type, f32, bf16, s32, s8, u8)
|
| 62 |
+
&& IMPLICATION(with_bias(),
|
| 63 |
+
utils::one_of(weights_md(1)->data_type, f32, bf16,
|
| 64 |
+
s32, s8, u8))
|
| 65 |
+
&& !has_zero_dim_memory()
|
| 66 |
+
&& attr()->has_default_values(skip_mask_t::scales_runtime
|
| 67 |
+
| skip_mask_t::zero_points_runtime
|
| 68 |
+
| skip_mask_t::post_ops
|
| 69 |
+
| skip_mask_t::sum_dt,
|
| 70 |
+
dst_type)
|
| 71 |
+
&& attr()->post_ops_.check_sum_consistency(dst_type,
|
| 72 |
+
/* is_int8 */ true)
|
| 73 |
+
&& attr_scales_ok() && zero_points_valid(attr());
|
| 74 |
+
if (!ok) return status::unimplemented;
|
| 75 |
+
|
| 76 |
+
auto scratchpad = scratchpad_registry().registrar();
|
| 77 |
+
CHECK(jit_gemm_convolution_utils::init_conf(jcp_, scratchpad,
|
| 78 |
+
*desc(), src_md_, weights_md_, dst_md_, bias_md_, attr_,
|
| 79 |
+
dnnl_get_max_threads()));
|
| 80 |
+
if (!gemm_x8s8s32x_convolution_utils::post_ops_ok(
|
| 81 |
+
attr()->post_ops_, &dst_md_))
|
| 82 |
+
return status::unimplemented;
|
| 83 |
+
return status::success;
|
| 84 |
+
}
|
| 85 |
+
|
| 86 |
+
conv_gemm_conf_t jcp_;
|
| 87 |
+
};
|
| 88 |
+
|
| 89 |
+
gemm_x8s8s32x_convolution_fwd_t(const pd_t *apd) : primitive_t(apd) {}
|
| 90 |
+
|
| 91 |
+
status_t init(engine_t *engine) override {
|
| 92 |
+
CHECK(safe_ptr_assign(pp_ker_, pp_ker_t::create(pd(), pd()->jcp_)));
|
| 93 |
+
return (pp_ker_) ? pp_ker_->create_kernel() : status::success;
|
| 94 |
+
}
|
| 95 |
+
|
| 96 |
+
status_t execute(const exec_ctx_t &ctx) const override {
|
| 97 |
+
return execute_forward(ctx);
|
| 98 |
+
}
|
| 99 |
+
|
| 100 |
+
private:
|
| 101 |
+
const pd_t *pd() const { return (const pd_t *)primitive_t::pd().get(); }
|
| 102 |
+
status_t execute_forward(const exec_ctx_t &ctx) const;
|
| 103 |
+
status_t execute_forward_thr(const int ithr, const int nthr,
|
| 104 |
+
const char *src_base, const int8_t *wei_base, const char *bia_base,
|
| 105 |
+
void *dst_base, const float *scales, const float *dst_scales,
|
| 106 |
+
const zero_point_call_params_t &zp,
|
| 107 |
+
const memory_tracking::grantor_t &scratchpad,
|
| 108 |
+
const void *post_ops_binary_rhs_arg_vec,
|
| 109 |
+
const exec_ctx_t &ctx) const;
|
| 110 |
+
|
| 111 |
+
using pp_ker_t = gemm_x8s8s32x_convolution_utils::pp_ker_t;
|
| 112 |
+
std::unique_ptr<pp_ker_t> pp_ker_;
|
| 113 |
+
};
|
| 114 |
+
|
| 115 |
+
struct gemm_x8s8s32x_convolution_bwd_data_t : public primitive_t {
|
| 116 |
+
struct pd_t : public cpu_convolution_bwd_data_pd_t {
|
| 117 |
+
pd_t(const convolution_desc_t *adesc, const primitive_attr_t *attr,
|
| 118 |
+
const convolution_fwd_pd_t *hint_fwd_pd)
|
| 119 |
+
: cpu_convolution_bwd_data_pd_t(adesc, attr, hint_fwd_pd), jcp_() {}
|
| 120 |
+
|
| 121 |
+
DECLARE_COMMON_PD_T(diff_dst_md()->data_type == data_type::u8
|
| 122 |
+
? IGEMM_S8U8S32_IMPL_STR
|
| 123 |
+
: IGEMM_S8S8S32_IMPL_STR,
|
| 124 |
+
gemm_x8s8s32x_convolution_bwd_data_t, USE_GLOBAL_SCRATCHPAD);
|
| 125 |
+
|
| 126 |
+
status_t init(engine_t *engine) {
|
| 127 |
+
using namespace data_type;
|
| 128 |
+
|
| 129 |
+
bool ok = desc()->prop_kind == prop_kind::backward_data
|
| 130 |
+
&& set_default_alg_kind(alg_kind::convolution_direct)
|
| 131 |
+
&& utils::one_of(diff_dst_md()->data_type, s8, u8)
|
| 132 |
+
&& weights_md()->data_type == s8
|
| 133 |
+
&& utils::one_of(
|
| 134 |
+
diff_src_md()->data_type, f32, bf16, s32, s8, u8)
|
| 135 |
+
&& IMPLICATION(with_bias(),
|
| 136 |
+
utils::one_of(weights_md(1)->data_type, f32, bf16,
|
| 137 |
+
s32, s8, u8))
|
| 138 |
+
&& !has_zero_dim_memory()
|
| 139 |
+
&& attr()->has_default_values(
|
| 140 |
+
primitive_attr_t::skip_mask_t::scales_runtime)
|
| 141 |
+
&& output_scales_mask_ok();
|
| 142 |
+
if (!ok) return status::unimplemented;
|
| 143 |
+
|
| 144 |
+
auto scratchpad = scratchpad_registry().registrar();
|
| 145 |
+
return jit_gemm_convolution_utils::init_conf(jcp_, scratchpad,
|
| 146 |
+
*desc(), diff_src_md_, weights_md_, diff_dst_md_, bias_md_,
|
| 147 |
+
attr_, dnnl_get_max_threads());
|
| 148 |
+
}
|
| 149 |
+
|
| 150 |
+
bool support_bias() const override { return true; }
|
| 151 |
+
|
| 152 |
+
conv_gemm_conf_t jcp_;
|
| 153 |
+
|
| 154 |
+
protected:
|
| 155 |
+
bool output_scales_mask_ok() const {
|
| 156 |
+
const auto &mask = attr()->output_scales_.mask_;
|
| 157 |
+
return mask == 0 || mask == 1 << 1;
|
| 158 |
+
}
|
| 159 |
+
};
|
| 160 |
+
|
| 161 |
+
gemm_x8s8s32x_convolution_bwd_data_t(const pd_t *apd) : primitive_t(apd) {}
|
| 162 |
+
|
| 163 |
+
status_t execute(const exec_ctx_t &ctx) const override {
|
| 164 |
+
return execute_backward_data(ctx);
|
| 165 |
+
}
|
| 166 |
+
|
| 167 |
+
private:
|
| 168 |
+
status_t execute_backward_data(const exec_ctx_t &ctx) const;
|
| 169 |
+
status_t execute_backward_data_thr(const int ithr, const int nthr,
|
| 170 |
+
const char *diff_dst_base, const int8_t *wei_base,
|
| 171 |
+
const char *bia_base, char *diff_src_base,
|
| 172 |
+
const memory_tracking::grantor_t &scratchpad,
|
| 173 |
+
const exec_ctx_t &ctx) const;
|
| 174 |
+
const pd_t *pd() const { return (const pd_t *)primitive_t::pd().get(); }
|
| 175 |
+
};
|
| 176 |
+
|
| 177 |
+
} // namespace cpu
|
| 178 |
+
} // namespace impl
|
| 179 |
+
} // namespace dnnl
|
| 180 |
+
|
| 181 |
+
#endif
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/gemm_x8s8s32x_convolution_utils.hpp
ADDED
|
@@ -0,0 +1,62 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*******************************************************************************
|
| 2 |
+
* Copyright 2020-2022 Intel Corporation
|
| 3 |
+
*
|
| 4 |
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
* you may not use this file except in compliance with the License.
|
| 6 |
+
* You may obtain a copy of the License at
|
| 7 |
+
*
|
| 8 |
+
* http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
*
|
| 10 |
+
* Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
* See the License for the specific language governing permissions and
|
| 14 |
+
* limitations under the License.
|
| 15 |
+
*******************************************************************************/
|
| 16 |
+
|
| 17 |
+
#ifndef CPU_GEMM_X8S8S32X_CONVOLUTION_UTILS_HPP
|
| 18 |
+
#define CPU_GEMM_X8S8S32X_CONVOLUTION_UTILS_HPP
|
| 19 |
+
|
| 20 |
+
#include "cpu/gemm_convolution_utils.hpp"
|
| 21 |
+
#if DNNL_X64
|
| 22 |
+
#include "cpu/x64/injectors/jit_uni_postops_injector.hpp"
|
| 23 |
+
#endif
|
| 24 |
+
|
| 25 |
+
namespace dnnl {
|
| 26 |
+
namespace impl {
|
| 27 |
+
namespace cpu {
|
| 28 |
+
namespace gemm_x8s8s32x_convolution_utils {
|
| 29 |
+
|
| 30 |
+
struct pp_ker_t {
|
| 31 |
+
static pp_ker_t *create(
|
| 32 |
+
const convolution_pd_t *pd, const conv_gemm_conf_t &jcp);
|
| 33 |
+
virtual ~pp_ker_t() = default;
|
| 34 |
+
|
| 35 |
+
typedef typename prec_traits<data_type::s32>::type acc_data_t;
|
| 36 |
+
|
| 37 |
+
virtual void operator()(void *dst, const acc_data_t *acc, const char *bias,
|
| 38 |
+
const float *scales, float dst_scale, float sum_scale,
|
| 39 |
+
float signed_scale, int g, size_t start, size_t end,
|
| 40 |
+
const zero_point_call_params_t &zp,
|
| 41 |
+
const void *post_ops_binary_rhs_arg_vec, const void *dst_orig,
|
| 42 |
+
const exec_ctx_t &ctx, const memory_desc_t &dst_md,
|
| 43 |
+
const single_gemm_conv_chunk_desc_t &chunk_desc) const = 0;
|
| 44 |
+
|
| 45 |
+
virtual status_t create_kernel() { return status::success; }
|
| 46 |
+
|
| 47 |
+
protected:
|
| 48 |
+
pp_ker_t(const convolution_pd_t *pd, const conv_gemm_conf_t &jcp);
|
| 49 |
+
|
| 50 |
+
const conv_gemm_conf_t &jcp_;
|
| 51 |
+
};
|
| 52 |
+
|
| 53 |
+
bool post_ops_ok(const post_ops_t &post_ops, const memory_desc_wrapper *dst_d);
|
| 54 |
+
bool post_ops_ok(const post_ops_t &post_ops, const memory_desc_t *dst_d);
|
| 55 |
+
bool mayiuse_jit_pp_kernel(data_type_t dst_dt) noexcept;
|
| 56 |
+
|
| 57 |
+
} // namespace gemm_x8s8s32x_convolution_utils
|
| 58 |
+
} // namespace cpu
|
| 59 |
+
} // namespace impl
|
| 60 |
+
} // namespace dnnl
|
| 61 |
+
|
| 62 |
+
#endif
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/gemm_x8s8s32x_inner_product.hpp
ADDED
|
@@ -0,0 +1,127 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*******************************************************************************
|
| 2 |
+
* Copyright 2018-2023 Intel Corporation
|
| 3 |
+
*
|
| 4 |
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
* you may not use this file except in compliance with the License.
|
| 6 |
+
* You may obtain a copy of the License at
|
| 7 |
+
*
|
| 8 |
+
* http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
*
|
| 10 |
+
* Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
* See the License for the specific language governing permissions and
|
| 14 |
+
* limitations under the License.
|
| 15 |
+
*******************************************************************************/
|
| 16 |
+
|
| 17 |
+
#ifndef CPU_GEMM_X8S8S32X_INNER_PRODUCT_HPP
|
| 18 |
+
#define CPU_GEMM_X8S8S32X_INNER_PRODUCT_HPP
|
| 19 |
+
|
| 20 |
+
#include <assert.h>
|
| 21 |
+
|
| 22 |
+
#include <memory>
|
| 23 |
+
|
| 24 |
+
#include "common/c_types_map.hpp"
|
| 25 |
+
#include "common/memory_tracking.hpp"
|
| 26 |
+
#include "common/primitive.hpp"
|
| 27 |
+
#include "common/type_helpers.hpp"
|
| 28 |
+
#include "common/utils.hpp"
|
| 29 |
+
|
| 30 |
+
#include "cpu/gemm/gemm.hpp"
|
| 31 |
+
#include "cpu/gemm_inner_product_utils.hpp"
|
| 32 |
+
|
| 33 |
+
#include "cpu/cpu_inner_product_pd.hpp"
|
| 34 |
+
#include "cpu/scale_utils.hpp"
|
| 35 |
+
#if DNNL_X64
|
| 36 |
+
#include "cpu/x64/injectors/jit_uni_postops_injector.hpp"
|
| 37 |
+
#endif
|
| 38 |
+
|
| 39 |
+
namespace dnnl {
|
| 40 |
+
namespace impl {
|
| 41 |
+
namespace cpu {
|
| 42 |
+
|
| 43 |
+
struct gemm_x8s8s32x_inner_product_fwd_t : public primitive_t {
|
| 44 |
+
struct pd_t : public cpu_inner_product_fwd_pd_t {
|
| 45 |
+
using cpu_inner_product_fwd_pd_t::cpu_inner_product_fwd_pd_t;
|
| 46 |
+
|
| 47 |
+
DECLARE_COMMON_PD_T(src_md()->data_type == data_type::u8
|
| 48 |
+
? IGEMM_S8U8S32_IMPL_STR
|
| 49 |
+
: IGEMM_S8S8S32_IMPL_STR,
|
| 50 |
+
gemm_x8s8s32x_inner_product_fwd_t, USE_GLOBAL_SCRATCHPAD);
|
| 51 |
+
|
| 52 |
+
status_t init(engine_t *engine) {
|
| 53 |
+
using namespace data_type;
|
| 54 |
+
|
| 55 |
+
const bool ok = is_fwd() && !has_zero_dim_memory()
|
| 56 |
+
&& utils::one_of(src_md()->data_type, s8, u8)
|
| 57 |
+
&& weights_md()->data_type == s8
|
| 58 |
+
&& utils::one_of(dst_md()->data_type, f32, s32, s8, u8)
|
| 59 |
+
&& IMPLICATION(with_bias(),
|
| 60 |
+
utils::one_of(
|
| 61 |
+
weights_md(1)->data_type, f32, s32, s8, u8))
|
| 62 |
+
&& attr()->has_default_values(
|
| 63 |
+
primitive_attr_t::skip_mask_t::scales_runtime
|
| 64 |
+
| primitive_attr_t::skip_mask_t::post_ops,
|
| 65 |
+
dst_md()->data_type)
|
| 66 |
+
&& attr()->post_ops_.check_sum_consistency(
|
| 67 |
+
dst_md()->data_type, /* is_int */ true)
|
| 68 |
+
&& attr_scales_ok()
|
| 69 |
+
&& set_default_params() == status::success
|
| 70 |
+
&& dense_gemm_consitency_check(
|
| 71 |
+
src_md(), weights_md(), dst_md())
|
| 72 |
+
&& attr_.set_default_formats(dst_md(0)) == status::success
|
| 73 |
+
&& inner_product_utils::post_ops_ok(
|
| 74 |
+
attr()->post_ops_, &dst_md_);
|
| 75 |
+
|
| 76 |
+
if (!ok) return status::unimplemented;
|
| 77 |
+
|
| 78 |
+
bool do_sum = attr()->post_ops_.find(primitive_kind::sum) >= 0;
|
| 79 |
+
dst_is_acc_
|
| 80 |
+
= utils::one_of(dst_md()->data_type, s32, f32) && !do_sum;
|
| 81 |
+
|
| 82 |
+
init_scratchpad();
|
| 83 |
+
|
| 84 |
+
return status::success;
|
| 85 |
+
}
|
| 86 |
+
|
| 87 |
+
bool dst_is_acc_;
|
| 88 |
+
|
| 89 |
+
private:
|
| 90 |
+
void init_scratchpad() {
|
| 91 |
+
auto scratchpad = scratchpad_registry().registrar();
|
| 92 |
+
if (!dst_is_acc_) {
|
| 93 |
+
scratchpad.template book<int32_t>(
|
| 94 |
+
memory_tracking::names::key_iprod_int_dat_in_acc_dt,
|
| 95 |
+
MB() * OC());
|
| 96 |
+
}
|
| 97 |
+
|
| 98 |
+
book_precomputed_scales(scratchpad, attr()->scales_, OC());
|
| 99 |
+
}
|
| 100 |
+
};
|
| 101 |
+
|
| 102 |
+
gemm_x8s8s32x_inner_product_fwd_t(const pd_t *apd) : primitive_t(apd) {}
|
| 103 |
+
|
| 104 |
+
status_t init(engine_t *engine) override {
|
| 105 |
+
CHECK(safe_ptr_assign(pp_kernel_,
|
| 106 |
+
inner_product_utils::pp_kernel_t::create(pd(), false)));
|
| 107 |
+
return pp_kernel_->create_kernel();
|
| 108 |
+
}
|
| 109 |
+
|
| 110 |
+
status_t execute(const exec_ctx_t &ctx) const override {
|
| 111 |
+
return execute_forward(ctx);
|
| 112 |
+
}
|
| 113 |
+
|
| 114 |
+
private:
|
| 115 |
+
status_t execute_forward(const exec_ctx_t &ctx) const;
|
| 116 |
+
const pd_t *pd() const { return (const pd_t *)primitive_t::pd().get(); }
|
| 117 |
+
|
| 118 |
+
std::unique_ptr<inner_product_utils::pp_kernel_t> pp_kernel_;
|
| 119 |
+
};
|
| 120 |
+
|
| 121 |
+
} // namespace cpu
|
| 122 |
+
} // namespace impl
|
| 123 |
+
} // namespace dnnl
|
| 124 |
+
|
| 125 |
+
#endif
|
| 126 |
+
|
| 127 |
+
// vim: et ts=4 sw=4 cindent cino+=l0,\:4,N-s
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/nchw_pooling.hpp
ADDED
|
@@ -0,0 +1,201 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*******************************************************************************
|
| 2 |
+
* Copyright 2017-2023 Intel Corporation
|
| 3 |
+
*
|
| 4 |
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
* you may not use this file except in compliance with the License.
|
| 6 |
+
* You may obtain a copy of the License at
|
| 7 |
+
*
|
| 8 |
+
* http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
*
|
| 10 |
+
* Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
* See the License for the specific language governing permissions and
|
| 14 |
+
* limitations under the License.
|
| 15 |
+
*******************************************************************************/
|
| 16 |
+
|
| 17 |
+
#ifndef CPU_NCHW_POOLING_HPP
|
| 18 |
+
#define CPU_NCHW_POOLING_HPP
|
| 19 |
+
|
| 20 |
+
#include <assert.h>
|
| 21 |
+
|
| 22 |
+
#include "common/bfloat16.hpp"
|
| 23 |
+
#include "common/c_types_map.hpp"
|
| 24 |
+
#include "common/dnnl_thread.hpp"
|
| 25 |
+
#include "common/primitive.hpp"
|
| 26 |
+
#include "common/type_helpers.hpp"
|
| 27 |
+
#include "common/utils.hpp"
|
| 28 |
+
|
| 29 |
+
#include "cpu/cpu_pooling_pd.hpp"
|
| 30 |
+
#include "cpu/platform.hpp"
|
| 31 |
+
#include "cpu/primitive_attr_postops.hpp"
|
| 32 |
+
|
| 33 |
+
namespace dnnl {
|
| 34 |
+
namespace impl {
|
| 35 |
+
namespace cpu {
|
| 36 |
+
|
| 37 |
+
template <data_type_t d_type>
|
| 38 |
+
struct nchw_pooling_fwd_t : public primitive_t {
|
| 39 |
+
struct pd_t : public cpu_pooling_fwd_pd_t {
|
| 40 |
+
using cpu_pooling_fwd_pd_t::cpu_pooling_fwd_pd_t;
|
| 41 |
+
|
| 42 |
+
DECLARE_COMMON_PD_T("simple_nchw:any", nchw_pooling_fwd_t);
|
| 43 |
+
|
| 44 |
+
status_t init(engine_t *engine) {
|
| 45 |
+
const format_tag_t desired_fmt_tag = utils::pick(ndims() - 3,
|
| 46 |
+
format_tag::ncw, format_tag::nchw, format_tag::ncdhw);
|
| 47 |
+
|
| 48 |
+
const bool ok = is_fwd()
|
| 49 |
+
&& utils::one_of(desc()->alg_kind, alg_kind::pooling_max,
|
| 50 |
+
alg_kind::pooling_avg_include_padding,
|
| 51 |
+
alg_kind::pooling_avg_exclude_padding)
|
| 52 |
+
&& utils::everyone_is(
|
| 53 |
+
d_type, src_md()->data_type, dst_md()->data_type)
|
| 54 |
+
&& platform::has_data_type_support(d_type)
|
| 55 |
+
&& !has_zero_dim_memory() && !is_dilated()
|
| 56 |
+
&& attr()->has_default_values(
|
| 57 |
+
primitive_attr_t::skip_mask_t::post_ops, d_type)
|
| 58 |
+
&& ref_post_ops_t::primitive_kind_ok(attr()->post_ops_)
|
| 59 |
+
&& set_default_params() == status::success
|
| 60 |
+
&& memory_desc_matches_tag(*src_md(), desired_fmt_tag)
|
| 61 |
+
&& memory_desc_matches_tag(*dst_md(), desired_fmt_tag)
|
| 62 |
+
&& attr_.set_default_formats(dst_md(0)) == status::success;
|
| 63 |
+
if (!ok) return status::unimplemented;
|
| 64 |
+
|
| 65 |
+
const bool is_training
|
| 66 |
+
= desc_.prop_kind == prop_kind::forward_training;
|
| 67 |
+
if (desc()->alg_kind == alg_kind::pooling_max && is_training)
|
| 68 |
+
init_default_ws();
|
| 69 |
+
|
| 70 |
+
init_scratchpad();
|
| 71 |
+
|
| 72 |
+
return status::success;
|
| 73 |
+
}
|
| 74 |
+
|
| 75 |
+
private:
|
| 76 |
+
void init_scratchpad() {
|
| 77 |
+
using namespace memory_tracking::names;
|
| 78 |
+
if (src_md()->data_type != data_type::f32) {
|
| 79 |
+
const size_t src_sz_ = ID() * IH() * IW() * IC() * MB();
|
| 80 |
+
auto scratchpad = scratchpad_registry().registrar();
|
| 81 |
+
scratchpad.template book<float>(key_pool_src_bf16cvt, src_sz_);
|
| 82 |
+
}
|
| 83 |
+
}
|
| 84 |
+
};
|
| 85 |
+
|
| 86 |
+
nchw_pooling_fwd_t(const pd_t *apd) : primitive_t(apd) {}
|
| 87 |
+
|
| 88 |
+
using data_t = typename prec_traits<d_type>::type;
|
| 89 |
+
|
| 90 |
+
status_t init(engine_t *engine) override {
|
| 91 |
+
ref_post_ops_
|
| 92 |
+
= utils::make_unique<ref_post_ops_t>(pd()->attr()->post_ops_);
|
| 93 |
+
if (!ref_post_ops_) return status::out_of_memory;
|
| 94 |
+
CHECK(ref_post_ops_->init(pd()->dst_md()));
|
| 95 |
+
return status::success;
|
| 96 |
+
}
|
| 97 |
+
|
| 98 |
+
status_t execute(const exec_ctx_t &ctx) const override {
|
| 99 |
+
return execute_forward(ctx);
|
| 100 |
+
}
|
| 101 |
+
|
| 102 |
+
private:
|
| 103 |
+
status_t execute_forward(const exec_ctx_t &ctx) const;
|
| 104 |
+
const pd_t *pd() const { return (const pd_t *)primitive_t::pd().get(); }
|
| 105 |
+
std::unique_ptr<ref_post_ops_t> ref_post_ops_;
|
| 106 |
+
};
|
| 107 |
+
|
| 108 |
+
template <data_type_t d_type>
|
| 109 |
+
struct nchw_pooling_bwd_t : public primitive_t {
|
| 110 |
+
struct pd_t : public cpu_pooling_bwd_pd_t {
|
| 111 |
+
using cpu_pooling_bwd_pd_t::cpu_pooling_bwd_pd_t;
|
| 112 |
+
|
| 113 |
+
DECLARE_COMMON_PD_T("simple_nchw:any", nchw_pooling_bwd_t);
|
| 114 |
+
|
| 115 |
+
status_t init(engine_t *engine) {
|
| 116 |
+
const format_tag_t desired_fmt_tag = utils::pick(ndims() - 3,
|
| 117 |
+
format_tag::ncw, format_tag::nchw, format_tag::ncdhw);
|
| 118 |
+
|
| 119 |
+
using namespace prop_kind;
|
| 120 |
+
using namespace alg_kind;
|
| 121 |
+
bool ok = !is_fwd()
|
| 122 |
+
&& utils::one_of(desc()->alg_kind, alg_kind::pooling_max,
|
| 123 |
+
alg_kind::pooling_avg_include_padding,
|
| 124 |
+
alg_kind::pooling_avg_exclude_padding)
|
| 125 |
+
&& utils::everyone_is(d_type, diff_dst_md()->data_type,
|
| 126 |
+
diff_src_md()->data_type)
|
| 127 |
+
&& platform::has_data_type_support(d_type)
|
| 128 |
+
&& !has_zero_dim_memory()
|
| 129 |
+
&& set_default_params() == status::success
|
| 130 |
+
&& attr()->has_default_values()
|
| 131 |
+
&& memory_desc_matches_tag(*diff_dst_md(), desired_fmt_tag)
|
| 132 |
+
&& memory_desc_matches_tag(*diff_src_md(), desired_fmt_tag)
|
| 133 |
+
&& !is_dilated();
|
| 134 |
+
if (!ok) return status::unimplemented;
|
| 135 |
+
|
| 136 |
+
if (desc()->alg_kind == pooling_max) {
|
| 137 |
+
const auto ws_dt = hint_fwd_pd_->workspace_md()->data_type;
|
| 138 |
+
init_default_ws(ws_dt);
|
| 139 |
+
if (!compare_ws(hint_fwd_pd_)) return status::unimplemented;
|
| 140 |
+
}
|
| 141 |
+
|
| 142 |
+
nthr_ = dnnl_get_max_threads();
|
| 143 |
+
calculate_channel_block_size();
|
| 144 |
+
init_scratchpad();
|
| 145 |
+
|
| 146 |
+
return status::success;
|
| 147 |
+
}
|
| 148 |
+
|
| 149 |
+
dim_t channel_block_size_;
|
| 150 |
+
int nthr_; // To not exceed the limit in execute used for set up.
|
| 151 |
+
|
| 152 |
+
private:
|
| 153 |
+
void init_scratchpad() {
|
| 154 |
+
using namespace memory_tracking::names;
|
| 155 |
+
if (diff_dst_md()->data_type != data_type::f32) {
|
| 156 |
+
size_t dst_sz_ = OD() * OH() * OW();
|
| 157 |
+
size_t src_sz_ = ID() * IH() * IW();
|
| 158 |
+
auto scratchpad = scratchpad_registry().registrar();
|
| 159 |
+
|
| 160 |
+
scratchpad.template book<float>(key_pool_src_bf16cvt,
|
| 161 |
+
src_sz_ * nthr_ * channel_block_size_);
|
| 162 |
+
scratchpad.template book<float>(key_pool_dst_bf16cvt,
|
| 163 |
+
dst_sz_ * nthr_ * channel_block_size_);
|
| 164 |
+
}
|
| 165 |
+
}
|
| 166 |
+
|
| 167 |
+
void calculate_channel_block_size() {
|
| 168 |
+
// calculate channels block size at which the data fits into half
|
| 169 |
+
// of L1, it allows to improve performance for problems with small
|
| 170 |
+
// spatial
|
| 171 |
+
dim_t dst_sz_ = OD() * OH() * OW();
|
| 172 |
+
dim_t src_sz_ = ID() * IH() * IW();
|
| 173 |
+
dim_t C_per_thr = nstl::min(MB() * IC() / nthr_, IC());
|
| 174 |
+
const dim_t max_block_size
|
| 175 |
+
= platform::get_per_core_cache_size(1) / 2;
|
| 176 |
+
dim_t data_size_per_ch = (dst_sz_ + src_sz_) * 6; // f32 + bf16
|
| 177 |
+
channel_block_size_ = nstl::max(
|
| 178 |
+
nstl::min(C_per_thr, max_block_size / data_size_per_ch),
|
| 179 |
+
(dim_t)1);
|
| 180 |
+
}
|
| 181 |
+
};
|
| 182 |
+
|
| 183 |
+
nchw_pooling_bwd_t(const pd_t *apd) : primitive_t(apd) {}
|
| 184 |
+
typedef typename prec_traits<d_type>::type data_t;
|
| 185 |
+
|
| 186 |
+
status_t execute(const exec_ctx_t &ctx) const override {
|
| 187 |
+
return execute_backward(ctx);
|
| 188 |
+
}
|
| 189 |
+
|
| 190 |
+
private:
|
| 191 |
+
status_t execute_backward(const exec_ctx_t &ctx) const;
|
| 192 |
+
const pd_t *pd() const { return (const pd_t *)primitive_t::pd().get(); }
|
| 193 |
+
};
|
| 194 |
+
|
| 195 |
+
} // namespace cpu
|
| 196 |
+
} // namespace impl
|
| 197 |
+
} // namespace dnnl
|
| 198 |
+
|
| 199 |
+
#endif
|
| 200 |
+
|
| 201 |
+
// vim: et ts=4 sw=4 cindent cino+=l0,\:4,N-s
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/ncsp_batch_normalization.hpp
ADDED
|
@@ -0,0 +1,211 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*******************************************************************************
|
| 2 |
+
* Copyright 2018-2023 Intel Corporation
|
| 3 |
+
*
|
| 4 |
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
* you may not use this file except in compliance with the License.
|
| 6 |
+
* You may obtain a copy of the License at
|
| 7 |
+
*
|
| 8 |
+
* http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
*
|
| 10 |
+
* Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
* See the License for the specific language governing permissions and
|
| 14 |
+
* limitations under the License.
|
| 15 |
+
*******************************************************************************/
|
| 16 |
+
|
| 17 |
+
#ifndef CPU_NCSP_BATCH_NORMALIZATION_HPP
|
| 18 |
+
#define CPU_NCSP_BATCH_NORMALIZATION_HPP
|
| 19 |
+
|
| 20 |
+
#include <assert.h>
|
| 21 |
+
|
| 22 |
+
#include "common/c_types_map.hpp"
|
| 23 |
+
#include "common/dnnl_thread.hpp"
|
| 24 |
+
#include "common/memory_tracking.hpp"
|
| 25 |
+
#include "common/primitive.hpp"
|
| 26 |
+
#include "common/type_helpers.hpp"
|
| 27 |
+
#include "common/utils.hpp"
|
| 28 |
+
|
| 29 |
+
#include "cpu/platform.hpp"
|
| 30 |
+
|
| 31 |
+
#include "cpu/cpu_batch_normalization_pd.hpp"
|
| 32 |
+
|
| 33 |
+
namespace dnnl {
|
| 34 |
+
namespace impl {
|
| 35 |
+
namespace cpu {
|
| 36 |
+
|
| 37 |
+
template <data_type_t d_type>
|
| 38 |
+
struct ncsp_batch_normalization_fwd_t : public primitive_t {
|
| 39 |
+
struct pd_t : public cpu_batch_normalization_fwd_pd_t {
|
| 40 |
+
using cpu_batch_normalization_fwd_pd_t::
|
| 41 |
+
cpu_batch_normalization_fwd_pd_t;
|
| 42 |
+
|
| 43 |
+
DECLARE_COMMON_PD_T("ncsp_bnorm:any", ncsp_batch_normalization_fwd_t);
|
| 44 |
+
|
| 45 |
+
status_t init(engine_t *engine) {
|
| 46 |
+
using namespace data_type;
|
| 47 |
+
using namespace format_tag;
|
| 48 |
+
|
| 49 |
+
bool ok = is_fwd() && !has_zero_dim_memory()
|
| 50 |
+
&& utils::everyone_is(
|
| 51 |
+
d_type, src_md()->data_type, dst_md()->data_type)
|
| 52 |
+
&& platform::has_data_type_support(d_type)
|
| 53 |
+
&& IMPLICATION(is_training(),
|
| 54 |
+
platform::has_training_support(d_type))
|
| 55 |
+
&& check_scale_shift_data_type()
|
| 56 |
+
&& (attr()->has_default_values()
|
| 57 |
+
|| with_relu_post_op(is_training()))
|
| 58 |
+
&& set_default_formats_common()
|
| 59 |
+
&& memory_desc_wrapper(src_md())
|
| 60 |
+
== memory_desc_wrapper(dst_md())
|
| 61 |
+
&& memory_desc_matches_one_of_tag(
|
| 62 |
+
*src_md(), ncdhw, nchw, ncw);
|
| 63 |
+
if (!ok) return status::unimplemented;
|
| 64 |
+
|
| 65 |
+
// BN+Add+Relu fusion is not currently implemented
|
| 66 |
+
if (fuse_norm_add_relu()) return status::unimplemented;
|
| 67 |
+
|
| 68 |
+
if (is_training() && fuse_norm_relu()) init_default_ws(8);
|
| 69 |
+
|
| 70 |
+
nthr_ = dnnl_get_max_threads();
|
| 71 |
+
init_scratchpad();
|
| 72 |
+
|
| 73 |
+
return status::success;
|
| 74 |
+
}
|
| 75 |
+
|
| 76 |
+
int nthr_; // To not exceed the limit in execute used for set up.
|
| 77 |
+
|
| 78 |
+
private:
|
| 79 |
+
void init_scratchpad() {
|
| 80 |
+
using namespace memory_tracking::names;
|
| 81 |
+
auto scratchpad = scratchpad_registry().registrar();
|
| 82 |
+
if (!stats_is_src()) {
|
| 83 |
+
scratchpad.template book<acc_data_t>(
|
| 84 |
+
key_bnorm_reduction, C() * nthr_);
|
| 85 |
+
|
| 86 |
+
if (!is_training()) {
|
| 87 |
+
scratchpad.template book<acc_data_t>(
|
| 88 |
+
key_bnorm_tmp_mean, C());
|
| 89 |
+
scratchpad.template book<acc_data_t>(
|
| 90 |
+
key_bnorm_tmp_var, C());
|
| 91 |
+
}
|
| 92 |
+
}
|
| 93 |
+
|
| 94 |
+
if (utils::one_of(d_type, data_type::bf16, data_type::f16)) {
|
| 95 |
+
static constexpr dim_t simd_w = 16;
|
| 96 |
+
const dim_t SP = D() * H() * W();
|
| 97 |
+
const int nbufs = 2;
|
| 98 |
+
const size_t cvt_buf_sz
|
| 99 |
+
= nbufs * nthr_ * utils::rnd_up(SP, simd_w);
|
| 100 |
+
scratchpad.template book<acc_data_t>(key_bnorm_cvt, cvt_buf_sz);
|
| 101 |
+
}
|
| 102 |
+
}
|
| 103 |
+
};
|
| 104 |
+
|
| 105 |
+
typedef typename prec_traits<d_type>::type data_t;
|
| 106 |
+
typedef float acc_data_t;
|
| 107 |
+
|
| 108 |
+
ncsp_batch_normalization_fwd_t(const pd_t *apd) : primitive_t(apd) {}
|
| 109 |
+
~ncsp_batch_normalization_fwd_t() {}
|
| 110 |
+
|
| 111 |
+
status_t execute(const exec_ctx_t &ctx) const override {
|
| 112 |
+
return execute_forward(ctx);
|
| 113 |
+
}
|
| 114 |
+
|
| 115 |
+
private:
|
| 116 |
+
status_t execute_forward(const exec_ctx_t &ctx) const;
|
| 117 |
+
const pd_t *pd() const { return (const pd_t *)primitive_t::pd().get(); }
|
| 118 |
+
};
|
| 119 |
+
|
| 120 |
+
template <data_type_t d_type>
|
| 121 |
+
struct ncsp_batch_normalization_bwd_t : public primitive_t {
|
| 122 |
+
struct pd_t : public cpu_batch_normalization_bwd_pd_t {
|
| 123 |
+
using cpu_batch_normalization_bwd_pd_t::
|
| 124 |
+
cpu_batch_normalization_bwd_pd_t;
|
| 125 |
+
|
| 126 |
+
DECLARE_COMMON_PD_T("ncsp_bnorm:any", ncsp_batch_normalization_bwd_t);
|
| 127 |
+
|
| 128 |
+
status_t init(engine_t *engine) {
|
| 129 |
+
using namespace data_type;
|
| 130 |
+
using namespace format_tag;
|
| 131 |
+
|
| 132 |
+
bool ok = !is_fwd() && !has_zero_dim_memory()
|
| 133 |
+
&& utils::everyone_is(d_type, src_md()->data_type,
|
| 134 |
+
diff_dst_md()->data_type, diff_src_md()->data_type)
|
| 135 |
+
&& platform::has_data_type_support(d_type)
|
| 136 |
+
&& platform::has_training_support(d_type)
|
| 137 |
+
&& check_scale_shift_data_type()
|
| 138 |
+
&& attr()->has_default_values()
|
| 139 |
+
&& set_default_formats_common()
|
| 140 |
+
&& memory_desc_wrapper(diff_src_md())
|
| 141 |
+
== memory_desc_wrapper(diff_dst_md())
|
| 142 |
+
&& memory_desc_matches_one_of_tag(
|
| 143 |
+
*src_md(), ncdhw, nchw, ncw)
|
| 144 |
+
&& memory_desc_matches_one_of_tag(
|
| 145 |
+
*diff_src_md(), ncdhw, nchw, ncw);
|
| 146 |
+
if (!ok) return status::unimplemented;
|
| 147 |
+
|
| 148 |
+
// BN+Add+Relu fusion is not currently implemented
|
| 149 |
+
if (fuse_norm_add_relu()) return status::unimplemented;
|
| 150 |
+
|
| 151 |
+
if (fuse_norm_relu()) {
|
| 152 |
+
init_default_ws(8);
|
| 153 |
+
if (!compare_ws(hint_fwd_pd_)) return status::unimplemented;
|
| 154 |
+
}
|
| 155 |
+
|
| 156 |
+
nthr_ = dnnl_get_max_threads();
|
| 157 |
+
init_scratchpad();
|
| 158 |
+
|
| 159 |
+
return status::success;
|
| 160 |
+
}
|
| 161 |
+
|
| 162 |
+
int nthr_; // To not exceed the limit in execute used for set up.
|
| 163 |
+
|
| 164 |
+
private:
|
| 165 |
+
void init_scratchpad() {
|
| 166 |
+
using namespace memory_tracking::names;
|
| 167 |
+
auto scratchpad = scratchpad_registry().registrar();
|
| 168 |
+
scratchpad.template book<acc_data_t>(
|
| 169 |
+
key_bnorm_reduction, 2 * C() * nthr_);
|
| 170 |
+
const auto pk_is_bwd = desc()->prop_kind == prop_kind::backward;
|
| 171 |
+
size_t ss_size = 0;
|
| 172 |
+
if (!use_scale() || !pk_is_bwd) ss_size += C();
|
| 173 |
+
if (!use_shift() || !pk_is_bwd) ss_size += C();
|
| 174 |
+
|
| 175 |
+
if (ss_size)
|
| 176 |
+
scratchpad.template book<acc_data_t>(
|
| 177 |
+
key_bnorm_tmp_diff_ss, ss_size);
|
| 178 |
+
|
| 179 |
+
if (utils::one_of(d_type, data_type::bf16, data_type::f16)) {
|
| 180 |
+
static constexpr dim_t simd_w = 16;
|
| 181 |
+
const dim_t SP = D() * H() * W();
|
| 182 |
+
const int nbufs = 2 + !use_global_stats();
|
| 183 |
+
const size_t cvt_buf_sz
|
| 184 |
+
= nbufs * nthr_ * utils::rnd_up(SP, simd_w);
|
| 185 |
+
scratchpad.template book<acc_data_t>(key_bnorm_cvt, cvt_buf_sz);
|
| 186 |
+
}
|
| 187 |
+
}
|
| 188 |
+
};
|
| 189 |
+
|
| 190 |
+
typedef typename prec_traits<d_type>::type data_t;
|
| 191 |
+
typedef float acc_data_t;
|
| 192 |
+
|
| 193 |
+
ncsp_batch_normalization_bwd_t(const pd_t *apd) : primitive_t(apd) {}
|
| 194 |
+
~ncsp_batch_normalization_bwd_t() {}
|
| 195 |
+
|
| 196 |
+
status_t execute(const exec_ctx_t &ctx) const override {
|
| 197 |
+
return execute_backward(ctx);
|
| 198 |
+
}
|
| 199 |
+
|
| 200 |
+
private:
|
| 201 |
+
status_t execute_backward(const exec_ctx_t &ctx) const;
|
| 202 |
+
const pd_t *pd() const { return (const pd_t *)primitive_t::pd().get(); }
|
| 203 |
+
};
|
| 204 |
+
|
| 205 |
+
} // namespace cpu
|
| 206 |
+
} // namespace impl
|
| 207 |
+
} // namespace dnnl
|
| 208 |
+
|
| 209 |
+
#endif
|
| 210 |
+
|
| 211 |
+
// vim: et ts=4 sw=4 cindent cino+=l0,\:4,N-s
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/ncsp_group_normalization.hpp
ADDED
|
@@ -0,0 +1,106 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*******************************************************************************
|
| 2 |
+
* Copyright 2023 Intel Corporation
|
| 3 |
+
*
|
| 4 |
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
* you may not use this file except in compliance with the License.
|
| 6 |
+
* You may obtain a copy of the License at
|
| 7 |
+
*
|
| 8 |
+
* http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
*
|
| 10 |
+
* Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
* See the License for the specific language governing permissions and
|
| 14 |
+
* limitations under the License.
|
| 15 |
+
*******************************************************************************/
|
| 16 |
+
|
| 17 |
+
#ifndef CPU_NCSP_GROUP_NORMALIZATION_HPP
|
| 18 |
+
#define CPU_NCSP_GROUP_NORMALIZATION_HPP
|
| 19 |
+
|
| 20 |
+
#include "common/c_types_map.hpp"
|
| 21 |
+
#include "common/dnnl_thread.hpp"
|
| 22 |
+
#include "common/memory_tracking.hpp"
|
| 23 |
+
#include "common/primitive.hpp"
|
| 24 |
+
#include "common/type_helpers.hpp"
|
| 25 |
+
#include "common/utils.hpp"
|
| 26 |
+
|
| 27 |
+
#include "cpu/cpu_group_normalization_pd.hpp"
|
| 28 |
+
#include "cpu/platform.hpp"
|
| 29 |
+
|
| 30 |
+
namespace dnnl {
|
| 31 |
+
namespace impl {
|
| 32 |
+
namespace cpu {
|
| 33 |
+
|
| 34 |
+
struct ncsp_group_normalization_fwd_t : public primitive_t {
|
| 35 |
+
using primitive_t::primitive_t;
|
| 36 |
+
|
| 37 |
+
struct pd_t : public cpu_group_normalization_fwd_pd_t {
|
| 38 |
+
using cpu_group_normalization_fwd_pd_t::
|
| 39 |
+
cpu_group_normalization_fwd_pd_t;
|
| 40 |
+
|
| 41 |
+
DECLARE_COMMON_PD_T("ncsp_gnorm:any", ncsp_group_normalization_fwd_t);
|
| 42 |
+
|
| 43 |
+
status_t init(engine_t *engine) {
|
| 44 |
+
using namespace data_type;
|
| 45 |
+
using namespace format_tag;
|
| 46 |
+
using skip_mask_t = primitive_attr_t::skip_mask_t;
|
| 47 |
+
|
| 48 |
+
VDISPATCH_GNORM(is_fwd(), VERBOSE_BAD_PROPKIND);
|
| 49 |
+
VDISPATCH_GNORM(
|
| 50 |
+
!has_zero_dim_memory(), VERBOSE_EMPTY_TENSOR, "src");
|
| 51 |
+
VDISPATCH_GNORM(
|
| 52 |
+
utils::one_of(src_md()->data_type, f32, bf16, f16, s8, u8)
|
| 53 |
+
&& platform::has_data_type_support(
|
| 54 |
+
src_md()->data_type),
|
| 55 |
+
VERBOSE_UNSUPPORTED_DT);
|
| 56 |
+
VDISPATCH_GNORM(
|
| 57 |
+
utils::one_of(dst_md()->data_type, f32, bf16, f16, s8, u8)
|
| 58 |
+
&& platform::has_data_type_support(
|
| 59 |
+
dst_md()->data_type),
|
| 60 |
+
VERBOSE_UNSUPPORTED_DT);
|
| 61 |
+
VDISPATCH_GNORM(
|
| 62 |
+
check_scale_shift_data_type(), VERBOSE_UNSUPPORTED_DT);
|
| 63 |
+
VDISPATCH_GNORM(memory_desc_matches_one_of_tag(
|
| 64 |
+
*src_md(), ncdhw, nchw, ncw, nc),
|
| 65 |
+
VERBOSE_UNSUPPORTED_TAG);
|
| 66 |
+
VDISPATCH_GNORM(memory_desc_matches_one_of_tag(
|
| 67 |
+
*dst_md(), ncdhw, nchw, ncw, nc),
|
| 68 |
+
VERBOSE_UNSUPPORTED_TAG);
|
| 69 |
+
VDISPATCH_GNORM(
|
| 70 |
+
set_default_formats_common(), VERBOSE_UNSUPPORTED_TAG);
|
| 71 |
+
VDISPATCH_GNORM(
|
| 72 |
+
attr()->has_default_values(skip_mask_t::scales_runtime)
|
| 73 |
+
&& attr_scales_ok(),
|
| 74 |
+
VERBOSE_UNSUPPORTED_ATTR);
|
| 75 |
+
nthr_ = dnnl_get_max_threads();
|
| 76 |
+
|
| 77 |
+
using namespace memory_tracking::names;
|
| 78 |
+
auto scratchpad = scratchpad_registry().registrar();
|
| 79 |
+
|
| 80 |
+
const auto src_dt = src_md()->data_type;
|
| 81 |
+
const auto dst_dt = dst_md()->data_type;
|
| 82 |
+
if (!utils::everyone_is(data_type::f32, src_dt, dst_dt)) {
|
| 83 |
+
const size_t cvt_buf_sz = nthr_ * cvt_per_thread_size_;
|
| 84 |
+
scratchpad.template book<float>(key_gnorm_cvt, cvt_buf_sz);
|
| 85 |
+
}
|
| 86 |
+
return status::success;
|
| 87 |
+
}
|
| 88 |
+
|
| 89 |
+
static constexpr size_t cvt_per_thread_size_ = 16;
|
| 90 |
+
int nthr_; // To not exceed the limit in execute used for set up.
|
| 91 |
+
};
|
| 92 |
+
|
| 93 |
+
status_t execute(const exec_ctx_t &ctx) const override {
|
| 94 |
+
return execute_forward(ctx);
|
| 95 |
+
}
|
| 96 |
+
|
| 97 |
+
private:
|
| 98 |
+
status_t execute_forward(const exec_ctx_t &ctx) const;
|
| 99 |
+
const pd_t *pd() const { return (const pd_t *)primitive_t::pd().get(); }
|
| 100 |
+
};
|
| 101 |
+
|
| 102 |
+
} // namespace cpu
|
| 103 |
+
} // namespace impl
|
| 104 |
+
} // namespace dnnl
|
| 105 |
+
|
| 106 |
+
#endif
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/nhwc_pooling.hpp
ADDED
|
@@ -0,0 +1,204 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*******************************************************************************
|
| 2 |
+
* Copyright 2019-2023 Intel Corporation
|
| 3 |
+
*
|
| 4 |
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
* you may not use this file except in compliance with the License.
|
| 6 |
+
* You may obtain a copy of the License at
|
| 7 |
+
*
|
| 8 |
+
* http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
*
|
| 10 |
+
* Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
* See the License for the specific language governing permissions and
|
| 14 |
+
* limitations under the License.
|
| 15 |
+
*******************************************************************************/
|
| 16 |
+
|
| 17 |
+
#ifndef CPU_NHWC_POOLING_HPP
|
| 18 |
+
#define CPU_NHWC_POOLING_HPP
|
| 19 |
+
|
| 20 |
+
#include <assert.h>
|
| 21 |
+
|
| 22 |
+
#include "common/bfloat16.hpp"
|
| 23 |
+
#include "common/c_types_map.hpp"
|
| 24 |
+
#include "common/dnnl_thread.hpp"
|
| 25 |
+
#include "common/primitive.hpp"
|
| 26 |
+
#include "common/type_helpers.hpp"
|
| 27 |
+
#include "common/utils.hpp"
|
| 28 |
+
|
| 29 |
+
#include "cpu/cpu_pooling_pd.hpp"
|
| 30 |
+
#include "cpu/platform.hpp"
|
| 31 |
+
#include "cpu/primitive_attr_postops.hpp"
|
| 32 |
+
|
| 33 |
+
namespace dnnl {
|
| 34 |
+
namespace impl {
|
| 35 |
+
namespace cpu {
|
| 36 |
+
|
| 37 |
+
namespace nhwc_pooling {
|
| 38 |
+
size_t strided_offset(const int _n, const size_t _sn, const int _d,
|
| 39 |
+
const size_t _sd, const int _h, const size_t _sh, const int _w,
|
| 40 |
+
const size_t _sw);
|
| 41 |
+
}
|
| 42 |
+
|
| 43 |
+
template <data_type_t d_type>
|
| 44 |
+
struct nhwc_pooling_fwd_t : public primitive_t {
|
| 45 |
+
struct pd_t : public cpu_pooling_fwd_pd_t {
|
| 46 |
+
using cpu_pooling_fwd_pd_t::cpu_pooling_fwd_pd_t;
|
| 47 |
+
|
| 48 |
+
DECLARE_COMMON_PD_T("simple_nhwc:any", nhwc_pooling_fwd_t);
|
| 49 |
+
|
| 50 |
+
status_t init(engine_t *engine) {
|
| 51 |
+
const format_tag_t desired_fmt_tag = utils::pick(ndims() - 3,
|
| 52 |
+
format_tag::nwc, format_tag::nhwc, format_tag::ndhwc);
|
| 53 |
+
|
| 54 |
+
using namespace prop_kind;
|
| 55 |
+
using namespace alg_kind;
|
| 56 |
+
const bool ok = is_fwd()
|
| 57 |
+
&& utils::one_of(desc()->alg_kind, pooling_max,
|
| 58 |
+
pooling_avg_include_padding,
|
| 59 |
+
pooling_avg_exclude_padding)
|
| 60 |
+
&& utils::everyone_is(
|
| 61 |
+
d_type, src_md()->data_type, dst_md()->data_type)
|
| 62 |
+
&& platform::has_data_type_support(d_type) && !is_dilated()
|
| 63 |
+
&& attr()->has_default_values(
|
| 64 |
+
primitive_attr_t::skip_mask_t::post_ops, d_type)
|
| 65 |
+
&& ref_post_ops_t::primitive_kind_ok(attr()->post_ops_)
|
| 66 |
+
&& set_default_params() == status::success
|
| 67 |
+
&& memory_desc_matches_tag(*src_md(), desired_fmt_tag)
|
| 68 |
+
&& memory_desc_matches_tag(*dst_md(), desired_fmt_tag)
|
| 69 |
+
&& attr_.set_default_formats(dst_md(0)) == status::success;
|
| 70 |
+
if (!ok) return status::unimplemented;
|
| 71 |
+
|
| 72 |
+
const bool is_training = desc_.prop_kind == forward_training;
|
| 73 |
+
if (desc()->alg_kind == pooling_max && is_training) {
|
| 74 |
+
init_default_ws();
|
| 75 |
+
}
|
| 76 |
+
|
| 77 |
+
nthr_ = dnnl_get_max_threads();
|
| 78 |
+
init_scratchpad();
|
| 79 |
+
|
| 80 |
+
return status::success;
|
| 81 |
+
}
|
| 82 |
+
|
| 83 |
+
int nthr_; // To not exceed the limit in execute used for set up.
|
| 84 |
+
|
| 85 |
+
private:
|
| 86 |
+
void init_scratchpad() {
|
| 87 |
+
using namespace memory_tracking::names;
|
| 88 |
+
if (src_md()->data_type != data_type::f32) {
|
| 89 |
+
const size_t bf16cvt_sz_ = IC() * nthr_;
|
| 90 |
+
auto scratchpad = scratchpad_registry().registrar();
|
| 91 |
+
scratchpad.template book<float>(
|
| 92 |
+
key_pool_src_bf16cvt, bf16cvt_sz_);
|
| 93 |
+
scratchpad.template book<float>(
|
| 94 |
+
key_pool_dst_bf16cvt, bf16cvt_sz_);
|
| 95 |
+
}
|
| 96 |
+
}
|
| 97 |
+
};
|
| 98 |
+
|
| 99 |
+
nhwc_pooling_fwd_t(const pd_t *apd) : primitive_t(apd) {}
|
| 100 |
+
|
| 101 |
+
using data_t = typename prec_traits<d_type>::type;
|
| 102 |
+
using ker_data_t = typename prec_traits<data_type::f32>::type;
|
| 103 |
+
|
| 104 |
+
status_t init(engine_t *engine) override {
|
| 105 |
+
ref_post_ops_
|
| 106 |
+
= utils::make_unique<ref_post_ops_t>(pd()->attr()->post_ops_);
|
| 107 |
+
if (!ref_post_ops_) return status::out_of_memory;
|
| 108 |
+
CHECK(ref_post_ops_->init(pd()->dst_md()));
|
| 109 |
+
return status::success;
|
| 110 |
+
}
|
| 111 |
+
|
| 112 |
+
status_t execute(const exec_ctx_t &ctx) const override {
|
| 113 |
+
return execute_forward(ctx);
|
| 114 |
+
}
|
| 115 |
+
|
| 116 |
+
private:
|
| 117 |
+
status_t execute_forward(const exec_ctx_t &ctx) const;
|
| 118 |
+
void array_div_by_const(const int n, const ker_data_t *src,
|
| 119 |
+
const size_t num, ker_data_t *dst) const;
|
| 120 |
+
void array_add(const int n, const ker_data_t *src, ker_data_t *dst) const;
|
| 121 |
+
void array_nhwc_max(const int n, ker_data_t *dst, const ker_data_t *src,
|
| 122 |
+
unsigned char *ws, const size_t ws_offset, const data_type_t ws_dt,
|
| 123 |
+
const int index) const;
|
| 124 |
+
void array_nhwc_initialize(const int n, ker_data_t *dst, unsigned char *ws,
|
| 125 |
+
const size_t ws_offset, const data_type_t ws_dt) const;
|
| 126 |
+
|
| 127 |
+
const pd_t *pd() const { return (const pd_t *)primitive_t::pd().get(); }
|
| 128 |
+
std::unique_ptr<ref_post_ops_t> ref_post_ops_;
|
| 129 |
+
};
|
| 130 |
+
|
| 131 |
+
template <impl::data_type_t d_type>
|
| 132 |
+
struct nhwc_pooling_bwd_t : public primitive_t {
|
| 133 |
+
struct pd_t : public cpu_pooling_bwd_pd_t {
|
| 134 |
+
using cpu_pooling_bwd_pd_t::cpu_pooling_bwd_pd_t;
|
| 135 |
+
|
| 136 |
+
DECLARE_COMMON_PD_T("simple_nhwc:any", nhwc_pooling_bwd_t);
|
| 137 |
+
|
| 138 |
+
status_t init(engine_t *engine) {
|
| 139 |
+
const format_tag_t desired_fmt_tag = utils::pick(ndims() - 3,
|
| 140 |
+
format_tag::nwc, format_tag::nhwc, format_tag::ndhwc);
|
| 141 |
+
|
| 142 |
+
using namespace prop_kind;
|
| 143 |
+
using namespace alg_kind;
|
| 144 |
+
bool ok = !is_fwd()
|
| 145 |
+
&& utils::one_of(desc()->alg_kind, pooling_max,
|
| 146 |
+
pooling_avg_include_padding,
|
| 147 |
+
pooling_avg_exclude_padding)
|
| 148 |
+
&& utils::everyone_is(d_type, diff_dst_md()->data_type,
|
| 149 |
+
diff_src_md()->data_type)
|
| 150 |
+
&& platform::has_data_type_support(d_type)
|
| 151 |
+
&& set_default_params() == status::success && !is_fwd()
|
| 152 |
+
&& attr()->has_default_values()
|
| 153 |
+
&& memory_desc_matches_tag(*diff_dst_md(), desired_fmt_tag)
|
| 154 |
+
&& memory_desc_matches_tag(*diff_src_md(), desired_fmt_tag)
|
| 155 |
+
&& !is_dilated();
|
| 156 |
+
if (!ok) return status::unimplemented;
|
| 157 |
+
|
| 158 |
+
if (desc()->alg_kind == pooling_max) {
|
| 159 |
+
const auto ws_dt = hint_fwd_pd_->workspace_md()->data_type;
|
| 160 |
+
init_default_ws(ws_dt);
|
| 161 |
+
if (!compare_ws(hint_fwd_pd_)) return status::unimplemented;
|
| 162 |
+
}
|
| 163 |
+
|
| 164 |
+
nthr_ = dnnl_get_max_threads();
|
| 165 |
+
init_scratchpad();
|
| 166 |
+
|
| 167 |
+
return status::success;
|
| 168 |
+
}
|
| 169 |
+
|
| 170 |
+
int nthr_; // To not exceed the limit in execute used for set up.
|
| 171 |
+
|
| 172 |
+
private:
|
| 173 |
+
void init_scratchpad() {
|
| 174 |
+
using namespace memory_tracking::names;
|
| 175 |
+
if (diff_src_md()->data_type != data_type::f32) {
|
| 176 |
+
size_t bf16cvt_sz_ = IC() * nthr_;
|
| 177 |
+
auto scratchpad = scratchpad_registry().registrar();
|
| 178 |
+
scratchpad.template book<float>(
|
| 179 |
+
key_pool_src_bf16cvt, bf16cvt_sz_);
|
| 180 |
+
scratchpad.template book<float>(
|
| 181 |
+
key_pool_dst_bf16cvt, bf16cvt_sz_);
|
| 182 |
+
}
|
| 183 |
+
}
|
| 184 |
+
};
|
| 185 |
+
|
| 186 |
+
nhwc_pooling_bwd_t(const pd_t *apd) : primitive_t(apd) {}
|
| 187 |
+
typedef typename prec_traits<d_type>::type data_t;
|
| 188 |
+
|
| 189 |
+
status_t execute(const exec_ctx_t &ctx) const override {
|
| 190 |
+
return execute_backward(ctx);
|
| 191 |
+
}
|
| 192 |
+
|
| 193 |
+
private:
|
| 194 |
+
status_t execute_backward(const exec_ctx_t &ctx) const;
|
| 195 |
+
const pd_t *pd() const { return (const pd_t *)primitive_t::pd().get(); }
|
| 196 |
+
};
|
| 197 |
+
|
| 198 |
+
} // namespace cpu
|
| 199 |
+
} // namespace impl
|
| 200 |
+
} // namespace dnnl
|
| 201 |
+
|
| 202 |
+
#endif
|
| 203 |
+
|
| 204 |
+
// vim: et ts=4 sw=4 cindent cino+=l0,\:4,N-s
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/nspc_batch_normalization.hpp
ADDED
|
@@ -0,0 +1,206 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*******************************************************************************
|
| 2 |
+
* Copyright 2018-2022 Intel Corporation
|
| 3 |
+
*
|
| 4 |
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
* you may not use this file except in compliance with the License.
|
| 6 |
+
* You may obtain a copy of the License at
|
| 7 |
+
*
|
| 8 |
+
* http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
*
|
| 10 |
+
* Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
* See the License for the specific language governing permissions and
|
| 14 |
+
* limitations under the License.
|
| 15 |
+
*******************************************************************************/
|
| 16 |
+
|
| 17 |
+
#ifndef CPU_NSPC_BATCH_NORMALIZATION_HPP
|
| 18 |
+
#define CPU_NSPC_BATCH_NORMALIZATION_HPP
|
| 19 |
+
|
| 20 |
+
#include <assert.h>
|
| 21 |
+
|
| 22 |
+
#include "common/c_types_map.hpp"
|
| 23 |
+
#include "common/dnnl_thread.hpp"
|
| 24 |
+
#include "common/memory_tracking.hpp"
|
| 25 |
+
#include "common/primitive.hpp"
|
| 26 |
+
#include "common/type_helpers.hpp"
|
| 27 |
+
#include "common/utils.hpp"
|
| 28 |
+
|
| 29 |
+
#include "cpu/cpu_batch_normalization_pd.hpp"
|
| 30 |
+
#include "cpu/platform.hpp"
|
| 31 |
+
|
| 32 |
+
namespace dnnl {
|
| 33 |
+
namespace impl {
|
| 34 |
+
namespace cpu {
|
| 35 |
+
|
| 36 |
+
template <data_type_t d_type>
|
| 37 |
+
struct nspc_batch_normalization_fwd_t : public primitive_t {
|
| 38 |
+
struct pd_t : public cpu_batch_normalization_fwd_pd_t {
|
| 39 |
+
pd_t(const batch_normalization_desc_t *adesc,
|
| 40 |
+
const primitive_attr_t *attr,
|
| 41 |
+
const batch_normalization_fwd_pd_t *hint_fwd_pd)
|
| 42 |
+
: cpu_batch_normalization_fwd_pd_t(adesc, attr, hint_fwd_pd) {}
|
| 43 |
+
|
| 44 |
+
DECLARE_COMMON_PD_T("nspc_bnorm:any", nspc_batch_normalization_fwd_t);
|
| 45 |
+
|
| 46 |
+
status_t init(engine_t *engine) {
|
| 47 |
+
using namespace data_type;
|
| 48 |
+
using namespace format_tag;
|
| 49 |
+
|
| 50 |
+
bool ok = is_fwd() && !has_zero_dim_memory()
|
| 51 |
+
&& utils::everyone_is(
|
| 52 |
+
d_type, src_md()->data_type, dst_md()->data_type)
|
| 53 |
+
&& platform::has_data_type_support(d_type)
|
| 54 |
+
&& IMPLICATION(is_training(),
|
| 55 |
+
platform::has_training_support(d_type))
|
| 56 |
+
&& check_scale_shift_data_type()
|
| 57 |
+
&& (attr()->has_default_values()
|
| 58 |
+
|| with_relu_post_op(is_training()))
|
| 59 |
+
&& set_default_formats_common()
|
| 60 |
+
&& memory_desc_wrapper(src_md())
|
| 61 |
+
== memory_desc_wrapper(dst_md())
|
| 62 |
+
&& memory_desc_matches_one_of_tag(
|
| 63 |
+
*src_md(), ndhwc, nhwc, nwc, nc);
|
| 64 |
+
if (!ok) return status::unimplemented;
|
| 65 |
+
|
| 66 |
+
// BN+Add+Relu fusion is not currently implemented
|
| 67 |
+
if (fuse_norm_add_relu()) return status::unimplemented;
|
| 68 |
+
|
| 69 |
+
if (is_training() && fuse_norm_relu()) init_default_ws(8);
|
| 70 |
+
|
| 71 |
+
nthr_ = dnnl_get_max_threads();
|
| 72 |
+
init_scratchpad();
|
| 73 |
+
|
| 74 |
+
return status::success;
|
| 75 |
+
}
|
| 76 |
+
|
| 77 |
+
int nthr_; // To not exceed the limit in execute used for set up.
|
| 78 |
+
|
| 79 |
+
private:
|
| 80 |
+
void init_scratchpad() {
|
| 81 |
+
using namespace memory_tracking::names;
|
| 82 |
+
using namespace data_type;
|
| 83 |
+
|
| 84 |
+
auto scratchpad = scratchpad_registry().registrar();
|
| 85 |
+
if (!stats_is_src()) {
|
| 86 |
+
const size_t stats_buf_sz = nstl::max(C(), dim_t(16)) * nthr_;
|
| 87 |
+
scratchpad.template book<acc_data_t>(
|
| 88 |
+
key_bnorm_reduction, stats_buf_sz);
|
| 89 |
+
scratchpad.template book<acc_data_t>(
|
| 90 |
+
key_bnorm_tmp_mean, stats_buf_sz);
|
| 91 |
+
scratchpad.template book<acc_data_t>(
|
| 92 |
+
key_bnorm_tmp_var, stats_buf_sz);
|
| 93 |
+
}
|
| 94 |
+
if (utils::one_of(d_type, bf16, f16)) {
|
| 95 |
+
const int simd_w = 16;
|
| 96 |
+
const int nbufs = 2;
|
| 97 |
+
const size_t cvt_buf_sz
|
| 98 |
+
= nbufs * nthr_ * utils::rnd_up(C(), simd_w);
|
| 99 |
+
scratchpad.template book<acc_data_t>(key_bnorm_cvt, cvt_buf_sz);
|
| 100 |
+
}
|
| 101 |
+
}
|
| 102 |
+
};
|
| 103 |
+
|
| 104 |
+
typedef typename prec_traits<d_type>::type data_t;
|
| 105 |
+
typedef float acc_data_t;
|
| 106 |
+
|
| 107 |
+
nspc_batch_normalization_fwd_t(const pd_t *apd) : primitive_t(apd) {}
|
| 108 |
+
~nspc_batch_normalization_fwd_t() {}
|
| 109 |
+
|
| 110 |
+
status_t execute(const exec_ctx_t &ctx) const override {
|
| 111 |
+
return execute_forward(ctx);
|
| 112 |
+
}
|
| 113 |
+
|
| 114 |
+
private:
|
| 115 |
+
status_t execute_forward(const exec_ctx_t &ctx) const;
|
| 116 |
+
const pd_t *pd() const { return (const pd_t *)primitive_t::pd().get(); }
|
| 117 |
+
};
|
| 118 |
+
|
| 119 |
+
template <data_type_t d_type>
|
| 120 |
+
struct nspc_batch_normalization_bwd_t : public primitive_t {
|
| 121 |
+
struct pd_t : public cpu_batch_normalization_bwd_pd_t {
|
| 122 |
+
pd_t(const batch_normalization_desc_t *adesc,
|
| 123 |
+
const primitive_attr_t *attr,
|
| 124 |
+
const batch_normalization_fwd_pd_t *hint_fwd_pd)
|
| 125 |
+
: cpu_batch_normalization_bwd_pd_t(adesc, attr, hint_fwd_pd) {}
|
| 126 |
+
|
| 127 |
+
DECLARE_COMMON_PD_T("nspc_bnorm:any", nspc_batch_normalization_bwd_t);
|
| 128 |
+
|
| 129 |
+
status_t init(engine_t *engine) {
|
| 130 |
+
using namespace data_type;
|
| 131 |
+
using namespace format_tag;
|
| 132 |
+
|
| 133 |
+
bool ok = !is_fwd() && !has_zero_dim_memory()
|
| 134 |
+
&& utils::everyone_is(d_type, src_md()->data_type,
|
| 135 |
+
diff_dst_md()->data_type, diff_src_md()->data_type)
|
| 136 |
+
&& platform::has_data_type_support(d_type)
|
| 137 |
+
&& platform::has_training_support(d_type)
|
| 138 |
+
&& check_scale_shift_data_type()
|
| 139 |
+
&& attr()->has_default_values()
|
| 140 |
+
&& set_default_formats_common()
|
| 141 |
+
&& memory_desc_wrapper(diff_src_md())
|
| 142 |
+
== memory_desc_wrapper(diff_dst_md())
|
| 143 |
+
&& memory_desc_matches_one_of_tag(
|
| 144 |
+
*src_md(), ndhwc, nhwc, nwc, nc)
|
| 145 |
+
&& memory_desc_matches_one_of_tag(
|
| 146 |
+
*diff_src_md(), ndhwc, nhwc, nwc, nc);
|
| 147 |
+
if (!ok) return status::unimplemented;
|
| 148 |
+
|
| 149 |
+
// BN+Add+Relu fusion is not currently implemented
|
| 150 |
+
if (fuse_norm_add_relu()) return status::unimplemented;
|
| 151 |
+
|
| 152 |
+
if (fuse_norm_relu()) {
|
| 153 |
+
init_default_ws(8);
|
| 154 |
+
if (!compare_ws(hint_fwd_pd_)) return status::unimplemented;
|
| 155 |
+
}
|
| 156 |
+
|
| 157 |
+
nthr_ = dnnl_get_max_threads();
|
| 158 |
+
init_scratchpad();
|
| 159 |
+
|
| 160 |
+
return status::success;
|
| 161 |
+
}
|
| 162 |
+
|
| 163 |
+
int nthr_; // To not exceed the limit in execute used for set up.
|
| 164 |
+
|
| 165 |
+
private:
|
| 166 |
+
void init_scratchpad() {
|
| 167 |
+
using namespace memory_tracking::names;
|
| 168 |
+
using namespace data_type;
|
| 169 |
+
|
| 170 |
+
auto scratchpad = scratchpad_registry().registrar();
|
| 171 |
+
scratchpad.template book<acc_data_t>(
|
| 172 |
+
key_bnorm_reduction, 2 * C() * nthr_);
|
| 173 |
+
scratchpad.template book<acc_data_t>(
|
| 174 |
+
key_bnorm_tmp_diff_ss, 2 * C() * (nthr_ + 1));
|
| 175 |
+
if (utils::one_of(d_type, bf16, f16)) {
|
| 176 |
+
const int simd_w = 16;
|
| 177 |
+
const int nbufs = 2 + !use_global_stats();
|
| 178 |
+
const size_t cvt_buf_sz
|
| 179 |
+
= nbufs * nthr_ * utils::rnd_up(C(), simd_w);
|
| 180 |
+
scratchpad.template book<acc_data_t>(key_bnorm_cvt, cvt_buf_sz);
|
| 181 |
+
}
|
| 182 |
+
}
|
| 183 |
+
};
|
| 184 |
+
|
| 185 |
+
typedef typename prec_traits<d_type>::type data_t;
|
| 186 |
+
typedef float acc_data_t;
|
| 187 |
+
|
| 188 |
+
nspc_batch_normalization_bwd_t(const pd_t *apd) : primitive_t(apd) {}
|
| 189 |
+
~nspc_batch_normalization_bwd_t() {}
|
| 190 |
+
|
| 191 |
+
status_t execute(const exec_ctx_t &ctx) const override {
|
| 192 |
+
return execute_backward(ctx);
|
| 193 |
+
}
|
| 194 |
+
|
| 195 |
+
private:
|
| 196 |
+
status_t execute_backward(const exec_ctx_t &ctx) const;
|
| 197 |
+
const pd_t *pd() const { return (const pd_t *)primitive_t::pd().get(); }
|
| 198 |
+
};
|
| 199 |
+
|
| 200 |
+
} // namespace cpu
|
| 201 |
+
} // namespace impl
|
| 202 |
+
} // namespace dnnl
|
| 203 |
+
|
| 204 |
+
#endif
|
| 205 |
+
|
| 206 |
+
// vim: et ts=4 sw=4 cindent cino+=l0,\:4,N-s
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/platform.hpp
ADDED
|
@@ -0,0 +1,197 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*******************************************************************************
|
| 2 |
+
* Copyright 2020-2023 Intel Corporation
|
| 3 |
+
* Copyright 2020 Arm Ltd. and affiliates
|
| 4 |
+
*
|
| 5 |
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
| 6 |
+
* you may not use this file except in compliance with the License.
|
| 7 |
+
* You may obtain a copy of the License at
|
| 8 |
+
*
|
| 9 |
+
* http://www.apache.org/licenses/LICENSE-2.0
|
| 10 |
+
*
|
| 11 |
+
* Unless required by applicable law or agreed to in writing, software
|
| 12 |
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
| 13 |
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 14 |
+
* See the License for the specific language governing permissions and
|
| 15 |
+
* limitations under the License.
|
| 16 |
+
*******************************************************************************/
|
| 17 |
+
|
| 18 |
+
#ifndef CPU_PLATFORM_HPP
|
| 19 |
+
#define CPU_PLATFORM_HPP
|
| 20 |
+
|
| 21 |
+
#include "oneapi/dnnl/dnnl_config.h"
|
| 22 |
+
|
| 23 |
+
#include "common/c_types_map.hpp"
|
| 24 |
+
#include "common/impl_registration.hpp"
|
| 25 |
+
#include "common/z_magic.hpp"
|
| 26 |
+
|
| 27 |
+
// Possible architectures:
|
| 28 |
+
// - DNNL_X64
|
| 29 |
+
// - DNNL_AARCH64
|
| 30 |
+
// - DNNL_PPC64
|
| 31 |
+
// - DNNL_S390X
|
| 32 |
+
// - DNNL_RV64
|
| 33 |
+
// - DNNL_ARCH_GENERIC
|
| 34 |
+
// Target architecture macro is set to 1, others to 0. All macros are defined.
|
| 35 |
+
|
| 36 |
+
#if defined(DNNL_X64) + defined(DNNL_AARCH64) + defined(DNNL_PPC64) \
|
| 37 |
+
+ defined(DNNL_S390X) + defined(DNNL_RV64) \
|
| 38 |
+
+ defined(DNNL_ARCH_GENERIC) \
|
| 39 |
+
== 0
|
| 40 |
+
#if defined(__x86_64__) || defined(_M_X64)
|
| 41 |
+
#define DNNL_X64 1
|
| 42 |
+
#elif defined(__aarch64__)
|
| 43 |
+
#define DNNL_AARCH64 1
|
| 44 |
+
#elif defined(__powerpc64__) || defined(__PPC64__) || defined(_ARCH_PPC64)
|
| 45 |
+
#define DNNL_PPC64 1
|
| 46 |
+
#elif defined(__s390x__)
|
| 47 |
+
#define DNNL_S390X 1
|
| 48 |
+
#elif defined(__riscv)
|
| 49 |
+
#define DNNL_RV64 1
|
| 50 |
+
#else
|
| 51 |
+
#define DNNL_ARCH_GENERIC 1
|
| 52 |
+
#endif
|
| 53 |
+
#endif // defined(DNNL_X64) + ... == 0
|
| 54 |
+
|
| 55 |
+
#if defined(DNNL_X64) + defined(DNNL_AARCH64) + defined(DNNL_PPC64) \
|
| 56 |
+
+ defined(DNNL_S390X) + defined(DNNL_RV64) \
|
| 57 |
+
+ defined(DNNL_ARCH_GENERIC) \
|
| 58 |
+
!= 1
|
| 59 |
+
#error One and only one architecture should be defined at a time
|
| 60 |
+
#endif
|
| 61 |
+
|
| 62 |
+
#if !defined(DNNL_X64)
|
| 63 |
+
#define DNNL_X64 0
|
| 64 |
+
#endif
|
| 65 |
+
#if !defined(DNNL_AARCH64)
|
| 66 |
+
#define DNNL_AARCH64 0
|
| 67 |
+
#endif
|
| 68 |
+
#if !defined(DNNL_PPC64)
|
| 69 |
+
#define DNNL_PPC64 0
|
| 70 |
+
#endif
|
| 71 |
+
#if !defined(DNNL_S390X)
|
| 72 |
+
#define DNNL_S390X 0
|
| 73 |
+
#endif
|
| 74 |
+
#if !defined(DNNL_RV64)
|
| 75 |
+
#define DNNL_RV64 0
|
| 76 |
+
#endif
|
| 77 |
+
#if !defined(DNNL_ARCH_GENERIC)
|
| 78 |
+
#define DNNL_ARCH_GENERIC 0
|
| 79 |
+
#endif
|
| 80 |
+
|
| 81 |
+
// Helper macros: expand the parameters only on the corresponding architecture.
|
| 82 |
+
// Equivalent to: #if DNNL_$ARCH ... #endif
|
| 83 |
+
#define DNNL_X64_ONLY(...) Z_CONDITIONAL_DO(DNNL_X64, __VA_ARGS__)
|
| 84 |
+
#define DNNL_PPC64_ONLY(...) Z_CONDITIONAL_DO(DNNL_PPC64_ONLY, __VA_ARGS__)
|
| 85 |
+
#define DNNL_S390X_ONLY(...) Z_CONDITIONAL_DO(DNNL_S390X_ONLY, __VA_ARGS__)
|
| 86 |
+
#define DNNL_AARCH64_ONLY(...) Z_CONDITIONAL_DO(DNNL_AARCH64, __VA_ARGS__)
|
| 87 |
+
|
| 88 |
+
// Using RISC-V implementations optimized with RVV Intrinsics is optional for RISC-V builds
|
| 89 |
+
// and can be enabled with DNNL_ARCH_OPT_FLAGS="-march=<ISA-string>" option, where <ISA-string>
|
| 90 |
+
// contains V extension. If disabled, generic reference implementations will be used.
|
| 91 |
+
#if defined(DNNL_RV64) && defined(DNNL_RISCV_USE_RVV_INTRINSICS)
|
| 92 |
+
#define DNNL_RV64GCV_ONLY(...) __VA_ARGS__
|
| 93 |
+
#else
|
| 94 |
+
#define DNNL_RV64GCV_ONLY(...)
|
| 95 |
+
#endif
|
| 96 |
+
|
| 97 |
+
// Negation of the helper macros above
|
| 98 |
+
#define DNNL_NON_X64_ONLY(...) Z_CONDITIONAL_DO(Z_NOT(DNNL_X64), __VA_ARGS__)
|
| 99 |
+
|
| 100 |
+
// Using Arm Compute Library kernels is optional for AArch64 builds
|
| 101 |
+
// and can be enabled with the DNNL_AARCH64_USE_ACL CMake option
|
| 102 |
+
#if defined(DNNL_AARCH64) && defined(DNNL_AARCH64_USE_ACL)
|
| 103 |
+
#define DNNL_AARCH64_ACL_ONLY(...) __VA_ARGS__
|
| 104 |
+
#else
|
| 105 |
+
#define DNNL_AARCH64_ACL_ONLY(...)
|
| 106 |
+
#endif
|
| 107 |
+
|
| 108 |
+
// Primitive ISA section for configuring knobs.
|
| 109 |
+
// Note: MSVC preprocessor by some reason "eats" symbols it's not supposed to
|
| 110 |
+
// if __VA_ARGS__ is passed as empty. Then things happen like this for non-x64:
|
| 111 |
+
// impl0, AMX(X64_impl1), impl2, ... -> impl0 impl2, ...
|
| 112 |
+
// resulting in compilation error. Such problem happens for lists interleaving
|
| 113 |
+
// X64 impls and non-X64 for non-X64 build.
|
| 114 |
+
#if DNNL_X64
|
| 115 |
+
// Note: unlike workload or primitive set, these macros will work with impl
|
| 116 |
+
// items directly, thus, just make an item disappear, no empty lists.
|
| 117 |
+
#define __BUILD_AMX BUILD_PRIMITIVE_CPU_ISA_ALL || BUILD_AMX
|
| 118 |
+
#define __BUILD_AVX512 __BUILD_AMX || BUILD_AVX512
|
| 119 |
+
#define __BUILD_AVX2 __BUILD_AVX512 || BUILD_AVX2
|
| 120 |
+
#define __BUILD_SSE41 __BUILD_AVX2 || BUILD_SSE41
|
| 121 |
+
#else
|
| 122 |
+
#define __BUILD_AMX 0
|
| 123 |
+
#define __BUILD_AVX512 0
|
| 124 |
+
#define __BUILD_AVX2 0
|
| 125 |
+
#define __BUILD_SSE41 0
|
| 126 |
+
#endif
|
| 127 |
+
|
| 128 |
+
#if __BUILD_AMX
|
| 129 |
+
#define REG_AMX_ISA(...) __VA_ARGS__
|
| 130 |
+
#else
|
| 131 |
+
#define REG_AMX_ISA(...)
|
| 132 |
+
#endif
|
| 133 |
+
|
| 134 |
+
#if __BUILD_AVX512
|
| 135 |
+
#define REG_AVX512_ISA(...) __VA_ARGS__
|
| 136 |
+
#else
|
| 137 |
+
#define REG_AVX512_ISA(...)
|
| 138 |
+
#endif
|
| 139 |
+
|
| 140 |
+
#if __BUILD_AVX2
|
| 141 |
+
#define REG_AVX2_ISA(...) __VA_ARGS__
|
| 142 |
+
#else
|
| 143 |
+
#define REG_AVX2_ISA(...)
|
| 144 |
+
#endif
|
| 145 |
+
|
| 146 |
+
#if __BUILD_SSE41
|
| 147 |
+
#define REG_SSE41_ISA(...) __VA_ARGS__
|
| 148 |
+
#else
|
| 149 |
+
#define REG_SSE41_ISA(...)
|
| 150 |
+
#endif
|
| 151 |
+
|
| 152 |
+
namespace dnnl {
|
| 153 |
+
namespace impl {
|
| 154 |
+
namespace cpu {
|
| 155 |
+
namespace platform {
|
| 156 |
+
|
| 157 |
+
const char *get_isa_info();
|
| 158 |
+
dnnl_cpu_isa_t get_effective_cpu_isa();
|
| 159 |
+
status_t set_max_cpu_isa(dnnl_cpu_isa_t isa);
|
| 160 |
+
status_t set_cpu_isa_hints(dnnl_cpu_isa_hints_t isa_hints);
|
| 161 |
+
dnnl_cpu_isa_hints_t get_cpu_isa_hints();
|
| 162 |
+
|
| 163 |
+
bool DNNL_API prefer_ymm_requested();
|
| 164 |
+
// This call is limited to performing checks on plain C-code implementations
|
| 165 |
+
// (e.g. 'ref' and 'simple_primitive') and should avoid any x64 JIT
|
| 166 |
+
// implementations since these require specific code-path updates.
|
| 167 |
+
bool DNNL_API has_data_type_support(data_type_t data_type);
|
| 168 |
+
bool DNNL_API has_training_support(data_type_t data_type);
|
| 169 |
+
float DNNL_API s8s8_weights_scale_factor();
|
| 170 |
+
|
| 171 |
+
unsigned DNNL_API get_per_core_cache_size(int level);
|
| 172 |
+
unsigned DNNL_API get_num_cores();
|
| 173 |
+
#if DNNL_CPU_THREADING_RUNTIME == DNNL_RUNTIME_THREADPOOL
|
| 174 |
+
unsigned DNNL_API get_max_threads_to_use();
|
| 175 |
+
#endif
|
| 176 |
+
|
| 177 |
+
constexpr int get_cache_line_size() {
|
| 178 |
+
return 64;
|
| 179 |
+
}
|
| 180 |
+
|
| 181 |
+
int get_vector_register_size();
|
| 182 |
+
|
| 183 |
+
size_t get_timestamp();
|
| 184 |
+
|
| 185 |
+
} // namespace platform
|
| 186 |
+
|
| 187 |
+
// XXX: find a better place for these values?
|
| 188 |
+
enum {
|
| 189 |
+
PAGE_4K = 4096,
|
| 190 |
+
PAGE_2M = 2097152,
|
| 191 |
+
};
|
| 192 |
+
|
| 193 |
+
} // namespace cpu
|
| 194 |
+
} // namespace impl
|
| 195 |
+
} // namespace dnnl
|
| 196 |
+
|
| 197 |
+
#endif
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/primitive_attr_postops.hpp
ADDED
|
@@ -0,0 +1,96 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*******************************************************************************
|
| 2 |
+
* Copyright 2020-2023 Intel Corporation
|
| 3 |
+
*
|
| 4 |
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
* you may not use this file except in compliance with the License.
|
| 6 |
+
* You may obtain a copy of the License at
|
| 7 |
+
*
|
| 8 |
+
* http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
*
|
| 10 |
+
* Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
* See the License for the specific language governing permissions and
|
| 14 |
+
* limitations under the License.
|
| 15 |
+
*******************************************************************************/
|
| 16 |
+
|
| 17 |
+
#ifndef CPU_PRIMITIVE_ATTR_POSTOPS_HPP
|
| 18 |
+
#define CPU_PRIMITIVE_ATTR_POSTOPS_HPP
|
| 19 |
+
|
| 20 |
+
#include <vector>
|
| 21 |
+
|
| 22 |
+
#include "common/primitive.hpp"
|
| 23 |
+
#include "common/primitive_attr.hpp"
|
| 24 |
+
|
| 25 |
+
namespace dnnl {
|
| 26 |
+
namespace impl {
|
| 27 |
+
namespace cpu {
|
| 28 |
+
|
| 29 |
+
float compute_binary_scalar(alg_kind_t alg, float x, float y);
|
| 30 |
+
float compute_eltwise_scalar_fwd(
|
| 31 |
+
const alg_kind_t alg, float s, float alpha, float beta);
|
| 32 |
+
float compute_eltwise_scalar_bwd(
|
| 33 |
+
const alg_kind_t alg, float dd, float s, float alpha, float beta);
|
| 34 |
+
|
| 35 |
+
struct ref_binary_scalar_t {
|
| 36 |
+
ref_binary_scalar_t(alg_kind_t alg);
|
| 37 |
+
ref_binary_scalar_t(const post_ops_t::entry_t::binary_t &binary);
|
| 38 |
+
|
| 39 |
+
float compute_scalar(float src0, float src1) const;
|
| 40 |
+
|
| 41 |
+
private:
|
| 42 |
+
const alg_kind_t alg_;
|
| 43 |
+
};
|
| 44 |
+
|
| 45 |
+
struct ref_eltwise_scalar_fwd_t {
|
| 46 |
+
ref_eltwise_scalar_fwd_t(
|
| 47 |
+
alg_kind_t alg, float alpha, float beta, float scale);
|
| 48 |
+
ref_eltwise_scalar_fwd_t(const post_ops_t::entry_t::eltwise_t &eltwise);
|
| 49 |
+
|
| 50 |
+
float compute_scalar(float s) const;
|
| 51 |
+
|
| 52 |
+
const alg_kind_t alg_;
|
| 53 |
+
const float alpha_;
|
| 54 |
+
const float beta_;
|
| 55 |
+
const float scale_;
|
| 56 |
+
};
|
| 57 |
+
|
| 58 |
+
struct ref_post_ops_t {
|
| 59 |
+
struct args_t {
|
| 60 |
+
args_t() : dst_val(0.f), ctx(nullptr), l_offset(-1), dst_md(nullptr) {}
|
| 61 |
+
|
| 62 |
+
float dst_val; // sum arg
|
| 63 |
+
const exec_ctx_t *ctx; // binary arg
|
| 64 |
+
dim_t l_offset; // binary arg
|
| 65 |
+
const memory_desc_t *dst_md; // binary arg
|
| 66 |
+
};
|
| 67 |
+
|
| 68 |
+
ref_post_ops_t(const post_ops_t &po, bool skip_sum = false);
|
| 69 |
+
|
| 70 |
+
virtual ~ref_post_ops_t() = default;
|
| 71 |
+
|
| 72 |
+
status_t init(const memory_desc_t *dst_md);
|
| 73 |
+
|
| 74 |
+
void execute(float &res, const args_t &args = args_t()) const;
|
| 75 |
+
|
| 76 |
+
static bool primitive_kind_ok(const post_ops_t &po) {
|
| 77 |
+
using namespace primitive_kind;
|
| 78 |
+
return po.has_default_values({binary, eltwise, prelu, sum});
|
| 79 |
+
}
|
| 80 |
+
|
| 81 |
+
private:
|
| 82 |
+
const post_ops_t &po_;
|
| 83 |
+
// some primitives for example gemm are able to perform sum postop itself,
|
| 84 |
+
// in such cases executing sum should be skipped
|
| 85 |
+
const bool skip_sum_;
|
| 86 |
+
|
| 87 |
+
std::vector<ref_eltwise_scalar_fwd_t> eltwise_po_;
|
| 88 |
+
std::vector<ref_binary_scalar_t> binary_po_;
|
| 89 |
+
std::vector<memory_desc_t> prelu_md_;
|
| 90 |
+
};
|
| 91 |
+
|
| 92 |
+
} // namespace cpu
|
| 93 |
+
} // namespace impl
|
| 94 |
+
} // namespace dnnl
|
| 95 |
+
|
| 96 |
+
#endif
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/ref_batch_normalization.hpp
ADDED
|
@@ -0,0 +1,141 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*******************************************************************************
|
| 2 |
+
* Copyright 2016-2022 Intel Corporation
|
| 3 |
+
*
|
| 4 |
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
* you may not use this file except in compliance with the License.
|
| 6 |
+
* You may obtain a copy of the License at
|
| 7 |
+
*
|
| 8 |
+
* http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
*
|
| 10 |
+
* Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
* See the License for the specific language governing permissions and
|
| 14 |
+
* limitations under the License.
|
| 15 |
+
*******************************************************************************/
|
| 16 |
+
|
| 17 |
+
#ifndef CPU_REF_BATCH_NORMALIZATION_HPP
|
| 18 |
+
#define CPU_REF_BATCH_NORMALIZATION_HPP
|
| 19 |
+
|
| 20 |
+
#include <assert.h>
|
| 21 |
+
|
| 22 |
+
#include "common/c_types_map.hpp"
|
| 23 |
+
#include "common/primitive.hpp"
|
| 24 |
+
#include "common/type_helpers.hpp"
|
| 25 |
+
#include "common/utils.hpp"
|
| 26 |
+
|
| 27 |
+
#include "cpu/platform.hpp"
|
| 28 |
+
|
| 29 |
+
#include "cpu/cpu_batch_normalization_pd.hpp"
|
| 30 |
+
|
| 31 |
+
namespace dnnl {
|
| 32 |
+
namespace impl {
|
| 33 |
+
namespace cpu {
|
| 34 |
+
|
| 35 |
+
template <data_type_t d_type>
|
| 36 |
+
struct ref_batch_normalization_fwd_t : public primitive_t {
|
| 37 |
+
struct pd_t : public cpu_batch_normalization_fwd_pd_t {
|
| 38 |
+
pd_t(const batch_normalization_desc_t *adesc,
|
| 39 |
+
const primitive_attr_t *attr,
|
| 40 |
+
const batch_normalization_fwd_pd_t *hint_fwd_pd)
|
| 41 |
+
: cpu_batch_normalization_fwd_pd_t(adesc, attr, hint_fwd_pd) {}
|
| 42 |
+
|
| 43 |
+
DECLARE_COMMON_PD_T("ref:any", ref_batch_normalization_fwd_t);
|
| 44 |
+
|
| 45 |
+
status_t init(engine_t *engine) {
|
| 46 |
+
using namespace data_type;
|
| 47 |
+
bool ok = is_fwd()
|
| 48 |
+
&& utils::everyone_is(
|
| 49 |
+
d_type, src_md()->data_type, dst_md()->data_type)
|
| 50 |
+
&& platform::has_data_type_support(d_type)
|
| 51 |
+
&& IMPLICATION(is_training(),
|
| 52 |
+
platform::has_training_support(d_type))
|
| 53 |
+
&& check_scale_shift_data_type()
|
| 54 |
+
&& (attr()->has_default_values()
|
| 55 |
+
|| with_relu_post_op(is_training()))
|
| 56 |
+
&& set_default_formats_common()
|
| 57 |
+
&& memory_desc_wrapper(src_md())
|
| 58 |
+
== memory_desc_wrapper(dst_md());
|
| 59 |
+
if (!ok) return status::unimplemented;
|
| 60 |
+
|
| 61 |
+
// BN+Add+Relu fusion is not currently implemented
|
| 62 |
+
if (fuse_norm_add_relu()) return status::unimplemented;
|
| 63 |
+
|
| 64 |
+
if (src_md()->data_type == s8 && !stats_is_src())
|
| 65 |
+
return status::unimplemented;
|
| 66 |
+
|
| 67 |
+
if (is_training() && fuse_norm_relu()) init_default_ws(8);
|
| 68 |
+
|
| 69 |
+
return status::success;
|
| 70 |
+
}
|
| 71 |
+
};
|
| 72 |
+
|
| 73 |
+
ref_batch_normalization_fwd_t(const pd_t *apd) : primitive_t(apd) {}
|
| 74 |
+
|
| 75 |
+
typedef typename prec_traits<d_type>::type data_t;
|
| 76 |
+
|
| 77 |
+
status_t execute(const exec_ctx_t &ctx) const override {
|
| 78 |
+
return execute_forward(ctx);
|
| 79 |
+
}
|
| 80 |
+
|
| 81 |
+
private:
|
| 82 |
+
status_t execute_forward(const exec_ctx_t &ctx) const;
|
| 83 |
+
const pd_t *pd() const { return (const pd_t *)primitive_t::pd().get(); }
|
| 84 |
+
};
|
| 85 |
+
|
| 86 |
+
template <data_type_t d_type>
|
| 87 |
+
struct ref_batch_normalization_bwd_t : public primitive_t {
|
| 88 |
+
struct pd_t : public cpu_batch_normalization_bwd_pd_t {
|
| 89 |
+
pd_t(const batch_normalization_desc_t *adesc,
|
| 90 |
+
const primitive_attr_t *attr,
|
| 91 |
+
const batch_normalization_fwd_pd_t *hint_fwd_pd)
|
| 92 |
+
: cpu_batch_normalization_bwd_pd_t(adesc, attr, hint_fwd_pd) {}
|
| 93 |
+
|
| 94 |
+
DECLARE_COMMON_PD_T("ref:any", ref_batch_normalization_bwd_t);
|
| 95 |
+
|
| 96 |
+
status_t init(engine_t *engine) {
|
| 97 |
+
using namespace data_type;
|
| 98 |
+
|
| 99 |
+
bool ok = !is_fwd()
|
| 100 |
+
&& utils::everyone_is(d_type, src_md()->data_type,
|
| 101 |
+
diff_dst_md()->data_type, diff_src_md()->data_type)
|
| 102 |
+
&& platform::has_data_type_support(d_type)
|
| 103 |
+
&& platform::has_training_support(d_type)
|
| 104 |
+
&& check_scale_shift_data_type()
|
| 105 |
+
&& attr()->has_default_values()
|
| 106 |
+
&& set_default_formats_common()
|
| 107 |
+
&& memory_desc_wrapper(diff_src_md())
|
| 108 |
+
== memory_desc_wrapper(diff_dst_md());
|
| 109 |
+
if (!ok) return status::unimplemented;
|
| 110 |
+
|
| 111 |
+
// BN+Add+Relu fusion is not currently implemented
|
| 112 |
+
if (fuse_norm_add_relu()) return status::unimplemented;
|
| 113 |
+
|
| 114 |
+
if (fuse_norm_relu()) {
|
| 115 |
+
init_default_ws(8);
|
| 116 |
+
if (!compare_ws(hint_fwd_pd_)) return status::unimplemented;
|
| 117 |
+
}
|
| 118 |
+
|
| 119 |
+
return status::success;
|
| 120 |
+
}
|
| 121 |
+
};
|
| 122 |
+
|
| 123 |
+
ref_batch_normalization_bwd_t(const pd_t *apd) : primitive_t(apd) {}
|
| 124 |
+
typedef typename prec_traits<d_type>::type data_t;
|
| 125 |
+
|
| 126 |
+
status_t execute(const exec_ctx_t &ctx) const override {
|
| 127 |
+
return execute_backward(ctx);
|
| 128 |
+
}
|
| 129 |
+
|
| 130 |
+
private:
|
| 131 |
+
status_t execute_backward(const exec_ctx_t &ctx) const;
|
| 132 |
+
const pd_t *pd() const { return (const pd_t *)primitive_t::pd().get(); }
|
| 133 |
+
};
|
| 134 |
+
|
| 135 |
+
} // namespace cpu
|
| 136 |
+
} // namespace impl
|
| 137 |
+
} // namespace dnnl
|
| 138 |
+
|
| 139 |
+
#endif
|
| 140 |
+
|
| 141 |
+
// vim: et ts=4 sw=4 cindent cino+=l0,\:4,N-s
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/ref_binary.hpp
ADDED
|
@@ -0,0 +1,94 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*******************************************************************************
|
| 2 |
+
* Copyright 2019-2023 Intel Corporation
|
| 3 |
+
*
|
| 4 |
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
* you may not use this file except in compliance with the License.
|
| 6 |
+
* You may obtain a copy of the License at
|
| 7 |
+
*
|
| 8 |
+
* http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
*
|
| 10 |
+
* Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
* See the License for the specific language governing permissions and
|
| 14 |
+
* limitations under the License.
|
| 15 |
+
*******************************************************************************/
|
| 16 |
+
|
| 17 |
+
#ifndef CPU_REF_BINARY_HPP
|
| 18 |
+
#define CPU_REF_BINARY_HPP
|
| 19 |
+
|
| 20 |
+
#include <assert.h>
|
| 21 |
+
|
| 22 |
+
#include "common/c_types_map.hpp"
|
| 23 |
+
#include "common/primitive.hpp"
|
| 24 |
+
#include "common/type_helpers.hpp"
|
| 25 |
+
#include "common/utils.hpp"
|
| 26 |
+
|
| 27 |
+
#include "cpu/platform.hpp"
|
| 28 |
+
#include "cpu/primitive_attr_postops.hpp"
|
| 29 |
+
|
| 30 |
+
#include "cpu/cpu_binary_pd.hpp"
|
| 31 |
+
|
| 32 |
+
namespace dnnl {
|
| 33 |
+
namespace impl {
|
| 34 |
+
namespace cpu {
|
| 35 |
+
|
| 36 |
+
struct ref_binary_t : public primitive_t {
|
| 37 |
+
struct pd_t : public cpu_binary_pd_t {
|
| 38 |
+
using cpu_binary_pd_t::cpu_binary_pd_t;
|
| 39 |
+
|
| 40 |
+
DECLARE_COMMON_PD_T("ref:any", ref_binary_t);
|
| 41 |
+
|
| 42 |
+
status_t init(engine_t *engine) {
|
| 43 |
+
using namespace data_type;
|
| 44 |
+
using sm = primitive_attr_t::skip_mask_t;
|
| 45 |
+
|
| 46 |
+
const bool ok
|
| 47 |
+
= platform::has_data_type_support(src_md(0)->data_type)
|
| 48 |
+
&& platform::has_data_type_support(src_md(1)->data_type)
|
| 49 |
+
&& platform::has_data_type_support(dst_md()->data_type)
|
| 50 |
+
&& set_default_params() == status::success
|
| 51 |
+
&& attr()->has_default_values(
|
| 52 |
+
sm::post_ops | sm::scales_runtime)
|
| 53 |
+
&& IMPLICATION(!attr()->scales_.has_default_values(),
|
| 54 |
+
check_scales_mask())
|
| 55 |
+
&& ref_post_ops_t::primitive_kind_ok(attr()->post_ops_)
|
| 56 |
+
&& attr_.set_default_formats(dst_md(0)) == status::success;
|
| 57 |
+
if (!ok) return status::unimplemented;
|
| 58 |
+
|
| 59 |
+
return status::success;
|
| 60 |
+
}
|
| 61 |
+
|
| 62 |
+
private:
|
| 63 |
+
bool check_scales_mask() const {
|
| 64 |
+
const std::vector<int> supported_args
|
| 65 |
+
= {DNNL_ARG_SRC_0, DNNL_ARG_SRC_1};
|
| 66 |
+
return attr_scales_ok(supported_args);
|
| 67 |
+
}
|
| 68 |
+
};
|
| 69 |
+
|
| 70 |
+
ref_binary_t(const pd_t *apd) : primitive_t(apd) {}
|
| 71 |
+
|
| 72 |
+
status_t init(engine_t *engine) override {
|
| 73 |
+
ref_post_ops
|
| 74 |
+
= utils::make_unique<ref_post_ops_t>(pd()->attr()->post_ops_);
|
| 75 |
+
if (!ref_post_ops) return status::out_of_memory;
|
| 76 |
+
CHECK(ref_post_ops->init(pd()->dst_md()));
|
| 77 |
+
return status::success;
|
| 78 |
+
}
|
| 79 |
+
|
| 80 |
+
status_t execute(const exec_ctx_t &ctx) const override {
|
| 81 |
+
return execute_ref(ctx);
|
| 82 |
+
}
|
| 83 |
+
|
| 84 |
+
private:
|
| 85 |
+
const pd_t *pd() const { return (const pd_t *)primitive_t::pd().get(); }
|
| 86 |
+
status_t execute_ref(const exec_ctx_t &ctx) const;
|
| 87 |
+
std::unique_ptr<ref_post_ops_t> ref_post_ops;
|
| 88 |
+
};
|
| 89 |
+
|
| 90 |
+
} // namespace cpu
|
| 91 |
+
} // namespace impl
|
| 92 |
+
} // namespace dnnl
|
| 93 |
+
|
| 94 |
+
#endif
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/ref_concat.hpp
ADDED
|
@@ -0,0 +1,189 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*******************************************************************************
|
| 2 |
+
* Copyright 2017-2022 Intel Corporation
|
| 3 |
+
*
|
| 4 |
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
* you may not use this file except in compliance with the License.
|
| 6 |
+
* You may obtain a copy of the License at
|
| 7 |
+
*
|
| 8 |
+
* http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
*
|
| 10 |
+
* Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
* See the License for the specific language governing permissions and
|
| 14 |
+
* limitations under the License.
|
| 15 |
+
*******************************************************************************/
|
| 16 |
+
|
| 17 |
+
#ifndef CPU_REF_CONCAT_HPP
|
| 18 |
+
#define CPU_REF_CONCAT_HPP
|
| 19 |
+
|
| 20 |
+
#include "common/engine.hpp"
|
| 21 |
+
#include "common/primitive.hpp"
|
| 22 |
+
#include "common/reorder.hpp"
|
| 23 |
+
#include "common/reorder_pd.hpp"
|
| 24 |
+
#include "common/stream.hpp"
|
| 25 |
+
|
| 26 |
+
#include "cpu/cpu_concat_pd.hpp"
|
| 27 |
+
|
| 28 |
+
namespace dnnl {
|
| 29 |
+
namespace impl {
|
| 30 |
+
namespace cpu {
|
| 31 |
+
|
| 32 |
+
struct ref_concat_t : public primitive_t {
|
| 33 |
+
struct pd_t : public cpu_concat_pd_t {
|
| 34 |
+
pd_t(const primitive_attr_t *attr, const memory_desc_t *dst_md, int n,
|
| 35 |
+
int concat_dim, const memory_desc_t *const *src_mds)
|
| 36 |
+
: cpu_concat_pd_t(attr, dst_md, n, concat_dim, src_mds)
|
| 37 |
+
, tent_dst_md_(types::zero_md()) {}
|
| 38 |
+
pd_t(const pd_t &rhs) = default;
|
| 39 |
+
~pd_t() = default;
|
| 40 |
+
|
| 41 |
+
DECLARE_CONCAT_PD_T("ref:any", ref_concat_t);
|
| 42 |
+
|
| 43 |
+
status_t init(engine_t *engine) {
|
| 44 |
+
using sm = primitive_attr_t::skip_mask_t;
|
| 45 |
+
if (!attr()->has_default_values(sm::scales_runtime))
|
| 46 |
+
return status::unimplemented;
|
| 47 |
+
status_t status = cpu_concat_pd_t::init();
|
| 48 |
+
if (status != status::success) {
|
| 49 |
+
assert(dst_md_.format_kind != format_kind::undef);
|
| 50 |
+
status = memory_desc_init_by_strides(tent_dst_md_,
|
| 51 |
+
dst_md_.ndims, dst_md_.dims, dst_md_.data_type,
|
| 52 |
+
nullptr);
|
| 53 |
+
if (status != status::success) return status::unimplemented;
|
| 54 |
+
|
| 55 |
+
status = cpu_concat_pd_t::init(&tent_dst_md_);
|
| 56 |
+
if (status != status::success) return status::unimplemented;
|
| 57 |
+
}
|
| 58 |
+
|
| 59 |
+
const auto &sc = attr()->scales_;
|
| 60 |
+
reorder_pds_.resize(n_ + use_tent_dst());
|
| 61 |
+
for (int i = 0; i < n_; ++i) {
|
| 62 |
+
primitive_attr_t r_attr;
|
| 63 |
+
if (!sc.get(DNNL_ARG_MULTIPLE_SRC + i).has_default_values()) {
|
| 64 |
+
int mask = 0;
|
| 65 |
+
CHECK(sc.get(DNNL_ARG_MULTIPLE_SRC + i, &mask, nullptr));
|
| 66 |
+
if (mask != 0) return status::unimplemented;
|
| 67 |
+
r_attr.scales_.set(DNNL_ARG_SRC, mask);
|
| 68 |
+
}
|
| 69 |
+
CHECK(reorder_primitive_desc_create(reorder_pds_[i], engine,
|
| 70 |
+
src_md(i), src_image_md(i), &r_attr));
|
| 71 |
+
}
|
| 72 |
+
if (use_tent_dst()) {
|
| 73 |
+
assert(tent_dst_md_.format_kind != format_kind::undef);
|
| 74 |
+
assert(dst_md_.format_kind != format_kind::undef);
|
| 75 |
+
CHECK(reorder_primitive_desc_create(
|
| 76 |
+
reorder_pds_[n_], engine, &tent_dst_md_, &dst_md_));
|
| 77 |
+
}
|
| 78 |
+
init_scratchpad();
|
| 79 |
+
return status;
|
| 80 |
+
}
|
| 81 |
+
|
| 82 |
+
// if dst is forced and cannot be used directly.
|
| 83 |
+
bool use_tent_dst() const { return !types::is_zero_md(&tent_dst_md_); }
|
| 84 |
+
|
| 85 |
+
std::vector<std::shared_ptr<primitive_desc_t>> reorder_pds_;
|
| 86 |
+
memory_desc_t tent_dst_md_;
|
| 87 |
+
|
| 88 |
+
private:
|
| 89 |
+
void init_scratchpad() {
|
| 90 |
+
using namespace memory_tracking::names;
|
| 91 |
+
auto scratchpad = scratchpad_registry().registrar();
|
| 92 |
+
if (use_tent_dst()) {
|
| 93 |
+
const memory_desc_wrapper tent_dst_d(&tent_dst_md_);
|
| 94 |
+
scratchpad.book(memory_tracking::names::key_concat_tent_dst,
|
| 95 |
+
tent_dst_d.size(), 1, tent_dst_d.data_type_size());
|
| 96 |
+
}
|
| 97 |
+
|
| 98 |
+
for (size_t i = 0; i < reorder_pds_.size(); i++) {
|
| 99 |
+
scratchpad.book(key_nested_multiple + (int)i,
|
| 100 |
+
reorder_pds_[i]->scratchpad_registry());
|
| 101 |
+
}
|
| 102 |
+
}
|
| 103 |
+
};
|
| 104 |
+
|
| 105 |
+
ref_concat_t(const pd_t *apd) : primitive_t(apd) {}
|
| 106 |
+
|
| 107 |
+
status_t init(engine_t *engine) override {
|
| 108 |
+
const size_t n = pd()->reorder_pds_.size();
|
| 109 |
+
reorders_.resize(n);
|
| 110 |
+
for (size_t i = 0; i < n; ++i)
|
| 111 |
+
pd()->reorder_pds_[i]->create_primitive(reorders_[i], engine);
|
| 112 |
+
return status::success;
|
| 113 |
+
}
|
| 114 |
+
|
| 115 |
+
~ref_concat_t() = default;
|
| 116 |
+
|
| 117 |
+
status_t execute(const exec_ctx_t &ctx) const override {
|
| 118 |
+
using namespace memory_tracking::names;
|
| 119 |
+
engine_t *engine = ctx.stream()->engine();
|
| 120 |
+
const auto n = pd()->n_inputs();
|
| 121 |
+
|
| 122 |
+
auto execute_reorder = [&](const std::shared_ptr<primitive_t> &reorder,
|
| 123 |
+
const memory_arg_t &src,
|
| 124 |
+
const memory_arg_t &dst,
|
| 125 |
+
const memory_arg_t *src_scales,
|
| 126 |
+
int r_num) {
|
| 127 |
+
exec_args_t r_args;
|
| 128 |
+
r_args[DNNL_ARG_SRC] = src;
|
| 129 |
+
r_args[DNNL_ARG_DST] = dst;
|
| 130 |
+
if (src_scales)
|
| 131 |
+
r_args[DNNL_ARG_ATTR_SCALES | DNNL_ARG_SRC] = *src_scales;
|
| 132 |
+
exec_ctx_t r_ctx(ctx, std::move(r_args));
|
| 133 |
+
|
| 134 |
+
nested_scratchpad_t ns(ctx, key_nested_multiple + r_num, reorder);
|
| 135 |
+
r_ctx.set_scratchpad_grantor(ns.grantor());
|
| 136 |
+
reorder->execute(r_ctx);
|
| 137 |
+
};
|
| 138 |
+
|
| 139 |
+
if (pd()->use_tent_dst()) {
|
| 140 |
+
using namespace memory_tracking::names;
|
| 141 |
+
auto scratchpad = ctx.get_scratchpad_grantor();
|
| 142 |
+
auto tent_dst_storage
|
| 143 |
+
= scratchpad.get_memory_storage(key_concat_tent_dst);
|
| 144 |
+
|
| 145 |
+
for (int i = 0; i < n; ++i) {
|
| 146 |
+
memory_t tent_dst_i(engine, pd()->src_image_md(i),
|
| 147 |
+
tent_dst_storage->clone());
|
| 148 |
+
const auto &src_scales_arg = ctx.args().find(
|
| 149 |
+
DNNL_ARG_ATTR_SCALES | (DNNL_ARG_MULTIPLE_SRC + i));
|
| 150 |
+
const memory_arg_t *src_scales = nullptr;
|
| 151 |
+
if (src_scales_arg != ctx.args().end())
|
| 152 |
+
src_scales = &src_scales_arg->second;
|
| 153 |
+
execute_reorder(reorders_[i],
|
| 154 |
+
ctx.args().at(DNNL_ARG_MULTIPLE_SRC + i),
|
| 155 |
+
{&tent_dst_i, false}, src_scales, i);
|
| 156 |
+
}
|
| 157 |
+
|
| 158 |
+
memory_t tent_dst(
|
| 159 |
+
engine, &pd()->tent_dst_md_, tent_dst_storage->clone());
|
| 160 |
+
execute_reorder(reorders_[n], {&tent_dst, true},
|
| 161 |
+
ctx.args().at(DNNL_ARG_DST), nullptr, n);
|
| 162 |
+
} else {
|
| 163 |
+
auto &dst_mem_storage = CTX_OUT_STORAGE(DNNL_ARG_DST);
|
| 164 |
+
for (int i = 0; i < n; ++i) {
|
| 165 |
+
memory_t tent_dst_i(
|
| 166 |
+
engine, pd()->src_image_md(i), dst_mem_storage.clone());
|
| 167 |
+
const auto &src_scales_arg = ctx.args().find(
|
| 168 |
+
DNNL_ARG_ATTR_SCALES | (DNNL_ARG_MULTIPLE_SRC + i));
|
| 169 |
+
const memory_arg_t *src_scales = nullptr;
|
| 170 |
+
if (src_scales_arg != ctx.args().end())
|
| 171 |
+
src_scales = &src_scales_arg->second;
|
| 172 |
+
execute_reorder(reorders_[i],
|
| 173 |
+
ctx.args().at(DNNL_ARG_MULTIPLE_SRC + i),
|
| 174 |
+
{&tent_dst_i, false}, src_scales, i);
|
| 175 |
+
}
|
| 176 |
+
}
|
| 177 |
+
return status::success;
|
| 178 |
+
}
|
| 179 |
+
|
| 180 |
+
private:
|
| 181 |
+
const pd_t *pd() const { return (const pd_t *)primitive_t::pd().get(); }
|
| 182 |
+
std::vector<std::shared_ptr<primitive_t>> reorders_;
|
| 183 |
+
};
|
| 184 |
+
|
| 185 |
+
} // namespace cpu
|
| 186 |
+
} // namespace impl
|
| 187 |
+
} // namespace dnnl
|
| 188 |
+
|
| 189 |
+
#endif
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/ref_convolution.hpp
ADDED
|
@@ -0,0 +1,203 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*******************************************************************************
|
| 2 |
+
* Copyright 2016-2023 Intel Corporation
|
| 3 |
+
*
|
| 4 |
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
* you may not use this file except in compliance with the License.
|
| 6 |
+
* You may obtain a copy of the License at
|
| 7 |
+
*
|
| 8 |
+
* http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
*
|
| 10 |
+
* Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
* See the License for the specific language governing permissions and
|
| 14 |
+
* limitations under the License.
|
| 15 |
+
*******************************************************************************/
|
| 16 |
+
|
| 17 |
+
#ifndef CPU_REF_CONVOLUTION_HPP
|
| 18 |
+
#define CPU_REF_CONVOLUTION_HPP
|
| 19 |
+
|
| 20 |
+
#include <assert.h>
|
| 21 |
+
|
| 22 |
+
#include "common/c_types_map.hpp"
|
| 23 |
+
#include "common/primitive.hpp"
|
| 24 |
+
#include "common/type_helpers.hpp"
|
| 25 |
+
#include "common/utils.hpp"
|
| 26 |
+
|
| 27 |
+
#include "cpu/cpu_convolution_pd.hpp"
|
| 28 |
+
#include "cpu/primitive_attr_postops.hpp"
|
| 29 |
+
|
| 30 |
+
namespace dnnl {
|
| 31 |
+
namespace impl {
|
| 32 |
+
namespace cpu {
|
| 33 |
+
|
| 34 |
+
struct ref_convolution_fwd_t : public primitive_t {
|
| 35 |
+
struct pd_t : public cpu_convolution_fwd_pd_t {
|
| 36 |
+
using cpu_convolution_fwd_pd_t::cpu_convolution_fwd_pd_t;
|
| 37 |
+
|
| 38 |
+
DECLARE_COMMON_PD_T("ref:any", ref_convolution_fwd_t);
|
| 39 |
+
|
| 40 |
+
status_t init(engine_t *engine) {
|
| 41 |
+
using namespace data_type;
|
| 42 |
+
using smask_t = primitive_attr_t::skip_mask_t;
|
| 43 |
+
const auto src_type = src_md(0)->data_type;
|
| 44 |
+
const auto wei_type = weights_md(0)->data_type;
|
| 45 |
+
const auto bia_type = weights_md(1)->data_type;
|
| 46 |
+
const auto dst_type = dst_md(0)->data_type;
|
| 47 |
+
|
| 48 |
+
bool ok = is_fwd()
|
| 49 |
+
&& set_default_alg_kind(alg_kind::convolution_direct)
|
| 50 |
+
&& platform::has_data_type_support(src_type)
|
| 51 |
+
&& platform::has_data_type_support(bia_type)
|
| 52 |
+
&& platform::has_data_type_support(dst_type)
|
| 53 |
+
&& utils::one_of(src_type, f32, bf16, f16)
|
| 54 |
+
&& src_type == wei_type
|
| 55 |
+
&& utils::one_of(dst_type, src_type, f32)
|
| 56 |
+
&& utils::one_of(bia_type, data_type::undef, src_type, f32)
|
| 57 |
+
&& set_default_formats()
|
| 58 |
+
&& attr()->has_default_values(
|
| 59 |
+
smask_t::post_ops | smask_t::sum_dt, dst_type)
|
| 60 |
+
&& attr()->post_ops_.check_sum_consistency(
|
| 61 |
+
dst_type, /* is_int8 */ false)
|
| 62 |
+
&& post_ops_ok()
|
| 63 |
+
&& attr_.set_default_formats(dst_md(0)) == status::success;
|
| 64 |
+
return ok ? status::success : status::unimplemented;
|
| 65 |
+
}
|
| 66 |
+
|
| 67 |
+
protected:
|
| 68 |
+
bool set_default_formats() {
|
| 69 |
+
using namespace format_tag;
|
| 70 |
+
auto dat_tag = utils::pick(ndims() - 3, nwc, nhwc, ndhwc);
|
| 71 |
+
auto wei_tag = with_groups()
|
| 72 |
+
? utils::pick(ndims() - 3, goiw, goihw, goidhw)
|
| 73 |
+
: utils::pick(ndims() - 3, oiw, oihw, oidhw);
|
| 74 |
+
return set_default_formats_common(dat_tag, wei_tag, dat_tag);
|
| 75 |
+
}
|
| 76 |
+
|
| 77 |
+
bool post_ops_ok() const {
|
| 78 |
+
return ref_post_ops_t::primitive_kind_ok(attr()->post_ops_);
|
| 79 |
+
}
|
| 80 |
+
};
|
| 81 |
+
|
| 82 |
+
ref_convolution_fwd_t(const pd_t *apd) : primitive_t(apd) {}
|
| 83 |
+
|
| 84 |
+
status_t init(engine_t *engine) override {
|
| 85 |
+
ref_post_ops
|
| 86 |
+
= utils::make_unique<ref_post_ops_t>(pd()->attr()->post_ops_);
|
| 87 |
+
if (!ref_post_ops) return status::out_of_memory;
|
| 88 |
+
CHECK(ref_post_ops->init(pd()->dst_md()));
|
| 89 |
+
return status::success;
|
| 90 |
+
}
|
| 91 |
+
|
| 92 |
+
status_t execute(const exec_ctx_t &ctx) const override {
|
| 93 |
+
return execute_forward(ctx);
|
| 94 |
+
}
|
| 95 |
+
|
| 96 |
+
private:
|
| 97 |
+
status_t execute_forward(const exec_ctx_t &ctx) const;
|
| 98 |
+
const pd_t *pd() const { return (const pd_t *)primitive_t::pd().get(); }
|
| 99 |
+
std::unique_ptr<ref_post_ops_t> ref_post_ops;
|
| 100 |
+
};
|
| 101 |
+
|
| 102 |
+
struct ref_convolution_bwd_data_t : public primitive_t {
|
| 103 |
+
struct pd_t : public cpu_convolution_bwd_data_pd_t {
|
| 104 |
+
using cpu_convolution_bwd_data_pd_t::cpu_convolution_bwd_data_pd_t;
|
| 105 |
+
|
| 106 |
+
DECLARE_COMMON_PD_T("ref:any", ref_convolution_bwd_data_t);
|
| 107 |
+
|
| 108 |
+
status_t init(engine_t *engine) {
|
| 109 |
+
using namespace data_type;
|
| 110 |
+
const auto diff_src_type = diff_src_md(0)->data_type;
|
| 111 |
+
const auto wei_type = weights_md(0)->data_type;
|
| 112 |
+
const auto diff_dst_type = diff_dst_md(0)->data_type;
|
| 113 |
+
|
| 114 |
+
bool ok = desc()->prop_kind == prop_kind::backward_data
|
| 115 |
+
&& set_default_alg_kind(alg_kind::convolution_direct)
|
| 116 |
+
&& platform::has_data_type_support(diff_src_type)
|
| 117 |
+
&& platform::has_data_type_support(diff_dst_type)
|
| 118 |
+
&& utils::one_of(diff_dst_type, f32, bf16, f16)
|
| 119 |
+
&& wei_type == diff_dst_type
|
| 120 |
+
&& utils::one_of(diff_src_type, f32, diff_dst_type)
|
| 121 |
+
&& set_default_formats() && attr()->has_default_values();
|
| 122 |
+
|
| 123 |
+
return ok ? status::success : status::unimplemented;
|
| 124 |
+
}
|
| 125 |
+
|
| 126 |
+
protected:
|
| 127 |
+
bool set_default_formats() {
|
| 128 |
+
using namespace format_tag;
|
| 129 |
+
auto dat_tag = utils::pick(ndims() - 3, nwc, nhwc, ndhwc);
|
| 130 |
+
auto wei_tag = with_groups()
|
| 131 |
+
? utils::pick(ndims() - 3, goiw, goihw, goidhw)
|
| 132 |
+
: utils::pick(ndims() - 3, oiw, oihw, oidhw);
|
| 133 |
+
return set_default_formats_common(dat_tag, wei_tag, dat_tag);
|
| 134 |
+
}
|
| 135 |
+
};
|
| 136 |
+
|
| 137 |
+
ref_convolution_bwd_data_t(const pd_t *apd) : primitive_t(apd) {}
|
| 138 |
+
|
| 139 |
+
status_t execute(const exec_ctx_t &ctx) const override {
|
| 140 |
+
return execute_backward_data(ctx);
|
| 141 |
+
}
|
| 142 |
+
|
| 143 |
+
private:
|
| 144 |
+
status_t execute_backward_data(const exec_ctx_t &ctx) const;
|
| 145 |
+
const pd_t *pd() const { return (const pd_t *)primitive_t::pd().get(); }
|
| 146 |
+
};
|
| 147 |
+
|
| 148 |
+
struct ref_convolution_bwd_weights_t : public primitive_t {
|
| 149 |
+
struct pd_t : public cpu_convolution_bwd_weights_pd_t {
|
| 150 |
+
using cpu_convolution_bwd_weights_pd_t::
|
| 151 |
+
cpu_convolution_bwd_weights_pd_t;
|
| 152 |
+
|
| 153 |
+
DECLARE_COMMON_PD_T("ref:any", ref_convolution_bwd_weights_t);
|
| 154 |
+
|
| 155 |
+
status_t init(engine_t *engine) {
|
| 156 |
+
using namespace data_type;
|
| 157 |
+
const auto src_type = src_md(0)->data_type;
|
| 158 |
+
const auto diff_wei_type = diff_weights_md(0)->data_type;
|
| 159 |
+
const auto diff_bia_type = diff_weights_md(1)->data_type;
|
| 160 |
+
const auto diff_dst_type = diff_dst_md(0)->data_type;
|
| 161 |
+
|
| 162 |
+
bool ok = desc()->prop_kind == prop_kind::backward_weights
|
| 163 |
+
&& set_default_alg_kind(alg_kind::convolution_direct)
|
| 164 |
+
&& platform::has_data_type_support(src_type)
|
| 165 |
+
&& platform::has_data_type_support(diff_wei_type)
|
| 166 |
+
&& utils::one_of(src_type, f32, bf16, f16)
|
| 167 |
+
&& diff_dst_type == src_type
|
| 168 |
+
&& utils::one_of(diff_wei_type, f32, src_type)
|
| 169 |
+
&& utils::one_of(
|
| 170 |
+
diff_bia_type, data_type::undef, f32, src_type)
|
| 171 |
+
&& set_default_formats() && attr()->has_default_values();
|
| 172 |
+
return ok ? status::success : status::unimplemented;
|
| 173 |
+
}
|
| 174 |
+
|
| 175 |
+
protected:
|
| 176 |
+
bool set_default_formats() {
|
| 177 |
+
using namespace format_tag;
|
| 178 |
+
auto dat_tag = utils::pick(ndims() - 3, ncw, nchw, ncdhw);
|
| 179 |
+
auto wei_tag = with_groups()
|
| 180 |
+
? utils::pick(ndims() - 3, goiw, goihw, goidhw)
|
| 181 |
+
: utils::pick(ndims() - 3, oiw, oihw, oidhw);
|
| 182 |
+
return set_default_formats_common(dat_tag, wei_tag, dat_tag);
|
| 183 |
+
}
|
| 184 |
+
};
|
| 185 |
+
|
| 186 |
+
ref_convolution_bwd_weights_t(const pd_t *apd) : primitive_t(apd) {}
|
| 187 |
+
|
| 188 |
+
status_t execute(const exec_ctx_t &ctx) const override {
|
| 189 |
+
return execute_backward_weights(ctx);
|
| 190 |
+
}
|
| 191 |
+
|
| 192 |
+
private:
|
| 193 |
+
status_t execute_backward_weights(const exec_ctx_t &ctx) const;
|
| 194 |
+
const pd_t *pd() const { return (const pd_t *)primitive_t::pd().get(); }
|
| 195 |
+
};
|
| 196 |
+
|
| 197 |
+
} // namespace cpu
|
| 198 |
+
} // namespace impl
|
| 199 |
+
} // namespace dnnl
|
| 200 |
+
|
| 201 |
+
#endif
|
| 202 |
+
|
| 203 |
+
// vim: et ts=4 sw=4 cindent cino+=l0,\:4,N-s
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/ref_convolution_int8.hpp
ADDED
|
@@ -0,0 +1,163 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*******************************************************************************
|
| 2 |
+
* Copyright 2021-2023 Intel Corporation
|
| 3 |
+
*
|
| 4 |
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
* you may not use this file except in compliance with the License.
|
| 6 |
+
* You may obtain a copy of the License at
|
| 7 |
+
*
|
| 8 |
+
* http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
*
|
| 10 |
+
* Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
* See the License for the specific language governing permissions and
|
| 14 |
+
* limitations under the License.
|
| 15 |
+
*******************************************************************************/
|
| 16 |
+
|
| 17 |
+
#ifndef CPU_REF_CONVOLUTION_INT8_HPP
|
| 18 |
+
#define CPU_REF_CONVOLUTION_INT8_HPP
|
| 19 |
+
|
| 20 |
+
#include <assert.h>
|
| 21 |
+
|
| 22 |
+
#include "common/c_types_map.hpp"
|
| 23 |
+
#include "common/primitive.hpp"
|
| 24 |
+
#include "common/type_helpers.hpp"
|
| 25 |
+
#include "common/utils.hpp"
|
| 26 |
+
|
| 27 |
+
#include "cpu/cpu_convolution_pd.hpp"
|
| 28 |
+
#include "cpu/primitive_attr_postops.hpp"
|
| 29 |
+
|
| 30 |
+
namespace dnnl {
|
| 31 |
+
namespace impl {
|
| 32 |
+
namespace cpu {
|
| 33 |
+
|
| 34 |
+
struct ref_convolution_int8_fwd_t : public primitive_t {
|
| 35 |
+
struct pd_t : public cpu_convolution_fwd_pd_t {
|
| 36 |
+
using cpu_convolution_fwd_pd_t::cpu_convolution_fwd_pd_t;
|
| 37 |
+
|
| 38 |
+
DECLARE_COMMON_PD_T("ref:any", ref_convolution_int8_fwd_t);
|
| 39 |
+
|
| 40 |
+
status_t init(engine_t *engine) {
|
| 41 |
+
using namespace data_type;
|
| 42 |
+
using smask_t = primitive_attr_t::skip_mask_t;
|
| 43 |
+
const auto src_type = src_md(0)->data_type;
|
| 44 |
+
const auto wei_type = weights_md(0)->data_type;
|
| 45 |
+
const auto bia_type = weights_md(1)->data_type;
|
| 46 |
+
const auto dst_type = dst_md(0)->data_type;
|
| 47 |
+
|
| 48 |
+
bool ok = is_fwd()
|
| 49 |
+
&& set_default_alg_kind(alg_kind::convolution_direct)
|
| 50 |
+
&& utils::one_of(src_type, s8, u8) && wei_type == s8
|
| 51 |
+
&& IMPLICATION(with_bias(),
|
| 52 |
+
utils::one_of(bia_type, f32, bf16, s32, s8, u8))
|
| 53 |
+
&& utils::one_of(dst_type, f32, bf16, s32, s8, u8)
|
| 54 |
+
&& set_default_formats()
|
| 55 |
+
&& attr()->has_default_values(smask_t::scales_runtime
|
| 56 |
+
| smask_t::zero_points_runtime
|
| 57 |
+
| smask_t::post_ops | smask_t::sum_dt,
|
| 58 |
+
dst_type)
|
| 59 |
+
&& attr()->post_ops_.check_sum_consistency(dst_type,
|
| 60 |
+
/* is_int8 */ true)
|
| 61 |
+
&& attr_scales_ok() && zero_points_ok() && post_ops_ok()
|
| 62 |
+
&& attr_.set_default_formats(dst_md(0)) == status::success;
|
| 63 |
+
return ok ? status::success : status::unimplemented;
|
| 64 |
+
}
|
| 65 |
+
|
| 66 |
+
protected:
|
| 67 |
+
bool set_default_formats() {
|
| 68 |
+
using namespace format_tag;
|
| 69 |
+
auto dat_tag = utils::pick(ndims() - 3, nwc, nhwc, ndhwc);
|
| 70 |
+
auto wei_tag = with_groups()
|
| 71 |
+
? utils::pick(ndims() - 3, goiw, goihw, goidhw)
|
| 72 |
+
: utils::pick(ndims() - 3, oiw, oihw, oidhw);
|
| 73 |
+
return set_default_formats_common(dat_tag, wei_tag, dat_tag);
|
| 74 |
+
}
|
| 75 |
+
|
| 76 |
+
bool zero_points_ok() const {
|
| 77 |
+
int mask_src = 0, mask_dst = 0;
|
| 78 |
+
attr()->zero_points_.get(DNNL_ARG_SRC, &mask_src);
|
| 79 |
+
attr()->zero_points_.get(DNNL_ARG_DST, &mask_dst);
|
| 80 |
+
|
| 81 |
+
return attr()->zero_points_.has_default_values(DNNL_ARG_WEIGHTS)
|
| 82 |
+
&& (mask_src == 0 || mask_src == 1 << 1)
|
| 83 |
+
&& (mask_dst == 0 || mask_dst == 1 << 1);
|
| 84 |
+
}
|
| 85 |
+
|
| 86 |
+
bool post_ops_ok() const {
|
| 87 |
+
return ref_post_ops_t::primitive_kind_ok(attr()->post_ops_);
|
| 88 |
+
}
|
| 89 |
+
};
|
| 90 |
+
|
| 91 |
+
ref_convolution_int8_fwd_t(const pd_t *apd) : primitive_t(apd) {}
|
| 92 |
+
|
| 93 |
+
status_t init(engine_t *engine) override {
|
| 94 |
+
ref_post_ops
|
| 95 |
+
= utils::make_unique<ref_post_ops_t>(pd()->attr()->post_ops_);
|
| 96 |
+
if (!ref_post_ops) return status::out_of_memory;
|
| 97 |
+
CHECK(ref_post_ops->init(pd()->dst_md()));
|
| 98 |
+
return status::success;
|
| 99 |
+
}
|
| 100 |
+
|
| 101 |
+
status_t execute(const exec_ctx_t &ctx) const override {
|
| 102 |
+
return execute_forward(ctx);
|
| 103 |
+
}
|
| 104 |
+
|
| 105 |
+
private:
|
| 106 |
+
status_t execute_forward(const exec_ctx_t &ctx) const;
|
| 107 |
+
const pd_t *pd() const { return (const pd_t *)primitive_t::pd().get(); }
|
| 108 |
+
std::unique_ptr<ref_post_ops_t> ref_post_ops;
|
| 109 |
+
};
|
| 110 |
+
|
| 111 |
+
struct ref_convolution_int8_bwd_data_t : public primitive_t {
|
| 112 |
+
struct pd_t : public cpu_convolution_bwd_data_pd_t {
|
| 113 |
+
using cpu_convolution_bwd_data_pd_t::cpu_convolution_bwd_data_pd_t;
|
| 114 |
+
|
| 115 |
+
DECLARE_COMMON_PD_T("ref:any", ref_convolution_int8_bwd_data_t);
|
| 116 |
+
|
| 117 |
+
status_t init(engine_t *engine) {
|
| 118 |
+
using namespace data_type;
|
| 119 |
+
const auto diff_src_type = diff_src_md(0)->data_type;
|
| 120 |
+
const auto wei_type = weights_md(0)->data_type;
|
| 121 |
+
const auto diff_dst_type = diff_dst_md(0)->data_type;
|
| 122 |
+
|
| 123 |
+
bool ok = desc()->prop_kind == prop_kind::backward_data
|
| 124 |
+
&& set_default_alg_kind(alg_kind::convolution_direct)
|
| 125 |
+
&& utils::one_of(diff_dst_type, s8, u8) && wei_type == s8
|
| 126 |
+
&& utils::one_of(diff_src_type, f32, bf16, s32, s8, u8)
|
| 127 |
+
&& set_default_formats()
|
| 128 |
+
&& attr()->has_default_values(
|
| 129 |
+
primitive_attr_t::skip_mask_t::scales_runtime)
|
| 130 |
+
&& attr_scales_ok();
|
| 131 |
+
|
| 132 |
+
return ok ? status::success : status::unimplemented;
|
| 133 |
+
}
|
| 134 |
+
|
| 135 |
+
protected:
|
| 136 |
+
bool set_default_formats() {
|
| 137 |
+
using namespace format_tag;
|
| 138 |
+
auto dat_tag = utils::pick(ndims() - 3, nwc, nhwc, ndhwc);
|
| 139 |
+
auto wei_tag = with_groups()
|
| 140 |
+
? utils::pick(ndims() - 3, goiw, goihw, goidhw)
|
| 141 |
+
: utils::pick(ndims() - 3, oiw, oihw, oidhw);
|
| 142 |
+
return set_default_formats_common(dat_tag, wei_tag, dat_tag);
|
| 143 |
+
}
|
| 144 |
+
};
|
| 145 |
+
|
| 146 |
+
ref_convolution_int8_bwd_data_t(const pd_t *apd) : primitive_t(apd) {}
|
| 147 |
+
|
| 148 |
+
status_t execute(const exec_ctx_t &ctx) const override {
|
| 149 |
+
return execute_backward_data(ctx);
|
| 150 |
+
}
|
| 151 |
+
|
| 152 |
+
private:
|
| 153 |
+
status_t execute_backward_data(const exec_ctx_t &ctx) const;
|
| 154 |
+
const pd_t *pd() const { return (const pd_t *)primitive_t::pd().get(); }
|
| 155 |
+
};
|
| 156 |
+
|
| 157 |
+
} // namespace cpu
|
| 158 |
+
} // namespace impl
|
| 159 |
+
} // namespace dnnl
|
| 160 |
+
|
| 161 |
+
#endif
|
| 162 |
+
|
| 163 |
+
// vim: et ts=4 sw=4 cindent cino+=l0,\:4,N-s
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/ref_convolution_utils.hpp
ADDED
|
@@ -0,0 +1,67 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*******************************************************************************
|
| 2 |
+
* Copyright 2021 Intel Corporation
|
| 3 |
+
*
|
| 4 |
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
* you may not use this file except in compliance with the License.
|
| 6 |
+
* You may obtain a copy of the License at
|
| 7 |
+
*
|
| 8 |
+
* http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
*
|
| 10 |
+
* Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
* See the License for the specific language governing permissions and
|
| 14 |
+
* limitations under the License.
|
| 15 |
+
*******************************************************************************/
|
| 16 |
+
|
| 17 |
+
#ifndef CPU_REF_CONVOLUTION_UTILS_HPP
|
| 18 |
+
#define CPU_REF_CONVOLUTION_UTILS_HPP
|
| 19 |
+
|
| 20 |
+
#include <assert.h>
|
| 21 |
+
|
| 22 |
+
#include "common/c_types_map.hpp"
|
| 23 |
+
#include "common/primitive.hpp"
|
| 24 |
+
#include "common/type_helpers.hpp"
|
| 25 |
+
#include "common/utils.hpp"
|
| 26 |
+
|
| 27 |
+
#include "cpu/cpu_convolution_pd.hpp"
|
| 28 |
+
#include "cpu/primitive_attr_postops.hpp"
|
| 29 |
+
|
| 30 |
+
namespace dnnl {
|
| 31 |
+
namespace impl {
|
| 32 |
+
namespace cpu {
|
| 33 |
+
|
| 34 |
+
namespace ref_conv_utils {
|
| 35 |
+
// Translates logical (mb, c, [d, [h,]] w) coordinates into a physical
// element offset through the memory descriptor, dispatching on the
// tensor rank; for ranks below 5 the unused leading spatial
// coordinates (id / ih) are simply ignored.
inline dim_t get_data_off(const memory_desc_wrapper &mdw, int ndims, dim_t mb,
        dim_t c, dim_t id, dim_t ih, dim_t iw) {
    if (ndims == 5) return mdw.off(mb, c, id, ih, iw);
    if (ndims == 4) return mdw.off(mb, c, ih, iw);
    if (ndims == 3) return mdw.off(mb, c, iw);
    assert(!"unsupported ndims");
    return dim_t(0);
}
|
| 44 |
+
|
| 45 |
+
// Translates logical weights coordinates (g, oc, ic, kd, kh, kw) into a
// physical element offset. The groups dimension g participates only
// when with_groups is set; unused leading spatial coordinates are
// ignored for ranks below 5.
inline dim_t get_weights_off(const memory_desc_wrapper &mdw, bool with_groups,
        int ndims, dim_t g, dim_t oc, dim_t ic, dim_t kd, dim_t kh, dim_t kw) {
    if (ndims == 5)
        return with_groups ? mdw.off(g, oc, ic, kd, kh, kw)
                           : mdw.off(oc, ic, kd, kh, kw);
    if (ndims == 4)
        return with_groups ? mdw.off(g, oc, ic, kh, kw)
                           : mdw.off(oc, ic, kh, kw);
    if (ndims == 3)
        return with_groups ? mdw.off(g, oc, ic, kw) : mdw.off(oc, ic, kw);
    assert(!"unsupported ndims");
    return dim_t(0);
}
|
| 59 |
+
} // namespace ref_conv_utils
|
| 60 |
+
|
| 61 |
+
} // namespace cpu
|
| 62 |
+
} // namespace impl
|
| 63 |
+
} // namespace dnnl
|
| 64 |
+
|
| 65 |
+
#endif
|
| 66 |
+
|
| 67 |
+
// vim: et ts=4 sw=4 cindent cino+=l0,\:4,N-s
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/ref_deconvolution.hpp
ADDED
|
@@ -0,0 +1,554 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*******************************************************************************
|
| 2 |
+
* Copyright 2018-2023 Intel Corporation
|
| 3 |
+
* Copyright 2022 Arm Ltd. and affiliates
|
| 4 |
+
*
|
| 5 |
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
| 6 |
+
* you may not use this file except in compliance with the License.
|
| 7 |
+
* You may obtain a copy of the License at
|
| 8 |
+
*
|
| 9 |
+
* http://www.apache.org/licenses/LICENSE-2.0
|
| 10 |
+
*
|
| 11 |
+
* Unless required by applicable law or agreed to in writing, software
|
| 12 |
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
| 13 |
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 14 |
+
* See the License for the specific language governing permissions and
|
| 15 |
+
* limitations under the License.
|
| 16 |
+
*******************************************************************************/
|
| 17 |
+
|
| 18 |
+
#ifndef CPU_REF_DECONVOLUTION_HPP
|
| 19 |
+
#define CPU_REF_DECONVOLUTION_HPP
|
| 20 |
+
|
| 21 |
+
#include <assert.h>
|
| 22 |
+
#include <string.h>
|
| 23 |
+
|
| 24 |
+
#include "common/c_types_map.hpp"
|
| 25 |
+
#include "common/primitive.hpp"
|
| 26 |
+
#include "common/primitive_desc_iterator.hpp"
|
| 27 |
+
#include "common/stream.hpp"
|
| 28 |
+
#include "common/type_helpers.hpp"
|
| 29 |
+
#include "common/utils.hpp"
|
| 30 |
+
|
| 31 |
+
#include "cpu/primitive_attr_postops.hpp"
|
| 32 |
+
|
| 33 |
+
#include "cpu/cpu_convolution_pd.hpp"
|
| 34 |
+
#include "cpu/cpu_deconvolution_pd.hpp"
|
| 35 |
+
|
| 36 |
+
namespace dnnl {
|
| 37 |
+
namespace impl {
|
| 38 |
+
namespace cpu {
|
| 39 |
+
|
| 40 |
+
// Produces in *o_md the deconvolution weights descriptor *i_md with the
// OC and IC axes swapped, which converts between deconv and conv
// weights layouts. With groups present, the swapped axes shift by one
// to skip the leading groups dimension.
static status_t weights_axes_permutation(
        memory_desc_t *o_md, const memory_desc_t *i_md, bool with_groups) {
    int perm[DNNL_MAX_NDIMS] {}; // deconv to conv weight permutation
    for (int axis = 0; axis < DNNL_MAX_NDIMS; ++axis)
        perm[axis] = axis; // start from the identity permutation
    const int oc_axis = with_groups ? 1 : 0;
    nstl::swap(perm[oc_axis], perm[oc_axis + 1]);
    return memory_desc_permute_axes(*o_md, *i_md, perm);
}
|
| 49 |
+
|
| 50 |
+
// Builds a convolution descriptor @p cd that realizes the deconvolution
// described by @p dd, exploiting the duality between the two primitives:
//   - deconv forward          -> conv backward-data (src/dst swapped)
//   - deconv backward-data    -> conv forward
//   - deconv backward-weights -> conv backward-weights (via the else branch)
// @p bias_md is passed through to the convolution descriptor (may be null).
// @p src_dt, when not data_type::undef, overrides the data type of the conv
// "source" (only legal in the forward case, as the asserts enforce).
static status_t conv_descr_create(const deconvolution_desc_t *dd,
        convolution_desc_t *cd, const memory_desc_t *bias_md = nullptr,
        data_type_t src_dt = data_type::undef) {
    using namespace prop_kind;
    // Winograd is the only non-direct algorithm this mapping handles.
    alg_kind_t alg_kind = dd->alg_kind == alg_kind::deconvolution_direct
            ? alg_kind::convolution_direct
            : alg_kind::convolution_winograd;

    const memory_desc_t *src_md, *dst_md, *d_weights_d;
    memory_desc_t src_md_patched;
    prop_kind_t prop_kind;

    if (utils::one_of(dd->prop_kind, forward_training, forward_inference)) {
        // Forward deconv == backward-data conv with roles of src/dst swapped.
        prop_kind = backward_data;
        assert(src_dt != data_type::undef);
        // Patch the conv source (deconv dst) to the caller-requested dtype.
        CHECK(memory_desc_init_by_md_and_dt(
                src_md_patched, dd->dst_desc, src_dt));
        src_md = &src_md_patched;
        dst_md = &dd->src_desc;
        d_weights_d = &dd->weights_desc;
    } else if (dd->prop_kind == backward_data) {
        // Backward-data deconv == forward conv.
        assert(src_dt == data_type::undef);
        prop_kind = forward_training;
        src_md = &dd->diff_dst_desc;
        dst_md = &dd->diff_src_desc;
        d_weights_d = &dd->weights_desc;
    } else {
        // Backward-weights: prop kind is kept, src/dst swap roles.
        assert(src_dt == data_type::undef);
        prop_kind = dd->prop_kind;
        src_md = &dd->diff_dst_desc;
        dst_md = &dd->src_desc;
        d_weights_d = &dd->diff_weights_desc;
    }

    /* create weights desc for convolution */
    memory_desc_t c_weights_d;
    // Groups are detected structurally: grouped weights carry one extra dim.
    const bool with_groups = d_weights_d->ndims == src_md->ndims + 1;
    CHECK(weights_axes_permutation(&c_weights_d, d_weights_d, with_groups));

    return conv_desc_init(cd, prop_kind, alg_kind, src_md, &c_weights_d,
            bias_md, dst_md, dd->strides, dd->dilates, dd->padding[0],
            dd->padding[1]);
}
|
| 93 |
+
|
| 94 |
+
// Reference forward deconvolution. Delegates the compute to a nested
// backward-data convolution primitive (see conv_descr_create) and applies
// bias / post-ops / quantization attributes itself in a follow-up pass.
struct ref_deconvolution_fwd_t : public primitive_t {
    struct pd_t : public cpu_deconvolution_fwd_pd_t {
        pd_t(const deconvolution_desc_t *adesc, const primitive_attr_t *attr,
                const deconvolution_fwd_pd_t *hint_fwd_pd)
            : cpu_deconvolution_fwd_pd_t(adesc, attr, hint_fwd_pd) {}

        // Deep-copies the nested conv pd so each clone owns its own copy.
        pd_t(const pd_t &other)
            : cpu_deconvolution_fwd_pd_t(other)
            , conv_pd_(other.conv_pd_->clone())
            , conv_supports_bias_(other.conv_supports_bias_)
            , dst_tag_(other.dst_tag_)
            , name_(other.name_) {}

        ~pd_t() = default;

        DECLARE_COMMON_PD_T(name_.c_str(), ref_deconvolution_fwd_t);

        // Selects the nested bwd_d convolution implementation. Returns
        // success as soon as a suitable conv pd is found, out_of_memory on
        // iterator failure, unimplemented when no candidate qualifies.
        status_t init_convolution(engine_t *engine) {
            using namespace format_tag;
            using namespace data_type;

            // Create empty attributes for bwd_d conv to pick up the fastest
            // impl available and apply post-ops and/or bias update later in
            // this impl via simple loop.
            primitive_attr_t conv_attr;

            convolution_desc_t cd;
            // When no attributes were requested, try to find a bwd_d conv impl
            // which supports bias update in-place, if requested, in requested
            // dst_dt. If appropriate conv impl was not found, enforce f32
            // diff_src for conv for correct result. If attributes are
            // requested, enforce conv impl to return f32 output no matter what.
            if (attr()->has_default_values()) {
                CHECK(conv_descr_create(
                        desc(), &cd, weights_md(1), dst_md()->data_type));
                primitive_desc_iterator_t it(
                        engine, (op_desc_t *)&cd, &conv_attr, nullptr);
                if (!it.is_initialized()) return status::out_of_memory;

                while (++it != it.end()) {
                    conv_pd_ = *it;
                    if (with_bias()) {
                        conv_supports_bias_ = utils::downcast<
                                cpu_convolution_bwd_data_pd_t *>(
                                conv_pd_.get())
                                                      ->support_bias();
                        if (!conv_supports_bias_) continue;
                    }
                    // Reject weights with extra compensation metadata.
                    bool ok = conv_pd_->weights_md()->extra.flags == 0;
                    if (ok) return status::success;
                }
            }

            // Intermediate f32 buffer is supported only for given condition.
            if (!attr()->has_default_values() || with_bias()) {
                // Enforce f32 dt for diff src and work with f32 output for bias
                // update or post ops after conv execution.
                CHECK(conv_descr_create(desc(), &cd, nullptr, data_type::f32));
                primitive_desc_iterator_t it(
                        engine, (op_desc_t *)&cd, &conv_attr, nullptr);
                if (!it.is_initialized()) return status::out_of_memory;

                while (++it != it.end()) {
                    conv_pd_ = *it;
                    bool ok = conv_pd_->weights_md()->extra.flags == 0;
                    if (ok) return status::success;
                }
            }
            return status::unimplemented;
        }

        // Validates the request, picks the nested conv, derives any
        // format_kind::any memory descriptors from it, and books scratchpad.
        status_t init(engine_t *engine) {
            using namespace format_tag;
            using namespace data_type;
            using smask_t = primitive_attr_t::skip_mask_t;
            // int8 flavors additionally allow runtime scales/zero-points.
            auto skip_mask = smask_t::post_ops | smask_t::sum_dt;
            if (utils::one_of(desc()->src_desc.data_type, s8, u8))
                skip_mask |= smask_t::scales_runtime
                        | smask_t::zero_points_runtime;

            const bool ok = is_fwd()
                    && utils::one_of(desc()->alg_kind,
                            alg_kind::deconvolution_direct,
                            alg_kind::deconvolution_winograd)
                    && attr()->has_default_values(skip_mask) && attr_scales_ok()
                    && post_ops_ok() && zero_points_ok();
            if (!ok) return status::unimplemented;

            CHECK(init_convolution(engine));

            // Adopt formats from the selected conv pd where ours were "any".
            if (weights_md_.format_kind == format_kind::any)
                CHECK(weights_axes_permutation(
                        &weights_md_, conv_pd_->weights_md(), with_groups()));
            if (src_md_.format_kind == format_kind::any)
                src_md_ = *conv_pd_->diff_dst_md();
            if (dst_md_.format_kind == format_kind::any) {
                // re-apply dt manually since it could be changed due to bias
                const auto dst_dt = dst_md_.data_type;
                memory_desc_init_by_md_and_dt(
                        dst_md_, *conv_pd_->diff_src_md(), dst_dt);
            }
            if (bias_md_.format_kind == format_kind::any)
                CHECK(memory_desc_init_by_tag(bias_md_, x));

            // Cache the recognized dst layout so execute() can dispatch to a
            // specialized bias/post-op loop.
            dst_tag_ = memory_desc_matches_one_of_tag(dst_md_,
                    utils::pick(ndims() - 3, ncw, nchw, ncdhw),
                    utils::pick(ndims() - 3, nwc, nhwc, ndhwc),
                    utils::pick(ndims() - 3, nCw8c, nChw8c, nCdhw8c),
                    utils::pick(ndims() - 3, nCw16c, nChw16c, nCdhw16c));

            init_name();
            init_scratchpad();
            return attr_.set_default_formats(dst_md(0));
        }

        std::shared_ptr<primitive_desc_t> conv_pd_; // nested conv bwd_d pd
        bool conv_supports_bias_ = false; // conv applies bias in-place
        format_tag_t dst_tag_; // recognized dst layout (or undef)

    private:
        std::string name_ = "conv:any+"; // convolution-based deconvolution

        void init_name() { name_.append(conv_pd_->name()); }

        void init_scratchpad() {
            using namespace memory_tracking::names;
            auto scratchpad = scratchpad_registry().registrar();
            scratchpad.book(key_nested, conv_pd_->scratchpad_registry());

            // This scratchpad is required for intermediate f32 conv output
            // since original memory can be of smaller size and will cause
            // out of boundary access.
            if ((with_bias() && !conv_supports_bias_)
                    || !attr()->has_default_values()) {
                const memory_desc_wrapper diff_src_d(conv_pd_->diff_src_md());
                assert(diff_src_d.data_type_size() == sizeof(float));
                scratchpad.book(key_deconv_bias, diff_src_d.nelems(true),
                        diff_src_d.data_type_size());
            }
            // This scratchpad is required to stash original dst memory for sum
            // post-op. It will be overwritten by conv execution and will not
            // be available to get the correct result.
            const memory_desc_wrapper dst_d(dst_md());
            if (attr()->post_ops_.find(primitive_kind::sum) != -1)
                scratchpad.book(key_deconv_sum, dst_d.nelems(true),
                        dst_d.data_type_size());

            // Per-channel (OC x G) buffer for src zero-point compensation.
            if (!attr()->zero_points_.has_default_values(DNNL_ARG_SRC)) {
                scratchpad.book<int32_t>(key_deconv_zp, OC() * G());
            }
        }

        // Post-ops are acceptable when the sum post-op dtype is consistent
        // with dst and every entry is a kind the reference loop can apply.
        bool post_ops_ok() const {
            using namespace data_type;
            const bool is_int8 = utils::one_of(src_md()->data_type, s8, u8);
            return attr()->post_ops_.check_sum_consistency(
                           dst_md()->data_type, is_int8)
                    && ref_post_ops_t::primitive_kind_ok(attr()->post_ops_);
        }

        // Zero points: only int8 may carry them, never on weights, and
        // src/dst masks must be common (0) or per-channel (1 << 1).
        bool zero_points_ok() const {
            using namespace data_type;
            int mask_src = 0, mask_dst = 0;
            attr()->zero_points_.get(DNNL_ARG_SRC, &mask_src);
            attr()->zero_points_.get(DNNL_ARG_DST, &mask_dst);

            return IMPLICATION(!utils::one_of(src_md()->data_type, s8, u8),
                           attr()->zero_points_.has_default_values())
                    && attr()->zero_points_.has_default_values(DNNL_ARG_WEIGHTS)
                    && (mask_src == 0 || mask_src == 1 << 1)
                    && (mask_dst == 0 || mask_dst == 1 << 1);
        }
    };

    ref_deconvolution_fwd_t(const pd_t *apd) : primitive_t(apd) {}

    // Instantiates the nested conv primitive and the reference post-op
    // executor used after the conv pass.
    status_t init(engine_t *engine) override {
        CHECK(pd()->conv_pd_->create_primitive(conv_p_, engine));

        ref_post_ops
                = utils::make_unique<ref_post_ops_t>(pd()->attr()->post_ops_);
        if (!ref_post_ops) return status::out_of_memory;
        CHECK(ref_post_ops->init(pd()->dst_md()));
        return status::success;
    }

    status_t execute(const exec_ctx_t &ctx) const override;

private:
    // Bias application helpers, one per recognized dst layout; the generic
    // one handles any layout. Defined in the corresponding .cpp.
    void compute_fwd_bias_common(const exec_ctx_t &ctx, void *dst,
            const float *conv_output, bool non_default_attr) const;

    void compute_fwd_bias_ncdhw(const exec_ctx_t &ctx, void *dst,
            const float *conv_output, bool non_default_attr) const;

    void compute_fwd_bias_ndhwc(const exec_ctx_t &ctx, void *dst,
            const float *conv_output, bool non_default_attr) const;

    template <dim_t blk_size>
    void compute_fwd_bias_nCdhwXc(const exec_ctx_t &ctx, void *dst,
            const float *conv_output, bool non_default_attr) const;

    status_t compute_oscale(const exec_ctx_t &ctx, float *dst) const;

    void compute_fwd_bias(const exec_ctx_t &ctx, void *dst,
            const float *conv_output, bool non_default_attr) const;

    // Applies scales/zero-points/post-ops on the f32 conv output.
    status_t compute_ref_attrs(const exec_ctx_t &ctx, const float *conv_output,
            void *original_dst) const;

    const pd_t *pd() const { return (const pd_t *)primitive_t::pd().get(); }
    std::shared_ptr<primitive_t> conv_p_; // nested conv primitive
    std::unique_ptr<ref_post_ops_t> ref_post_ops;
};
|
| 307 |
+
|
| 308 |
+
// Reference backward-data deconvolution: implemented entirely as a nested
// forward convolution primitive (see conv_descr_create), with no extra
// compute of its own.
struct ref_deconvolution_bwd_data_t : public primitive_t {
    struct pd_t : public cpu_deconvolution_bwd_data_pd_t {
        pd_t(const deconvolution_desc_t *adesc, const primitive_attr_t *attr,
                const deconvolution_fwd_pd_t *hint_fwd_pd)
            : cpu_deconvolution_bwd_data_pd_t(adesc, attr, hint_fwd_pd) {}

        // Deep-copies the nested conv pd so each clone owns its own copy.
        pd_t(const pd_t &other)
            : cpu_deconvolution_bwd_data_pd_t(other)
            , conv_pd_(other.conv_pd_->clone())
            , name_(other.name_) {}

        ~pd_t() = default;

        DECLARE_COMMON_PD_T(name_.c_str(), ref_deconvolution_bwd_data_t);

        // Picks the first forward conv implementation whose weights carry no
        // extra compensation metadata (extra.flags == 0).
        status_t init_convolution(engine_t *engine) {
            using namespace types;

            convolution_desc_t cd;
            status_t status = conv_descr_create(desc(), &cd);
            if (status != status::success) return status;
            // Attributes are forwarded to the conv as-is.
            primitive_attr_t conv_attr(*attr());
            if (!conv_attr.is_initialized()) return status::out_of_memory;

            primitive_desc_iterator_t it(
                    engine, (op_desc_t *)&cd, &conv_attr, nullptr);
            if (!it.is_initialized()) return status::out_of_memory;
            while (++it != it.end()) {
                conv_pd_ = *it;
                if (conv_pd_->weights_md()->extra.flags == 0)
                    return status::success;
            }

            return status::unimplemented;
        }

        // Validates data types / algorithm / attributes, then derives any
        // format_kind::any descriptors from the selected conv pd.
        status_t init(engine_t *engine) {
            using namespace data_type;
            auto dsrc_type = desc()->diff_src_desc.data_type;
            auto wei_type = desc()->weights_desc.data_type;
            auto ddst_type = desc()->diff_dst_desc.data_type;
            bool ok = true && desc()->prop_kind == prop_kind::backward_data
                    && utils::one_of(wei_type, f32, bf16, f16)
                    && ddst_type == wei_type
                    && utils::one_of(dsrc_type, wei_type, f32)
                    && utils::one_of(desc()->alg_kind,
                            alg_kind::deconvolution_direct,
                            alg_kind::deconvolution_winograd)
                    && attr()->has_default_values();

            if (ok) {
                CHECK(init_convolution(engine));
                if (weights_md_.format_kind == format_kind::any)
                    CHECK(weights_axes_permutation(&weights_md_,
                            conv_pd_->weights_md(), with_groups()));
                // Note the role swap: deconv diff_src <-> conv dst, and
                // deconv diff_dst <-> conv src.
                if (diff_src_md_.format_kind == format_kind::any)
                    diff_src_md_ = *conv_pd_->dst_md();
                if (diff_dst_md_.format_kind == format_kind::any)
                    diff_dst_md_ = *conv_pd_->src_md();

                init_name();
                init_scratchpad();
                return status::success;
            }

            return status::unimplemented;
        }

        std::shared_ptr<primitive_desc_t> conv_pd_; // nested fwd conv pd

    private:
        std::string name_ = "conv:any+"; // convolution-based deconvolution

        void init_name() { name_.append(conv_pd_->name()); }

        // Only the nested conv's scratchpad is needed here.
        void init_scratchpad() {
            auto scratchpad = scratchpad_registry().registrar();
            scratchpad.book(memory_tracking::names::key_nested,
                    conv_pd_->scratchpad_registry());
        }
    };

    typedef typename prec_traits<data_type::f32>::type data_t;

    ref_deconvolution_bwd_data_t(const pd_t *apd) : primitive_t(apd) {}

    status_t init(engine_t *engine) override {
        return pd()->conv_pd_->create_primitive(conv_p_, engine);
    }

#if DNNL_AARCH64 && DNNL_AARCH64_USE_ACL
    // ACL builds keep per-thread state in resources; forward the request to
    // the nested conv primitive.
    status_t create_resource(
            engine_t *engine, resource_mapper_t &mapper) const override {
        CHECK(conv_p_->create_resource(engine, mapper));
        return status::success;
    }
#endif

    status_t execute(const exec_ctx_t &ctx) const override;

private:
    const pd_t *pd() const { return (const pd_t *)primitive_t::pd().get(); }
    std::shared_ptr<primitive_t> conv_p_; // nested conv primitive
};
|
| 412 |
+
|
| 413 |
+
// Reference backward-weights deconvolution: delegates to a nested
// backward-weights convolution and computes diff_bias itself (the conv's
// axes-swapped view cannot produce it directly).
struct ref_deconvolution_bwd_weights_t : public primitive_t {
    struct pd_t : public cpu_deconvolution_bwd_weights_pd_t {
        pd_t(const deconvolution_desc_t *adesc, const primitive_attr_t *attr,
                const deconvolution_fwd_pd_t *hint_fwd_pd)
            : cpu_deconvolution_bwd_weights_pd_t(adesc, attr, hint_fwd_pd) {}

        // Deep-copies the nested conv pd so each clone owns its own copy.
        pd_t(const pd_t &other)
            : cpu_deconvolution_bwd_weights_pd_t(other)
            , conv_pd_(other.conv_pd_->clone())
            , dst_tag_(other.dst_tag_)
            , name_(other.name_) {}

        ~pd_t() = default;

        DECLARE_COMMON_PD_T(name_.c_str(), ref_deconvolution_bwd_weights_t);

        // Picks a backward-weights conv implementation. For bf16 with bias,
        // additionally requires the conv src layout to be one the bias
        // reduction loops here know how to walk.
        status_t init_convolution(engine_t *engine) {
            using namespace types;
            using namespace format_tag;

            convolution_desc_t cd;
            status_t status = conv_descr_create(desc(), &cd);
            if (status != status::success) return status;
            // Attributes are forwarded to the conv as-is.
            primitive_attr_t conv_attr(*attr());
            if (!conv_attr.is_initialized()) return status::out_of_memory;

            primitive_desc_iterator_t it(
                    engine, (op_desc_t *)&cd, &conv_attr, nullptr);
            if (!it.is_initialized()) return status::out_of_memory;
            while (++it != it.end()) {
                conv_pd_ = *it;
                bool bf16_ref_deconv_supports_bias = IMPLICATION(with_bias()
                                && desc()->src_desc.data_type
                                        == data_type::bf16,
                        memory_desc_matches_one_of_tag(*conv_pd_->src_md(),
                                utils::pick(ndims() - 3, ncw, nchw, ncdhw),
                                utils::pick(ndims() - 3, nwc, nhwc, ndhwc),
                                utils::pick(ndims() - 3, nCw16c, nChw16c,
                                        nCdhw16c)));
                // Also reject weights with extra compensation metadata.
                if (conv_pd_->diff_weights_md()->extra.flags == 0
                        && bf16_ref_deconv_supports_bias) {
                    return status::success;
                }
            }
            return status::unimplemented;
        }

        // Validates data types / algorithm / attributes, then derives any
        // format_kind::any descriptors from the selected conv pd.
        status_t init(engine_t *engine) {
            using namespace format_tag;
            using namespace data_type;
            auto src_type = desc()->src_desc.data_type;
            auto dwei_type = desc()->diff_weights_desc.data_type;
            auto ddst_type = desc()->diff_dst_desc.data_type;
            bool ok = true && desc()->prop_kind == prop_kind::backward_weights
                    && utils::one_of(src_type, f32, bf16, f16)
                    && ddst_type == src_type
                    && utils::one_of(dwei_type, src_type, f32)
                    && utils::one_of(desc()->alg_kind,
                            alg_kind::deconvolution_direct,
                            alg_kind::deconvolution_winograd)
                    && attr()->has_default_values();

            if (ok) {
                CHECK(init_convolution(engine));
                if (diff_weights_md_.format_kind == format_kind::any)
                    CHECK(weights_axes_permutation(&diff_weights_md_,
                            conv_pd_->diff_weights_md(), with_groups()));
                // Role swap: deconv src <-> conv diff_dst, and deconv
                // diff_dst <-> conv src.
                if (src_md_.format_kind == format_kind::any)
                    src_md_ = *conv_pd_->diff_dst_md();
                if (diff_dst_md_.format_kind == format_kind::any)
                    diff_dst_md_ = *conv_pd_->src_md();
                if (diff_bias_md_.format_kind == format_kind::any)
                    CHECK(memory_desc_init_by_tag(diff_bias_md_, x));

                // Cache the recognized diff_dst layout so execute() can pick
                // a specialized bias-reduction loop.
                dst_tag_ = memory_desc_matches_one_of_tag(diff_dst_md_,
                        utils::pick(ndims() - 3, ncw, nchw, ncdhw),
                        utils::pick(ndims() - 3, nwc, nhwc, ndhwc),
                        utils::pick(ndims() - 3, nCw8c, nChw8c, nCdhw8c),
                        utils::pick(ndims() - 3, nCw16c, nChw16c, nCdhw16c));

                init_name();
                init_scratchpad();
                return status::success;
            }

            return status::unimplemented;
        }

        std::shared_ptr<primitive_desc_t> conv_pd_; // nested bwd_w conv pd
        format_tag_t dst_tag_; // recognized diff_dst layout (or undef)

    private:
        std::string name_ = "conv:any+"; // convolution-based deconvolution

        void init_name() { name_.append(conv_pd_->name()); }

        // Only the nested conv's scratchpad is needed here.
        void init_scratchpad() {
            auto scratchpad = scratchpad_registry().registrar();
            scratchpad.book(memory_tracking::names::key_nested,
                    conv_pd_->scratchpad_registry());
        }
    };

    ref_deconvolution_bwd_weights_t(const pd_t *apd) : primitive_t(apd) {}

    status_t init(engine_t *engine) override {
        return pd()->conv_pd_->create_primitive(conv_p_, engine);
    }

    status_t execute(const exec_ctx_t &ctx) const override;

private:
    const pd_t *pd() const { return (const pd_t *)primitive_t::pd().get(); }
    // diff_bias reduction helpers, one per recognized diff_dst layout;
    // defined in the corresponding .cpp.
    void compute_bwd_bias(float *diff_bias, const float *diff_dst) const;

    template <data_type_t dbia_type, data_type_t ddst_type>
    void compute_bwd_bias_ncdhw(
            typename prec_traits<dbia_type>::type *diff_bias,
            const typename prec_traits<ddst_type>::type *diff_dst) const;

    template <data_type_t dbia_type, data_type_t ddst_type>
    void compute_bwd_bias_ndhwc(
            typename prec_traits<dbia_type>::type *diff_bias,
            const typename prec_traits<ddst_type>::type *diff_dst) const;

    template <data_type_t dbia_type, data_type_t ddst_type, dim_t blksize>
    void compute_bwd_bias_nCdhwXc(
            typename prec_traits<dbia_type>::type *diff_bias,
            const typename prec_traits<ddst_type>::type *diff_dst) const;

    template <data_type_t dbia_type, data_type_t ddst_type>
    void compute_bias(const exec_ctx_t &ctx) const;
    std::shared_ptr<primitive_t> conv_p_; // nested conv primitive
};
|
| 547 |
+
|
| 548 |
+
} // namespace cpu
|
| 549 |
+
} // namespace impl
|
| 550 |
+
} // namespace dnnl
|
| 551 |
+
|
| 552 |
+
#endif
|
| 553 |
+
|
| 554 |
+
// vim: et ts=4 sw=4 cindent cino+=l0,\:4,N-s
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/ref_eltwise.hpp
ADDED
|
@@ -0,0 +1,181 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*******************************************************************************
|
| 2 |
+
* Copyright 2016-2023 Intel Corporation
|
| 3 |
+
*
|
| 4 |
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
* you may not use this file except in compliance with the License.
|
| 6 |
+
* You may obtain a copy of the License at
|
| 7 |
+
*
|
| 8 |
+
* http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
*
|
| 10 |
+
* Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
* See the License for the specific language governing permissions and
|
| 14 |
+
* limitations under the License.
|
| 15 |
+
*******************************************************************************/
|
| 16 |
+
|
| 17 |
+
#ifndef CPU_REF_ELTWISE_HPP
|
| 18 |
+
#define CPU_REF_ELTWISE_HPP
|
| 19 |
+
|
| 20 |
+
#include <assert.h>
|
| 21 |
+
|
| 22 |
+
#include "common/c_types_map.hpp"
|
| 23 |
+
#include "common/primitive.hpp"
|
| 24 |
+
#include "common/type_helpers.hpp"
|
| 25 |
+
#include "common/utils.hpp"
|
| 26 |
+
|
| 27 |
+
#include "cpu/platform.hpp"
|
| 28 |
+
#include "cpu/primitive_attr_postops.hpp"
|
| 29 |
+
|
| 30 |
+
#include "cpu/cpu_eltwise_pd.hpp"
|
| 31 |
+
|
| 32 |
+
namespace dnnl {
|
| 33 |
+
namespace impl {
|
| 34 |
+
namespace cpu {
|
| 35 |
+
|
| 36 |
+
template <impl::data_type_t data_type>
|
| 37 |
+
struct ref_eltwise_fwd_t : public primitive_t {
|
| 38 |
+
struct pd_t : public cpu_eltwise_fwd_pd_t {
|
| 39 |
+
using cpu_eltwise_fwd_pd_t::cpu_eltwise_fwd_pd_t;
|
| 40 |
+
|
| 41 |
+
DECLARE_COMMON_PD_T("ref:any", ref_eltwise_fwd_t);
|
| 42 |
+
|
| 43 |
+
status_t init(engine_t *engine) {
|
| 44 |
+
using namespace utils;
|
| 45 |
+
using sm = primitive_attr_t::skip_mask_t;
|
| 46 |
+
|
| 47 |
+
const memory_desc_wrapper src_d(src_md());
|
| 48 |
+
const memory_desc_wrapper dst_d(dst_md());
|
| 49 |
+
|
| 50 |
+
bool ok = is_fwd()
|
| 51 |
+
&& utils::everyone_is(
|
| 52 |
+
data_type, src_md()->data_type, dst_md()->data_type)
|
| 53 |
+
&& platform::has_data_type_support(data_type)
|
| 54 |
+
&& attr()->has_default_values(sm::post_ops)
|
| 55 |
+
&& ref_post_ops_t::primitive_kind_ok(attr()->post_ops_)
|
| 56 |
+
&& set_default_formats_common() && src_d == dst_d
|
| 57 |
+
&& attr_.set_default_formats(dst_md(0)) == status::success;
|
| 58 |
+
if (!ok) return status::unimplemented;
|
| 59 |
+
|
| 60 |
+
use_dense_ = src_d.is_dense(true) && dst_d.is_dense(true)
|
| 61 |
+
&& IMPLICATION(!src_d.is_dense() || !dst_d.is_dense(),
|
| 62 |
+
is_zero_preserved());
|
| 63 |
+
|
| 64 |
+
use_nCspBc_padded_ = !use_dense_
|
| 65 |
+
&& src_d.blocking_desc().inner_nblks == 1
|
| 66 |
+
&& one_of(src_d.blocking_desc().inner_blks[0], 8, 16)
|
| 67 |
+
&& src_d.blocking_desc().inner_idxs[0] == 1
|
| 68 |
+
&& src_d.only_padded_dim(1) && src_d.is_dense(true);
|
| 69 |
+
|
| 70 |
+
const auto &po = attr()->post_ops_;
|
| 71 |
+
if (has_zero_dim_memory() || !po.has_default_values())
|
| 72 |
+
use_dense_ = use_nCspBc_padded_ = false;
|
| 73 |
+
|
| 74 |
+
return status::success;
|
| 75 |
+
}
|
| 76 |
+
|
| 77 |
+
bool use_dense_, use_nCspBc_padded_;
|
| 78 |
+
};
|
| 79 |
+
|
| 80 |
+
ref_eltwise_fwd_t(const pd_t *apd) : primitive_t(apd) {}
|
| 81 |
+
|
| 82 |
+
status_t init(engine_t *engine) override {
|
| 83 |
+
ref_post_ops
|
| 84 |
+
= utils::make_unique<ref_post_ops_t>(pd()->attr()->post_ops_);
|
| 85 |
+
if (!ref_post_ops) return status::out_of_memory;
|
| 86 |
+
CHECK(ref_post_ops->init(pd()->dst_md()));
|
| 87 |
+
return status::success;
|
| 88 |
+
}
|
| 89 |
+
|
| 90 |
+
using data_t = typename prec_traits<data_type>::type;
|
| 91 |
+
|
| 92 |
+
status_t execute(const exec_ctx_t &ctx) const override {
|
| 93 |
+
if (pd()->use_dense_)
|
| 94 |
+
return execute_forward_dense(ctx);
|
| 95 |
+
else if (pd()->use_nCspBc_padded_)
|
| 96 |
+
return execute_forward_nCspBc_padded(ctx);
|
| 97 |
+
else
|
| 98 |
+
return execute_forward_generic(ctx);
|
| 99 |
+
}
|
| 100 |
+
|
| 101 |
+
private:
|
| 102 |
+
const pd_t *pd() const { return (const pd_t *)primitive_t::pd().get(); }
|
| 103 |
+
status_t execute_forward_nCspBc_padded(const exec_ctx_t &ctx) const;
|
| 104 |
+
status_t execute_forward_dense(const exec_ctx_t &ctx) const;
|
| 105 |
+
status_t execute_forward_generic(const exec_ctx_t &ctx) const;
|
| 106 |
+
std::unique_ptr<ref_post_ops_t> ref_post_ops;
|
| 107 |
+
};
|
| 108 |
+
|
| 109 |
+
template <impl::data_type_t data_type>
|
| 110 |
+
struct ref_eltwise_bwd_t : public primitive_t {
|
| 111 |
+
struct pd_t : public cpu_eltwise_bwd_pd_t {
|
| 112 |
+
using cpu_eltwise_bwd_pd_t::cpu_eltwise_bwd_pd_t;
|
| 113 |
+
|
| 114 |
+
DECLARE_COMMON_PD_T("ref:any", ref_eltwise_bwd_t);
|
| 115 |
+
|
| 116 |
+
status_t init(engine_t *engine) {
|
| 117 |
+
using namespace utils;
|
| 118 |
+
using namespace data_type;
|
| 119 |
+
|
| 120 |
+
const memory_desc_wrapper diff_src_d(diff_src_md());
|
| 121 |
+
const memory_desc_wrapper diff_dst_d(diff_dst_md());
|
| 122 |
+
|
| 123 |
+
bool ok = !is_fwd()
|
| 124 |
+
&& utils::everyone_is(data_type, data_md()->data_type,
|
| 125 |
+
diff_src_md()->data_type, diff_dst_md()->data_type)
|
| 126 |
+
&& platform::has_data_type_support(data_type)
|
| 127 |
+
&& attr()->has_default_values()
|
| 128 |
+
&& set_default_formats_common() && diff_dst_d == diff_src_d;
|
| 129 |
+
if (!ok) return status::unimplemented;
|
| 130 |
+
|
| 131 |
+
use_dense_ = diff_dst_d.is_dense()
|
| 132 |
+
|| (diff_dst_d.is_dense(true) && is_zero_preserved());
|
| 133 |
+
|
| 134 |
+
if (has_zero_dim_memory()) use_dense_ = false;
|
| 135 |
+
if (diff_dst_d != memory_desc_wrapper(data_md()))
|
| 136 |
+
use_dense_ = false;
|
| 137 |
+
|
| 138 |
+
if (utils::one_of(data_type, bf16, f16)) init_scratchpad();
|
| 139 |
+
|
| 140 |
+
return status::success;
|
| 141 |
+
}
|
| 142 |
+
|
| 143 |
+
bool use_dense_;
|
| 144 |
+
|
| 145 |
+
private:
|
| 146 |
+
void init_scratchpad() {
|
| 147 |
+
const memory_desc_wrapper data_d(data_md());
|
| 148 |
+
const memory_desc_wrapper diff_dst_d(diff_dst_md());
|
| 149 |
+
using namespace memory_tracking::names;
|
| 150 |
+
auto scratchpad = scratchpad_registry().registrar();
|
| 151 |
+
const auto diff_dst_size = diff_dst_d.nelems(true);
|
| 152 |
+
scratchpad.template book<float>(
|
| 153 |
+
key_eltwise_src, data_d.nelems(true));
|
| 154 |
+
scratchpad.template book<float>(
|
| 155 |
+
key_eltwise_diff_dst, diff_dst_size);
|
| 156 |
+
}
|
| 157 |
+
};
|
| 158 |
+
|
| 159 |
+
ref_eltwise_bwd_t(const pd_t *apd) : primitive_t(apd) {}
|
| 160 |
+
typedef typename prec_traits<data_type>::type data_t;
|
| 161 |
+
|
| 162 |
+
status_t execute(const exec_ctx_t &ctx) const override {
|
| 163 |
+
if (pd()->use_dense_)
|
| 164 |
+
return execute_backward_dense(ctx);
|
| 165 |
+
else
|
| 166 |
+
return execute_backward_generic(ctx);
|
| 167 |
+
}
|
| 168 |
+
|
| 169 |
+
private:
|
| 170 |
+
status_t execute_backward_dense(const exec_ctx_t &ctx) const;
|
| 171 |
+
status_t execute_backward_generic(const exec_ctx_t &ctx) const;
|
| 172 |
+
const pd_t *pd() const { return (const pd_t *)primitive_t::pd().get(); }
|
| 173 |
+
};
|
| 174 |
+
|
| 175 |
+
} // namespace cpu
|
| 176 |
+
} // namespace impl
|
| 177 |
+
} // namespace dnnl
|
| 178 |
+
|
| 179 |
+
#endif
|
| 180 |
+
|
| 181 |
+
// vim: et ts=4 sw=4 cindent cino+=l0,\:4,N-s
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/ref_group_normalization.hpp
ADDED
|
@@ -0,0 +1,143 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*******************************************************************************
|
| 2 |
+
* Copyright 2023 Intel Corporation
|
| 3 |
+
*
|
| 4 |
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
* you may not use this file except in compliance with the License.
|
| 6 |
+
* You may obtain a copy of the License at
|
| 7 |
+
*
|
| 8 |
+
* http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
*
|
| 10 |
+
* Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
* See the License for the specific language governing permissions and
|
| 14 |
+
* limitations under the License.
|
| 15 |
+
*******************************************************************************/
|
| 16 |
+
|
| 17 |
+
#ifndef CPU_REF_GROUP_NORMALIZATION_HPP
|
| 18 |
+
#define CPU_REF_GROUP_NORMALIZATION_HPP
|
| 19 |
+
|
| 20 |
+
#include "common/c_types_map.hpp"
|
| 21 |
+
#include "common/primitive.hpp"
|
| 22 |
+
|
| 23 |
+
#include "cpu/primitive_attr_postops.hpp"
|
| 24 |
+
|
| 25 |
+
#include "cpu/cpu_group_normalization_pd.hpp"
|
| 26 |
+
|
| 27 |
+
namespace dnnl {
|
| 28 |
+
namespace impl {
|
| 29 |
+
namespace cpu {
|
| 30 |
+
|
| 31 |
+
struct ref_group_normalization_fwd_t : public primitive_t {
|
| 32 |
+
struct pd_t : public cpu_group_normalization_fwd_pd_t {
|
| 33 |
+
using cpu_group_normalization_fwd_pd_t::
|
| 34 |
+
cpu_group_normalization_fwd_pd_t;
|
| 35 |
+
|
| 36 |
+
DECLARE_COMMON_PD_T("ref:any", ref_group_normalization_fwd_t);
|
| 37 |
+
|
| 38 |
+
status_t init(engine_t *engine) {
|
| 39 |
+
using namespace data_type;
|
| 40 |
+
using skip_mask_t = primitive_attr_t::skip_mask_t;
|
| 41 |
+
|
| 42 |
+
VDISPATCH_GNORM(is_fwd(), VERBOSE_BAD_PROPKIND);
|
| 43 |
+
VDISPATCH_GNORM(
|
| 44 |
+
utils::one_of(src_md()->data_type, f32, bf16, f16, s8, u8)
|
| 45 |
+
&& platform::has_data_type_support(
|
| 46 |
+
src_md()->data_type),
|
| 47 |
+
VERBOSE_UNSUPPORTED_DT);
|
| 48 |
+
VDISPATCH_GNORM(
|
| 49 |
+
utils::one_of(dst_md()->data_type, f32, bf16, f16, s8, u8)
|
| 50 |
+
&& platform::has_data_type_support(
|
| 51 |
+
dst_md()->data_type),
|
| 52 |
+
VERBOSE_UNSUPPORTED_DT);
|
| 53 |
+
VDISPATCH_GNORM(
|
| 54 |
+
attr()->has_default_values(skip_mask_t::scales_runtime
|
| 55 |
+
| skip_mask_t::post_ops),
|
| 56 |
+
VERBOSE_UNSUPPORTED_ATTR);
|
| 57 |
+
VDISPATCH_GNORM(attr_scales_ok(), VERBOSE_UNSUPPORTED_SCALES_CFG);
|
| 58 |
+
VDISPATCH_GNORM(post_ops_ok(), VERBOSE_UNSUPPORTED_POSTOP);
|
| 59 |
+
|
| 60 |
+
VDISPATCH_GNORM(
|
| 61 |
+
set_default_formats_common(), VERBOSE_UNSUPPORTED_TAG);
|
| 62 |
+
|
| 63 |
+
bool ok = attr_.set_default_formats(dst_md(0)) == status::success;
|
| 64 |
+
if (!ok) return status::unimplemented;
|
| 65 |
+
|
| 66 |
+
return status::success;
|
| 67 |
+
}
|
| 68 |
+
|
| 69 |
+
private:
|
| 70 |
+
bool post_ops_ok() const {
|
| 71 |
+
return ref_post_ops_t::primitive_kind_ok(attr()->post_ops_);
|
| 72 |
+
}
|
| 73 |
+
};
|
| 74 |
+
|
| 75 |
+
using primitive_t::primitive_t;
|
| 76 |
+
|
| 77 |
+
status_t init(engine_t *engine) override {
|
| 78 |
+
ref_post_ops
|
| 79 |
+
= utils::make_unique<ref_post_ops_t>(pd()->attr()->post_ops_);
|
| 80 |
+
if (!ref_post_ops) return status::out_of_memory;
|
| 81 |
+
CHECK(ref_post_ops->init(pd()->dst_md()));
|
| 82 |
+
return status::success;
|
| 83 |
+
}
|
| 84 |
+
|
| 85 |
+
status_t execute(const exec_ctx_t &ctx) const override;
|
| 86 |
+
|
| 87 |
+
private:
|
| 88 |
+
std::unique_ptr<ref_post_ops_t> ref_post_ops;
|
| 89 |
+
|
| 90 |
+
const pd_t *pd() const { return (const pd_t *)primitive_t::pd().get(); }
|
| 91 |
+
};
|
| 92 |
+
|
| 93 |
+
struct ref_group_normalization_bwd_t : public primitive_t {
|
| 94 |
+
struct pd_t : public cpu_group_normalization_bwd_pd_t {
|
| 95 |
+
using cpu_group_normalization_bwd_pd_t::
|
| 96 |
+
cpu_group_normalization_bwd_pd_t;
|
| 97 |
+
|
| 98 |
+
DECLARE_COMMON_PD_T("ref:any", ref_group_normalization_bwd_t);
|
| 99 |
+
|
| 100 |
+
status_t init(engine_t *engine) {
|
| 101 |
+
using namespace data_type;
|
| 102 |
+
|
| 103 |
+
VDISPATCH_GNORM(!is_fwd(), VERBOSE_BAD_PROPKIND);
|
| 104 |
+
|
| 105 |
+
VDISPATCH_GNORM(utils::one_of(src_md()->data_type, f32, bf16, f16)
|
| 106 |
+
&& platform::has_data_type_support(
|
| 107 |
+
src_md()->data_type),
|
| 108 |
+
VERBOSE_UNSUPPORTED_DT);
|
| 109 |
+
VDISPATCH_GNORM(
|
| 110 |
+
utils::one_of(diff_dst_md()->data_type, f32, bf16, f16)
|
| 111 |
+
&& platform::has_data_type_support(
|
| 112 |
+
diff_dst_md()->data_type),
|
| 113 |
+
VERBOSE_UNSUPPORTED_DT);
|
| 114 |
+
VDISPATCH_GNORM(
|
| 115 |
+
utils::one_of(diff_src_md()->data_type, f32, bf16, f16)
|
| 116 |
+
&& platform::has_data_type_support(
|
| 117 |
+
diff_src_md()->data_type),
|
| 118 |
+
VERBOSE_UNSUPPORTED_DT);
|
| 119 |
+
VDISPATCH_GNORM(
|
| 120 |
+
attr()->has_default_values(), VERBOSE_UNSUPPORTED_ATTR);
|
| 121 |
+
|
| 122 |
+
VDISPATCH_GNORM(
|
| 123 |
+
set_default_formats_common(), VERBOSE_UNSUPPORTED_TAG);
|
| 124 |
+
|
| 125 |
+
return status::success;
|
| 126 |
+
}
|
| 127 |
+
};
|
| 128 |
+
|
| 129 |
+
ref_group_normalization_bwd_t(const pd_t *apd) : primitive_t(apd) {}
|
| 130 |
+
|
| 131 |
+
status_t execute(const exec_ctx_t &ctx) const override;
|
| 132 |
+
|
| 133 |
+
private:
|
| 134 |
+
const pd_t *pd() const { return (const pd_t *)primitive_t::pd().get(); }
|
| 135 |
+
};
|
| 136 |
+
|
| 137 |
+
} // namespace cpu
|
| 138 |
+
} // namespace impl
|
| 139 |
+
} // namespace dnnl
|
| 140 |
+
|
| 141 |
+
#endif
|
| 142 |
+
|
| 143 |
+
// vim: et ts=4 sw=4 cindent cino^=l0,\:0,N-s
|
videochat2/lib/python3.10/site-packages/tensorflow/include/external/onednn/src/cpu/ref_inner_product.hpp
ADDED
|
@@ -0,0 +1,175 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/*******************************************************************************
|
| 2 |
+
* Copyright 2016-2023 Intel Corporation
|
| 3 |
+
*
|
| 4 |
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
* you may not use this file except in compliance with the License.
|
| 6 |
+
* You may obtain a copy of the License at
|
| 7 |
+
*
|
| 8 |
+
* http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
*
|
| 10 |
+
* Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
* See the License for the specific language governing permissions and
|
| 14 |
+
* limitations under the License.
|
| 15 |
+
*******************************************************************************/
|
| 16 |
+
|
| 17 |
+
#ifndef CPU_REF_INNER_PRODUCT_HPP
|
| 18 |
+
#define CPU_REF_INNER_PRODUCT_HPP
|
| 19 |
+
|
| 20 |
+
#include <assert.h>
|
| 21 |
+
|
| 22 |
+
#include "common/c_types_map.hpp"
|
| 23 |
+
#include "common/primitive.hpp"
|
| 24 |
+
#include "common/type_helpers.hpp"
|
| 25 |
+
#include "common/utils.hpp"
|
| 26 |
+
|
| 27 |
+
#include "cpu/primitive_attr_postops.hpp"
|
| 28 |
+
|
| 29 |
+
#include "cpu/cpu_inner_product_pd.hpp"
|
| 30 |
+
|
| 31 |
+
namespace dnnl {
|
| 32 |
+
namespace impl {
|
| 33 |
+
namespace cpu {
|
| 34 |
+
|
| 35 |
+
struct ref_inner_product_fwd_t : public primitive_t {
|
| 36 |
+
struct pd_t : public cpu_inner_product_fwd_pd_t {
|
| 37 |
+
using cpu_inner_product_fwd_pd_t::cpu_inner_product_fwd_pd_t;
|
| 38 |
+
|
| 39 |
+
DECLARE_COMMON_PD_T("ref:any", ref_inner_product_fwd_t);
|
| 40 |
+
|
| 41 |
+
status_t init(engine_t *engine) {
|
| 42 |
+
using namespace data_type;
|
| 43 |
+
using smask_t = primitive_attr_t::skip_mask_t;
|
| 44 |
+
const auto src_type = src_md(0)->data_type;
|
| 45 |
+
const auto wei_type = weights_md(0)->data_type;
|
| 46 |
+
const auto bia_type = weights_md(1)->data_type;
|
| 47 |
+
const auto dst_type = dst_md(0)->data_type;
|
| 48 |
+
|
| 49 |
+
const bool allow_all_tags = true; // ref should support all tags
|
| 50 |
+
|
| 51 |
+
bool ok = is_fwd() && platform::has_data_type_support(src_type)
|
| 52 |
+
&& platform::has_data_type_support(wei_type)
|
| 53 |
+
&& platform::has_data_type_support(bia_type)
|
| 54 |
+
&& platform::has_data_type_support(dst_type)
|
| 55 |
+
&& utils::one_of(src_type, f32, bf16, f16)
|
| 56 |
+
&& wei_type == src_type
|
| 57 |
+
&& utils::one_of(dst_type, f32, src_type)
|
| 58 |
+
&& IMPLICATION(
|
| 59 |
+
with_bias(), utils::one_of(bia_type, f32, src_type))
|
| 60 |
+
&& set_default_params(allow_all_tags) == status::success
|
| 61 |
+
&& attr()->has_default_values(
|
| 62 |
+
smask_t::post_ops | smask_t::sum_dt)
|
| 63 |
+
&& attr()->post_ops_.check_sum_consistency(dst_type,
|
| 64 |
+
/* is_int8 */ false)
|
| 65 |
+
&& ref_post_ops_t::primitive_kind_ok(attr()->post_ops_)
|
| 66 |
+
&& attr_.set_default_formats(dst_md(0)) == status::success;
|
| 67 |
+
return ok ? status::success : status::unimplemented;
|
| 68 |
+
}
|
| 69 |
+
};
|
| 70 |
+
|
| 71 |
+
ref_inner_product_fwd_t(const pd_t *apd) : primitive_t(apd) {}
|
| 72 |
+
|
| 73 |
+
status_t init(engine_t *engine) override {
|
| 74 |
+
ref_post_ops
|
| 75 |
+
= utils::make_unique<ref_post_ops_t>(pd()->attr()->post_ops_);
|
| 76 |
+
if (!ref_post_ops) return status::out_of_memory;
|
| 77 |
+
CHECK(ref_post_ops->init(pd()->dst_md()));
|
| 78 |
+
return status::success;
|
| 79 |
+
}
|
| 80 |
+
|
| 81 |
+
status_t execute(const exec_ctx_t &ctx) const override {
|
| 82 |
+
return execute_forward(ctx);
|
| 83 |
+
}
|
| 84 |
+
|
| 85 |
+
private:
|
| 86 |
+
status_t execute_forward(const exec_ctx_t &ctx) const;
|
| 87 |
+
const pd_t *pd() const { return (const pd_t *)primitive_t::pd().get(); }
|
| 88 |
+
std::unique_ptr<ref_post_ops_t> ref_post_ops;
|
| 89 |
+
};
|
| 90 |
+
|
| 91 |
+
struct ref_inner_product_bwd_data_t : public primitive_t {
|
| 92 |
+
struct pd_t : public cpu_inner_product_bwd_data_pd_t {
|
| 93 |
+
using cpu_inner_product_bwd_data_pd_t::cpu_inner_product_bwd_data_pd_t;
|
| 94 |
+
|
| 95 |
+
DECLARE_COMMON_PD_T("ref:any", ref_inner_product_bwd_data_t);
|
| 96 |
+
|
| 97 |
+
status_t init(engine_t *engine) {
|
| 98 |
+
using namespace data_type;
|
| 99 |
+
const auto diff_src_type = diff_src_md(0)->data_type;
|
| 100 |
+
const auto wei_type = weights_md(0)->data_type;
|
| 101 |
+
const auto diff_dst_type = diff_dst_md(0)->data_type;
|
| 102 |
+
|
| 103 |
+
const bool allow_all_tags = true; // ref should support all tags
|
| 104 |
+
|
| 105 |
+
bool ok = desc()->prop_kind == prop_kind::backward_data
|
| 106 |
+
&& platform::has_data_type_support(diff_src_type)
|
| 107 |
+
&& platform::has_data_type_support(wei_type)
|
| 108 |
+
&& platform::has_data_type_support(diff_dst_type)
|
| 109 |
+
&& utils::one_of(diff_src_type, f32, wei_type)
|
| 110 |
+
&& utils::one_of(wei_type, f32, bf16, f16)
|
| 111 |
+
&& diff_dst_type == wei_type && attr()->has_default_values()
|
| 112 |
+
&& set_default_params(allow_all_tags) == status::success;
|
| 113 |
+
return ok ? status::success : status::unimplemented;
|
| 114 |
+
}
|
| 115 |
+
};
|
| 116 |
+
|
| 117 |
+
ref_inner_product_bwd_data_t(const pd_t *apd) : primitive_t(apd) {}
|
| 118 |
+
|
| 119 |
+
status_t execute(const exec_ctx_t &ctx) const override {
|
| 120 |
+
return execute_backward_data(ctx);
|
| 121 |
+
}
|
| 122 |
+
|
| 123 |
+
private:
|
| 124 |
+
status_t execute_backward_data(const exec_ctx_t &ctx) const;
|
| 125 |
+
const pd_t *pd() const { return (const pd_t *)primitive_t::pd().get(); }
|
| 126 |
+
};
|
| 127 |
+
|
| 128 |
+
struct ref_inner_product_bwd_weights_t : public primitive_t {
|
| 129 |
+
struct pd_t : public cpu_inner_product_bwd_weights_pd_t {
|
| 130 |
+
using cpu_inner_product_bwd_weights_pd_t::
|
| 131 |
+
cpu_inner_product_bwd_weights_pd_t;
|
| 132 |
+
|
| 133 |
+
DECLARE_COMMON_PD_T("ref:any", ref_inner_product_bwd_weights_t);
|
| 134 |
+
|
| 135 |
+
status_t init(engine_t *engine) {
|
| 136 |
+
using namespace data_type;
|
| 137 |
+
const auto src_type = src_md(0)->data_type;
|
| 138 |
+
const auto diff_wei_type = diff_weights_md(0)->data_type;
|
| 139 |
+
const auto diff_bia_type = diff_weights_md(1)->data_type;
|
| 140 |
+
const auto diff_dst_type = diff_dst_md(0)->data_type;
|
| 141 |
+
|
| 142 |
+
const bool allow_all_tags = true; // ref should support all tags
|
| 143 |
+
|
| 144 |
+
bool ok = desc()->prop_kind == prop_kind::backward_weights
|
| 145 |
+
&& platform::has_data_type_support(src_type)
|
| 146 |
+
&& platform::has_data_type_support(diff_wei_type)
|
| 147 |
+
&& platform::has_data_type_support(diff_bia_type)
|
| 148 |
+
&& utils::one_of(src_type, f32, bf16, f16)
|
| 149 |
+
&& utils::one_of(diff_wei_type, f32, src_type)
|
| 150 |
+
&& IMPLICATION(with_bias(),
|
| 151 |
+
utils::one_of(diff_bia_type, f32, src_type))
|
| 152 |
+
&& diff_dst_type == src_type && attr()->has_default_values()
|
| 153 |
+
&& set_default_params(allow_all_tags) == status::success;
|
| 154 |
+
return ok ? status::success : status::unimplemented;
|
| 155 |
+
}
|
| 156 |
+
};
|
| 157 |
+
|
| 158 |
+
ref_inner_product_bwd_weights_t(const pd_t *apd) : primitive_t(apd) {}
|
| 159 |
+
|
| 160 |
+
status_t execute(const exec_ctx_t &ctx) const override {
|
| 161 |
+
return execute_backward_weights(ctx);
|
| 162 |
+
}
|
| 163 |
+
|
| 164 |
+
private:
|
| 165 |
+
status_t execute_backward_weights(const exec_ctx_t &ctx) const;
|
| 166 |
+
const pd_t *pd() const { return (const pd_t *)primitive_t::pd().get(); }
|
| 167 |
+
};
|
| 168 |
+
|
| 169 |
+
} // namespace cpu
|
| 170 |
+
} // namespace impl
|
| 171 |
+
} // namespace dnnl
|
| 172 |
+
|
| 173 |
+
#endif
|
| 174 |
+
|
| 175 |
+
// vim: et ts=4 sw=4 cindent cino+=l0,\:4,N-s
|