pytorch-main/caffe2/quantization/server/activation_distribution_observer.h
#pragma once
#include "caffe2/core/observer.h"
#include "caffe2/core/operator.h"
#include "caffe2/quantization/server/dnnlowp.h"
#include "caffe2/quantization/server/dynamic_histogram.h"
#include <algorithm>
#include <limits>
#include <memory>
#include <set>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <vector>
namespace caffe2 {
class OutputMinMaxObserver final : public ObserverBase<OperatorBase> {
public:
explicit OutputMinMaxObserver(OperatorBase* op);
~OutputMinMaxObserver() override;
struct TensorInfo {
explicit TensorInfo(const std::string& name)
: min(std::numeric_limits<float>::max()),
max(std::numeric_limits<float>::lowest()),
total_min(std::numeric_limits<float>::max()),
total_max(std::numeric_limits<float>::lowest()),
name(name) {}
void Update(float cur_min, float cur_max) {
min = std::min(min, cur_min);
max = std::max(max, cur_max);
total_min = std::min(total_min, cur_min);
total_max = std::max(total_max, cur_max);
}
float min, max;
float total_min, total_max;
std::string name;
};
struct OperatorInfo {
std::vector<TensorInfo> tensor_infos;
std::string type;
};
// OutputMinMaxObserver is assumed to be used together with
// OutputMinMaxNetObserver. The information is shared via shared_ptr so it
// remains valid even when OutputMinMaxObserver is destroyed before
// OutputMinMaxNetObserver.
std::shared_ptr<OperatorInfo> GetInfo() {
return info_;
}
private:
void Stop() override;
std::shared_ptr<OperatorInfo> info_;
bool warning_printed_ = false;
}; // class OutputMinMaxObserver
class OutputMinMaxNetObserver final : public NetObserver {
public:
/// @param dump_freq Print out only once in the destructor if -1.
/// Otherwise, print out every dump_freq invocations.
explicit OutputMinMaxNetObserver(
NetBase* subject,
const std::string& out_file_name,
int dump_freq = -1,
string delimiter = " ");
~OutputMinMaxNetObserver() override;
private:
void Stop() override;
void DumpAndReset_(
const std::string& out_file_name,
bool print_total_min_max = false);
int dump_freq_, cnt_;
const std::string out_file_name_;
std::string delimiter_;
std::vector<std::shared_ptr<OutputMinMaxObserver::OperatorInfo>>
min_max_infos_;
};
/**
* Given min/max, collect histogram
*/
class HistogramObserver final : public ObserverBase<OperatorBase> {
public:
struct Info {
std::vector<dnnlowp::DynamicHistogram> histograms;
std::vector<dnnlowp::DynamicHistogram> total_histograms;
OutputMinMaxObserver::OperatorInfo min_max_info;
};
explicit HistogramObserver(OperatorBase* op, std::shared_ptr<Info> info);
private:
void Stop() override;
std::shared_ptr<Info> info_;
bool warning_printed_ = false;
}; // class HistogramObserver
/**
 * Given min/max, collect a histogram of the max value of each column of the
 * tensor
 */
class OutputColumnMaxHistogramObserver final
: public ObserverBase<OperatorBase> {
public:
explicit OutputColumnMaxHistogramObserver(
OperatorBase* op,
const std::string& col_max_blob_name,
int nbins,
std::shared_ptr<HistogramObserver::Info> info);
private:
void Stop() override;
std::string col_max_blob_name_;
int nbins_;
std::shared_ptr<HistogramObserver::Info> info_;
bool warning_printed_ = false;
int col_max_blob_idx_ = -1;
int num_columns_ = -1;
}; // class OutputColumnMaxHistogramObserver
class HistogramNetObserver final : public NetObserver {
public:
/**
 * @param mul_nets true if we expect multiple nets with the same name, so
 *                 we include extra information in the file name to
 *                 distinguish them
 * @param dump_freq if not -1, we dump the histogram every dump_freq
 *                  invocations of the net
 */
explicit HistogramNetObserver(
NetBase* subject,
const std::string& out_file_name,
int nbins,
int dump_freq = -1,
bool mul_nets = false,
string op_filter = "",
string delimiter = " ");
~HistogramNetObserver() override;
void DumpHistogramFile() {
DumpAndReset_(out_file_name_, false);
}
private:
void Stop() override;
void DumpAndReset_(
const std::string& out_file_name,
bool print_total_min_max = false);
int dump_freq_, cnt_;
/** If multiple nets exist and are attached to observers, the histogram
 * files for the nets will have the NetBase addresses appended.
 */
bool mul_nets_;
string net_name_;
string op_filter_;
string delimiter_;
const std::string out_file_name_;
std::vector<std::shared_ptr<HistogramObserver::Info>> hist_infos_;
};
class OutputColumnMaxHistogramNetObserver final : public NetObserver {
public:
explicit OutputColumnMaxHistogramNetObserver(
NetBase* subject,
const std::string& out_file_name,
const std::vector<std::string>& observe_column_max_for_blobs,
int nbins,
int dump_freq = -1,
bool mul_nets = false,
string delimiter = " ");
~OutputColumnMaxHistogramNetObserver() override;
void DumpOutputColumnMaxHistogramFile() {
DumpAndReset_(out_file_name_, false);
}
private:
void Stop() override;
void DumpAndReset_(
const std::string& out_file_name,
bool print_total_min_max = false);
int dump_freq_, cnt_;
bool mul_nets_;
const std::string out_file_name_;
std::string delimiter_;
std::unordered_set<std::string> col_max_blob_names_;
// {op_idx: {output_index: col_hists}}
std::unordered_map<
int,
std::unordered_map<int, std::shared_ptr<HistogramObserver::Info>>>
hist_infos_;
};
/**
* Set quantization parameters of operators based on min/max
* collected from OutputMinMaxObserver
*/
class RegisterQuantizationParamsNetObserver final : public NetObserver {
public:
explicit RegisterQuantizationParamsNetObserver(
NetBase* subject,
const std::string& min_max_file_name,
bool is_weight = false,
const std::string& qparams_output_file_name = "");
};
/**
* Set quantization parameters of operators based on min/max
* collected from OutputMinMaxObserver
*/
class RegisterQuantizationParamsWithHistogramNetObserver final
: public NetObserver {
public:
explicit RegisterQuantizationParamsWithHistogramNetObserver(
NetBase* subject,
const std::string& histogram_file_name,
bool is_weight = false,
const std::string& qparams_output_file_name = "");
};
#ifdef _MSC_VER
struct tm* localtime_r(time_t* _clock, struct tm* _result) {
struct tm* candidate_result = localtime(_clock);
if (candidate_result) {
*(_result) = *candidate_result;
}
return candidate_result;
}
#endif
} // namespace caffe2
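
A minimal sketch of how these observers are typically attached, assuming a caffe2 NetBase* obtained elsewhere; AttachObserver is the caffe2 Observable API, but the helper and file name below are illustrative:

#include <memory>
#include "caffe2/quantization/server/activation_distribution_observer.h"

// Attach a net observer that records per-output min/max and dumps them to
// a file once, in its destructor (dump_freq == -1).
void AttachMinMaxObserver(caffe2::NetBase* net) {
  net->AttachObserver(std::make_unique<caffe2::OutputMinMaxNetObserver>(
      net, /*out_file_name=*/"activation_min_max.txt", /*dump_freq=*/-1));
}
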
pytorch-main/caffe2/quantization/server/batch_matmul_dnnlowp_op.h
/**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "caffe2/operators/batch_matmul_op.h"
#include "caffe2/quantization/server/dnnlowp_op.h"
#include "fbgemm/Fbgemm.h"
namespace caffe2 {
template <typename T>
class BatchMatMulDNNLowPOp final
: public DNNLowPOp<T, BatchMatMulOp<CPUContext>> {
public:
BatchMatMulDNNLowPOp(const OperatorDef& operator_def, Workspace* ws);
bool RunOnDevice() override;
USE_OPERATOR_FUNCTIONS(CPUContext);
USE_DNNLOWP_OPERATOR_BASE_FUNCTIONS(T, BatchMatMulOp<CPUContext>);
private:
bool trans_a_;
bool trans_b_;
bool broadcast_{false};
bool is_B_constant_{false};
std::vector<std::int8_t> B_quantized_;
std::vector<std::unique_ptr<fbgemm::PackBMatrix<std::int8_t>>> Bq_packed_;
std::vector<std::uint8_t> A_pack_buf_;
std::vector<std::int32_t> row_offsets_, column_offsets_;
std::vector<dnnlowp::TensorQuantizationParams> B_qparams_;
std::vector<dnnlowp::RequantizationParams> requantization_params_;
std::vector<std::int32_t> Y_int32_;
bool first_invocation_{true};
}; // BatchMatMulDNNLowPOp
} // namespace caffe2
pytorch-main/caffe2/quantization/server/batch_permutation_dnnlowp_op.h
#pragma once
#include "caffe2/operators/copy_op.h"
#include "caffe2/quantization/server/dnnlowp_op.h"
namespace caffe2 {
// FIXME
using BatchPermutationFP32Op = CopyOp<CPUContext, CPUContext, CPUContext>;
template <typename T>
class BatchPermutationDNNLowPOp final
: public DNNLowPOp<T, BatchPermutationFP32Op> {
public:
USE_OPERATOR_FUNCTIONS(CPUContext);
USE_DNNLOWP_OPERATOR_BASE_FUNCTIONS(T, BatchPermutationFP32Op);
BatchPermutationDNNLowPOp(const OperatorDef& operator_def, Workspace* ws)
: BaseType(operator_def, ws) {}
bool RunOnDevice() override;
private:
INPUT_TAGS(INPUT, INDICES);
OUTPUT_TAGS(OUTPUT);
};
} // namespace caffe2
pytorch-main/caffe2/quantization/server/caffe2_dnnlowp_utils.h
#pragma once
#include "caffe2/core/operator.h"
#include "caffe2/quantization/server/dnnlowp.h"
#include "caffe2/utils/eigen_utils.h"
namespace dnnlowp {
/**
 * Let consumers of op know that qparams is the quantization parameter used
 * for the output_index'th output of op.
 */
void PropagateOutputTensorQuantizationParams(
caffe2::OperatorBase* op,
int output_index,
const TensorQuantizationParams& qparams);
/**
 * If the input_index'th input is already quantized, return the quantization
 * parameters used for the input tensor (they should've been set by
 * PropagateOutputTensorQuantizationParams when the producer was invoked).
 * If the input tensor is not quantized, return the quantization parameters
 * chosen by qfactory based on the distribution of the input tensor.
 */
TensorQuantizationParams GetInputTensorQuantizationParamsOf(
caffe2::OperatorBase* op,
int input_index,
const QuantizationFactory* qfactory,
bool is_weight = false);
void SetStaticQuantizationParams(
caffe2::OperatorBase* op,
int output_index,
const TensorQuantizationParams& qparams);
/**
 * @return true if op's outputs should use static quantization (i.e. op has
 * a Y_scale and optionally a Y_zero_point argument).
 */
bool HasStaticQuantization(
const caffe2::OperatorBase* op,
int output_index = 0);
/**
 * Get the output_index'th quantization parameters.
 * Should be used only when HasStaticQuantization returns true.
 */
TensorQuantizationParams GetStaticQuantizationParamsOf(
const caffe2::OperatorBase* op,
int output_index);
/**
 * Quantize the input_index'th input if it's not already quantized.
 * A vector temp should be passed to store the quantized results.
 *
 * @return array of quantized values
 */
template <typename T>
const T* QuantizeInputIfNeeded(
caffe2::OperatorBase* op,
int input_index,
const TensorQuantizationParams& qparams,
std::vector<T>& temp);
template <typename T>
const T* RowWiseQuantizeInputIfNeeded(
caffe2::OperatorBase* op,
int input_index,
const std::vector<TensorQuantizationParams>& qparams,
std::vector<T>& temp);
struct QuantizationErrorStats {
float sum_sq{0}, sum_err_sq{0};
float max_abs_err{0};
// actual and reference values that resulted in max_abs_err
float max_err_actual{0}, max_err_ref{0};
int measure_cnt{0};
};
void MeasureQuantizationError(
const float* actual,
const float* ref,
size_t len,
QuantizationErrorStats* stat);
void ReportQuantizationError(
const caffe2::OperatorBase* op,
const QuantizationErrorStats& stat);
/**
* Get QuantizationFactory based on the arguments of op
*/
std::unique_ptr<QuantizationFactory> GetQuantizationFactoryOf(
const caffe2::OperatorBase* op);
void AdjustOutputTensorQuantizationParamsWithFollowedBy(
caffe2::OperatorBase* op,
const std::string& followed_by);
void ParseDNNLowPOperatorArguments(
caffe2::OperatorBase* op,
bool* dequantize_output = nullptr,
bool* measure_quantization_error = nullptr,
std::string* followed_by = nullptr);
caffe2::NetDef AddScaleZeroOffsetArgumentsWithHistogram(
caffe2::NetDef net_def,
const std::string& histogram_file_name);
} // namespace dnnlowp
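
A hedged sketch of the producer/consumer flow these helpers enable; the function below is hypothetical and stands in for the body of a DNNLOWP operator's RunOnDevice:

#include "caffe2/quantization/server/caffe2_dnnlowp_utils.h"

// Consume the producer's qparams (or choose fresh ones), then publish the
// output qparams so downstream DNNLOWP ops can reuse them.
bool RunQuantizedPassThrough(
    caffe2::OperatorBase* op,
    const dnnlowp::QuantizationFactory* qfactory) {
  dnnlowp::TensorQuantizationParams in_qparams =
      dnnlowp::GetInputTensorQuantizationParamsOf(op, 0, qfactory);
  // ... quantized computation would go here; a pass-through op can simply
  // reuse the input qparams for its output ...
  dnnlowp::PropagateOutputTensorQuantizationParams(op, 0, in_qparams);
  return true;
}
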
pytorch-main/caffe2/quantization/server/channel_shuffle_dnnlowp_op.h
#pragma once
#include "caffe2/operators/channel_shuffle_op.h"
#include "caffe2/operators/conv_pool_op_base.h"
#include "caffe2/quantization/server/conv_pool_dnnlowp_op_base.h"
#include "caffe2/quantization/server/dnnlowp.h"
#include "caffe2/quantization/server/dnnlowp_op.h"
namespace caffe2 {
namespace {
template <class Context>
using ChannelShuffleFp32Op = ChannelShuffleOp<float, Context>;
} // namespace
template <typename T>
class ChannelShuffleDNNLowPOp final
: public DNNLowPOp<T, ChannelShuffleFp32Op<CPUContext>> {
public:
USE_OPERATOR_FUNCTIONS(CPUContext);
USE_DNNLOWP_OPERATOR_BASE_FUNCTIONS(T, ChannelShuffleFp32Op<CPUContext>);
ChannelShuffleDNNLowPOp(const OperatorDef& operator_def, Workspace* ws);
bool RunOnDevice() override;
bool RunOnDeviceWithOrderNCHW();
bool RunOnDeviceWithOrderNHWC();
private:
const StorageOrder order_;
const int group_;
};
} // namespace caffe2
pytorch-main/caffe2/quantization/server/compute_equalization_scale.h
// Copyright 2004-present Facebook. All Rights Reserved.
#pragma once
#include "caffe2/quantization/server/caffe2_dnnlowp_utils.h"
#include "caffe2/quantization/server/dnnlowp.h"
namespace caffe2 {
class ComputeEqualizationScaleOp final : public Operator<CPUContext> {
public:
ComputeEqualizationScaleOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<CPUContext>(operator_def, ws) {}
bool RunOnDevice() override;
}; // class ComputeEqualizationScaleOp
} // namespace caffe2
pytorch-main/caffe2/quantization/server/concat_dnnlowp_op.h
#pragma once
#include "caffe2/operators/concat_split_op.h"
#include "caffe2/quantization/server/dnnlowp_op.h"
namespace caffe2 {
template <typename T>
class ConcatDNNLowPOp final : public DNNLowPOp<T, ConcatOp<CPUContext>> {
public:
ConcatDNNLowPOp(const OperatorDef& operator_def, Workspace* ws);
bool RunOnDevice() override;
USE_OPERATOR_FUNCTIONS(CPUContext);
USE_DNNLOWP_OPERATOR_BASE_FUNCTIONS(T, ConcatOp<CPUContext>);
private:
void GetQuantizationParameters_();
int axis_;
int add_axis_;
// Input: a number of tensors. Output: Y, split
// The split is stored on CPU.
std::vector<dnnlowp::RequantizationParams> requantization_params_;
}; // class ConcatDNNLowPOp
} // namespace caffe2
pytorch-main/caffe2/quantization/server/conv_dnnlowp_acc16_op.h
#pragma once
#include "caffe2/quantization/server/conv_dnnlowp_op.h"
#include "fbgemm/Fbgemm.h"
namespace caffe2 {
/**
 * Quantized Conv operator with 16-bit accumulation.
 * We may encounter saturation, but this is faster on Intel CPUs.
 */
template <bool ReluFused = false>
class ConvDNNLowPAcc16Op final : public ConvDNNLowPOp<std::uint8_t, ReluFused> {
public:
USE_CONV_POOL_BASE_FUNCTIONS(CPUContext);
ConvDNNLowPAcc16Op(const OperatorDef& operator_def, Workspace* ws);
using BaseType = ConvDNNLowPOp<std::uint8_t, ReluFused>;
using BaseType::BIAS;
using BaseType::col_buffer_;
using BaseType::FILTER;
using BaseType::in_qparams_;
using BaseType::INPUT;
using BaseType::InputTensorCPU_;
using BaseType::out_qparams_;
using BaseType::OutputTensorCPU_;
using BaseType::row_offsets_;
using BaseType::W_quantized_;
using BaseType::X_pack_buf_;
using BaseType::Y_int32_;
private:
bool RunOnDeviceWithOrderNCHW() override;
bool RunOnDeviceWithOrderNHWC() override;
bool GetQuantizationParameters_();
template <typename PackAMatrix, fbgemm::QuantizationGranularity Q_GRAN>
void DispatchFBGEMM_(
PackAMatrix& packA,
const std::uint8_t* col_buffer_data,
vector<std::int32_t>* Y_int32,
uint8_t* Y_uint8_data);
void ConvOutlier_(
const std::uint8_t* col_buffer,
vector<std::int32_t>* Y_int32);
bool Acc16() const override {
return !fallback_to_32_bit_accumulation_;
}
std::shared_ptr<fbgemm::PackBMatrix<std::int8_t, std::int16_t>>
Wq_acc16_packed_;
// Wq outlier in CSC format
std::shared_ptr<fbgemm::CompressedSparseColumn> Wq_outlier_;
// Threshold to decide whether a weight is outlier.
// For example, if nbits_in_non_outlier_ == 7, w is an outlier if w < -64 or
// w >= 64.
// nbits_in_non_outlier_ == 0 means everything is outlier.
// nbits_in_non_outlier_ == 8 means nothing is outlier.
int nbits_in_non_outlier_;
int copy_to_32bit_frequency_;
bool first_invocation_{true};
// If the outlier matrix is not sparse enough, 16-bit accumulation won't
// give a speedup because of the overhead of sparse matrix multiplication
// or sparse convolution, so we fall back to 32-bit accumulation.
bool fallback_to_32_bit_accumulation_{false};
}; // class ConvDNNLowPAcc16Op
} // namespace caffe2
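
To make the nbits_in_non_outlier_ convention above concrete, here is a small illustrative predicate (not part of this header) implementing the documented rule:

#include <cstdint>

// A weight is an outlier iff it does not fit in a signed
// nbits_in_non_outlier-bit integer; e.g. nbits == 7 -> outside [-64, 64).
inline bool IsOutlierWeight(std::int8_t w, int nbits_in_non_outlier) {
  if (nbits_in_non_outlier == 0) return true;   // everything is an outlier
  if (nbits_in_non_outlier >= 8) return false;  // nothing is an outlier
  const int bound = 1 << (nbits_in_non_outlier - 1);
  return w < -bound || w >= bound;
}
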
pytorch-main/caffe2/quantization/server/conv_dnnlowp_op.h
#pragma once
#include <fbgemm/Fbgemm.h>
#include "caffe2/operators/conv_op.h"
#include "caffe2/operators/conv_pool_op_base.h"
#include "caffe2/quantization/server/caffe2_dnnlowp_utils.h"
#include "caffe2/quantization/server/conv_pool_dnnlowp_op_base.h"
#include "caffe2/quantization/server/dnnlowp.h"
#include "caffe2/quantization/server/op_wrapper.h"
namespace caffe2 {
using ConvFp32Op = ConvOp<float, CPUContext>;
// Convolutional layer computed in integer with quantization
template <typename T, bool ReluFused = false>
class ConvDNNLowPOp : public ConvPoolDNNLowPOpBase<T, ConvFp32Op> {
public:
USE_CONV_POOL_BASE_FUNCTIONS(CPUContext);
USE_CONV_POOL_DNNLOWP_OPERATOR_BASE_FUNCTIONS(T, ConvFp32Op);
ConvDNNLowPOp(const OperatorDef& operator_def, Workspace* ws);
~ConvDNNLowPOp() override;
protected:
bool RunOnDeviceWithOrderNCHW() override;
bool RunOnDeviceWithOrderNHWC() override;
bool GetQuantizationParameters_();
/**
 * @return true if the convolution is basically a GEMM: a point-wise
 * (e.g., 1x1) convolution with no stride/dilation/pad
 */
bool IsConvGEMM_() const;
bool NoIm2ColNHWC_();
int KernelDim_();
const T* Im2ColNHWC_(Tensor* col_buffer);
dnnlowp::TensorQuantizationParams& FilterQuantizationParams(int group_id);
dnnlowp::RequantizationParams& RequantizationParams(int group_id);
static void PartitionGroupedNHWCConv_(
int* group_begin,
int* group_end,
int* i_begin,
int* i_end,
int num_groups,
int m,
int nthreads,
int thread_id);
virtual bool Acc16() const {
return false;
}
Tensor col_buffer_{CPU};
Tensor img_shape_device_{CPU};
Tensor col_buffer_shape_device_{CPU};
// Input: X, W, b
// Output: Y
INPUT_TAGS(INPUT, FILTER, BIAS);
// x86 only provides SIMD instructions that multiply a signed integer with an
// unsigned integer. We use signed for weights.
using T_signed = typename std::make_signed<T>::type;
// used in slow path for T != uint8_t
std::vector<T_signed> W_quantized_;
// pre-computed biases and offsets
std::shared_ptr<std::vector<std::int32_t>> column_offsets_;
std::vector<std::int32_t> row_offsets_;
const std::int32_t* b_quantized_data_{nullptr};
std::vector<std::uint8_t> X_pack_buf_;
void RunOnDeviceEpilogueNCHW_(
const T* col_buffer_data,
std::int32_t* Y_int32,
T* Y_data,
std::size_t i_offset,
int group_id);
void RunOnDeviceEpilogueNHWC_(
const T* col_buffer_data,
std::int32_t* Y_int32);
std::vector<std::int32_t> Y_int32_;
std::vector<dnnlowp::TensorQuantizationParams> filter_qparams_;
std::vector<std::int32_t> filter_zero_points_;
std::vector<float> requantization_multipliers_;
bool quantize_groupwise_;
private:
void QuantizeWeight_();
void PreComputeRowColumnOffsets_();
void QuantizeBias_();
bool TakeDepthWise3x3FastPath_();
bool TakeDepthWise3x3x3FastPath_();
bool TakeGConvFastPath_();
template <typename PackAMatrix, fbgemm::QuantizationGranularity Q_GRAN>
void DispatchFBGEMM_(
PackAMatrix& packA,
vector<std::int32_t>* Y_int32,
uint8_t* Y_uint8_data);
void ConvNHWCCore_(const T* col_buffer_data, vector<std::int32_t>* Y_int32);
fbgemm::conv_param_t<> GetConvParam_();
fbgemm::conv_param_t<3> GetConv3DParam_();
std::vector<dnnlowp::RequantizationParams> requantization_params_;
// used in fast path for T == uint8_t
std::shared_ptr<fbgemm::PackBMatrix<std::int8_t>> Wq_packed_;
// For depthwise conv
std::shared_ptr<fbgemm::PackedDepthWiseConvMatrix> Wq_depthwise_packed_;
// For small gconv
std::shared_ptr<fbgemm::PackWeightMatrixForGConv<std::int8_t>>
Wq_gconv_packed_;
std::shared_ptr<
fbgemm::PackWeightMatrixForGConv<std::int8_t, std::int32_t, 3>>
Wq_gconv3d_packed_;
// pre-computed biases and offsets
std::shared_ptr<std::vector<std::int32_t>> b_quantized_;
float in_qparams_scale_old_{0};
std::int32_t in_qparams_zero_point_old_{0};
}; // class ConvDNNLowPOp
} // namespace caffe2
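
The IsConvGEMM_ condition rests on the fact that a 1x1 convolution with unit stride, no padding, and no dilation in NHWC layout is exactly a matrix multiply. A plain fp32 sketch of that equivalence (illustrative only, not the quantized fast path):

// Treat X as an (N*H*W x C_in) matrix and W as (C_out x C_in):
// Y[i][m] = sum_k X[i][k] * W[m][k], with no im2col needed.
void Conv1x1AsGemm(
    const float* X, const float* W, float* Y,
    int rows /* N*H*W */, int C_in, int C_out) {
  for (int i = 0; i < rows; ++i) {
    for (int m = 0; m < C_out; ++m) {
      float acc = 0.f;
      for (int k = 0; k < C_in; ++k) {
        acc += X[i * C_in + k] * W[m * C_in + k];
      }
      Y[i * C_out + m] = acc;
    }
  }
}
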
pytorch-main/caffe2/quantization/server/conv_pool_dnnlowp_op_base.h
#pragma once
#ifdef _OPENMP
#include <omp.h>
#endif
#include "caffe2/core/tensor_int8.h"
#include "caffe2/operators/conv_op_shared.h"
#include "caffe2/operators/conv_pool_op_base.h"
#include "caffe2/quantization/server/fbgemm_pack_blob.h"
#include "caffe2/quantization/server/op_wrapper.h"
#ifdef _OPENMP
C10_DECLARE_int(caffe2_omp_num_threads);
#endif
C10_DECLARE_bool(caffe2_dnnlowp_shared_int32_buffer);
C10_DECLARE_bool(caffe2_force_shared_col_buffer);
namespace caffe2 {
// TODO: code duplication with dnnlowp_op.h
template <typename T, typename FP32_OP>
class ConvPoolDNNLowPOpBase : public ConvPoolOpBase<CPUContext> {
static_assert(std::is_integral<T>::value, "Integral required.");
public:
USE_CONV_POOL_BASE_FUNCTIONS(CPUContext);
ConvPoolDNNLowPOpBase(const OperatorDef& operator_def, Workspace* ws)
: ConvPoolOpBase<CPUContext>(operator_def, ws),
in_qparams_(InputSize()),
qfactory_(dnnlowp::GetQuantizationFactoryOf(this)) {
#ifdef _OPENMP
if (FLAGS_caffe2_omp_num_threads > 0) {
omp_set_num_threads(FLAGS_caffe2_omp_num_threads);
}
#endif
if (this->debug_def().engine() == "DNNLOWP_16" ||
this->debug_def().engine() == "DNNLOWP_ROWWISE_16") {
LOG(WARNING)
<< this->debug_def().engine()
<< " is an experimental feature mostly for testing accuracy with "
"fixed-point precision higher than 8 and performance is very slow";
}
}
virtual ~ConvPoolDNNLowPOpBase() {
if (measure_quantization_error_) {
dnnlowp::ReportQuantizationError(this, quantization_error_stats_);
LOG(WARNING) << this->debug_def().output(0) << " with type "
<< this->debug_def().type() << " has output qparams : "
<< "scale " << out_qparams_.scale << " offset "
<< out_qparams_.zero_point << "; ";
}
}
protected:
const TensorCPU& InputTensorCPU_(int idx) {
if (InputIsType<int8::Int8TensorCPU>(idx)) {
return this->Input<int8::Int8TensorCPU>(idx).t;
} else if (InputIsType<Int8ConvDNNLowPPackedWeightBlob>(idx)) {
return this->Input<Int8ConvDNNLowPPackedWeightBlob>(idx).original_tensor;
} else {
return Input(idx);
}
}
TensorCPU* OutputTensorCPU_(int idx) {
return &Outputs()[idx]->template GetMutable<int8::Int8TensorCPU>()->t;
}
Tensor* OutputTensorCPU_(int idx, at::IntArrayRef dims, at::TensorOptions options) {
auto* t = &Outputs()[idx]->template GetMutable<int8::Int8TensorCPU>()->t;
ReinitializeTensor(t, dims, options.device(CPU));
return t;
}
T* GetQuantizedOutputData_() {
return OutputTensorCPU_(0)->template mutable_data<T>();
}
void MeasureQuantizationError_() {
if (!measure_quantization_error_ || !Fp32Op_()) {
return;
}
const float* actual = nullptr;
vector<float> actual_temp;
if (OutputTensorCPU_(0)->template IsType<float>()) {
actual = OutputTensorCPU_(0)->template data<float>();
} else {
actual_temp.resize(OutputTensorCPU_(0)->numel());
fbgemm::Dequantize<T>(
OutputTensorCPU_(0)->template data<T>(),
actual_temp.data(),
OutputTensorCPU_(0)->numel(),
out_qparams_);
actual = actual_temp.data();
}
TensorCPU* float_tensor = Fp32Op_()->Get()->Output(0);
float* ref = float_tensor->template mutable_data<float>();
if (followed_by_ == "Relu" || debug_def().type() == "ConvRelu" ||
debug_def().type() == "Int8ConvRelu") {
for (int i = 0; i < OutputTensorCPU_(0)->numel(); ++i) {
ref[i] = std::max(0.f, ref[i]);
}
}
dnnlowp::MeasureQuantizationError(
actual, ref, OutputTensorCPU_(0)->numel(), &quantization_error_stats_);
}
void RunOnDeviceEpilogue_() {
dnnlowp::PropagateOutputTensorQuantizationParams(this, 0, out_qparams_);
MeasureQuantizationError_();
}
void ParseDNNLowPOperatorArguments_() {
if (!arguments_parsed_) {
bool dequantize_output;
dnnlowp::ParseDNNLowPOperatorArguments(
this,
&dequantize_output,
&measure_quantization_error_,
&followed_by_);
CAFFE_ENFORCE_EQ(
dequantize_output,
false,
"Conv DNNLOWP operators don't support dequantize_output");
arguments_parsed_ = true;
}
}
void GetOutputQuantizationParams_() {
using namespace dnnlowp;
ParseDNNLowPOperatorArguments_();
if (HasStaticQuantization(this)) {
out_qparams_ = GetStaticQuantizationParamsOf(this, 0);
if (measure_quantization_error_) {
// To measure quantization error, run the ref fp32 impl.
// This doesn't really belong here, but we need to run the reference fp32
// implementation before the quantized computation of some in-place
// operators overwrites their inputs.
Fp32Op_()->DequantizeInput();
Fp32Op_()->Get()->RunOnDevice();
}
} else {
// TODO: this is only needed when dequantize_output_ == false but leave
// as it is now because some code relies on out_qparams_ initialized even
// though it never actually uses it.
Fp32Op_()->DequantizeInput();
Fp32Op_()->Get()->RunOnDevice();
out_qparams_ = Fp32Op_()->GetOutputQuantizationParams(qfactory_.get());
}
}
OpWrapper<FP32_OP, T>* Fp32Op_() {
if (!fp32_op_) {
fp32_op_.reset(new OpWrapper<FP32_OP, T>(this, qfactory_.get()));
}
return fp32_op_.get();
}
void CreateSharedInt32Buffer_() {
auto* mutexPtr =
ws_->CreateBlob("__CAFFE2_DNNLOWP_SHARED_INT32_BUFFER_CPU_MUTEX__")
->GetMutable<std::unique_ptr<std::mutex>>();
mutexPtr->reset(new std::mutex());
ws_->CreateBlob("__CAFFE2_DNNLOWP_SHARED_INT32_BUFFER_CPU__");
}
void RunWithSharedBuffer_(
Tensor* col_buffer,
vector<int32_t>* Y_int32,
std::function<
void(Tensor* col_buffer_shared, vector<int32_t>* Y_int32_shared)> f) {
auto f2 = [this, Y_int32, f](Tensor* col_buffer_shared) {
if (FLAGS_caffe2_dnnlowp_shared_int32_buffer) {
auto* mutexBlob =
ws_->GetBlob("__CAFFE2_DNNLOWP_SHARED_INT32_BUFFER_CPU_MUTEX__");
CAFFE_ENFORCE(mutexBlob, "Must call CreateSharedInt32Buffer() first");
auto* mutexPtr = mutexBlob->GetMutable<std::unique_ptr<std::mutex>>();
std::lock_guard<std::mutex> g(**mutexPtr);
auto* Y_int32_shared =
ws_->GetBlob("__CAFFE2_DNNLOWP_SHARED_INT32_BUFFER_CPU__")
->template GetMutable<vector<int32_t>>();
f(col_buffer_shared, Y_int32_shared);
} else {
f(col_buffer_shared, Y_int32);
}
};
if (FLAGS_caffe2_force_shared_col_buffer || this->shared_buffer_) {
runWithSharedBuffer<CPUContext>(this->ws_, f2);
} else {
f2(col_buffer);
}
}
bool measure_quantization_error_{false};
std::string followed_by_;
std::vector<dnnlowp::TensorQuantizationParams> in_qparams_;
dnnlowp::TensorQuantizationParams out_qparams_;
std::unique_ptr<OpWrapper<FP32_OP, T>> fp32_op_;
std::unique_ptr<dnnlowp::QuantizationFactory> qfactory_;
std::vector<T> out_temp_;
// Buffer to store quantized output temporarily
// when we output dequantized values.
dnnlowp::QuantizationErrorStats quantization_error_stats_;
bool arguments_parsed_{false};
};
#define USE_CONV_POOL_DNNLOWP_OPERATOR_BASE_FUNCTIONS(T, FP32_OP) \
/* using override */ using BaseType = ConvPoolDNNLowPOpBase<T, FP32_OP>; \
/* using override */ using BaseType::GetOutputQuantizationParams_; \
/* using override */ using BaseType::GetQuantizedOutputData_; \
/* using override */ using BaseType::Fp32Op_; \
/* using override */ using BaseType::InputTensorCPU_; \
/* using override */ using BaseType::MeasureQuantizationError_; \
/* using override */ using BaseType::OutputTensorCPU_; \
/* using override */ using BaseType::RunOnDeviceEpilogue_; \
/* using override */ using BaseType::followed_by_; \
/* using override */ using BaseType::in_qparams_; \
/* using override */ using BaseType::measure_quantization_error_; \
/* using override */ using BaseType::out_qparams_; \
/* using override */ using BaseType::qfactory_;
} // namespace caffe2
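
RunWithSharedBuffer_ above follows a common caffe2 pattern: one process-wide scratch buffer stored as a workspace blob and guarded by a mutex blob, so concurrently running operators reuse a single allocation. Stripped of the workspace plumbing, the pattern reduces to this illustrative sketch:

#include <cstdint>
#include <functional>
#include <mutex>
#include <vector>

// Serialize access to a shared scratch buffer; f does its work while the
// lock is held, so the buffer's contents stay private for the duration.
void WithSharedScratch(
    std::mutex& m,
    std::vector<int32_t>& shared_buffer,
    const std::function<void(std::vector<int32_t>*)>& f) {
  std::lock_guard<std::mutex> g(m);
  f(&shared_buffer);
}
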
pytorch-main/caffe2/quantization/server/conv_relu_op.h
#pragma once
#include "caffe2/operators/conv_op.h"
#include "caffe2/operators/conv_pool_op_base.h"
namespace caffe2 {
template <typename T, class Context>
class ConvReluOp final : public ConvPoolOpBase<Context> {
public:
ConvReluOp(const OperatorDef& operator_def, Workspace* ws)
: ConvPoolOpBase<Context>(operator_def, ws) {
for (auto name : operator_def.input()) {
local_input_blobs_.push_back(local_ws_.CreateBlob(name));
TORCH_CHECK_NOTNULL(local_input_blobs_.back());
}
local_op_.reset(new ConvOp<T, Context>(operator_def, &local_ws_));
for (auto name : operator_def.output()) {
local_output_blobs_.push_back(local_ws_.GetBlob(name));
TORCH_CHECK_NOTNULL(local_output_blobs_.back());
}
}
~ConvReluOp() override {}
bool RunOnDeviceWithOrderNCHW() override;
bool RunOnDeviceWithOrderNHWC() override;
private:
Workspace local_ws_;
std::vector<Blob*> local_input_blobs_;
std::vector<Blob*> local_output_blobs_;
std::unique_ptr<ConvOp<T, Context>> local_op_;
}; // class ConvReluOp
} // namespace caffe2
pytorch-main/caffe2/quantization/server/dnnlowp.h
#pragma once
#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstdint>
#include <limits>
#ifdef __x86_64__
#include <immintrin.h>
#endif
#include <fbgemm/QuantUtils.h>
#include "caffe2/quantization/server/dynamic_histogram.h"
#include "caffe2/utils/cpuid.h"
namespace dnnlowp {
using fbgemm::RequantizationParams;
using fbgemm::TensorQuantizationParams;
// Represents a quantization scheme that provides quantization parameters
// based on the distribution of the data to be quantized.
class QuantizationFactory {
public:
enum QuantizationKind {
// A simple quantization scheme that determines quantization parameter by
// just looking at min/max.
MIN_MAX_QUANTIZATION,
// Minimizes L2 norm of quantization error
L2_MIN_QUANTIZATION,
// fast search to remove histogram outliers and approximate L2 min
L2_MIN_QUANTIZATION_APPROX,
// Minimizes Kullback-Leibler divergence
KL_MIN_QUANTIZATION,
// Take the 99th percentile (only works with sparsity-preserving
// quantization)
P99_QUANTIZATION,
L1_MIN_QUANTIZATION,
};
/// Get the default factory whose policy is determined by gflags
static QuantizationFactory* GetDefaultInstance();
/// Choose quantization scale and zero_point that maps
/// floating-point range [min, max] to the integer range of the specified
/// precision
TensorQuantizationParams ChooseQuantizationParams(
float min,
float max,
int precision,
bool preserve_sparsity,
bool is_signed = false) const {
TensorQuantizationParams qparams = fbgemm::ChooseQuantizationParams(
min,
max,
is_signed ? -(1 << (precision - 1)) : 0,
is_signed ? ((1 << (precision - 1)) - 1) : (1 << precision) - 1,
preserve_sparsity,
force_scale_power_of_two_);
qparams.precision = precision;
return qparams;
}
/// Choose quantization scale and zero_point that maps
/// floating-point range [min, max] to the default integer range of
/// this quantization factory
TensorQuantizationParams
ChooseQuantizationParams(float min, float max, bool is_weight = false) const {
return ChooseQuantizationParams(
min,
max,
is_weight ? GetWeightPrecision() : GetActivationPrecision(),
is_weight ? GetPreserveWeightSparsity()
: GetPreserveActivationSparsity());
}
/// Choose quantization based on the values in an array to optimize the
/// quantization errors ignoring a few outliers
TensorQuantizationParams ChooseQuantizationParams(
const float* values,
int len,
QuantizationKind kind,
int precision,
bool preserve_sparsity) const;
TensorQuantizationParams ChooseQuantizationParams(
const float* values,
int len,
bool is_weight = false) const;
/// Choose quantization based on histogram of values to optimize the
/// quantization errors ignoring a few outliers
TensorQuantizationParams ChooseQuantizationParams(
const Histogram& hist,
QuantizationKind kind,
int precision,
bool preserve_sparsity,
bool is_weight = false) const;
TensorQuantizationParams ChooseQuantizationParams(
const Histogram& hist,
bool is_weight = false) const;
// Given a real_multiplier, produces a pair (quantized_multiplier,
// right_shift) where quantized_multiplier is an int32 representing a
// fixed-point value (in practice we only produce positive values) and
// right_shift is an amount to shift right by, so that the floating-point
// multiplication of some int32 input value by real_multiplier,
//
// return static_cast<int32>(int32_value * real_multiplier);
//
// is best approximated by the integer-arithmetic-only code
//
// return RoundingRightShift(
// Multiplication(int32_value, quantized_multiplier),
// right_shift);
//
// Note: all this code only needs to run offline to generate the quantized
// neural network workload, not at runtime on the device on which quantized
// neural networks need to run. So it's not performance-critical at all.
RequantizationParams ChooseRequantizationMultiplier(
float real_multiplier,
TensorQuantizationParams target_qparams) const;
int GetActivationPrecision() const {
return activation_precision_;
}
int GetWeightPrecision() const {
return weight_precision_;
}
int GetEltwiseQuantizePrecision() const {
return eltwise_quantize_precision_;
}
bool GetPreserveActivationSparsity() const {
return preserve_activation_sparsity_;
}
bool GetPreserveWeightSparsity() const {
return preserve_weight_sparsity_;
}
QuantizationKind GetActivationKind() const {
return activation_kind_;
}
QuantizationKind GetWeightKind() const {
return weight_kind_;
}
void SetWeightP99Threshold(float threshold) {
weight_p99_threshold_ = threshold;
}
void SetActivationP99Threshold(float threshold) {
activation_p99_threshold_ = threshold;
}
explicit QuantizationFactory(
int activation_precision = 8,
// precision used for activations in main operations like matmul
int weight_precision = 8, // precision used for weights
int requantization_multiplier_precision = 32,
// precision used for the requantization multiplier
int eltwise_quantize_precision = 16,
// precision used for element-wise addition
bool preserve_activation_sparsity = false,
// preserve zeros in quantization
bool preserve_weight_sparsity = false,
// preserve zeros in quantization
bool force_scale_power_of_two = false,
// restrict scaling to a power of two
QuantizationKind activation_kind = MIN_MAX_QUANTIZATION,
QuantizationKind weight_kind = MIN_MAX_QUANTIZATION,
float weight_p99_threshold = 0.99,
// P99 percentage to select out from the full histogram for weights
float activation_p99_threshold = 0.99
// P99 percentage to select out from the full histogram for activations
);
private:
int activation_precision_;
int weight_precision_;
int requantization_multiplier_precision_;
int eltwise_quantize_precision_;
bool preserve_activation_sparsity_;
bool preserve_weight_sparsity_;
bool force_scale_power_of_two_;
QuantizationKind activation_kind_, weight_kind_;
float weight_p99_threshold_;
float activation_p99_threshold_;
}; // class QuantizationFactory
/**
* Parse a string to QuantizationKind
*/
QuantizationFactory::QuantizationKind StringToKind(const std::string& s);
std::vector<float>
adjust_hist_to_include_zero(const Histogram& hist, float* min, float* max);
} // namespace dnnlowp
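
ChooseQuantizationParams ultimately maps the floating-point range [min, max] onto the integer range [qmin, qmax] in the standard affine way (real = scale * (q - zero_point)). A minimal sketch of that mapping, ignoring the sparsity-preserving and power-of-two options that fbgemm::ChooseQuantizationParams actually handles:

#include <algorithm>
#include <cmath>

struct AffineParams {
  float scale;
  int zero_point;
};

AffineParams ChooseAffineParams(float min, float max, int qmin, int qmax) {
  // Make sure 0 is representable so zero padding quantizes exactly.
  min = std::min(min, 0.f);
  max = std::max(max, 0.f);
  AffineParams p;
  p.scale = (max - min) / static_cast<float>(qmax - qmin);
  if (p.scale == 0.f) {
    p.scale = 0.1f;  // degenerate all-zero range; any nonzero scale works
  }
  p.zero_point = qmin - static_cast<int>(std::lround(min / p.scale));
  p.zero_point = std::min(std::max(p.zero_point, qmin), qmax);
  return p;
}
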
pytorch-main/caffe2/quantization/server/dnnlowp_op.h
#pragma once
#ifdef _OPENMP
#include <omp.h>
#endif
#include "caffe2/core/operator.h"
#include "caffe2/core/tensor_int8.h"
#include "caffe2/quantization/server/caffe2_dnnlowp_utils.h"
#include "caffe2/quantization/server/dnnlowp.h"
#include "caffe2/quantization/server/fbgemm_pack_blob.h"
#include "caffe2/quantization/server/op_wrapper.h"
#include "caffe2/quantization/server/sigmoid.h"
#include "caffe2/quantization/server/tanh.h"
#ifdef _OPENMP
C10_DECLARE_int(caffe2_omp_num_threads);
#endif
namespace caffe2 {
/**
* @brief A convenient base class for C2 operators with DNNLOWP engine.
* DNNLOWP ops give flexibility on the type of input/output blobs.
* For example, some inputs can be the usual fp32 tensor and they will be
* quantized before doing actual computation.
* Otherwise, the inputs should be pre-quantized Int8TensorCPU.
* A few constraints: the weight is pre-quantized if and only if the
* bias is also pre-quantized.
*
* static quantization vs. dynamic quantization
* When Y_scale and Y_zero_point (optional with default = 0) arg is set,
* and dequantize_output is false, we do static quantization, meaning
* we're using the same pre-computed scale and zero_point for the output
* activation tensor.
* Otherwise, we do dynamic quantization by looking at the min/max of
* output activation tensor for each batch.
* Y_scale and Y_zero_point arguments are used for static quantization.
* scale and zero_point of Int8TensorCPU is used for carrying
* quantization information across operators both in static and dynamic
* quantization. This means scale and zero_point of Int8TensorCPU is
* valid only for the current batch and will be reset in the next batch
* when dynamic quantization is used.
*
* C2 operators with DNNLOWP engine have the following arguments:
* - dequantize_output (default=false): when true, output is dequantized
* as fp32. Useful when we're only quantizing individual operators
* rather than doing end-to-end quantization. Conv operators don't
* support the dequantize_output option, as an exception, because doing so
* complicates the implementation significantly, and having a separate
* Dequantize operator doesn't add much overhead because Conv ops are
* usually used in deep networks where regions of quantization are
* long chains.
* - followed_by (default=null): can be relu, sigmoid, or tanh. When
* specified, the current operator is only followed by relu, sigmoid,
* or tanh, and this fact can be used for more accurate output
* quantization.
* - measure_quantization_error (default=false): when true, L2 error
* with respect to the baseline C2 operator in fp32 is reported.
* WARNING: turning on this option will make performance very slow, and
* this option is intended for debugging accuracy issues.
*
* For the following quantization method related options, please refer
* to caffe2/quantization/server/dnnlowp.cc for more details.
*
* - activation_quantization_precision (default=8)
* - weight_quantization_precision (default=8)
* - requantization_multiplier_precision (default=32)
* - eltwise_quantization_precision (default=16)
* - force_scale_power_of_two (default=0)
* - preserve_activation_sparsity (default=0)
* - preserve_weight_sparsity (default=0)
* - activation_quantization_kind (default=min_max)
* - weight_quantization_kind (default=min_max)
*/
template <typename T, typename FP32_OP>
class DNNLowPOp : public Operator<CPUContext> {
static_assert(std::is_integral<T>::value, "Integral required.");
public:
USE_OPERATOR_FUNCTIONS(CPUContext);
DNNLowPOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<CPUContext>(operator_def, ws),
in_qparams_(InputSize()),
qfactory_(dnnlowp::GetQuantizationFactoryOf(this)) {
#ifdef _OPENMP
if (FLAGS_caffe2_omp_num_threads > 0) {
omp_set_num_threads(FLAGS_caffe2_omp_num_threads);
}
#endif
if (this->debug_def().engine() == "DNNLOWP_16" ||
this->debug_def().engine() == "DNNLOWP_ROWWISE_16") {
LOG(WARNING)
<< this->debug_def().engine()
<< " is an experimental feature mostly for testing accuracy with "
"fixed-point precision higher than 8 and performance is very slow";
}
}
virtual ~DNNLowPOp() {
if (measure_quantization_error_) {
dnnlowp::ReportQuantizationError(this, quantization_error_stats_);
}
}
protected:
const TensorCPU& InputTensorCPU_(int idx) {
if (InputIsType<int8::Int8TensorCPU>(idx)) {
return this->Input<int8::Int8TensorCPU>(idx).t;
} else if (InputIsType<Int8FCDNNLowPPackedWeightBlob>(idx)) {
return this->Input<Int8FCDNNLowPPackedWeightBlob>(idx).original_tensor;
} else {
return Input(idx);
}
}
TensorCPU* OutputTensorCPU_(int idx) {
if (dequantize_output_) {
return Output(idx);
} else {
return &Outputs()[idx]->template GetMutable<int8::Int8TensorCPU>()->t;
}
}
Tensor*
OutputTensorCPU_(int idx, at::IntArrayRef dims, at::TensorOptions options) {
if (dequantize_output_) {
return Output(idx, dims, options.device(CPU));
} else {
auto* t = &Outputs()[idx]->template GetMutable<int8::Int8TensorCPU>()->t;
ReinitializeTensor(t, dims, options.device(CPU));
return t;
}
}
T* GetQuantizedOutputData_() {
if (dequantize_output_) {
out_temp_.resize(Output(0)->numel());
return out_temp_.data();
} else {
return OutputTensorCPU_(0)->template mutable_data<T>();
}
}
void MeasureQuantizationError_() {
if (!measure_quantization_error_ || !Fp32Op_()) {
return;
}
const float* actual = nullptr;
std::vector<float> actual_temp;
if (OutputTensorCPU_(0)->template IsType<float>()) {
actual = OutputTensorCPU_(0)->template data<float>();
std::string op_type = this->debug_def().type();
bool relu_fused = op_type.length() >= 4 &&
op_type.compare(op_type.length() - 4, 4, "Relu") == 0;
if (GetSingleArgument<std::string>("followed_by", "") == "Relu" &&
!relu_fused) {
// If dequantize_output_ is true and relu is not fused,
// dnnlowp op won't clip negative values. Do it here.
actual_temp.resize(OutputTensorCPU_(0)->numel());
for (int i = 0; i < Output(0)->numel(); ++i) {
actual_temp[i] = std::max(0.f, actual[i]);
}
actual = actual_temp.data();
}
} else {
actual_temp.resize(OutputTensorCPU_(0)->numel());
fbgemm::Dequantize<T>(
OutputTensorCPU_(0)->template data<T>(),
actual_temp.data(),
OutputTensorCPU_(0)->numel(),
out_qparams_);
actual = actual_temp.data();
}
float* ref = Fp32Op_()->Get()->Output(0)->template mutable_data<float>();
if (followed_by_ == "Relu") {
for (int i = 0; i < OutputTensorCPU_(0)->numel(); ++i) {
ref[i] = std::max(0.f, ref[i]);
}
}
dnnlowp::MeasureQuantizationError(
actual, ref, OutputTensorCPU_(0)->numel(), &quantization_error_stats_);
}
void RunOnDeviceEpilogue_() {
if (dequantize_output_) {
fbgemm::Dequantize<T>(
out_temp_.data(),
OutputTensorCPU_(0)->template mutable_data<float>(),
OutputTensorCPU_(0)->numel(),
out_qparams_);
} else {
dnnlowp::PropagateOutputTensorQuantizationParams(this, 0, out_qparams_);
}
MeasureQuantizationError_();
}
void ParseDNNLowPOperatorArguments_() {
// Ideally, this should be done in constructor but any modification of
// arguments in ParseDNNLowPOperatorArguments will be ignored if we call
// this from constructor.
// Make sure all derived classes call this "early enough" so that they
// use correct parameters.
if (!arguments_parsed_) {
dnnlowp::ParseDNNLowPOperatorArguments(
this,
&dequantize_output_,
&measure_quantization_error_,
&followed_by_);
arguments_parsed_ = true;
}
}
void GetOutputQuantizationParams_(
dnnlowp::TensorQuantizationParams* out_qparams_overwrite = nullptr) {
using namespace dnnlowp;
ParseDNNLowPOperatorArguments_();
if (HasStaticQuantization(this)) {
if (out_qparams_overwrite != nullptr) {
out_qparams_ = *out_qparams_overwrite;
} else {
out_qparams_ = GetStaticQuantizationParamsOf(this, 0);
}
if (measure_quantization_error_) {
// To measure quantization error, run the ref fp32 impl.
// This doesn't really belong here, but we need to run the reference fp32
// implementation before the quantized computation of some in-place
// operators overwrites their inputs.
Fp32Op_()->DequantizeInput();
Fp32Op_()->Get()->RunOnDevice();
}
} else {
// TODO: this is only needed when dequantize_output_ == false but leave
// as it is now because some code relies on out_qparams_ initialized even
// though it never actually uses it.
Fp32Op_()->DequantizeInput();
Fp32Op_()->Get()->RunOnDevice();
if (out_qparams_overwrite != nullptr) {
out_qparams_ = *out_qparams_overwrite;
} else {
out_qparams_ = Fp32Op_()->GetOutputQuantizationParams(qfactory_.get());
}
}
}
OpWrapper<FP32_OP, T>* Fp32Op_() {
if (!fp32_op_) {
fp32_op_.reset(new OpWrapper<FP32_OP, T>(this, qfactory_.get()));
}
return fp32_op_.get();
}
bool dequantize_output_{false}, measure_quantization_error_{false};
std::string followed_by_;
std::vector<dnnlowp::TensorQuantizationParams> in_qparams_;
dnnlowp::TensorQuantizationParams out_qparams_;
std::unique_ptr<OpWrapper<FP32_OP, T>> fp32_op_;
std::unique_ptr<dnnlowp::QuantizationFactory> qfactory_;
std::vector<T> out_temp_;
// Buffer to store quantized output temporarily
// when we output dequantized values.
dnnlowp::QuantizationErrorStats quantization_error_stats_;
bool arguments_parsed_{false};
};
#define USE_DNNLOWP_OPERATOR_BASE_FUNCTIONS(T, FP32_OP) \
/* using override */ using BaseType = DNNLowPOp<T, FP32_OP>; \
/* using override */ using BaseType::GetOutputQuantizationParams_; \
/* using override */ using BaseType::GetQuantizedOutputData_; \
/* using override */ using BaseType::Fp32Op_; \
/* using override */ using BaseType::InputTensorCPU_; \
/* using override */ using BaseType::MeasureQuantizationError_; \
/* using override */ using BaseType::OutputTensorCPU_; \
/* using override */ using BaseType::RunOnDeviceEpilogue_; \
/* using override */ using BaseType::dequantize_output_; \
/* using override */ using BaseType::followed_by_; \
/* using override */ using BaseType::in_qparams_; \
/* using override */ using BaseType::measure_quantization_error_; \
/* using override */ using BaseType::out_qparams_; \
/* using override */ using BaseType::qfactory_;
inline int dnnlowp_get_num_threads() {
#ifdef _OPENMP
return omp_get_num_threads();
#else
return 1;
#endif
}
inline int dnnlowp_get_max_threads() {
#ifdef _OPENMP
return omp_get_max_threads();
#else
return 1;
#endif
}
inline int dnnlowp_get_thread_num() {
#ifdef _OPENMP
return omp_get_thread_num();
#else
return 0;
#endif
}
} // namespace caffe2
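
For reference, a static-quantization operator definition using the arguments documented above might look like this in a predict_net (prototxt sketch; the blob names and values are illustrative):

op {
  input: "X_int8"
  input: "W"
  input: "b"
  output: "Y_int8"
  type: "FC"
  engine: "DNNLOWP"
  arg { name: "Y_scale" f: 0.025 }
  arg { name: "Y_zero_point" i: 128 }
}
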
pytorch-main/caffe2/quantization/server/dnnlowp_partition.h
#pragma once
#include <algorithm>
#include <cstddef>
#include <utility>
namespace caffe2 {
std::pair<size_t, size_t>
Get1DPartition(size_t work, int nthreads, int tid, int work_align = 1);
/**
 * 1D-partition m x n 2D work.
 * First try partitioning m if m >= nthreads.
 * Otherwise, each row is partitioned by multiple threads.
 * In this case, each thread only works on a single row.
 * Optionally, we can force the number of columns assigned per thread to be
 * a multiple of n_align.
 */
void Get1DPartitionOf2D(
int m,
int n,
int nthreads,
int thread_id,
int* m_begin,
int* m_end,
int* n_begin,
int* n_end,
int n_align = 1);
} // namespace caffe2
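
A minimal sketch of what a 1D partition like Get1DPartition computes, assuming the remainder is spread over the first threads and ignoring work_align (illustrative, not the real implementation):

#include <algorithm>
#include <cstddef>
#include <utility>

std::pair<size_t, size_t> SimplePartition(size_t work, int nthreads, int tid) {
  const size_t chunk = work / nthreads;
  const size_t rem = work % nthreads;
  // Threads [0, rem) each take one extra element.
  const size_t begin =
      static_cast<size_t>(tid) * chunk + std::min<size_t>(tid, rem);
  const size_t end =
      begin + chunk + (static_cast<size_t>(tid) < rem ? 1 : 0);
  return {begin, end};
}
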
pytorch-main/caffe2/quantization/server/dynamic_histogram.h
#pragma once
#include <memory>
#include <vector>
namespace dnnlowp {
/**
 * bin_width = (max - min)/nbins
 * The ith bin (zero-based indexing) contains [i*bin_width, (i+1)*bin_width),
 * with the exception that the (nbins - 1)th bin contains
 * [(nbins-1)*bin_width, nbins*bin_width].
 */
class Histogram {
public:
Histogram(int nbins, float min, float max)
: min_(min), max_(max), histogram_(nbins) {}
Histogram(float min, float max, const std::vector<uint64_t>& bins)
: min_(min), max_(max), histogram_(bins) {}
void Add(float f, uint64_t cnt = 1);
/**
 * This version collects the histogram with a single thread.
 */
void Add(const float* f, int len);
float Min() const {
return min_;
}
float Max() const {
return max_;
}
const std::vector<uint64_t>* GetHistogram() const {
return &histogram_;
}
private:
float min_, max_;
std::vector<uint64_t> histogram_;
};
/// An equi-width histogram where the spread of bins changes over time when
/// we see new min or max values.
class DynamicHistogram {
public:
DynamicHistogram(int nbins);
void Add(float f);
void Add(const float* f, int len);
/// Indicate we're not dynamically adjusting histogram bins any more and
/// return the current static histogram.
const Histogram* Finalize();
private:
/// The dynamic histogram is implemented as a series of static histograms;
/// it expands from the old histogram to a new histogram when we see a new
/// extremum.
/// An invariant: the beginning of the first bin of histograms_[i] exactly
/// matches the beginning of a bin in histograms_[i+1]. The end of the
/// last bin of histograms_[i] exactly matches the end of a bin in
/// histograms_[i+1].
std::unique_ptr<Histogram> histogram_;
int nbins_;
float min_, max_;
std::unique_ptr<Histogram> final_histogram_;
}; // class DynamicHistogram
} // namespace dnnlowp
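
Following the bin convention documented above, a sketch of the bin lookup Add would perform (illustrative only):

#include <algorithm>
#include <cmath>

// Bin i covers [min + i*bin_width, min + (i+1)*bin_width); the last bin is
// closed on the right, so clamping makes `max` itself land in bin nbins-1.
int BinIndex(float f, float min, float max, int nbins) {
  const float bin_width = (max - min) / nbins;
  const int i = static_cast<int>(std::floor((f - min) / bin_width));
  return std::min(std::max(i, 0), nbins - 1);
}
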
pytorch-main/caffe2/quantization/server/elementwise_dnnlowp_op.h
#pragma once
#include "caffe2/core/tensor_int8.h"
#include "caffe2/operators/elementwise_ops.h"
#include "caffe2/quantization/server/caffe2_dnnlowp_utils.h"
#include "caffe2/quantization/server/dnnlowp_op.h"
#include "caffe2/quantization/server/sigmoid.h"
namespace caffe2 {
template <typename T, class Functor>
class UnaryElementwiseWithArgsDNNLowPOp : public Operator<CPUContext> {
public:
USE_OPERATOR_FUNCTIONS(CPUContext);
UnaryElementwiseWithArgsDNNLowPOp(
const OperatorDef& operator_def,
Workspace* ws)
: Operator<CPUContext>(operator_def, ws), functor_() {}
bool RunOnDevice() override {
if (!arguments_parsed_) {
dnnlowp::ParseDNNLowPOperatorArguments(this);
dnnlowp::SetStaticQuantizationParams(
this, 0, functor_.GetOutputQuantizationParams());
arguments_parsed_ = true;
}
auto& input = this->template Input<int8::Int8TensorCPU>(0).t;
auto& output = Outputs()[0]->template GetMutable<int8::Int8TensorCPU>()->t;
output.ResizeLike(input);
functor_(
input.size(),
input.template data<T>(),
output.template mutable_data<T>());
dnnlowp::PropagateOutputTensorQuantizationParams(
this, 0, functor_.GetOutputQuantizationParams());
return true;
}
private:
Functor functor_;
bool arguments_parsed_{false};
};
template <typename T, typename FP32_OP>
class BinaryElementwiseDNNLowPOp : public DNNLowPOp<T, FP32_OP> {
public:
USE_OPERATOR_FUNCTIONS(CPUContext);
BinaryElementwiseDNNLowPOp(const OperatorDef& operator_def, Workspace* ws)
: DNNLowPOp<T, FP32_OP>(operator_def, ws),
OP_SINGLE_ARG(bool, "broadcast", enable_broadcast_, 0),
OP_SINGLE_ARG(int, "axis", axis_, -1),
OP_SINGLE_ARG(string, "axis_str", axis_str_, ""),
OP_SINGLE_ARG(string, "order", order_, "NCHW") {
// Figure out the correct axis to use.
if (enable_broadcast_) {
if (axis_ != -1) {
// Get axis from an explicit axis argument.
CAFFE_ENFORCE_EQ(
axis_str_.size(),
0,
"Args axis and axis_str cannot be used simultaneously.");
} else if (axis_str_.size()) {
// Get the axis index semantically.
CAFFE_ENFORCE_EQ(
axis_str_.size(), 1, "Unsupported axis string", axis_str_);
size_t semantic_axis_ = order_.find(axis_str_);
CAFFE_ENFORCE_NE(
semantic_axis_,
string::npos,
"Unrecognizable axis string ",
axis_str_,
" from order string ",
order_);
axis_ = semantic_axis_;
}
} else {
CAFFE_ENFORCE(
axis_ == -1 && axis_str_.size() == 0,
"Do not specify axis or axis_str if broadcast is not enabled.");
}
}
protected:
bool enable_broadcast_;
int axis_;
string axis_str_;
string order_;
dnnlowp::RequantizationParams requantization_params_;
}; // BinaryElementwiseDNNLowPOp
// For arithmetic operators, Eigen provides a good way to vectorize even
// when broadcasting.
#define DECLARE_EIGEN_FUNCTOR(name, eigen_op, input_type, output_type) \
struct Eigen##name##Functor { \
template <int b_is_scalar, typename T, typename R> \
inline void Run(size_t n, const T* a, const T* b, R* out, CPUContext*) { \
if (b_is_scalar) { \
EigenVectorArrayMap<R>(out, n) = \
eigen_op((ConstEigenVectorArrayMap<T>(a, n)), (b[0])); \
} else { \
EigenVectorArrayMap<R>(out, n) = eigen_op( \
(ConstEigenVectorArrayMap<T>(a, n)), \
(ConstEigenVectorArrayMap<T>(b, n))); \
} \
} \
template <typename T, typename R> \
void RunWithBroadcast( \
const T* a, \
const T* b, \
R* out, \
size_t pre, \
size_t n, \
CPUContext*) { \
EigenArrayMap<R>(out, n, pre) = eigen_op( \
(ConstEigenArrayMap<T>(a, n, pre).colwise()), \
(ConstEigenVectorArrayMap<T>(b, n))); \
} \
template <typename T, typename R> \
void RunWithBroadcast2( \
const T* a, \
const T* b, \
R* out, \
size_t pre, \
size_t n, \
size_t post, \
CPUContext*) { \
for (const auto i : c10::irange(pre)) { \
EigenArrayMap<R>(out + i * n * post, post, n) = eigen_op( \
(ConstEigenArrayMap<T>(a + i * n * post, post, n).rowwise()), \
(Eigen::Map<const Eigen::Array<T, 1, Eigen::Dynamic>>(b, n))); \
} \
} \
};
} // namespace caffe2
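
The macro above expects eigen_op to be a function-like macro over Eigen array expressions. An illustrative instantiation (hypothetical names, placed inside namespace caffe2 where the Eigen map aliases are visible):

#define EIGEN_ADD(x, y) ((x) + (y))
DECLARE_EIGEN_FUNCTOR(Add, EIGEN_ADD, uint8_t, uint8_t)
// Expands to `struct EigenAddFunctor` with Run / RunWithBroadcast /
// RunWithBroadcast2 overloads that vectorize a + b via Eigen maps.
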
pytorch-main/caffe2/quantization/server/elementwise_linear_dnnlowp_op.h
/**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "caffe2/operators/elementwise_linear_op.h"
#include "caffe2/quantization/server/dnnlowp_op.h"
namespace caffe2 {
using ElementwiseLinearFp32Op = ElementwiseLinearOp<float, CPUContext>;
template <typename T>
class ElementwiseLinearDNNLowPOp final
: public DNNLowPOp<T, ElementwiseLinearFp32Op> {
public:
ElementwiseLinearDNNLowPOp(const OperatorDef& operator_def, Workspace* ws);
bool RunOnDevice() override;
USE_OPERATOR_FUNCTIONS(CPUContext);
USE_DNNLOWP_OPERATOR_BASE_FUNCTIONS(T, ElementwiseLinearFp32Op);
private:
bool GetQuantizationParameters_();
int axis_;
dnnlowp::RequantizationParams requantization_params_;
std::vector<T> a_quantized_;
};
} // namespace caffe2
pytorch-main/caffe2/quantization/server/fb_fc_packed_op.h
/**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <fbgemm/FbgemmFP16.h>
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
#include "caffe2/operators/fully_connected_op.h"
#include "caffe2/utils/conversions.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
/**
* C2 wrapper for fp16 gemm
*
* Suppose your predict_net has an FC operator in fp32 as follows:
* op {
* input: "x"
* input: "w"
* input: "b"
* output: "y"
* type: "FC"
* }
* ...
* external_input: "w"
*
* To use FbFCPacked operator with fp16 fbgemm, in init_net
* ... # an operator that generates w
* op {
* input: "w"
* output: "w_packed"
* type: "FbGemmPack"
* }
* ...
* external_output: "w_packed"
*
* in predict_net:
* op {
* input: "x"
* input: "w_packed"
* input: "b"
* output: "y"
* type: "FbFCPacked"
* }
* ...
* external_input: "w_packed"
*/
template <
class Context,
class Engine = DefaultEngine,
typename T_W = fbgemm::float16>
class FbFCPackedOperator final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
FbFCPackedOperator(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws),
axis_(this->template GetSingleArgument<int32_t>("axis", 1)),
axis_w_(this->template GetSingleArgument<int32_t>("axis_w", 1)) {}
~FbFCPackedOperator() override {}
// template on X, B, and Y.
template <typename T_X, typename T_B, typename T_Y>
bool DoRunWithType() {
const auto& X = Input(0);
const auto& b = Input(2);
CAFFE_ENFORCE(b.dim() == 1, b.dim());
// batch size
const auto canonical_axis = X.canonical_axis_index(axis_);
const int M = X.size_to_dim(canonical_axis);
const int N = b.numel();
// Load the packed matrix
auto* W =
OperatorBase::Input<caffe2::unique_ptr<fbgemm::PackedGemmMatrixFP16>>(1)
.get();
const int K = W->numRows();
if (!W->packed()) {
if (!packed_w_) {
std::vector<float> src_mat(W->matSize());
for (int i = 0; i < W->matSize(); ++i) {
src_mat[i] =
fbgemm::cpu_half2float(W->pmat()[i]);
}
packed_w_ = std::make_unique<fbgemm::PackedGemmMatrixFP16>(
fbgemm::matrix_op_t::Transpose,
W->numRows(), W->numCols(),
1.0,
src_mat.data());
}
W = packed_w_.get();
}
auto dimErrorString = [&]() {
return c10::str(
"Dimension mismatch: ",
"X: ",
X.sizes(),
", W: ",
std::vector<int>({K, W->numCols()}),
", b: ",
b.sizes(),
", axis: ",
axis_,
", M: ",
M,
", N: ",
N,
", K: ",
K);
};
// Error checking
CAFFE_ENFORCE(M == X.numel() / K, dimErrorString());
CAFFE_ENFORCE(K == X.size_from_dim(canonical_axis), dimErrorString());
CAFFE_ENFORCE(N == W->numCols(), dimErrorString());
Y_shape_cache_ = X.sizes().vec();
// This is an invariant of canonical_axis, so we can DCHECK.
TORCH_DCHECK_LE(canonical_axis + 1, Y_shape_cache_.size());
Y_shape_cache_.resize(canonical_axis + 1);
Y_shape_cache_[canonical_axis] = N;
auto* Y = Output(0, Y_shape_cache_, at::dtype<T_Y>());
if (X.numel() == 0) {
// skip the rest of the computation if X is empty
Y->template mutable_data<T_Y>();
return true;
}
// Call the fp16 gemm interface
fbgemm::cblas_gemm_compute(
fbgemm::matrix_op_t::NoTranspose,
M,
X.template data<T_X>(),
*W,
0.f,
Y->template mutable_data<T_Y>());
    // Add the bias term; accumulation is still in fp32.
    TensorProto::DataType math_type = TensorProto_DataType_FLOAT;
    if (bias_multiplier_.numel() != M) {
      // If the helper bias multiplier's size is not M, reshape and fill it
      // with ones.
bias_multiplier_.Resize(M);
math::Set<T_B, Context>(
M,
convert::To<float, T_B>(1),
bias_multiplier_.template mutable_data<T_B>(),
&context_);
}
math::Gemm<T_B, Context, Engine>(
CblasNoTrans,
CblasNoTrans,
M,
N,
1,
1,
bias_multiplier_.template data<T_B>(),
b.template data<T_B>(),
1,
Y->template mutable_data<T_Y>(),
&context_,
math_type);
return true;
}
bool RunOnDevice() override {
return DoRunWithType<
float, // X
float, // B
float>(); // Y
}
protected:
size_t axis_{1};
size_t axis_w_{1};
// A local vector to cache the output shape so we don't need to recreate
// a vector object every time we run Run().
vector<int64_t> Y_shape_cache_;
Tensor bias_multiplier_{Context::GetDeviceType()};
caffe2::unique_ptr<fbgemm::PackedGemmMatrixFP16> packed_w_{nullptr};
};
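// A minimal standalone sketch of the two-step flow this operator wraps
// (illustrative names and shapes; not part of the operator itself): pack the
// fp16 weight once, then reuse the packed matrix for every fp32 GEMM call.
inline void FbFCPackedUsageSketch(
    const float* W, // K x N weight in fp32
    const float* X, // M x K activations in fp32
    float* Y, // M x N output in fp32
    int M,
    int N,
    int K) {
  // Pack once (this is what FbGemmPack does in the init net).
  fbgemm::PackedGemmMatrixFP16 packed_w(
      fbgemm::matrix_op_t::Transpose, K, N, /*alpha=*/1.0f, W);
  // Compute repeatedly (this is what FbFCPacked does in the predict net,
  // before the bias term is added).
  fbgemm::cblas_gemm_compute(
      fbgemm::matrix_op_t::NoTranspose, M, X, packed_w, /*beta=*/0.f, Y);
}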
class PackedGemmMatrixFP16ShapeFunctions : public ExternalTensorFunctionsBase {
public:
explicit PackedGemmMatrixFP16ShapeFunctions()
: ExternalTensorFunctionsBase() {}
~PackedGemmMatrixFP16ShapeFunctions() override {}
bool isQuantized() const override {
return false;
}
bool IsSameMetaType(TypeIdentifier id) override;
void SetupExternalTensorDescriptor(
const Blob* blob,
std::vector<std::vector<uint64_t>>* shapes,
std::vector<std::vector<float>>* all_scales,
std::vector<std::vector<int32_t>>* all_offsets,
ExternalTensorDescriptor* desc) override;
void LoadInfoOfBlob(
const Blob* /* unused */,
std::vector<float>* /* unused */,
std::vector<float>* /* unused */,
uint32_t* /* unused */) override {}
TypeIdentifier GetTypeMetaId() override;
TypeMeta GetExternalTensorType(const void* c) override;
vector<int64_t> GetExternalTensorInfo(
const void* c,
size_t* capacity,
DeviceOption* device) override;
};
} // namespace caffe2
| 6,518
| 27.845133
| 80
|
h
|
null |
pytorch-main/caffe2/quantization/server/fbgemm_fp16_pack_op.h
|
/**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <fbgemm/FbgemmFP16.h>
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/conversions.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
template <
class Context,
class Engine = DefaultEngine,
bool TransposeWeight = true,
typename TPacked = fbgemm::float16>
class FbGemmPackOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
FbGemmPackOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws),
axis_(this->template GetSingleArgument<int32_t>("axis_w", 1)),
no_packing_(
this->template GetSingleArgument<int32_t>("no_packing", 0)) {}
~FbGemmPackOp() override {}
bool RunOnDevice() override {
const auto& X = Input(0);
const auto canonical_axis = X.canonical_axis_index(axis_);
const auto N = X.size_to_dim(canonical_axis);
const auto K = X.size_from_dim(canonical_axis);
fbgemm::PackedGemmMatrixFP16* resultPtr;
if (TransposeWeight) {
resultPtr = new fbgemm::PackedGemmMatrixFP16(
fbgemm::matrix_op_t::Transpose,
K,
N,
1.0f, /*alpha*/
X.template data<float>());
} else {
resultPtr = new fbgemm::PackedGemmMatrixFP16(
fbgemm::matrix_op_t::NoTranspose,
N,
K,
1.0f, /*alpha*/
X.template data<float>());
}
if (no_packing_) {
C10_LOG_FIRST_N(WARNING, 10) << "no_packing will be deprecated soon";
vector<fbgemm::float16> src_mat(resultPtr->matSize());
fbgemm::float16* pmat = resultPtr->pmat();
memcpy(
src_mat.data(), pmat, resultPtr->matSize() * sizeof(fbgemm::float16));
resultPtr->unpackFromSrc(fbgemm::matrix_op_t::Transpose, src_mat.data());
}
auto* Y =
this->template Output<unique_ptr<fbgemm::PackedGemmMatrixFP16>>(0);
Y->reset(resultPtr);
return true;
}
protected:
size_t axis_{1};
  // Do not pack the layout; for testing only.
bool no_packing_;
};
} // namespace caffe2
| 2,695
| 29.636364
| 80
|
h
|
null |
pytorch-main/caffe2/quantization/server/fbgemm_pack_blob.h
|
#pragma once
#include <memory>
#include <fbgemm/Fbgemm.h>
#include <caffe2/core/tensor.h>
#include "caffe2/quantization/server/dnnlowp.h"
namespace caffe2 {
/**
* Packed weight matrix for DNNLOWP Int8FC operator
*/
struct Int8FCDNNLowPPackedWeightBlob {
std::vector<dnnlowp::TensorQuantizationParams> qparams;
std::shared_ptr<std::vector<std::int32_t>> column_offsets;
// The original tensor before packing but only with meta information
Tensor original_tensor{CPU};
std::shared_ptr<std::vector<std::int32_t>> bias;
// Only for 32-bit accumulation
std::shared_ptr<fbgemm::PackBMatrix<std::int8_t>> W;
// Only for 16-bit accumulation
// Dense matrix holding common values
std::shared_ptr<fbgemm::PackBMatrix<std::int8_t, std::int16_t>> W_acc16;
// Sparse matrix holding outliers
std::shared_ptr<fbgemm::CompressedSparseColumn> W_outlier;
int nbits_in_non_outlier;
};
/**
* Packed weight matrix for DNNLOWP Int8Conv operator
*/
struct Int8ConvDNNLowPPackedWeightBlob : public Int8FCDNNLowPPackedWeightBlob {
// Only for 32-bit accumulation
std::shared_ptr<fbgemm::PackedDepthWiseConvMatrix> W_depthwise;
std::shared_ptr<fbgemm::PackWeightMatrixForGConv<std::int8_t>> W_gconv;
std::shared_ptr<
fbgemm::PackWeightMatrixForGConv<std::int8_t, std::int32_t, 3>>
W_gconv3d;
};
} // namespace caffe2
| 1,352
| 27.1875
| 79
|
h
|
null |
pytorch-main/caffe2/quantization/server/fbgemm_pack_matrix_cache.h
|
#pragma once
#include "fbgemm/Fbgemm.h"
namespace caffe2 {
/**
 * If a packed matrix already exists for the same source matrix, reuse it;
 * otherwise, create a new one. This can reduce memory usage when many
 * threads share the same weight.
*/
template <typename ACC_T>
std::shared_ptr<fbgemm::PackBMatrix<int8_t, ACC_T>>
GetOrCreateFbgemmPackBMatrix(
fbgemm::matrix_op_t trans,
std::int32_t m,
std::int32_t n,
const void* orig_data,
const std::int8_t* quantized_data,
std::int32_t ld);
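// Hypothetical usage sketch (buffers and dimension order are illustrative;
// arguments mirror fbgemm::PackBMatrix's trans/nrow/ncol/ld): repeated calls
// with the same underlying weight data return one shared packed matrix
// instead of packing a private copy per thread.
inline std::shared_ptr<fbgemm::PackBMatrix<int8_t, std::int32_t>>
GetSharedPackedWeightSketch(
    const float* orig_data,
    const std::int8_t* quantized_data,
    std::int32_t nrow,
    std::int32_t ncol,
    std::int32_t ld) {
  return GetOrCreateFbgemmPackBMatrix<std::int32_t>(
      fbgemm::matrix_op_t::Transpose, nrow, ncol, orig_data, quantized_data, ld);
}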
} // namespace caffe2
| 537
| 22.391304
| 77
|
h
|
null |
pytorch-main/caffe2/quantization/server/fbgemm_pack_op.h
|
#pragma once
#include "caffe2/core/operator.h"
#include "caffe2/operators/conv_op.h"
#include "caffe2/quantization/server/conv_pool_dnnlowp_op_base.h"
#include "caffe2/quantization/server/fbgemm_pack_blob.h"
#include "caffe2/quantization/server/fully_connected_dnnlowp_op.h"
namespace caffe2 {
using FCFp32Op = FullyConnectedOp<CPUContext>;
void QuantizeConvBias(
const Blob& blob,
int M,
const dnnlowp::TensorQuantizationParams& in_qparams,
const vector<dnnlowp::TensorQuantizationParams>& filter_qparams,
    std::vector<int32_t>& b_quantized,
    bool use_fp16 = false,
    bool round_nearest_even = true);
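// A minimal sketch of the per-channel bias quantization assumed above
// (illustrative, not the exact implementation; rounding mode and edge-case
// handling are simplified): bias uses zero_point 0 and
// scale = in_scale * filter_scale of the corresponding output channel.
inline std::int32_t QuantizeBiasValueSketch(
    float b,
    const dnnlowp::TensorQuantizationParams& in_qparams,
    const dnnlowp::TensorQuantizationParams& filter_qparams) {
  const float scale = in_qparams.scale * filter_qparams.scale;
  const float scaled = b / scale;
  // round half away from zero, without relying on <cmath>
  return static_cast<std::int32_t>(scaled + (scaled >= 0 ? 0.5f : -0.5f));
}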
class FullyConnectedDNNLowPPackWeightOp final
: public DNNLowPOp<std::uint8_t, FCFp32Op> {
public:
FullyConnectedDNNLowPPackWeightOp(
const OperatorDef& operator_def,
Workspace* ws);
USE_OPERATOR_FUNCTIONS(CPUContext);
bool RunOnDevice() override;
private:
int axis_w_;
bool quantize_channelwise_;
int nbits_in_non_outlier_; // only for DNNLOWP_ACC16
bool save_unpacked_weights_;
INPUT_TAGS(FILTER, BIAS);
};
using ConvFp32Op = ConvOp<float, CPUContext>;
/**
* Pack a weight matrix that can be used by DNNLOWP Int8Conv operators.
 * DNNLOWP operators can pack the matrix on demand during their first
 * invocation, but pre-packing with this operator has benefits such as saving
 * memory when multiple operators share the same weight.
 * This operator should be part of the init net so that it is called once to
 * populate the packed blob used by Int8Conv DNNLOWP operators in the
 * predictor net.
 *
 * This operator can also optionally pre-quantize the bias. In that case, the
 * scale of the input activation tensor must also be provided as the in_scale
 * argument.
*/
class ConvDNNLowPPackWeightOp final
: public ConvPoolDNNLowPOpBase<std::uint8_t, ConvFp32Op> {
public:
USE_CONV_POOL_BASE_FUNCTIONS(CPUContext);
USE_CONV_POOL_DNNLOWP_OPERATOR_BASE_FUNCTIONS(std::uint8_t, ConvFp32Op);
ConvDNNLowPPackWeightOp(const OperatorDef& operator_def, Workspace* ws);
bool RunOnDevice() override;
private:
bool TakeDepthWise3x3FastPath_();
bool TakeDepthWise3x3x3FastPath_();
bool TakeGConvFastPath_();
fbgemm::conv_param_t<> GetConvParam_();
fbgemm::conv_param_t<3> GetConv3DParam_();
  // Save quantized weights right after quantization, before layout packing,
  // for performance purposes.
bool save_unpacked_weights_;
bool quantize_groupwise_;
int nbits_in_non_outlier_; // only for DNNLOWP_ACC16
INPUT_TAGS(FILTER, BIAS);
};
// Helper functions for packing weights that can be used by
// ConvDNNLowPAcc16PackWeightOp, ConvDNNLowPOp, and ConvDNNLowPAcc16Op
template <typename T>
void QuantizeWeight(
const Blob& blob,
int kernel_dim,
int M,
vector<dnnlowp::TensorQuantizationParams>& qparams,
vector<typename std::make_signed<T>::type>& w_quantized,
dnnlowp::QuantizationFactory* qfactory);
template <typename T>
void ComputeColumnOffsets(
int num_rows,
int num_cols,
const T* W,
const vector<dnnlowp::TensorQuantizationParams>& qparams,
vector<int32_t>& col_offsets);
int CountOutliers(
int groups,
int kernel_dim,
int M,
int nbits_in_non_outlier,
vector<std::int8_t>& W_quantized);
/**
* @param W_quantized input quantized weight that is not packed yet
*/
fbgemm::CompressedSparseColumn* ExtractOutlierMatrix(
int groups,
int kernel_dim,
int M,
int nbits_in_non_outlier,
vector<std::int8_t>& W_quantized);
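// A minimal sketch of the outlier test assumed by the two functions above
// (illustrative): a quantized weight is an outlier when it does not fit in
// nbits_in_non_outlier signed bits; outliers are moved to the sparse CSC
// matrix while the dense copy keeps only the in-range part.
inline bool IsOutlierWeightSketch(std::int8_t w, int nbits_in_non_outlier) {
  const int bound = 1 << (nbits_in_non_outlier - 1);
  return w < -bound || w >= bound;
}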
/*
 * Onnxifi data type constants used here.
 * Should always be kept in sync with onnxifi.h.
*/
constexpr uint64_t kONNXIFI_DATATYPE_UINT8 = 2;
constexpr uint64_t kONNXIFI_DATATYPE_INT32 = 6;
constexpr uint64_t kONNXIFI_DATATYPE_INT8 = 3;
class Int8ConvDNNLowpPackedWeightBlobShapeFunctions
: public ExternalTensorFunctionsBase {
public:
explicit Int8ConvDNNLowpPackedWeightBlobShapeFunctions()
: ExternalTensorFunctionsBase() {}
~Int8ConvDNNLowpPackedWeightBlobShapeFunctions() override {}
bool isQuantized() const override {
return true;
}
bool IsSameMetaType(TypeIdentifier id) override;
void SetupExternalTensorDescriptor(
const Blob* blob,
std::vector<std::vector<uint64_t>>* shapes,
std::vector<std::vector<float>>* all_scales,
std::vector<std::vector<int32_t>>* all_offsets,
ExternalTensorDescriptor* desc) override;
void LoadInfoOfBlob(
const Blob* blob,
std::vector<float>* scale,
std::vector<float>* offset,
uint32_t* axis) override;
TypeIdentifier GetTypeMetaId() override;
TypeMeta GetExternalTensorType(const void* c) override;
vector<int64_t> GetExternalTensorInfo(
const void* c,
size_t* capacity,
DeviceOption* device) override;
};
class Int8FCDNNLowpPackedWeightBlobShapeFunctions
: public ExternalTensorFunctionsBase {
public:
explicit Int8FCDNNLowpPackedWeightBlobShapeFunctions()
: ExternalTensorFunctionsBase() {}
~Int8FCDNNLowpPackedWeightBlobShapeFunctions() override {}
bool isQuantized() const override {
return true;
}
bool IsSameMetaType(TypeIdentifier id) override;
void SetupExternalTensorDescriptor(
const Blob* blob,
std::vector<std::vector<uint64_t>>* shapes,
std::vector<std::vector<float>>* all_scales,
std::vector<std::vector<int32_t>>* all_offsets,
ExternalTensorDescriptor* desc) override;
void LoadInfoOfBlob(
const Blob* blob,
std::vector<float>* scale,
std::vector<float>* offset,
uint32_t* axis) override;
TypeIdentifier GetTypeMetaId() override;
TypeMeta GetExternalTensorType(const void* c) override;
vector<int64_t> GetExternalTensorInfo(
const void* c,
size_t* capacity,
DeviceOption* device) override;
};
} // namespace caffe2
| 5,763
| 30.67033
| 90
|
h
|
null |
pytorch-main/caffe2/quantization/server/fully_connected_dnnlowp_acc16_op.h
|
#pragma once
#include "caffe2/quantization/server/fully_connected_dnnlowp_op.h"
namespace caffe2 {
/**
* Quantized FC operator with 16-bit accumulation.
 * Accumulation may saturate, but this is faster on Intel CPUs.
*/
class FullyConnectedDNNLowPAcc16Op final
: public FullyConnectedDNNLowPOp<std::uint8_t> {
public:
FullyConnectedDNNLowPAcc16Op(const OperatorDef& operator_def, Workspace* ws);
bool RunOnDevice() override;
USE_OPERATOR_FUNCTIONS(CPUContext);
using BaseType = FullyConnectedDNNLowPOp<std::uint8_t>;
using BaseType::dequantize_output_;
using BaseType::in_qparams_;
using BaseType::InputTensorCPU_;
using BaseType::out_qparams_;
using BaseType::OutputTensorCPU_;
using BaseType::W_quantized_;
private:
std::shared_ptr<fbgemm::PackBMatrix<std::int8_t, std::int16_t>>
Wq_acc16_packed_;
// Wq outlier in CSC format
std::shared_ptr<fbgemm::CompressedSparseColumn> Wq_outlier_;
int nbits_in_non_outlier_;
int copy_to_32bit_frequency_;
}; // class FullyConnectedDNNLowPAcc16Op
} // namespace caffe2
| 1,065
| 27.052632
| 79
|
h
|
null |
pytorch-main/caffe2/quantization/server/fully_connected_dnnlowp_op.h
|
#pragma once
#include <fbgemm/Fbgemm.h>
#include "caffe2/operators/fully_connected_op.h"
#include "caffe2/quantization/server/dnnlowp_op.h"
namespace caffe2 {
template <typename T, bool ReluFused = false>
class FullyConnectedDNNLowPOp
: public DNNLowPOp<T, FullyConnectedOp<CPUContext>> {
public:
FullyConnectedDNNLowPOp(const OperatorDef& operator_def, Workspace* ws);
bool RunOnDevice() override;
USE_OPERATOR_FUNCTIONS(CPUContext);
USE_DNNLOWP_OPERATOR_BASE_FUNCTIONS(T, FullyConnectedOp<CPUContext>);
protected:
  bool GetQuantizationParameters_(float X_scale_ = -1.0, int X_zero_point_ = 0);
std::size_t axis_{1};
std::size_t axis_w_{1};
float X_scale_{-1.0};
int X_zero_point_{0};
vector<std::int64_t> Y_shape_cache_;
std::vector<dnnlowp::RequantizationParams> requantization_params_;
bool requantization_param_selected_{false};
// x86 only provides SIMD instructions that multiply a signed integer with an
// unsigned integer. We use signed for weights.
using T_signed = typename std::make_signed<T>::type;
// used in fast path for T == uint8_t
std::shared_ptr<fbgemm::PackBMatrix<std::int8_t>> Wq_packed_;
std::vector<std::uint8_t> X_pack_buf_;
std::vector<std::int32_t> Y_int32_;
std::vector<dnnlowp::TensorQuantizationParams> filter_qparams_;
std::vector<float> filter_scales_;
std::vector<std::int32_t> filter_zero_points_;
std::vector<float> requantization_multipliers_;
bool quantize_channelwise_;
// used in slow path for T != uint8_t
std::vector<T_signed> W_quantized_;
// pre-computed biases and offsets
std::shared_ptr<std::vector<std::int32_t>> b_quantized_;
const std::int32_t* b_quantized_data_{nullptr};
std::vector<std::int32_t> row_offsets_;
std::shared_ptr<std::vector<std::int32_t>> column_offsets_;
// Dequantized bias populated when input bias is quantized and
  // dequantize_output_ == true
std::vector<float> b_dequantized_;
const float* b_dequantized_data_{nullptr};
bool is_weight_constant_{true};
float in_qparams0_scale_old_ = 0;
std::int32_t in_qparams0_zero_point_old_ = 0;
}; // class FullyConnectedDNNLowPOp
} // namespace caffe2
| 2,160
| 30.779412
| 79
|
h
|
null |
pytorch-main/caffe2/quantization/server/fully_connected_fake_lowp_op.h
|
/**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#ifdef __x86_64__
#include <immintrin.h>
#endif
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/conversions.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
// convert to float16 reducing mantissa, preserving exponent
void fp32_to_bfp16(const float* source, size_t size, float* dest);
// convert to float24 reducing mantissa, preserving exponent
void fp32_to_bfp24(const float* source, size_t size, float* dest);
// convert to float14 reducing mantissa, preserving exponent
void fp32_to_bfp14(const float* source, size_t size, float* dest);
void fp32_to_bfp16_scalar(const float* source, size_t size, float* dest);
// convert to IEEE float16
void fp32_to_fp16(const float* source, size_t size, float* dest);
// fp32 -> int32 -> += 1 << 15 -> fp32 -> truncation
void fp32_to_bfp16_round(const float* source, size_t size, float* dest);
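// A minimal scalar sketch of the conversions above (illustrative; the real
// implementations are vectorized, and bfp24/bfp14 keep correspondingly more
// or fewer mantissa bits). Assumes <cstdint> and <cstring> are reachable
// through the includes above.
inline float fp32_to_bfp16_scalar_sketch(float x, bool round_nearest) {
  std::uint32_t bits;
  std::memcpy(&bits, &x, sizeof(bits));
  if (round_nearest) {
    bits += 1u << 15; // the "+= 1 << 15" rounding step of fp32_to_bfp16_round
  }
  bits &= 0xFFFF0000u; // keep sign, the 8 exponent bits, top 7 mantissa bits
  float y;
  std::memcpy(&y, &bits, sizeof(y));
  return y;
}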
// This is Caffe's InnerProductOp, with a name that fits its purpose better.
template <
void (*Q)(const float*, size_t, float*),
class Context,
class Engine = DefaultEngine,
bool TransposeWeight = true>
class FullyConnectedFakeLowpFPOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
FullyConnectedFakeLowpFPOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws),
axis_(this->template GetSingleArgument<int32_t>("axis", 1)),
axis_w_(this->template GetSingleArgument<int32_t>("axis_w", 1)),
float16_compute_(
this->template GetSingleArgument<bool>("float16_compute", false)) {}
~FullyConnectedFakeLowpFPOp() override {}
template <
typename T_X,
typename T_W,
typename T_B,
typename T_Y,
typename MATH>
bool DoRunWithType();
bool RunOnDevice() override {
return DoRunWithType<
float, // X
float, // W
float, // B
float, // Y
float>(); // Math
}
protected:
size_t axis_{1};
size_t axis_w_{1};
// A local vector to cache the output shape so we don't need to recreate
// a vector object every time we run Run().
vector<int64_t> Y_shape_cache_;
Tensor bias_multiplier_;
bool float16_compute_;
};
template <
void (*Q)(const float*, size_t, float*),
class Context,
class Engine = DefaultEngine,
bool TransposeWeight = true>
class FullyConnectedGradientFakeLowpFPOp : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
FullyConnectedGradientFakeLowpFPOp(
const OperatorDef& operator_def,
Workspace* ws)
: Operator<Context>(operator_def, ws),
axis_(this->template GetSingleArgument<int32_t>("axis", 1)),
axis_w_(this->template GetSingleArgument<int32_t>("axis_w", 1)),
float16_compute_(
this->template GetSingleArgument<bool>("float16_compute", false)) {}
~FullyConnectedGradientFakeLowpFPOp() override {}
template <
typename T_X,
typename T_W,
typename T_DY,
typename T_B,
typename T_DX,
typename T_DW,
typename T_DB,
typename MATH>
bool DoRunWithType();
bool RunOnDevice() override {
return DoRunWithType<
float, // X
float, // W
float, // dY
float, // B
float, // dX
float, // dW
float, // dB
float>(); // Math
}
protected:
size_t axis_{1};
size_t axis_w_{1};
Tensor bias_multiplier_;
bool float16_compute_;
};
} // namespace caffe2
| 4,101
| 28.3
| 80
|
h
|
null |
pytorch-main/caffe2/quantization/server/group_norm_dnnlowp_op.h
|
#pragma once
#include <vector>
#include "caffe2/operators/group_norm_op.h"
#include "caffe2/quantization/server/dnnlowp_op.h"
namespace caffe2 {
using GroupNormFP32Op = GroupNormOp<float, CPUContext>;
template <typename T>
class GroupNormDNNLowPOp final : public DNNLowPOp<T, GroupNormFP32Op> {
public:
USE_OPERATOR_FUNCTIONS(CPUContext);
USE_DNNLOWP_OPERATOR_BASE_FUNCTIONS(T, GroupNormFP32Op);
GroupNormDNNLowPOp(const OperatorDef& operator_def, Workspace* ws);
bool RunOnDevice() override;
private:
bool GetQuantizationParameters();
void QuantizeGamma();
void QuantizeGammaImpl();
void QuantizeBeta();
bool RunOnDeviceWithOrderNCHW();
bool RunOnDeviceWithOrderNHWC();
void QuantizedGroupMomentsNCHW(
int N,
int G,
int K,
int HxW,
const T* X,
int32_t* mu,
int32_t* rsig);
void QuantizedGroupMomentsNHWC(
int N,
int G,
int K,
int HxW,
const T* X,
int32_t* mu,
int32_t* rsig);
void DequantizedGroupMomentsNCHW(
int N,
int G,
int K,
int HxW,
const T* X,
float* mu,
float* rsig);
void DequantizedGroupMomentsNHWC(
int N,
int G,
int K,
int HxW,
const T* X,
float* mu,
float* rsig);
void ComputeQuantizedInvStd(
int N,
const float* var,
float* rsig,
int32_t* rsig_quantized);
void ComputeQuantizedFusedParams(
int N,
int G,
int K,
const int32_t* mu,
const int32_t* rsig,
const int32_t* gamma,
const int32_t* beta,
int32_t* scale,
int32_t* bias);
void ComputeDequantizedFusedParams(
int N,
int G,
int K,
const float* mu,
const float* rsig,
const float* gamma,
const float* beta,
float* scale,
float* bias);
void AffineBatchChannelQuantizedNCHW(
int N,
int C,
int HxW,
const T* X,
const int32_t* scale,
const int32_t* bias,
T* Y);
void AffineBatchChannelQuantizedNHWC(
int N,
int C,
int HxW,
const T* X,
const int32_t* scale,
const int32_t* bias,
T* Y);
void AffineBatchChannelDequantizedNCHW(
int N,
int C,
int HxW,
const float* X,
const float* scale,
const float* bias,
float* Y);
void AffineBatchChannelDequantizedNHWC(
int N,
int C,
int HxW,
const float* X,
const float* scale,
const float* bias,
float* Y);
const bool is_test_;
const int group_;
const float epsilon_;
const StorageOrder order_;
const bool is_param_constant_;
std::vector<int32_t> mu_quantized_;
std::vector<int32_t> rsig_quantized_;
std::vector<float> mu_dequantized_;
std::vector<float> rsig_dequantized_;
dnnlowp::TensorQuantizationParams rsig_qparams_;
std::vector<int32_t> gamma_quantized_;
std::vector<int32_t> beta_quantized_;
std::vector<float> gamma_dequantized_;
std::vector<float> beta_dequantized_;
const int32_t* gamma_quantized_data_ = nullptr;
const int32_t* beta_quantized_data_ = nullptr;
const float* gamma_dequantized_data_ = nullptr;
const float* beta_dequantized_data_ = nullptr;
std::vector<int32_t> scale_quantized_;
std::vector<int32_t> bias_quantized_;
std::vector<float> scale_dequantized_;
std::vector<float> bias_dequantized_;
dnnlowp::TensorQuantizationParams internal_qparams_;
std::vector<float> X_dequantized_;
std::vector<int32_t> Y_int32_;
float cached_X_qparams_scale_ = 0.0f;
// Input: X, gamma, beta
// Output: Y, mu, inv_sig
INPUT_TAGS(INPUT, GAMMA, BETA);
OUTPUT_TAGS(OUTPUT, MU, INV_SIGMA);
};
namespace internal {
template <typename T>
void VectorMomentsAVX2(const int N, const T* src, int64_t* sum, int64_t* sumsq);
void ComputeQuantizedFusedParamsAVX2(
const int N,
const int G,
const int K,
const int32_t X_zero_point,
const int32_t* mu,
const int32_t* rsig,
const int32_t* gamma,
int32_t* scale,
int32_t* bias);
template <typename T>
void AffineBatchChannelAndRequantizeNCHWAVX2(
const int N,
const int C,
const int HxW,
const dnnlowp::RequantizationParams& params,
const T* X,
const int32_t* scale,
const int32_t* bias,
T* Y);
template <typename T>
void AffineBatchChannelAndRequantizeNHWCAVX2(
const int N,
const int C,
const int HxW,
const dnnlowp::RequantizationParams& params,
const T* X,
const int32_t* scale,
const int32_t* bias,
T* Y);
} // namespace internal
} // namespace caffe2
| 4,610
| 20.546729
| 80
|
h
|
null |
pytorch-main/caffe2/quantization/server/im2col_dnnlowp.h
|
#pragma once
#ifdef _OPENMP
#include <omp.h>
#endif
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
#include "caffe2/utils/math/utils.h"
namespace caffe2 {
namespace math {
template <typename T>
static void Im2ColNCHW(
const int channels,
const int height,
const int width,
const int kernel_h,
const int kernel_w,
const int dilation_h,
const int dilation_w,
const int pad_t,
const int pad_l,
const int pad_b,
const int pad_r,
const int stride_h,
const int stride_w,
const T* data_im,
T* data_col,
CPUContext* /*context*/,
const T& zero_point = 0) {
const int output_h =
(height + pad_b + pad_t - (dilation_h * (kernel_h - 1) + 1)) / stride_h +
1;
const int output_w =
(width + pad_l + pad_r - (dilation_w * (kernel_w - 1) + 1)) / stride_w +
1;
// Fast path for zero padding and no dilation
// From Torch, THNN_(unfolded_copy)
if (dilation_h == 1 && dilation_w == 1 && pad_l == 0 && pad_r == 0 &&
pad_t == 0 && pad_b == 0) {
for (auto k = 0; k < channels * kernel_h * kernel_w; k++) {
const auto nip = k / (kernel_h * kernel_w);
const auto rest = k % (kernel_h * kernel_w);
const auto kh = rest / kernel_w;
const auto kw = rest % kernel_w;
auto* dst = data_col + nip * (kernel_h * kernel_w * output_h * output_w) +
kh * (kernel_w * output_h * output_w) + kw * (output_h * output_w);
const auto* src = data_im + nip * (height * width);
for (const auto y : c10::irange(output_h)) {
const auto iy = y * stride_h + kh;
const auto ix = kw;
if (stride_w == 1) {
memcpy(
dst + (y * output_w),
src + (iy * width + ix),
sizeof(T) * output_w);
} else {
for (const auto x : c10::irange(output_w)) {
memcpy(
dst + (y * output_w + x),
src + (iy * width + ix + x * stride_w),
sizeof(T));
}
}
}
}
return;
}
// Fast path for equal padding
if (pad_l == pad_r && pad_t == pad_b) {
// From Intel, https://github.com/BVLC/caffe/pull/3536
const int pad_h = pad_t;
const int pad_w = pad_l;
const int channel_size = height * width;
for (int channel = channels; channel--; data_im += channel_size) {
for (const auto kernel_row : c10::irange(kernel_h)) {
for (const auto kernel_col : c10::irange(kernel_w)) {
int input_row = -pad_h + kernel_row * dilation_h;
for (int output_rows = output_h; output_rows; output_rows--) {
if (!utils::IsAGeZeroAndALtB(input_row, height)) {
for (int output_cols = output_w; output_cols; output_cols--) {
*(data_col++) = zero_point;
}
} else {
int input_col = -pad_w + kernel_col * dilation_w;
for (int output_col = output_w; output_col; output_col--) {
if (utils::IsAGeZeroAndALtB(input_col, width)) {
*(data_col++) = data_im[input_row * width + input_col];
} else {
*(data_col++) = zero_point;
}
input_col += stride_w;
}
}
input_row += stride_h;
}
}
}
}
return;
}
// Baseline
const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
int height_col = (height + pad_t + pad_b - dkernel_h) / stride_h + 1;
int width_col = (width + pad_l + pad_r - dkernel_w) / stride_w + 1;
int channels_col = channels * kernel_h * kernel_w;
for (const auto c : c10::irange(channels_col)) {
int w_offset = c % kernel_w;
int h_offset = (c / kernel_w) % kernel_h;
int c_im = c / kernel_h / kernel_w;
for (const auto h : c10::irange(height_col)) {
for (const auto w : c10::irange(width_col)) {
int h_pad = h * stride_h - pad_t + h_offset * dilation_h;
int w_pad = w * stride_w - pad_l + w_offset * dilation_w;
if (h_pad >= 0 && h_pad < height && w_pad >= 0 && w_pad < width)
data_col[(c * height_col + h) * width_col + w] =
data_im[(c_im * height + h_pad) * width + w_pad];
else
data_col[(c * height_col + h) * width_col + w] = zero_point;
}
}
}
}
template <typename T>
static void Im2ColNdNCHW(
const int N,
const int /* img_size*/,
const int col_size,
const int* img_shape,
const int* col_shape,
const int* kernel_shape,
const int* stride,
const int* dilation,
const int* pad,
const T* X_data,
T* Y_data,
CPUContext* /* context */,
const T& zero_point = 0) {
const int outer_size = col_shape[0];
const int inner_size = col_size / outer_size;
const int kernel_size = std::accumulate(
kernel_shape, kernel_shape + N, 1, std::multiplies<int>());
std::vector<int> d_offset(N, 0);
std::vector<int> d_iter(N, 0);
for (const auto i : c10::irange(outer_size)) {
// Loop over spatial axes in reverse order to compute a per-axis offset.
int offset = i;
for (int d_i = N - 1; d_i >= 0; --d_i) {
d_offset[d_i] = offset % kernel_shape[d_i];
offset /= kernel_shape[d_i];
}
for (const auto j : c10::irange(inner_size)) {
// Loop over spatial axes in forward order to compute the indices in the
// image and column, and whether the index lies in the padding.
const int col_index = i * inner_size + j;
int img_index = i / kernel_size;
bool is_padding = false;
for (const auto d_i : c10::irange(N)) {
const int d_img = d_iter[d_i] * stride[d_i] - pad[d_i] +
d_offset[d_i] * dilation[d_i];
is_padding |= d_img < 0 || d_img >= img_shape[d_i + 1];
img_index = img_index * img_shape[d_i + 1] + d_img;
}
Y_data[col_index] = is_padding ? zero_point : X_data[img_index];
utils::IncreaseIndexInDims(N, col_shape + 1, d_iter.data());
}
}
}
/**
* The layout of the result is N H W G R S C/G.
* Note that groups are pulled out to an outer dimension so that we can use
* GEMMs efficiently.
*/
template <typename T>
static void Im2ColNHWC(
const int channels,
const int height,
const int width,
const int kernel_h,
const int kernel_w,
const int dilation_h,
const int dilation_w,
const int pad_t,
const int pad_l,
const int pad_b,
const int pad_r,
const int stride_h,
const int stride_w,
const T* data_im,
T* data_col,
CPUContext* /*context*/,
const int groups,
const T& zero_point) {
const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
int height_col = (height + pad_t + pad_b - dkernel_h) / stride_h + 1;
int width_col = (width + pad_l + pad_r - dkernel_w) / stride_w + 1;
#ifdef _OPENMP
#pragma omp parallel for if (!omp_in_parallel())
#endif
for (int h = 0; h < height_col; ++h) {
int h_pad = -pad_t + h * stride_h;
T* data_col_temp =
data_col + h * width_col * kernel_h * kernel_w * channels;
int w_pad = -pad_l;
for (C10_UNUSED const auto w : c10::irange(width_col)) {
int r = 0;
for (int ih = h_pad; ih < h_pad + dkernel_h; ih += dilation_h, ++r) {
int s = 0;
for (int iw = w_pad; iw < w_pad + dkernel_w; iw += dilation_w, ++s) {
if (ih >= 0 && ih < height && iw >= 0 && iw < width) {
for (const auto g : c10::irange(groups)) {
memcpy(
data_col_temp +
((g * kernel_h + r) * kernel_w + s) * (channels / groups),
data_im + (ih * width + iw) * channels +
g * (channels / groups),
sizeof(T) * (channels / groups));
}
} else {
          // Pad with the zero point (the quantized representation of zero).
for (const auto g : c10::irange(groups)) {
for (int i = 0; i < channels / groups; ++i) {
data_col_temp
[(((g * kernel_h + r) * kernel_w) + s) *
(channels / groups) +
i] = zero_point;
}
}
}
} // for each iw
} // for each ih
data_col_temp += kernel_h * kernel_w * channels;
w_pad += stride_w;
} // for each output pixel
} // for each image row
}
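// A hypothetical index helper (not part of this header) that makes the
// N H W G R S C/G layout above concrete: it returns the column-buffer offset
// of channel c within group g at filter position (r, s) for output pixel
// (h, w), matching the loop structure of Im2ColNHWC.
inline int Im2ColNHWCOffsetSketch(
    int h,
    int w,
    int g,
    int r,
    int s,
    int c,
    int width_col,
    int kernel_h,
    int kernel_w,
    int channels,
    int groups) {
  const int per_group = channels / groups;
  return ((((h * width_col + w) * groups + g) * kernel_h + r) * kernel_w + s) *
      per_group +
      c;
}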
/**
* The layout of the result is N T H W G Q R S C/G.
* Note that groups are pulled out to an outer dimension so that we can use
* GEMMs efficiently.
*/
template <typename T>
static void Im2Col3DNHWC(
const int channels,
const int num_frames,
const int height,
const int width,
const int kernel_t,
const int kernel_h,
const int kernel_w,
const int dilation_t,
const int dilation_h,
const int dilation_w,
const int pad_p, // previous frame
const int pad_t, // top
const int pad_l, // left
const int pad_n, // next frame
const int pad_b, // bottom
const int pad_r, // right
const int stride_t,
const int stride_h,
const int stride_w,
const T* data_im,
T* data_col,
CPUContext* /*context*/,
const int groups,
const T& zero_point) {
const int dkernel_t = dilation_t * (kernel_t - 1) + 1;
const int dkernel_h = dilation_h * (kernel_h - 1) + 1;
const int dkernel_w = dilation_w * (kernel_w - 1) + 1;
int frame_col = (num_frames + pad_p + pad_n - dkernel_t) / stride_t + 1;
int height_col = (height + pad_t + pad_b - dkernel_h) / stride_h + 1;
int width_col = (width + pad_l + pad_r - dkernel_w) / stride_w + 1;
#ifdef _OPENMP
#pragma omp parallel for if (!omp_in_parallel())
#endif
for (int t = 0; t < frame_col; ++t) {
int t_pad = -pad_p + t * stride_t;
for (const auto h : c10::irange(height_col)) {
int h_pad = -pad_t + h * stride_h;
T* data_col_temp = data_col +
(t * height_col + h) * width_col * kernel_t * kernel_h * kernel_w *
channels;
for (const auto w : c10::irange(width_col)) {
int w_pad = -pad_l + w * stride_w;
int q = 0;
for (int it = t_pad; it < t_pad + dkernel_t; it += dilation_t, ++q) {
int r = 0;
for (int ih = h_pad; ih < h_pad + dkernel_h; ih += dilation_h, ++r) {
int s = 0;
for (int iw = w_pad; iw < w_pad + dkernel_w;
iw += dilation_w, ++s) {
if (it >= 0 && it < num_frames && ih >= 0 && ih < height &&
iw >= 0 && iw < width) {
for (const auto g : c10::irange(groups)) {
memcpy(
data_col_temp +
(((g * kernel_t + q) * kernel_h + r) * kernel_w + s) *
(channels / groups),
data_im + ((it * height + ih) * width + iw) * channels +
g * (channels / groups),
sizeof(T) * (channels / groups));
}
} else {
            // Pad with the zero point (the quantized representation of zero).
for (const auto g : c10::irange(groups)) {
for (int i = 0; i < channels / groups; ++i) {
data_col_temp
[((((g * kernel_t + q) * kernel_h + r) * kernel_w) +
s) *
(channels / groups) +
i] = zero_point;
}
}
}
} // for each iw
} // for each ih
} // for each it
data_col_temp += kernel_t * kernel_h * kernel_w * channels;
} // for each output pixel
} // for each image row
} // for each frame
}
} // namespace math
} // namespace caffe2
| 11,848
| 33.344928
| 80
|
h
|
null |
pytorch-main/caffe2/quantization/server/int8_gen_quant_params.h
|
// Copyright 2004-present Facebook. All Rights Reserved.
#pragma once
#include "caffe2/quantization/server/caffe2_dnnlowp_utils.h"
#include "caffe2/quantization/server/dnnlowp.h"
namespace caffe2 {
using namespace std;
using dnnlowp::TensorQuantizationParams;
struct Int8QuantSchemeBlob {
public:
Int8QuantSchemeBlob(std::string quantization_kind, bool preserve_sparsity)
: quantization_kind_(quantization_kind),
preserve_sparsity_(preserve_sparsity) {}
std::string quantization_kind_;
bool preserve_sparsity_;
};
struct Int8QuantParamsBlob {
public:
Int8QuantParamsBlob(float scale, int zero_point) {
qparam.scale = scale;
qparam.zero_point = zero_point;
}
dnnlowp::TensorQuantizationParams qparam;
};
template <class Context, class Engine = DefaultEngine>
class Int8GenQuantParamsOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
Int8GenQuantParamsOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws) {}
bool RunOnDevice() override {
// Generate Int8 quant params based on the input data (last N samples of the
// activations) and the quant scheme
const auto& input_data = Input(0);
const auto* quant_scheme =
this->template Input<unique_ptr<Int8QuantSchemeBlob>>(1).get();
CAFFE_ENFORCE(input_data.dim() > 0);
CAFFE_ENFORCE(quant_scheme);
std::string quant_kind = quant_scheme->quantization_kind_;
bool preserve_sparsity = quant_scheme->preserve_sparsity_;
dnnlowp::QuantizationFactory* qfactory =
dnnlowp::QuantizationFactory::GetDefaultInstance();
TensorQuantizationParams qparam = qfactory->ChooseQuantizationParams(
input_data.template data<float>(),
input_data.numel(),
dnnlowp::StringToKind(quant_kind),
8,
preserve_sparsity);
auto* output_qparam =
this->template Output<unique_ptr<Int8QuantParamsBlob>>(0);
output_qparam->reset(
new Int8QuantParamsBlob(qparam.scale, qparam.zero_point));
return true;
}
}; // class Int8GenQuantParamsOp
} // namespace caffe2
| 2,113
| 33.096774
| 80
|
h
|
null |
pytorch-main/caffe2/quantization/server/int8_gen_quant_params_min_max.h
|
// Copyright 2004-present Facebook. All Rights Reserved.
#pragma once
#include "caffe2/quantization/server/caffe2_dnnlowp_utils.h"
#include "caffe2/quantization/server/dnnlowp.h"
#include "caffe2/quantization/server/int8_gen_quant_params.h"
#include <glog/logging.h>
namespace caffe2 {
using namespace std;
using dnnlowp::TensorQuantizationParams;
template <class Context, class Engine = DefaultEngine>
class Int8GenQuantParamsMinMaxOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
Int8GenQuantParamsMinMaxOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws) {}
bool RunOnDevice() override {
// Generate Int8 quant params based on the input data (last N samples of the
// activations) and the quant scheme
const float min =
OperatorBase::Input<Tensor>(0, CPU).template data<float>()[0];
const float max =
OperatorBase::Input<Tensor>(1, CPU).template data<float>()[0];
bool preserve_sparsity = false;
    if (InputSize() == 3) {
const auto* quant_scheme =
this->template Input<unique_ptr<Int8QuantSchemeBlob>>(2).get();
preserve_sparsity = quant_scheme->preserve_sparsity_;
}
dnnlowp::QuantizationFactory* qfactory =
dnnlowp::QuantizationFactory::GetDefaultInstance();
TensorQuantizationParams qparam = qfactory->ChooseQuantizationParams(
min,
max,
8,
preserve_sparsity);
auto* output_qparam =
this->template Output<unique_ptr<Int8QuantParamsBlob>>(0);
output_qparam->reset(
new Int8QuantParamsBlob(qparam.scale, qparam.zero_point));
    LOG_EVERY_N(INFO, 1) << "scale and zero_point are " << qparam.scale
                         << ", " << qparam.zero_point;
return true;
}
}; // class Int8GenQuantParamsMinMaxOp
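// A minimal sketch of min/max parameter selection as assumed here
// (illustrative; the real ChooseQuantizationParams also nudges the range to
// contain zero, guards against degenerate max == min ranges, and clamps the
// zero point to [0, 255]): for 8 bits, scale = (max - min) / 255 and
// zero_point = round(-min / scale).
inline TensorQuantizationParams MinMaxQParamsSketch(float min, float max) {
  TensorQuantizationParams qparam;
  qparam.scale = (max - min) / 255.0f;
  qparam.zero_point = static_cast<int>(-min / qparam.scale + 0.5f);
  return qparam;
}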
} // namespace caffe2
| 1,827
| 34.843137
| 94
|
h
|
null |
pytorch-main/caffe2/quantization/server/int8_quant_scheme_blob_fill.h
|
// Copyright 2004-present Facebook. All Rights Reserved.
#pragma once
#include "caffe2/quantization/server/caffe2_dnnlowp_utils.h"
#include "caffe2/quantization/server/dnnlowp.h"
#include "caffe2/quantization/server/int8_gen_quant_params.h"
namespace caffe2 {
using namespace std;
template <class Context, class Engine = DefaultEngine>
class Int8QuantSchemeBlobFillOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
Int8QuantSchemeBlobFillOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws) {}
bool RunOnDevice() override {
std::string quantization_kind =
this->template GetSingleArgument<std::string>(
"quantization_kind", "MIN_MAX_QUANTIZATION");
bool preserve_sparsity =
this->template GetSingleArgument<bool>("preserve_sparsity", false);
auto* output_qscheme =
this->template Output<unique_ptr<Int8QuantSchemeBlob>>(0);
output_qscheme->reset(
new Int8QuantSchemeBlob(quantization_kind, preserve_sparsity));
return true;
}
}; // class Int8QuantSchemeBlobFillOp
} // namespace caffe2
| 1,135
| 32.411765
| 75
|
h
|
null |
pytorch-main/caffe2/quantization/server/lstm_unit_dnnlowp_op.h
|
#pragma once
#include "caffe2/operators/lstm_unit_op.h"
#include "caffe2/quantization/server/caffe2_dnnlowp_utils.h"
#include "caffe2/quantization/server/dnnlowp.h"
#include "caffe2/quantization/server/op_wrapper.h"
#include "caffe2/quantization/server/sigmoid.h"
namespace caffe2 {
template <typename T>
class LSTMUnitDNNLowPOp final : public LSTMUnitOp<CPUContext> {
static_assert(std::is_integral<T>::value, "Integral required.");
public:
LSTMUnitDNNLowPOp(const OperatorDef& operator_def, Workspace* ws);
~LSTMUnitDNNLowPOp() override;
bool RunOnDevice() override;
private:
const TensorCPU& InputTensorCPU_(int idx);
TensorCPU* OutputTensorCPU_(int idx);
bool GetQuantizationParameters_();
OpWrapper<LSTMUnitOp<CPUContext>, T>* Fp32Op_();
bool drop_states_;
dnnlowp::Sigmoid<T> sigmoid_;
dnnlowp::Tanh<T> tanh_;
dnnlowp::TensorQuantizationParams H_in_qparams_, C_in_qparams_, G_in_qparams_,
H_out_qparams_, C_out_qparams_;
std::unique_ptr<OpWrapper<LSTMUnitOp<CPUContext>, T>> fp32_op_;
bool dequantize_output_{false}, measure_quantization_error_{false};
std::unique_ptr<dnnlowp::QuantizationFactory> qfactory_;
dnnlowp::QuantizationErrorStats cell_quantization_error_stats_,
hidden_quantization_error_stats_;
bool arguments_parsed_{false};
}; // class LSTMUnitDNNLowPOp
} // namespace caffe2
| 1,357
| 29.177778
| 80
|
h
|
null |
pytorch-main/caffe2/quantization/server/mmio.h
|
#pragma once
#include <cstdio>
#include <set>
#include <string>
#include <type_traits>
#include <c10/util/irange.h>
namespace caffe2 {
template <typename T>
void StoreMatrixInMatrixMarketFormat(
int m,
int n,
const T* a,
const std::string& matrix_name) {
using namespace std;
static set<string> dumped_matrix_names;
string name(matrix_name);
string::size_type pos = name.rfind('/');
if (pos != string::npos) {
name = name.substr(pos + 1);
}
if (dumped_matrix_names.find(name) == dumped_matrix_names.end()) {
dumped_matrix_names.insert(name);
FILE* fp = fopen((matrix_name + ".mtx").c_str(), "w");
if (!fp) {
return;
}
if (is_integral<T>::value) {
fprintf(fp, "%%%%MatrixMarket matrix array integer general\n");
} else {
fprintf(fp, "%%%%MatrixMarket matrix array real general\n");
}
fprintf(fp, "%d %d\n", m, n);
// matrix market array format uses column-major order
for (const auto j : c10::irange(n)) {
for (const auto i : c10::irange(m)) {
if (is_integral<T>::value) {
// NOLINTNEXTLINE(clang-analyzer-core.NullDereference)
fprintf(fp, "%d\n", static_cast<int>(a[j * m + i]));
} else {
fprintf(fp, "%f\n", static_cast<float>(a[j * m + i]));
}
}
}
fclose(fp);
}
}
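// Example of the emitted file (sketch): for m = 2, n = 2 and column-major
// input a = {1, 3, 2, 4} (i.e. the matrix [[1, 2], [3, 4]]), "name.mtx"
// contains:
//
//   %%MatrixMarket matrix array integer general
//   2 2
//   1
//   3
//   2
//   4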
} // namespace caffe2
| 1,335
| 23.290909
| 69
|
h
|
null |
pytorch-main/caffe2/quantization/server/op_wrapper.h
|
#pragma once
#include "caffe2/core/operator.h"
#include "caffe2/core/tensor_int8.h"
#include "caffe2/quantization/server/caffe2_dnnlowp_utils.h"
#include "caffe2/quantization/server/dnnlowp.h"
namespace caffe2 {
/**
 * Wraps a floating-point operator whose quantized inputs have type T.
 * This class is used to measure quantization error against an fp32 reference.
*/
template <typename OpType, typename T>
class OpWrapper {
public:
OpWrapper(OperatorBase* op, dnnlowp::QuantizationFactory* qfactory)
: op_(op), qfactory_(qfactory) {
for (auto name : op->debug_def().input()) {
local_input_blobs_.push_back(local_ws_.CreateBlob(name));
TORCH_CHECK_NOTNULL(local_input_blobs_.back());
}
OperatorDef def = op->debug_def();
local_op_.reset(new OpType(def, &local_ws_));
for (auto name : def.output()) {
local_output_blobs_.push_back(local_ws_.GetBlob(name));
TORCH_CHECK_NOTNULL(local_output_blobs_.back());
}
}
void DequantizeInput() {
const OperatorDef& def = op_->debug_def();
CPUContext context(def.device_option());
for (int i = 0; i < op_->InputSize(); ++i) {
if (op_->InputIsType<int8::Int8TensorCPU>(i)) {
const TensorCPU& qtensor = op_->Input<int8::Int8TensorCPU>(i).t;
TensorCPU* float_tensor =
BlobGetMutableTensor(local_input_blobs_[i], CPU);
        // FIXME: this doesn't work for bias, so bias should not be quantized
        // before model loading when running a shadow fp32 operator, e.g., for
        // measuring quantization error.
float_tensor->ResizeLike(qtensor);
fbgemm::Dequantize<T>(
qtensor.data<T>(),
float_tensor->template mutable_data<float>(),
qtensor.numel(),
dnnlowp::GetInputTensorQuantizationParamsOf(op_, i, qfactory_));
} else {
local_input_blobs_[i]->ShareExternal(
const_cast<void*>(op_->Inputs()[i]->GetRaw()),
op_->Inputs()[i]->meta());
}
}
}
OpType* Get() {
return local_op_.get();
}
dnnlowp::TensorQuantizationParams GetOutputQuantizationParams(
dnnlowp::QuantizationFactory* qfactory,
int index = 0) {
using namespace dnnlowp;
float min, max;
auto& out_tensor = local_output_blobs_[index]->template Get<TensorCPU>();
fbgemm::FindMinMax(
out_tensor.template data<float>(), &min, &max, out_tensor.numel());
if (op_->OperatorBase::GetSingleArgument<std::string>("followed_by", "") ==
"Relu") {
min = std::max(0.0f, min);
max = std::max(0.0f, max);
}
return qfactory->ChooseQuantizationParams(min, max);
}
private:
  OperatorBase* op_; /* containing quantized op */
Workspace local_ws_;
std::vector<Blob*> local_input_blobs_;
std::vector<Blob*> local_output_blobs_;
std::unique_ptr<OpType> local_op_; /* contained fp32 reference op */
dnnlowp::QuantizationFactory* qfactory_;
};
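// A hypothetical shadow-run sketch (not part of this header) showing how a
// DNNLOWP op can use the wrapper to measure quantization error: dequantize
// the inputs, run the fp32 reference, then derive output qparams from it.
template <typename OpType, typename T>
dnnlowp::TensorQuantizationParams ShadowRunSketch(
    OperatorBase* op,
    dnnlowp::QuantizationFactory* qfactory) {
  OpWrapper<OpType, T> wrapper(op, qfactory);
  wrapper.DequantizeInput(); // materialize fp32 copies of quantized inputs
  wrapper.Get()->Run(); // run the contained fp32 reference operator
  return wrapper.GetOutputQuantizationParams(qfactory);
}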
} // namespace caffe2
| 2,959
| 32.258427
| 79
|
h
|
null |
pytorch-main/caffe2/quantization/server/quantization_error_minimization.h
|
#pragma once
#include "caffe2/quantization/server/dnnlowp.h"
namespace dnnlowp {
class QuantizationErrorMinimization {
public:
virtual TensorQuantizationParams ChooseQuantizationParams(
const Histogram& hist,
bool preserve_sparsity = false,
int precision = 8) = 0;
  virtual ~QuantizationErrorMinimization() = default;
};
class NormMinimization : public QuantizationErrorMinimization {
public:
enum Kind {
L1,
L2,
};
NormMinimization(Kind kind) : kind_(kind) {}
/**
* Faster approximate search
*/
TensorQuantizationParams NonlinearQuantizationParamsSearch(
const Histogram& hist,
bool preserve_sparsity = false,
int precision = 8);
TensorQuantizationParams ChooseQuantizationParams(
const Histogram& hist,
bool preserve_sparsity = false,
int precision = 8) override;
protected:
Kind kind_;
};
class L1ErrorMinimization : public NormMinimization {
public:
L1ErrorMinimization() : NormMinimization(L1) {}
};
class P99 : public QuantizationErrorMinimization {
public:
float threshold_;
P99(float p99_threshold = 0.99) : threshold_(p99_threshold) {}
TensorQuantizationParams ChooseQuantizationParams(
const Histogram& hist,
bool preserve_sparsity = true,
int precision = 8) override;
}; // class P99
} // namespace dnnlowp
| 1,356
| 22.396552
| 64
|
h
|
null |
pytorch-main/caffe2/quantization/server/quantize_dnnlowp_op.h
|
#pragma once
#include "caffe2/core/operator.h"
#include "caffe2/quantization/server/caffe2_dnnlowp_utils.h"
namespace caffe2 {
template <typename T>
class QuantizeDNNLowPOp final : public Operator<CPUContext> {
public:
USE_OPERATOR_FUNCTIONS(CPUContext);
QuantizeDNNLowPOp(const OperatorDef& operator_def, Workspace* ws);
bool RunOnDevice() override;
private:
std::unique_ptr<dnnlowp::QuantizationFactory> qfactory_;
bool arguments_parsed_{false};
}; // class QuantizeDNNLowPOp
} // namespace caffe2
| 518
| 22.590909
| 68
|
h
|
null |
pytorch-main/caffe2/quantization/server/relu_dnnlowp_op.h
|
#pragma once
#include "caffe2/operators/relu_op.h"
#include "caffe2/core/tensor_int8.h"
#include "caffe2/quantization/server/caffe2_dnnlowp_utils.h"
namespace caffe2 {
template <typename T>
class ReluDNNLowPOp final : public Operator<CPUContext> {
public:
USE_OPERATOR_FUNCTIONS(CPUContext);
ReluDNNLowPOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<CPUContext>(operator_def, ws),
qfactory_(dnnlowp::GetQuantizationFactoryOf(this)) {}
bool RunOnDevice() override;
private:
std::unique_ptr<dnnlowp::QuantizationFactory> qfactory_;
};
namespace internal {
template <typename T>
void ReluAVX2(const int N, const int zero_point, const T* X, T* Y);
} // namespace internal
} // namespace caffe2
| 739
| 22.125
| 67
|
h
|
null |
pytorch-main/caffe2/quantization/server/resize_nearest_3d_dnnlowp_op.h
|
#pragma once
#include "caffe2/operators/resize_3d_op.h"
#include "caffe2/quantization/server/dnnlowp_op.h"
namespace caffe2 {
using ResizeNearest3DFP32Op = ResizeNearest3DOp<float, CPUContext>;
template <typename T>
class ResizeNearest3DDNNLowPOp final
: public DNNLowPOp<T, ResizeNearest3DFP32Op> {
public:
USE_OPERATOR_FUNCTIONS(CPUContext);
USE_DNNLOWP_OPERATOR_BASE_FUNCTIONS(T, ResizeNearest3DFP32Op);
ResizeNearest3DDNNLowPOp(const OperatorDef& operator_def, Workspace* ws)
: BaseType(operator_def, ws),
temporal_scale_(
this->template GetSingleArgument<float>("temporal_scale", 1)),
width_scale_(this->template GetSingleArgument<float>("width_scale", 1)),
height_scale_(
this->template GetSingleArgument<float>("height_scale", 1)) {
CAFFE_ENFORCE_GT(temporal_scale_, 0);
CAFFE_ENFORCE_GT(width_scale_, 0);
CAFFE_ENFORCE_GT(height_scale_, 0);
const auto& order = StringToStorageOrder(
this->template GetSingleArgument<std::string>("order", "NHWC"));
CAFFE_ENFORCE_EQ(order, StorageOrder::NHWC);
}
bool RunOnDevice() override;
private:
float temporal_scale_;
float width_scale_;
float height_scale_;
};
} // namespace caffe2
| 1,243
| 28.619048
| 80
|
h
|
null |
pytorch-main/caffe2/quantization/server/resize_nearest_dnnlowp_op.h
|
#pragma once
#include "caffe2/operators/resize_op.h"
#include "caffe2/quantization/server/dnnlowp_op.h"
namespace caffe2 {
using ResizeNearestFP32Op = ResizeNearestOp<float, CPUContext>;
template <typename T>
class ResizeNearestDNNLowPOp final : public DNNLowPOp<T, ResizeNearestFP32Op> {
public:
USE_OPERATOR_FUNCTIONS(CPUContext);
USE_DNNLOWP_OPERATOR_BASE_FUNCTIONS(T, ResizeNearestFP32Op);
ResizeNearestDNNLowPOp(const OperatorDef& operator_def, Workspace* ws)
: BaseType(operator_def, ws),
width_scale_(this->template GetSingleArgument<float>("width_scale", 1)),
height_scale_(
this->template GetSingleArgument<float>("height_scale", 1)) {
CAFFE_ENFORCE_GT(width_scale_, 0);
CAFFE_ENFORCE_GT(height_scale_, 0);
const auto& order = StringToStorageOrder(
this->template GetSingleArgument<std::string>("order", "NHWC"));
CAFFE_ENFORCE_EQ(order, StorageOrder::NHWC);
}
bool RunOnDevice() override;
private:
float width_scale_;
float height_scale_;
};
} // namespace caffe2
| 1,057
| 27.594595
| 80
|
h
|
null |
pytorch-main/caffe2/quantization/server/sigmoid.h
|
#pragma once
#include "caffe2/quantization/server/tanh.h"
namespace dnnlowp {
/**
* sigmoid(x) = (tanh(x/2) + 1)/2
 * Quantized sigmoid is computed as tanh under the hood; we just use different
* input/output quantization parameters.
*/
template <typename T>
class Sigmoid {
public:
Sigmoid(double max_abs_err_ = Tanh<T>::DEFAULT_MAX_ABS_ERR);
T Compute(T x) const;
TensorQuantizationParams GetInputQuantizationParams() const {
return in_qparams_;
}
TensorQuantizationParams GetOutputQuantizationParams() const {
return out_qparams_;
}
private:
const int num_in_bits_ = Tanh<T>::DEFAULT_NUM_IN_BITS;
const int num_out_bits_ = Tanh<T>::DEFAULT_NUM_OUT_BITS;
Tanh<T> tanh_;
TensorQuantizationParams in_qparams_, out_qparams_;
}; // class Sigmoid
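// A float-domain sketch of the identity above (illustrative only; the
// quantized path never materializes these intermediate floats, it reuses the
// tanh lookup with rescaled input/output quantization parameters):
inline float SigmoidViaTanhSketch(float x) {
  return (std::tanh(x / 2.0f) + 1.0f) / 2.0f;
}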
} // namespace dnnlowp
| 807
| 22.764706
| 78
|
h
|
null |
pytorch-main/caffe2/quantization/server/spatial_batch_norm_dnnlowp_op.h
|
#pragma once
#include "caffe2/operators/spatial_batch_norm_op.h"
#include "caffe2/quantization/server/dnnlowp_op.h"
namespace caffe2 {
/**
 * Note: this implementation assumes the SCALE, BIAS, EST_MEAN, and EST_VAR
 * inputs are still in fp32, as is the epsilon argument.
*/
template <typename T, bool ReluFused = false>
class SpatialBNDNNLowPOp final : public DNNLowPOp<T, SpatialBNOp<CPUContext>> {
public:
USE_OPERATOR_FUNCTIONS(CPUContext);
USE_DNNLOWP_OPERATOR_BASE_FUNCTIONS(T, SpatialBNOp<CPUContext>);
SpatialBNDNNLowPOp(const OperatorDef& operator_def, Workspace* ws);
~SpatialBNDNNLowPOp() override = default;
bool RunOnDevice() override;
private:
void ComputeFusedParam_(
const int C,
const float* scale,
const float* bias,
const float* mean,
const float* var,
float* alpha,
float* beta);
double epsilon_;
const StorageOrder order_;
Tensor alpha_;
Tensor beta_;
INPUT_TAGS(INPUT, SCALE, BIAS, EST_MEAN, EST_VAR);
OUTPUT_TAGS(OUTPUT);
};
namespace internal {
template <typename T>
void SpatialBNNHWCAVX2(
const int N,
const int C,
const int HxW,
const int in_zero_point,
const int out_zero_point,
const T* X,
const float* alpha,
const float* beta,
T* Y,
bool relu_fused);
} // namespace internal
} // namespace caffe2
| 1,340
| 21.35
| 79
|
h
|
null |
pytorch-main/caffe2/quantization/server/tanh.h
|
#pragma once
#include "caffe2/quantization/server/dnnlowp.h"
#include <cmath>
#include <vector>
namespace dnnlowp {
/**
* We use the 3-region approach described in "Efficient VLSI Implementation of
* Neural Networks with Hyperbolic Tangent Activation Function", IEEE
* Transactions on Very Large Scale Integration Systems, Zamanlooy and
* Mirhassani.
* The pass region (x < x_pq) is approximated as x.
* The saturation region (x >= x_sq) is approximated as 1.
* The processing region (x_pq <= x < x_sq) is divided into sub-ranges and the
* average value of tanh(x) is used per sub-range.
*/
template <typename T>
class Tanh {
public:
Tanh(double max_abs_err = DEFAULT_MAX_ABS_ERR);
T Compute(T x) const;
TensorQuantizationParams GetInputQuantizationParams() const {
return in_qparams_;
}
TensorQuantizationParams GetOutputQuantizationParams() const {
return out_qparams_;
}
int GetPassRegionEnd() const {
return x_pq_index_;
}
float GetPassRegionEndDequantized() const {
return fbgemm::Dequantize<T>(
static_cast<uint8_t>(x_pq_index_ + in_qparams_.zero_point),
in_qparams_);
}
float GetSaturationRegionBegin() const {
return fbgemm::Dequantize<T>(
static_cast<T>((1 << num_in_bits_) - 1), in_qparams_);
}
static constexpr double DEFAULT_MAX_ABS_ERR = 0.02;
static constexpr int DEFAULT_NUM_IN_BITS = 8;
static constexpr int DEFAULT_NUM_OUT_BITS = 8;
private:
const double max_abs_err_;
const int num_in_bits_ = DEFAULT_NUM_IN_BITS;
const int num_out_bits_ = DEFAULT_NUM_OUT_BITS;
int x_pq_index_;
std::vector<T> processing_region_lut_;
TensorQuantizationParams in_qparams_, out_qparams_;
}; // class Tanh
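// A scalar float-domain sketch of the 3-region scheme described above
// (illustrative; the real class operates on quantized values and serves the
// processing region from a precomputed lookup table):
inline float ThreeRegionTanhSketch(
    float x,
    float x_pq,
    float x_sq,
    const std::vector<float>& region_avg) {
  const float sign = x < 0 ? -1.0f : 1.0f;
  const float ax = std::fabs(x);
  if (ax < x_pq) {
    return x; // pass region: tanh(x) ~= x
  }
  if (ax >= x_sq) {
    return sign; // saturation region: tanh(x) ~= +/-1
  }
  // processing region: average tanh value of the enclosing sub-range
  const int nbins = static_cast<int>(region_avg.size());
  int bin = static_cast<int>((ax - x_pq) / (x_sq - x_pq) * nbins);
  if (bin >= nbins) {
    bin = nbins - 1;
  }
  return sign * region_avg[bin];
}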
} // namespace dnnlowp
| 1,751
| 26.375
| 78
|
h
|
null |
pytorch-main/caffe2/quantization/server/utility_dnnlowp_ops.h
|
#pragma once
#include "caffe2/operators/utility_ops.h"
#include "caffe2/quantization/server/caffe2_dnnlowp_utils.h"
#include "caffe2/quantization/server/dnnlowp.h"
#include "caffe2/quantization/server/dnnlowp_op.h"
namespace caffe2 {
template <typename T, bool ReluFused = false>
class SumDNNLowPOp final : public DNNLowPOp<T, SumOp<CPUContext>> {
public:
SumDNNLowPOp(const OperatorDef& operator_def, Workspace* ws);
bool RunOnDevice() override;
USE_OPERATOR_FUNCTIONS(CPUContext);
USE_DNNLOWP_OPERATOR_BASE_FUNCTIONS(T, SumOp<CPUContext>);
private:
bool GetQuantizationParameters_();
dnnlowp::TensorQuantizationParams intermediate_qparams_;
dnnlowp::RequantizationParams out_requantization_params_;
}; // class SumDNNLowPOp
template <typename T>
class GatherDNNLowPOp final : public GatherOp<CPUContext> {
static_assert(std::is_integral<T>::value, "Integral required.");
public:
GatherDNNLowPOp(const OperatorDef& operator_def, Workspace* ws);
~GatherDNNLowPOp() override;
bool RunOnDevice() override;
template <typename Index>
bool DoRunWithType() {
    // If we end up using this on GPU, doing O(N) memcpy is probably not best :)
// TODO: implement prefetching if it starts mattering (TF does it)
auto& data = (this->template Input<int8::Int8TensorCPU>(DATA)).t;
auto& indices = Input(INDICES);
auto* output = &Outputs()[0]->template GetMutable<int8::Int8TensorCPU>()->t;
CAFFE_ENFORCE_GE(data.ndim(), 1, "DATA should be at least 1-D");
auto shape = indices.sizes().vec();
shape.insert(shape.end(), data.sizes().begin() + 1, data.sizes().end());
output->Resize(shape);
int block_size = data.size_from_dim(1);
auto block_bytesize = data.size_from_dim(1) * data.dtype().itemsize();
int N = indices.numel();
auto src_base = static_cast<const char*>(data.raw_data());
const Index* idxs = indices.template data<Index>();
auto out = static_cast<char*>(output->raw_mutable_data(data.dtype()));
for (const auto i : c10::irange(N)) {
auto idx = idxs[i];
CAFFE_ENFORCE(
0 <= idx && idx < data.size(0),
"INDICES element is out of DATA bounds, id=",
idx,
" data_dim=",
data.size(0));
auto src = src_base + idx * block_bytesize;
context_.CopyItemsSameDevice(
data.dtype(), block_size, src, out + block_bytesize * i);
}
return true;
}
USE_OPERATOR_FUNCTIONS(CPUContext);
private:
OpWrapper<GatherOp<CPUContext>, T>* Fp32Op_() {
if (!fp32_op_) {
fp32_op_.reset(
new OpWrapper<GatherOp<CPUContext>, T>(this, qfactory_.get()));
}
return fp32_op_.get();
}
std::unique_ptr<OpWrapper<GatherOp<CPUContext>, T>> fp32_op_;
bool dequantize_output_{false}, measure_quantization_error_{false};
std::unique_ptr<dnnlowp::QuantizationFactory> qfactory_;
dnnlowp::QuantizationErrorStats quantization_error_stats_;
bool arguments_parsed_{false};
}; // class GatherDNNLowPOp
namespace internal {
template <typename T, bool ReluFused>
void ElementWiseSumAVX2(
const T* input0,
const T* input1,
T* output,
int len,
float a_scale,
int32_t a_zero_point,
float b_scale,
int32_t b_zero_point,
float c_scale,
int32_t c_zero_points);
}
} // namespace caffe2
| 3,310
| 28.828829
| 80
|
h
|
null |
pytorch-main/caffe2/queue/blobs_queue.h
|
#pragma once
#include <atomic>
#include <condition_variable>
#include <memory>
#include <mutex>
#include <queue>
#include "caffe2/core/blob_stats.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/stats.h"
#include "caffe2/core/tensor.h"
#include "caffe2/core/workspace.h"
namespace caffe2 {
// A thread-safe, bounded, blocking queue.
// Modelled as a circular buffer.
// The contained blobs are owned by the workspace.
// On read, we swap the stored blobs' underlying data into the blobs passed in.
class TORCH_API BlobsQueue : public std::enable_shared_from_this<BlobsQueue> {
public:
BlobsQueue(
Workspace* ws,
const std::string& queueName,
size_t capacity,
size_t numBlobs,
bool enforceUniqueName,
const std::vector<std::string>& fieldNames = {});
~BlobsQueue() {
close();
}
bool blockingRead(
const std::vector<Blob*>& inputs,
float timeout_secs = 0.0f);
bool tryWrite(const std::vector<Blob*>& inputs);
bool blockingWrite(const std::vector<Blob*>& inputs);
void close();
size_t getNumBlobs() const {
return numBlobs_;
}
private:
bool canWrite();
void doWrite(const std::vector<Blob*>& inputs);
std::atomic<bool> closing_{false};
size_t numBlobs_;
std::mutex mutex_; // protects all variables in the class.
std::condition_variable cv_;
int64_t reader_{0};
int64_t writer_{0};
std::vector<std::vector<Blob*>> queue_;
const std::string name_;
struct QueueStats {
CAFFE_STAT_CTOR(QueueStats);
CAFFE_EXPORTED_STAT(queue_balance);
CAFFE_EXPORTED_STAT(queue_dequeued_records);
CAFFE_DETAILED_EXPORTED_STAT(queue_dequeued_bytes);
CAFFE_AVG_EXPORTED_STAT(read_time_ns);
CAFFE_AVG_EXPORTED_STAT(write_time_ns);
} stats_;
};
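// Usage sketch (illustrative only; the workspace, blob names, and record
// layout below are assumptions for this example, not part of the API):
//
//   Workspace ws;
//   auto queue = std::make_shared<BlobsQueue>(
//       &ws, "q", /*capacity=*/4, /*numBlobs=*/2,
//       /*enforceUniqueName=*/true);
//   std::vector<Blob*> record = {ws.CreateBlob("k"), ws.CreateBlob("v")};
//   queue->blockingWrite(record); // blocks while the queue is full
//   queue->blockingRead(record);  // swaps queued data into `record`
//   queue->close();               // wakes any blocked readers/writers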
} // namespace caffe2
| 1,779
| 24.070423
| 78
|
h
|
null |
pytorch-main/caffe2/queue/blobs_queue_db.h
|
#pragma once
#include <chrono>
#include <string>
#include "caffe2/core/db.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/stats.h"
#include "caffe2/queue/blobs_queue.h"
namespace caffe2 {
namespace db {
namespace {
const std::string& GetStringFromBlob(Blob* blob) {
if (blob->template IsType<string>()) {
return blob->template Get<string>();
} else if (blob->template IsType<Tensor>()) {
return *blob->template Get<Tensor>().template data<string>();
} else {
CAFFE_THROW("Unsupported Blob type");
}
}
} // namespace
class BlobsQueueDBCursor : public Cursor {
public:
explicit BlobsQueueDBCursor(
std::shared_ptr<BlobsQueue> queue,
int key_blob_index,
int value_blob_index,
float timeout_secs)
: queue_(queue),
key_blob_index_(key_blob_index),
value_blob_index_(value_blob_index),
timeout_secs_(timeout_secs),
inited_(false),
valid_(false) {
LOG(INFO) << "BlobsQueueDBCursor constructed";
CAFFE_ENFORCE(queue_ != nullptr, "queue is null");
CAFFE_ENFORCE(value_blob_index_ >= 0, "value_blob_index < 0");
}
~BlobsQueueDBCursor() override {}
void Seek(const string& /* unused */) override {
CAFFE_THROW("Seek is not supported.");
}
bool SupportsSeek() override {
return false;
}
void SeekToFirst() override {
// not applicable
}
void Next() override {
unique_ptr<Blob> blob = make_unique<Blob>();
vector<Blob*> blob_vector{blob.get()};
auto success = queue_->blockingRead(blob_vector, timeout_secs_);
if (!success) {
LOG(ERROR) << "Timed out reading from BlobsQueue or it is closed";
valid_ = false;
return;
}
if (key_blob_index_ >= 0) {
key_ = GetStringFromBlob(blob_vector[key_blob_index_]);
}
value_ = GetStringFromBlob(blob_vector[value_blob_index_]);
valid_ = true;
}
string key() override {
if (!inited_) {
Next();
inited_ = true;
}
return key_;
}
string value() override {
if (!inited_) {
Next();
inited_ = true;
}
return value_;
}
bool Valid() override {
return valid_;
}
private:
std::shared_ptr<BlobsQueue> queue_;
int key_blob_index_;
int value_blob_index_;
float timeout_secs_;
bool inited_;
string key_;
string value_;
bool valid_;
};
class BlobsQueueDB : public DB {
public:
BlobsQueueDB(
const string& source,
Mode mode,
std::shared_ptr<BlobsQueue> queue,
int key_blob_index = -1,
int value_blob_index = 0,
float timeout_secs = 0.0)
: DB(source, mode),
queue_(queue),
key_blob_index_(key_blob_index),
value_blob_index_(value_blob_index),
timeout_secs_(timeout_secs) {
LOG(INFO) << "BlobsQueueDB constructed";
}
~BlobsQueueDB() override {
Close();
}
void Close() override {}
unique_ptr<Cursor> NewCursor() override {
return make_unique<BlobsQueueDBCursor>(
queue_, key_blob_index_, value_blob_index_, timeout_secs_);
}
unique_ptr<Transaction> NewTransaction() override {
CAFFE_THROW("Not implemented.");
}
private:
std::shared_ptr<BlobsQueue> queue_;
int key_blob_index_;
int value_blob_index_;
float timeout_secs_;
};
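// Usage sketch (illustrative only; assumes `queue` carries single-blob
// string records; note the cursor is lazily initialized, so the first
// key()/value() call performs the initial read):
//
//   BlobsQueueDB db("my_queue", READ, queue);
//   auto cursor = db.NewCursor();
//   std::string v = cursor->value(); // triggers the first blockingRead
//   while (cursor->Valid()) {
//     process(cursor->key(), cursor->value()); // `process` is hypothetical
//     cursor->Next();
//   }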
} // namespace db
} // namespace caffe2
| 3,304
| 21.636986
| 72
|
h
|
null |
pytorch-main/caffe2/queue/queue_ops.h
|
#pragma once
#include <algorithm>
#include <memory>
#include <numeric>
#include "blobs_queue.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
#include <c10/util/irange.h>
namespace caffe2 {
template <typename Context>
class CreateBlobsQueueOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
CreateBlobsQueueOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws),
ws_(ws),
name(operator_def.output().Get(0)) {}
bool RunOnDevice() override {
const auto capacity = GetSingleArgument("capacity", 1);
const auto numBlobs = GetSingleArgument("num_blobs", 1);
const auto enforceUniqueName =
GetSingleArgument("enforce_unique_name", false);
const auto fieldNames =
OperatorBase::template GetRepeatedArgument<std::string>("field_names");
CAFFE_ENFORCE_EQ(this->OutputSize(), 1);
auto queuePtr = Operator<Context>::Outputs()[0]
->template GetMutable<std::shared_ptr<BlobsQueue>>();
CAFFE_ENFORCE(queuePtr);
*queuePtr = std::make_shared<BlobsQueue>(
ws_, name, capacity, numBlobs, enforceUniqueName, fieldNames);
return true;
}
private:
Workspace* ws_{nullptr};
const std::string name;
};
template <typename Context>
class EnqueueBlobsOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
using Operator<Context>::Operator;
bool RunOnDevice() override {
CAFFE_ENFORCE(InputSize() > 1);
auto queue = Operator<Context>::Inputs()[0]
->template Get<std::shared_ptr<BlobsQueue>>();
CAFFE_ENFORCE(
queue && static_cast<size_t>(OutputSize()) == queue->getNumBlobs());
return queue->blockingWrite(this->Outputs());
}
private:
};
template <typename Context>
class DequeueBlobsOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
DequeueBlobsOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws) {
timeout_secs_ = OperatorBase::GetSingleArgument<float>("timeout_secs", 0);
}
bool RunOnDevice() override {
CAFFE_ENFORCE(InputSize() == 1);
auto queue =
OperatorBase::Inputs()[0]->template Get<std::shared_ptr<BlobsQueue>>();
CAFFE_ENFORCE(
queue && static_cast<size_t>(OutputSize()) == queue->getNumBlobs());
return queue->blockingRead(this->Outputs(), timeout_secs_);
}
private:
float timeout_secs_;
};
template <typename Context>
class CloseBlobsQueueOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
using Operator<Context>::Operator;
bool RunOnDevice() override {
CAFFE_ENFORCE_EQ(InputSize(), 1);
auto queue =
OperatorBase::Inputs()[0]->template Get<std::shared_ptr<BlobsQueue>>();
CAFFE_ENFORCE(queue);
queue->close();
return true;
}
private:
};
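// The Safe* ops below mirror Enqueue/Dequeue but never fail the net: they
// append one extra scalar bool output that is set to true when the blocking
// call returned false (e.g., the queue was closed), so downstream ops can
// branch on end-of-data instead of aborting.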
template <typename Context>
class SafeEnqueueBlobsOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
using Operator<Context>::Operator;
bool RunOnDevice() override {
auto queue = Operator<Context>::Inputs()[0]
->template Get<std::shared_ptr<BlobsQueue>>();
CAFFE_ENFORCE(queue);
auto size = queue->getNumBlobs();
CAFFE_ENFORCE(
static_cast<size_t>(OutputSize()) == size + 1,
"Expected " + c10::to_string(size + 1) + ", " +
" got: " + c10::to_string(size));
bool status = queue->blockingWrite(this->Outputs());
Output(size)->Resize();
math::Set<bool, Context>(
1, !status, Output(size)->template mutable_data<bool>(), &context_);
return true;
}
void Cancel() override {
auto queue = Operator<Context>::Inputs()[0]
->template Get<std::shared_ptr<BlobsQueue>>();
queue->close();
}
};
template <typename Context>
class SafeDequeueBlobsOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
using Operator<Context>::Operator;
SafeDequeueBlobsOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws),
numRecords_(OperatorBase::GetSingleArgument<int>("num_records", 1)) {
CAFFE_ENFORCE_GT(numRecords_, 0);
}
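  // dequeueMany concatenates up to numRecords_ queue records along dim 0;
  // e.g., two records of shape (2, 8) yield an output of shape (4, 8).
  // Extend() over-allocates by kTensorGrowthPct to amortize reallocations
  // across records.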
bool dequeueMany(std::shared_ptr<BlobsQueue>& queue) {
auto size = queue->getNumBlobs();
if (blobs_.size() != size) {
blobs_.resize(size);
blobPtrs_.resize(size);
for (auto col : c10::irange(size)) {
blobPtrs_.at(col) = &blobs_.at(col);
}
}
const int kTensorGrowthPct = 40;
for (const auto i : c10::irange(numRecords_)) {
if (!queue->blockingRead(blobPtrs_)) {
// if we read at least one record, status is still true
return i > 0;
}
for (auto col : c10::irange(size)) {
auto* out = this->Output(col);
const auto& in = blobPtrs_.at(col)->template Get<Tensor>();
if (i == 0) {
out->CopyFrom(in);
} else {
auto oldSize = out->numel();
CAFFE_ENFORCE(
in.dim() > 0,
"Empty tensor to dequeue at column ",
col,
" within ",
size,
" total columns");
out->Extend(in.sizes()[0], kTensorGrowthPct);
auto* dst =
(char*)out->raw_mutable_data() + oldSize * in.dtype().itemsize();
context_.template CopyItems<Context, Context>(
in.meta(), in.numel(), in.raw_data(), dst);
}
}
}
return true;
}
bool dequeueOne(std::shared_ptr<BlobsQueue>& queue) {
return queue->blockingRead(this->Outputs());
}
bool RunOnDevice() override {
CAFFE_ENFORCE(InputSize() == 1);
auto queue = Operator<Context>::Inputs()[0]
->template Get<std::shared_ptr<BlobsQueue>>();
CAFFE_ENFORCE(queue);
auto size = queue->getNumBlobs();
CAFFE_ENFORCE_EQ(OutputSize(), size + 1);
bool status = numRecords_ > 1 ? dequeueMany(queue) : dequeueOne(queue);
Output(size)->Resize();
math::Set<bool, Context>(
1, !status, Output(size)->template mutable_data<bool>(), &context_);
return true;
}
void Cancel() override {
auto queue = Operator<Context>::Inputs()[0]
->template Get<std::shared_ptr<BlobsQueue>>();
queue->close();
}
private:
int numRecords_;
std::vector<Blob> blobs_;
std::vector<Blob*> blobPtrs_;
};
template <typename Context>
class WeightedSampleDequeueBlobsOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
WeightedSampleDequeueBlobsOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws),
table_idx_blob_(
OperatorBase::GetSingleArgument<int>("table_idx_blob", -1)) {
CAFFE_ENFORCE_LT(table_idx_blob_, OutputSize() - 1);
vector<float> weights = OperatorBase::GetRepeatedArgument<float>("weights");
if (weights.empty()) {
weights.resize(InputSize(), 1.0f);
}
CAFFE_ENFORCE_EQ(InputSize(), weights.size());
float sum = accumulate(weights.begin(), weights.end(), 0.0f);
CAFFE_ENFORCE(sum > 0.0f, "Sum of weights must be positive");
cumProbs_.resize(weights.size());
for (auto i : c10::irange(weights.size())) {
cumProbs_[i] = weights[i] / sum;
CAFFE_ENFORCE_GE(
cumProbs_[i], 0.0f, "Each probability must be non-negative");
}
std::partial_sum(cumProbs_.begin(), cumProbs_.end(), cumProbs_.begin());
    // Set the last cumulative value slightly above 1 (1.0001f) so that a
    // draw at or near 1.0 cannot fall past the final bucket.
cumProbs_.back() = 1.0001f;
LOG(INFO) << "Dequeue weights: " << weights;
LOG(INFO) << "cumProbs: " << cumProbs_;
}
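  // Sampling example (illustrative): with weights {1, 3}, cumProbs_ becomes
  // {0.25, 1.0001}; a uniform draw r = 0.6 satisfies lower_bound at index 1,
  // so the record is dequeued from the second input queue.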
bool RunOnDevice() override {
float r;
math::RandUniform<float, Context>(1, 0.0f, 1.0f, &r, &context_);
auto lb = lower_bound(cumProbs_.begin(), cumProbs_.end(), r);
CAFFE_ENFORCE(lb != cumProbs_.end(), "Cannot find ", r, " in cumProbs_.");
const int32_t idx = lb - cumProbs_.begin();
auto queue = Operator<Context>::Inputs()[idx]
->template Get<std::shared_ptr<BlobsQueue>>();
CAFFE_ENFORCE(queue);
auto size = queue->getNumBlobs();
CAFFE_ENFORCE_EQ(OutputSize(), size + 1);
bool status = queue->blockingRead(this->Outputs());
if (table_idx_blob_ >= 0) {
auto* table_idx_blob_out =
Output(table_idx_blob_, {1}, at::dtype<int32_t>());
int32_t* data = table_idx_blob_out->template mutable_data<int32_t>();
data[0] = idx;
}
Output(size)->Resize();
math::Set<bool, Context>(
1, !status, Output(size)->template mutable_data<bool>(), &context_);
return true;
}
private:
vector<float> cumProbs_;
int table_idx_blob_;
};
} // namespace caffe2
| 8,735
| 30.2
| 80
|
h
|
null |
pytorch-main/caffe2/queue/rebatching_queue_ops.h
|
#pragma once
#include "rebatching_queue.h"
#include "c10/util/irange.h"
namespace caffe2 {
using RebatchingQueuePtr = std::unique_ptr<RebatchingQueue>;
class CreateRebatchingQueueOp : public Operator<CPUContext> {
public:
CreateRebatchingQueueOp(const OperatorDef& operator_def, Workspace* ws)
: Operator(operator_def, ws) {}
bool RunOnDevice() override {
*OperatorBase::Output<RebatchingQueuePtr>(0) =
RebatchingQueuePtr(new RebatchingQueue(
OperatorBase::GetSingleArgument<int>("capacity", 1),
OperatorBase::GetSingleArgument<int>("num_blobs", 1)));
return true;
}
};
class EnqueueRebatchingQueueOp : public Operator<CPUContext> {
public:
EnqueueRebatchingQueueOp(const OperatorDef& operator_def, Workspace* ws)
: Operator(operator_def, ws),
enqueueBatch_(
OperatorBase::GetSingleArgument<bool>("enqueue_batch", false)) {}
bool RunOnDevice() override {
auto& queue = Inputs()[0]->template Get<RebatchingQueuePtr>();
CHECK(queue);
CAFFE_ENFORCE_EQ(InputSize(), queue->numBlobs() + 1);
std::vector<const Tensor*> inputTensors;
inputTensors.reserve(InputSize() - 1);
for (const auto i : c10::irange(1, InputSize())) {
inputTensors.push_back(&Input(i));
}
return enqueueBatch_ ? queue->enqueueMany(context_, inputTensors)
: queue->enqueueOne(context_, inputTensors);
}
private:
const bool enqueueBatch_;
};
class DequeueRebatchingQueueOp : public Operator<CPUContext> {
public:
DequeueRebatchingQueueOp(const OperatorDef& operator_def, Workspace* ws)
: Operator(operator_def, ws),
numElements_(OperatorBase::GetSingleArgument<int>("num_elements", 1)) {}
bool RunOnDevice() override {
auto& queue = Inputs()[0]->template Get<RebatchingQueuePtr>();
CHECK(queue);
std::vector<Tensor*> outputTensors;
outputTensors.reserve(OutputSize());
for (const auto i : c10::irange(OutputSize())) {
outputTensors.push_back(Output(i));
}
return queue->dequeue(context_, numElements_, outputTensors);
}
private:
int numElements_;
};
class CloseRebatchingQueueOp : public Operator<CPUContext> {
public:
CloseRebatchingQueueOp(const OperatorDef& operator_def, Workspace* ws)
: Operator(operator_def, ws) {}
bool RunOnDevice() override {
CAFFE_ENFORCE_EQ(InputSize(), 1);
auto& queue = Inputs()[0]->template Get<RebatchingQueuePtr>();
CAFFE_ENFORCE(queue);
queue->close();
return true;
}
};
} // namespace caffe2
| 2,549
| 28.651163
| 80
|
h
|
null |
pytorch-main/caffe2/serialize/file_adapter.h
|
#pragma once
#include <fstream>
#include <memory>
#include <c10/macros/Macros.h>
#include "caffe2/serialize/istream_adapter.h"
#include "caffe2/serialize/read_adapter_interface.h"
namespace caffe2 {
namespace serialize {
class TORCH_API FileAdapter final : public ReadAdapterInterface {
public:
C10_DISABLE_COPY_AND_ASSIGN(FileAdapter);
explicit FileAdapter(const std::string& file_name);
size_t size() const override;
size_t read(uint64_t pos, void* buf, size_t n, const char* what = "")
const override;
~FileAdapter() override;
private:
// An RAII Wrapper for a FILE pointer. Closes on destruction.
struct RAIIFile {
FILE* fp_;
explicit RAIIFile(const std::string& file_name);
~RAIIFile();
};
RAIIFile file_;
// The size of the opened file in bytes
uint64_t size_;
};
} // namespace serialize
} // namespace caffe2
| 866
| 22.432432
| 71
|
h
|
null |
pytorch-main/caffe2/serialize/inline_container.h
|
#pragma once
#include <cerrno>
#include <cstdio>
#include <cstring>
#include <fstream>
#include <istream>
#include <mutex>
#include <ostream>
#include <unordered_set>
#include <c10/core/Allocator.h>
#include <c10/core/Backend.h>
#include "caffe2/serialize/istream_adapter.h"
#include "caffe2/serialize/read_adapter_interface.h"
#include "caffe2/serialize/versions.h"
extern "C" {
typedef struct mz_zip_archive mz_zip_archive;
}
// PyTorch containers are a special zip archive with the following layout
// archive_name.zip contains:
// archive_name/
// version # a file with a single decimal number written in ascii,
// # used to establish the version of the archive format
// model.json # overall model description, this is a json output of
// # ModelDef from torch.proto
// # the following names are by convention only, model.json will
// # refer to these files by full names
// tensors/
// 0 # flat storage for tensor data, meta-data about shapes, etc. is
// # in model.json
// 1
// ...
// # code entries will only exist for modules that have methods attached
// code/
//     archive_name.py              # serialized torch script code
//                                  # (python syntax, using PythonPrint)
//     archive_name_my_submodule.py # submodules have separate files
//
// The PyTorchStreamWriter also ensures additional useful properties for these
// files
// 1. All files are stored uncompressed.
// 2. All files in the archive are aligned to 64 byte boundaries such that
// it is possible to mmap the entire file and get an aligned pointer to
// tensor data.
// 3. We universally write in ZIP64 format for consistency.
// The PyTorchStreamReader also provides additional properties:
// 1. It can read zip files that are created with common
// zip tools. This means that even though our writer doesn't compress files,
// the reader can still read files that were compressed.
// 2. It provides a getRecordOffset function which returns the offset into the
// raw file where file data lives. If the file was written with
// PyTorchStreamWriter it is guaranteed to be 64 byte aligned.
// PyTorchReader/Writer handle checking the version number on the archive format
// and ensure that all files are written to an archive_name directory so they
// unzip cleanly.
// When developing this format we want to pay particular attention to the
// following use cases:
//
// -- Reading --
// 1) Reading with full random access
// a) Reading with file api's such as fread()
// b) mmaping the file and jumping around the mapped region
// 2) Reading with 1-pass sequential access
// -> A reader will need to build up a data structure of parsed structures
// as it reads
//
// -- Writing --
// 1) Writing with full random access
// 2) Writing with 1-pass sequential access
// -> We must take care not to require updating values that have already
// been written. We place the variable-length index at the end and do
//        not put any indices into the header to fulfill this constraint.
// The model.json, which contains all the metadata information,
// should be written as the last file. One reason is that the size of tensor
// data is usually stable. As long as the shape and type of the tensor do not
// change, the size of the data won't change. On the other side, the size of the
// serialized model is likely to change, so we store it as the last record, and
// we don't need to move previous records when updating the model data.
// The zip format is sufficiently flexible to handle the above use cases. It
// puts its central directory at the end of the archive and we write
// model.json as the last file when writing after we have accumulated all
// other information.
namespace caffe2 {
namespace serialize {
static constexpr const char* kSerializationIdRecordName = ".data/serialization_id";
class TORCH_API PyTorchStreamReader final {
public:
explicit PyTorchStreamReader(const std::string& file_name);
explicit PyTorchStreamReader(std::istream* in);
explicit PyTorchStreamReader(std::shared_ptr<ReadAdapterInterface> in);
// return dataptr, size
std::tuple<at::DataPtr, size_t> getRecord(const std::string& name);
// inplace memory writing
size_t getRecord(const std::string& name, void* dst, size_t n);
size_t getRecord(
const std::string& name,
void* dst,
size_t n,
size_t chunk_size,
const std::function<void(void*, const void*, size_t)>& memcpy_func);
size_t getRecordOffset(const std::string& name);
bool hasRecord(const std::string& name);
std::vector<std::string> getAllRecords();
~PyTorchStreamReader();
uint64_t version() const {
return version_;
}
const std::string& serializationId() {
return serialization_id_;
}
void setShouldLoadDebugSymbol(bool should_load_debug_symbol) {
load_debug_symbol_ = should_load_debug_symbol;
}
private:
void init();
size_t read(uint64_t pos, char* buf, size_t n);
void valid(const char* what, const char* info = "");
size_t getRecordID(const std::string& name);
friend size_t
istream_read_func(void* pOpaque, uint64_t file_ofs, void* pBuf, size_t n);
std::unique_ptr<mz_zip_archive> ar_;
std::string archive_name_;
std::string archive_name_plus_slash_;
std::shared_ptr<ReadAdapterInterface> in_;
int64_t version_;
std::mutex reader_lock_;
bool load_debug_symbol_ = true;
std::string serialization_id_;
};
class TORCH_API PyTorchStreamWriter final {
public:
explicit PyTorchStreamWriter(const std::string& archive_name);
explicit PyTorchStreamWriter(
const std::function<size_t(const void*, size_t)> writer_func);
void setMinVersion(const uint64_t version);
void writeRecord(
const std::string& name,
const void* data,
size_t size,
bool compress = false);
void writeEndOfFile();
const std::unordered_set<std::string>& getAllWrittenRecords();
bool finalized() const {
return finalized_;
}
const std::string& archiveName() {
return archive_name_;
}
const std::string& serializationId() {
return serialization_id_;
}
~PyTorchStreamWriter();
private:
void setup(const std::string& file_name);
void valid(const char* what, const char* info = "");
void writeSerializationId();
size_t current_pos_ = 0;
std::unordered_set<std::string> files_written_;
std::unique_ptr<mz_zip_archive> ar_;
std::string archive_name_;
std::string archive_name_plus_slash_;
std::string padding_;
std::ofstream file_stream_;
std::function<size_t(const void*, size_t)> writer_func_;
uint64_t combined_uncomp_crc32_ = 0;
std::string serialization_id_;
// This number will be updated when the model has operators
// that have valid upgraders.
uint64_t version_ = kMinProducedFileFormatVersion;
bool finalized_ = false;
bool err_seen_ = false;
friend size_t ostream_write_func(
void* pOpaque,
uint64_t file_ofs,
const void* pBuf,
size_t n);
};
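// Usage sketch (illustrative only; the archive path, record name, and
// payload below are assumptions for this example):
//
//   PyTorchStreamWriter writer("archive.pt");
//   std::string payload = "bytes";
//   writer.writeRecord("data/record.bin", payload.data(), payload.size());
//   writer.writeEndOfFile();
//
//   PyTorchStreamReader reader("archive.pt");
//   at::DataPtr ptr;
//   size_t size;
//   std::tie(ptr, size) = reader.getRecord("data/record.bin");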
namespace detail {
// Writer-specific constants
constexpr uint64_t kFieldAlignment = 64;
// Returns a record to be appended to the local user extra data entry in order
// to make data beginning aligned at kFieldAlignment bytes boundary.
size_t getPadding(
size_t cursor,
size_t filename_size,
size_t size,
std::string& padding_buf);
} // namespace detail
} // namespace serialize
} // namespace caffe2
| 7,501
| 32.945701
| 83
|
h
|
null |
pytorch-main/caffe2/serialize/istream_adapter.h
|
#pragma once
#include <istream>
#include "c10/macros/Macros.h"
#include "caffe2/serialize/read_adapter_interface.h"
namespace caffe2 {
namespace serialize {
// this is a reader implemented by std::istream
class TORCH_API IStreamAdapter final : public ReadAdapterInterface {
public:
C10_DISABLE_COPY_AND_ASSIGN(IStreamAdapter);
explicit IStreamAdapter(std::istream* istream);
size_t size() const override;
size_t read(uint64_t pos, void* buf, size_t n, const char* what = "")
const override;
~IStreamAdapter() override;
private:
std::istream* istream_;
void validate(const char* what) const;
};
} // namespace serialize
} // namespace caffe2
| 669
| 22.928571
| 71
|
h
|
null |
pytorch-main/caffe2/serialize/read_adapter_interface.h
|
#pragma once
#include <cstddef>
#include <cstdint>
#include "c10/macros/Macros.h"
namespace caffe2 {
namespace serialize {
// this is the interface for the (file/stream/memory) reader in
// PyTorchStreamReader. with this interface, we can extend the support
// besides standard istream
class TORCH_API ReadAdapterInterface {
public:
virtual size_t size() const = 0;
virtual size_t read(uint64_t pos, void* buf, size_t n, const char* what = "")
const = 0;
virtual ~ReadAdapterInterface();
};
} // namespace serialize
} // namespace caffe2
| 556
| 22.208333
| 79
|
h
|
null |
pytorch-main/caffe2/serialize/versions.h
|
#pragma once
#include <cstdint>
namespace caffe2 {
namespace serialize {
constexpr uint64_t kMinSupportedFileFormatVersion = 0x1L;
constexpr uint64_t kMaxSupportedFileFormatVersion = 0xAL;
// Versions (i.e. why was the version number bumped?)
// Note [Dynamic Versions and torch.jit.save vs. torch.save]
//
// Our versioning scheme has a "produced file format version" which
// describes how an archive is to be read. The version written in an archive
// is at least this current produced file format version, but may be greater
// if it includes certain symbols. We refer to these conditional versions
// as "dynamic," since they are identified at runtime.
//
// Dynamic versioning is useful when an operator's semantics are updated.
// When using torch.jit.save we want those semantics to be preserved. If
// we bumped the produced file format version on every change, however,
// then older versions of PyTorch couldn't read even simple archives, like
// a single tensor, from newer versions of PyTorch. Instead, we
// assign dynamic versions to these changes that override the
// produced file format version as needed. That is, when the semantics
// of torch.div changed it was assigned dynamic version 4, and when
// torch.jit.saving modules that use torch.div those archives also have
// (at least) version 4. This prevents earlier versions of PyTorch
// from accidentally performing the wrong kind of division. Modules
// that don't use torch.div or other operators with dynamic versions
// can write the produced file format version, and these programs will
// run as expected on earlier versions of PyTorch.
//
// While torch.jit.save attempts to preserve operator semantics,
// torch.save does not. torch.save is analogous to pickling Python, so
// a function that uses torch.div will have different behavior if torch.saved
// and torch.loaded across PyTorch versions. From a technical perspective,
// torch.save ignores dynamic versioning.
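// Informally, the version stamped on an archive is
//   max(kProducedFileFormatVersion, dynamic versions of operators used),
// so a module that calls torch.div is stamped with at least version 4 even
// when the base produced version is lower.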
// 1. Initial version
// 2. Removed op_version_set version numbers
// 3. Added type tags to pickle serialization of container types
// 4. (Dynamic) Stopped integer division using torch.div
// (a versioned symbol preserves the historic behavior of versions 1--3)
// 5. (Dynamic) Stops torch.full inferring a floating point dtype
// when given bool or integer fill values.
// 6. Write version string to `./data/version` instead of `version`.
// [12/15/2021]
// kProducedFileFormatVersion is set to 7 from 3 due to a different
// interpretation of what the file format version is.
// Whenever a new upgrader is introduced, this number should be bumped.
// The reasons that version is bumped in the past:
// 1. aten::div is changed at version 4
// 2. aten::full is changed at version 5
// 3. torch.package uses version 6
// 4. Introduce new upgrader design and set the version number to 7
// mark this change
// --------------------------------------------------
// We describe new operator version bump reasons here:
// 1) [01/24/2022]
// We bump the version number to 8 to update aten::linspace
// and aten::linspace.out to error out when steps is not
// provided. (see: https://github.com/pytorch/pytorch/issues/55951)
// 2) [01/30/2022]
//     Bump the version number to 9 to update aten::logspace and
//     aten::logspace.out to error out when steps is not
// provided. (see: https://github.com/pytorch/pytorch/issues/55951)
// 3) [02/11/2022]
//     Bump the version number to 10 to update aten::gelu and
//     aten::gelu.out to support the new approximate kwarg.
// (see: https://github.com/pytorch/pytorch/pull/61439)
constexpr uint64_t kProducedFileFormatVersion = 0xAL;
// Absolute minimum version we will write packages. This
// means that every package from now on will always be
// greater than this number.
constexpr uint64_t kMinProducedFileFormatVersion = 0x3L;
// The version we write when the archive contains bytecode.
// It must be higher or eq to kProducedFileFormatVersion.
// Because torchscript changes are likely to introduce bytecode changes.
// If kProducedFileFormatVersion is increased, kProducedBytecodeVersion
// should be increased too. The relationship is:
// kMaxSupportedFileFormatVersion >= (most likely ==) kProducedBytecodeVersion
// >= kProducedFileFormatVersion
// If a format change is forward compatible (still readable by older
// executables), we will not increment the version number, to minimize the
// risk of breaking existing clients. TODO: A better way would be to allow
// the caller that creates a model to specify a maximum version that its
// clients can accept.
// Versions:
// 0x1L: Initial version
// 0x2L: (Comment missing)
// 0x3L: (Comment missing)
// 0x4L: (update) Added schema to function tuple. Forward-compatible change.
// 0x5L: (update) Update bytecode to share constant tensor files with
// torchscript, and only serialize extra tensors that are not in the
// torchscript constant table. Also update the tensor storage schema to the
// unified format: the root key of tensor storage is updated from {index} to
// {the_pointer_value_the_tensor.storage}, for example:
// `140245072983168.storage`. Forward-compatible change.
// 0x6L: Implicit operator versioning using the number of specified arguments.
// Refer to the summary of https://github.com/pytorch/pytorch/pull/56845 for
// details.
// 0x7L: Enable support for operators with default arguments plus out
// arguments. See https://github.com/pytorch/pytorch/pull/63651 for
// details.
// 0x8L: Emit promoted operators as instructions. See
// https://github.com/pytorch/pytorch/pull/71662 for details.
// 0x9L: Change the serialization format from pickle to flatbuffer. This
// version is to serve migration; v8 pickle and v9 flatbuffer are the same.
// Refer to the summary of https://github.com/pytorch/pytorch/pull/75201 for
// more details.
constexpr uint64_t kProducedBytecodeVersion = 0x8L;
// static_assert(
// kProducedBytecodeVersion >= kProducedFileFormatVersion,
// "kProducedBytecodeVersion must be higher or equal to
// kProducedFileFormatVersion.");
// Introduce kMinSupportedBytecodeVersion and kMaxSupportedBytecodeVersion
// for limited backward/forward compatibility support of bytecode. If
// kMinSupportedBytecodeVersion <= model_version <= kMaxSupportedBytecodeVersion
// (in loader), we should support this model_version. For example, we provide a
// wrapper to handle an updated operator.
constexpr uint64_t kMinSupportedBytecodeVersion = 0x4L;
constexpr uint64_t kMaxSupportedBytecodeVersion = 0x9L;
} // namespace serialize
} // namespace caffe2
| 6,648
| 48.619403
| 80
|
h
|
null |
pytorch-main/caffe2/sgd/adadelta_op.h
|
#include "caffe2/core/operator.h"
#include "c10/util/irange.h"
namespace caffe2 {
namespace {
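// Adadelta update as implemented below, per element i:
//   h_i <- decay * h_i + (1 - decay) * g_i^2   (avg of squared gradients)
//   u_i  = sqrt(d_i + eps) / sqrt(h_i + eps) * g_i   (scaled update)
//   w_i <- w_i + lr * u_i
//   d_i <- decay * d_i + (1 - decay) * u_i^2   (avg of squared updates)
// Note this variant applies an explicit learning rate on top of the classic
// Adadelta rule.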
template <typename Context>
void AdadeltaUpdate(
int N,
const float* w,
const float* g,
const float* h,
const float* d,
const float epsilon,
const float decay,
const float* lr,
float* nw,
float* nh,
float* nd,
Context* /*context*/) {
for (const auto i : c10::irange(N)) {
float gi = g[i];
float di = d[i];
float hi = nh[i] = decay * h[i] + (1.0f - decay) * gi * gi;
float ng = (std::sqrt(di + epsilon) / std::sqrt(hi + epsilon)) * gi;
nw[i] = w[i] + lr[0] * ng;
nd[i] = decay * di + (1.0f - decay) * ng * ng;
}
}
} // namespace
template <class Context>
class AdadeltaOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
AdadeltaOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws),
OP_SINGLE_ARG(float, "epsilon", epsilon_, 1e-5f),
OP_SINGLE_ARG(float, "decay", decay_, 0.95f) {}
bool RunOnDevice() override {
CAFFE_ENFORCE(Input(GRAD).numel() == Input(MOMENT_GRAD).numel());
CAFFE_ENFORCE(Input(GRAD).numel() == Input(MOMENT_DELTA).numel());
CAFFE_ENFORCE(Input(GRAD).numel() == Input(PARAM).numel());
CAFFE_ENFORCE_GE(epsilon_, 0.0f);
CAFFE_ENFORCE_GT(decay_, 0.0f);
CAFFE_ENFORCE_LT(decay_, 1.0f);
Output(OUTPUT_PARAM)->ResizeLike(Input(PARAM));
Output(OUTPUT_MOMENT_GRAD)->ResizeLike(Input(MOMENT_GRAD));
Output(OUTPUT_MOMENT_DELTA)->ResizeLike(Input(MOMENT_DELTA));
AdadeltaUpdate<Context>(
Input(GRAD).numel(),
Input(PARAM).template data<float>(),
Input(GRAD).template data<float>(),
Input(MOMENT_GRAD).template data<float>(),
Input(MOMENT_DELTA).template data<float>(),
epsilon_,
decay_,
Input(LR).template data<float>(),
Output(OUTPUT_PARAM)->template mutable_data<float>(),
Output(OUTPUT_MOMENT_GRAD)->template mutable_data<float>(),
Output(OUTPUT_MOMENT_DELTA)->template mutable_data<float>(),
&context_);
return true;
}
protected:
const float epsilon_;
const float decay_;
INPUT_TAGS(PARAM, MOMENT_GRAD, MOMENT_DELTA, GRAD, LR);
OUTPUT_TAGS(OUTPUT_PARAM, OUTPUT_MOMENT_GRAD, OUTPUT_MOMENT_DELTA);
};
template <class Context>
class SparseAdadeltaOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
SparseAdadeltaOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws),
OP_SINGLE_ARG(float, "epsilon", epsilon_, 1e-5f),
OP_SINGLE_ARG(float, "decay", decay_, 0.95f) {}
bool RunOnDevice() override {
// Enforce shapes
CAFFE_ENFORCE_EQ(Input(PARAM).numel(), Input(MOMENT_GRAD).numel());
CAFFE_ENFORCE_EQ(Input(PARAM).numel(), Input(MOMENT_DELTA).numel());
CAFFE_ENFORCE_EQ(Input(LR).numel(), 1);
CAFFE_ENFORCE_EQ(
Input(PARAM).size_from_dim(1),
Input(GRAD).size_from_dim(Input(INDICES).dim()));
// Enforce domain constraints for attributes
CAFFE_ENFORCE_GE(epsilon_, 0.0f);
CAFFE_ENFORCE_GT(decay_, 0.0f);
CAFFE_ENFORCE_LT(decay_, 1.0f);
return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(
this, Input(INDICES));
}
template <typename SIndex>
bool DoRunWithType() {
const auto* lr = Input(LR).template data<float>();
const auto* indices = Input(INDICES).template data<SIndex>();
const auto* gradIn = Input(GRAD).template data<float>();
const auto* paramIn = Input(PARAM).template data<float>();
const auto* momentIn = Input(MOMENT_GRAD).template data<float>();
const auto* momentDeltaIn = Input(MOMENT_DELTA).template data<float>();
auto* paramOut = Output(OUTPUT_PARAM)->template mutable_data<float>();
auto* momentOut =
Output(OUTPUT_MOMENT_GRAD)->template mutable_data<float>();
auto* momentDeltaOut =
Output(OUTPUT_MOMENT_DELTA)->template mutable_data<float>();
auto n = Input(INDICES).numel();
if (n == 0) {
return true;
}
auto block_size = Input(GRAD).numel() / n;
for (const auto i : c10::irange(n)) {
auto idx = indices[i];
if (block_size == 1) {
float gi = gradIn[i];
float di = momentDeltaIn[idx];
float hi = momentOut[idx] =
decay_ * momentIn[idx] + (1.0f - decay_) * gi * gi;
float ng = (std::sqrt(di + epsilon_) / std::sqrt(hi + epsilon_)) * gi;
paramOut[idx] = paramIn[idx] + lr[0] * ng;
momentDeltaOut[idx] = decay_ * di + (1.0f - decay_) * ng * ng;
} else {
auto offsetI = i * block_size;
auto offsetIdx = idx * block_size;
#ifndef NDEBUG
CAFFE_ENFORCE_GE(
Input(PARAM).numel(),
block_size + offsetIdx,
this->debug_def().input(PARAM),
", out of bound, idx:",
idx,
" for input i:",
i,
" and block size:",
block_size);
CAFFE_ENFORCE_GE(
Input(GRAD).numel(),
block_size + offsetI,
this->debug_def().input(GRAD),
", out of bound idx, idx:",
idx,
" for input i:",
i);
#endif
AdadeltaUpdate(
block_size,
paramIn + offsetIdx,
gradIn + offsetI,
momentIn + offsetIdx,
momentDeltaIn + offsetIdx,
epsilon_,
decay_,
lr,
paramOut + offsetIdx,
momentOut + offsetIdx,
momentDeltaOut + offsetIdx,
&context_);
}
}
return true;
}
protected:
const float epsilon_;
const float decay_;
INPUT_TAGS(PARAM, MOMENT_GRAD, MOMENT_DELTA, INDICES, GRAD, LR);
OUTPUT_TAGS(OUTPUT_PARAM, OUTPUT_MOMENT_GRAD, OUTPUT_MOMENT_DELTA);
};
} // namespace caffe2
| 5,897
| 31.054348
| 78
|
h
|
null |
pytorch-main/caffe2/sgd/adagrad_fused.h
|
#pragma once
#include "caffe2/sgd/adagrad_op.h"
#include "caffe2/sgd/math_lp.h"
namespace caffe2 {
namespace {
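// These fused operators apply the adagrad update directly to the segment
// gradient coming out of a SparseLengths{Sum,WeightedSum} backward pass: for
// each segment r and each dataIndex in it, row indices[dataIndex] of
// PARAM/MOMENT_1 is updated with the segment gradient GRAD[r] (divided by
// the segment length when is_mean is true), so the per-row sparse gradient
// never needs to be materialized.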
template <
typename Tdata, // embedding and momentum types
typename T, // everything else
typename TLengths,
typename adagradT,
bool is_mean = false>
class SparseAdagradFusedWithSparseLengthsSumGradientOp final
: public Operator<CPUContext> {
public:
SparseAdagradFusedWithSparseLengthsSumGradientOp(
const OperatorDef& operator_def,
Workspace* ws)
: Operator<CPUContext>(operator_def, ws),
epsilon_(this->template GetSingleArgument<float>("epsilon", 1e-5)),
weight_decay_(
this->template GetSingleArgument<float>("weight_decay", 0.f)) {
VLOG(1) << "gradient optimization operator in use: "
<< "SparseAdagradFusedWithSparseLengthsSumGradientOp"
<< " weight_decay_=" << weight_decay_;
const T decay = this->template GetSingleArgument<T>("decay", 1.0);
    CAFFE_ENFORCE_EQ(
        decay,
        1.0,
        "Decay is not supported for "
        "SparseAdagradFusedWithSparseLengthsSumGradientOp");
}
bool RunOnDevice() override {
return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(
this, Input(INDICES));
}
template <typename SIndex>
bool DoRunWithType() {
const auto* lr = Input(LR).template data<T>();
Output(OUTPUT_PARAM)->ResizeLike(Input(PARAM));
Output(OUTPUT_MOMENT_1)->ResizeLike(Input(MOMENT_1));
auto& segmentGradsInput = Input(GRAD);
auto& lengthsInput = Input(LENGTHS);
CAFFE_ENFORCE_EQ(lengthsInput.dim(), 1, "LENGTHS must be a vector");
auto numSegments = lengthsInput.size(0);
CAFFE_ENFORCE_GT(segmentGradsInput.dim(), 0);
CAFFE_ENFORCE_EQ(numSegments, segmentGradsInput.size(0));
const auto* lengths = lengthsInput.template data<TLengths>();
auto n = Input(INDICES).numel();
const auto* indices = Input(INDICES).template data<SIndex>();
const auto* gradIn = segmentGradsInput.template data<T>();
const auto* paramIn = Input(PARAM).template data<Tdata>();
const auto* momentIn = Input(MOMENT_1).template data<Tdata>();
auto* paramOut = Output(OUTPUT_PARAM)->template mutable_data<Tdata>();
auto* momentOut = Output(OUTPUT_MOMENT_1)->template mutable_data<Tdata>();
if (numSegments == 0) {
return true;
}
auto block_size = segmentGradsInput.size_from_dim(1);
// Enforce:
// input(embedding/momentum) == outputs(embedding/momentum)
CAFFE_ENFORCE_EQ(
Input(PARAM).numel(),
Input(MOMENT_1).numel(),
"Input Param size: ",
Input(PARAM).numel(),
" Input Moment size: ",
Input(MOMENT_1).numel());
int dataIndex = 0;
if (is_mean) {
grad_buffer_.ResizeLike(Input(GRAD));
}
auto* grad_buffer_data =
is_mean ? grad_buffer_.template mutable_data<T>() : NULL;
if (is_mean) {
for (const auto rangeIndex : c10::irange(numSegments)) {
for (const auto tmpIndex : c10::irange(block_size)) {
auto offsetI = rangeIndex * block_size;
grad_buffer_data[offsetI + tmpIndex] = lengths[rangeIndex] > 0
? gradIn[offsetI + tmpIndex] / lengths[rangeIndex]
: gradIn[offsetI + tmpIndex];
}
}
}
for (const auto rangeIndex : c10::irange(numSegments)) {
for (auto start = dataIndex; dataIndex < start + lengths[rangeIndex];
++dataIndex) {
std::size_t idx = indices[dataIndex];
auto offsetI = rangeIndex * block_size;
auto offsetIdx = idx * block_size;
// Enforce:
// access within range
// gradient access within range
CAFFE_ENFORCE_GE(
Input(PARAM).numel(),
block_size + offsetIdx,
this->debug_def().input(PARAM),
", out of bound, idx:",
idx,
" for input dataIndex:",
dataIndex,
" and block size:",
block_size,
" max size:",
Input(PARAM).numel());
if (block_size == 1) {
float gi = std::fma(
weight_decay_,
paramIn[idx],
is_mean ? grad_buffer_data[offsetI] : gradIn[offsetI]);
float hi = momentOut[idx] = momentIn[idx] + gi * gi;
paramOut[idx] =
paramIn[idx] + lr[0] * gi / (std::sqrt(hi) + epsilon_);
} else {
// prefetching
const int prefdist_T0 = 16;
int i_pref = (dataIndex < n - prefdist_T0) ? dataIndex + prefdist_T0
: dataIndex;
std::size_t idx_pref = indices[i_pref];
kernel_(
block_size,
paramIn + offsetIdx,
¶mIn[idx_pref * block_size],
is_mean ? grad_buffer_data + offsetI : gradIn + offsetI,
momentIn + offsetIdx,
&momentIn[idx_pref * block_size],
paramOut + offsetIdx,
¶mOut[idx_pref * block_size],
momentOut + offsetIdx,
&momentOut[idx_pref * block_size],
epsilon_,
lr[0],
weight_decay_);
}
}
}
CAFFE_ENFORCE_EQ(dataIndex, n);
return true;
}
protected:
T epsilon_;
T weight_decay_;
adagradT kernel_;
Tensor grad_buffer_{CPU};
INPUT_TAGS(PARAM, MOMENT_1, INDICES, GRAD, LR, LENGTHS);
OUTPUT_TAGS(OUTPUT_PARAM, OUTPUT_MOMENT_1);
};
template <typename Tdata, typename T, typename TLengths, typename adagradT>
class SparseAdagradFusedWithSparseLengthsWeightedSumGradientOp final
: public Operator<CPUContext> {
public:
SparseAdagradFusedWithSparseLengthsWeightedSumGradientOp(
const OperatorDef& operator_def,
Workspace* ws)
: Operator<CPUContext>(operator_def, ws),
epsilon_(this->template GetSingleArgument<float>("epsilon", 1e-5)),
weight_decay_(
this->template GetSingleArgument<float>("weight_decay", 0.f)) {
VLOG(1) << "gradient optimization operator in use: "
<< "SparseAdagradFusedWithSparseLengthsWeightedSumGradientOp";
const T decay = this->template GetSingleArgument<T>("decay", 1.0);
    CAFFE_ENFORCE_EQ(
        decay,
        1.0,
        "Decay is not supported for "
        "SparseAdagradFusedWithSparseLengthsWeightedSumGradientOp");
}
bool RunOnDevice() override {
return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(
this, Input(INDICES));
}
template <typename SIndex>
bool DoRunWithType() {
const auto* lr = Input(LR).template data<T>();
Output(OUTPUT_PARAM)->ResizeLike(Input(PARAM));
Output(OUTPUT_MOMENT_1)->ResizeLike(Input(MOMENT_1));
auto& segmentGradsInput = Input(GRAD);
auto& lengthsInput = Input(LENGTHS);
CAFFE_ENFORCE_EQ(lengthsInput.dim(), 1, "LENGTHS must be a vector");
auto numSegments = lengthsInput.size(0);
CAFFE_ENFORCE_GT(segmentGradsInput.dim(), 0);
CAFFE_ENFORCE_EQ(numSegments, segmentGradsInput.size(0));
const auto* lengths = lengthsInput.template data<TLengths>();
auto n = Input(INDICES).numel();
const auto* indices = Input(INDICES).template data<SIndex>();
const auto* gradIn = segmentGradsInput.template data<T>();
const auto* paramIn = Input(PARAM).template data<Tdata>();
const auto* momentIn = Input(MOMENT_1).template data<Tdata>();
const auto* auxParamIn = Input(AUX_PARAM).template data<T>();
auto* paramOut = Output(OUTPUT_PARAM)->template mutable_data<Tdata>();
auto* momentOut = Output(OUTPUT_MOMENT_1)->template mutable_data<Tdata>();
Output(AUX_GRAD)->Resize(n);
auto* auxGrad = Output(AUX_GRAD)->template mutable_data<T>();
if (numSegments == 0) {
return true;
}
auto block_size = segmentGradsInput.size_from_dim(1);
// Enforce:
// input(embedding/momentum) == outputs(embedding/momentum)
CAFFE_ENFORCE_EQ(
Input(PARAM).numel(),
Input(MOMENT_1).numel(),
"Input Param size: ",
Input(PARAM).numel(),
" Input Moment size: ",
Input(MOMENT_1).numel());
    // Cannot fuse this loop with the loop below because paramIn is updated
    // by the second loop. Specifically, there could be dataIndex1 != dataIndex2
    // s.t. indices[dataIndex1] == indices[dataIndex2], and fusing these two
    // loops would violate dependencies w.r.t.
    // paramIn[indices[dataIndex1]:block_size]. The approximate version
    // (RowWiseSparseSimdAdagradFusedWithSparseLengthsWeightedSumGradientApproxOp)
    // ignores this dependency and fuses these two loops.
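    // First pass below: auxGrad[dataIndex] = <GRAD segment, PARAM row>,
    // i.e. the gradient with respect to the per-example weight of the
    // weighted sum.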
std::vector<T> temp_grad(block_size);
int dataIndex = 0;
for (const auto rangeIndex : c10::irange(numSegments)) {
for (auto start = dataIndex; dataIndex < start + lengths[rangeIndex];
++dataIndex) {
std::size_t idx = indices[dataIndex];
auto offsetI = rangeIndex * block_size;
auto offsetIdx = idx * block_size;
// Enforce:
// access within range
// gradient access within range
CAFFE_ENFORCE_GE(
Input(PARAM).numel(),
block_size + offsetIdx,
this->debug_def().input(PARAM),
", out of bound, idx:",
idx,
" for input dataIndex:",
dataIndex,
" and block size:",
block_size,
" max size:",
Input(PARAM).numel());
internal::dot<T, Tdata, T>(
block_size,
gradIn + offsetI,
paramIn + offsetIdx,
auxGrad + dataIndex,
&context_);
}
}
CAFFE_ENFORCE_EQ(dataIndex, n);
dataIndex = 0;
for (const auto rangeIndex : c10::irange(numSegments)) {
for (auto start = dataIndex; dataIndex < start + lengths[rangeIndex];
++dataIndex) {
std::size_t idx = indices[dataIndex];
auto offsetI = rangeIndex * block_size;
auto offsetIdx = idx * block_size;
auto localOffset = dataIndex - start;
for (const auto i : c10::irange(block_size)) {
temp_grad[i] = auxParamIn[localOffset] * gradIn[offsetI + i];
}
if (block_size == 1) {
float gi = std::fma(weight_decay_, paramIn[idx], temp_grad[0]);
float hi = momentOut[idx] = momentIn[idx] + gi * gi;
paramOut[idx] =
paramIn[idx] + lr[0] * gi / (std::sqrt(hi) + epsilon_);
} else {
// prefetching
const int prefdist_T0 = 16;
int i_pref = (dataIndex < n - prefdist_T0) ? dataIndex + prefdist_T0
: dataIndex;
std::size_t idx_pref = indices[i_pref];
kernel_(
block_size,
paramIn + offsetIdx,
¶mIn[idx_pref * block_size],
temp_grad.data(),
momentIn + offsetIdx,
&momentIn[idx_pref * block_size],
paramOut + offsetIdx,
¶mOut[idx_pref * block_size],
momentOut + offsetIdx,
&momentOut[idx_pref * block_size],
epsilon_,
lr[0],
weight_decay_);
}
}
}
return true;
}
protected:
T epsilon_;
T weight_decay_;
adagradT kernel_;
INPUT_TAGS(PARAM, MOMENT_1, AUX_PARAM, INDICES, GRAD, LR, LENGTHS);
OUTPUT_TAGS(OUTPUT_PARAM, OUTPUT_MOMENT_1, AUX_GRAD);
};
template <
typename Tdata, // embedding and momentum types
typename T, // everything else
typename TLengths,
typename adagradT>
class SparseAdagradFusedWithSparseLengthsWeightedSumGradientApproxOp final
: public Operator<CPUContext> {
public:
SparseAdagradFusedWithSparseLengthsWeightedSumGradientApproxOp(
const OperatorDef& operator_def,
Workspace* ws)
: Operator<CPUContext>(operator_def, ws),
epsilon_(this->template GetSingleArgument<float>("epsilon", 1e-5)),
weight_decay_(
this->template GetSingleArgument<float>("weight_decay", 0.f)) {
VLOG(1) << "gradient optimization operator in use: "
<< "SparseAdagradFusedWithSparseLengthsWeightedSumGradientApproxOp";
const T decay = this->template GetSingleArgument<T>("decay", 1.0);
    CAFFE_ENFORCE_EQ(
        decay,
        1.0,
        "Decay is not supported for "
        "SparseAdagradFusedWithSparseLengthsWeightedSumGradientApproxOp");
}
bool RunOnDevice() override {
return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(
this, Input(INDICES));
}
template <typename SIndex>
bool DoRunWithType() {
const auto* lr = Input(LR).template data<T>();
Output(OUTPUT_PARAM)->ResizeLike(Input(PARAM));
Output(OUTPUT_MOMENT_1)->ResizeLike(Input(MOMENT_1));
auto& segmentGradsInput = Input(GRAD);
auto& lengthsInput = Input(LENGTHS);
CAFFE_ENFORCE_EQ(lengthsInput.dim(), 1, "LENGTHS must be a vector");
auto numSegments = lengthsInput.size(0);
CAFFE_ENFORCE_GT(segmentGradsInput.dim(), 0);
CAFFE_ENFORCE_EQ(numSegments, segmentGradsInput.size(0));
const auto* lengths = lengthsInput.template data<TLengths>();
auto n = Input(INDICES).numel();
const auto* indices = Input(INDICES).template data<SIndex>();
const auto* gradIn = segmentGradsInput.template data<T>();
const auto* paramIn = Input(PARAM).template data<Tdata>();
const auto* momentIn = Input(MOMENT_1).template data<Tdata>();
const auto* auxParamIn = Input(AUX_PARAM).template data<T>();
auto* paramOut = Output(OUTPUT_PARAM)->template mutable_data<Tdata>();
auto* momentOut = Output(OUTPUT_MOMENT_1)->template mutable_data<Tdata>();
Output(AUX_GRAD)->Resize(n);
auto* auxGrad = Output(AUX_GRAD)->template mutable_data<T>();
if (numSegments == 0) {
return true;
}
auto block_size = segmentGradsInput.size_from_dim(1);
// Enforce:
// input(embedding/momentum) == outputs(embedding/momentum)
CAFFE_ENFORCE_EQ(
Input(PARAM).numel(),
Input(MOMENT_1).numel(),
"Input Param size: ",
Input(PARAM).numel(),
" Input Moment size: ",
Input(MOMENT_1).numel());
std::vector<T> temp_grad(block_size);
int dataIndex = 0;
for (const auto rangeIndex : c10::irange(numSegments)) {
for (auto start = dataIndex; dataIndex < start + lengths[rangeIndex];
++dataIndex) {
std::size_t idx = indices[dataIndex];
auto offsetI = rangeIndex * block_size;
auto offsetIdx = idx * block_size;
auto localOffset = dataIndex - start;
// Enforce:
// access within range
// gradient access within range
CAFFE_ENFORCE_GE(
Input(PARAM).numel(),
block_size + offsetIdx,
this->debug_def().input(PARAM),
", out of bound, idx:",
idx,
" for input dataIndex:",
dataIndex,
" and block size:",
block_size,
" max size:",
Input(PARAM).numel());
internal::dot<T, Tdata, T>(
block_size,
gradIn + offsetI,
paramIn + offsetIdx,
auxGrad + dataIndex,
&context_);
for (const auto i : c10::irange(block_size)) {
temp_grad[i] = auxParamIn[localOffset] * gradIn[offsetI + i];
}
if (block_size == 1) {
float gi = std::fma(weight_decay_, paramIn[idx], temp_grad[0]);
float hi = momentOut[idx] = momentIn[idx] + gi * gi;
paramOut[idx] =
paramIn[idx] + lr[0] * gi / (std::sqrt(hi) + epsilon_);
} else {
// prefetching
const int prefdist_T0 = 16;
int i_pref = (dataIndex < n - prefdist_T0) ? dataIndex + prefdist_T0
: dataIndex;
std::size_t idx_pref = indices[i_pref];
kernel_(
block_size,
paramIn + offsetIdx,
¶mIn[idx_pref * block_size],
temp_grad.data(),
momentIn + offsetIdx,
&momentIn[idx_pref * block_size],
paramOut + offsetIdx,
¶mOut[idx_pref * block_size],
momentOut + offsetIdx,
&momentOut[idx_pref * block_size],
epsilon_,
lr[0],
weight_decay_);
}
}
}
CAFFE_ENFORCE_EQ(dataIndex, n);
return true;
}
protected:
T epsilon_;
T weight_decay_;
adagradT kernel_;
INPUT_TAGS(PARAM, MOMENT_1, AUX_PARAM, INDICES, GRAD, LR, LENGTHS);
OUTPUT_TAGS(OUTPUT_PARAM, OUTPUT_MOMENT_1, AUX_GRAD);
};
} // namespace
} // namespace caffe2
| 16,586
| 32.374245
| 82
|
h
|
null |
pytorch-main/caffe2/sgd/adagrad_op.h
|
#pragma once
#include "caffe2/core/operator.h"
#include "caffe2/perfkernels/adagrad.h"
#if defined(USE_FBGEMM) && !defined(__NVCC__)
#include "fbgemm/FbgemmEmbedding.h"
#endif
namespace caffe2 {
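// AdaGrad update as implemented below, per element i (weight decay is
// folded into the gradient):
//   g_i <- g_i + weight_decay * w_i
//   h_i <- decay * h_i + g_i^2
//   w_i <- w_i + lr / (sqrt(h_i) + epsilon) * g_i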
template <typename Context>
void adagrad_update(
int N,
const float* w,
const float* g,
const float* h,
float* nw,
float* nh,
float epsilon,
float decay,
const float* lr,
Context* /*context*/,
float weight_decay = 0.f) {
return adagrad_update(
N, w, g, h, nw, nh, epsilon, decay, lr[0], weight_decay);
}
template <typename Context>
void adagrad_update_output_effective_lr(
int N,
const float* paramIn,
const float* gradIn,
const float* momentIn,
float* paramOut,
float* momentOut,
float* effectiveLROut,
float epsilon,
float decay,
const float* lr,
Context* /*context*/,
float weight_decay = 0.f) {
for (const auto i : c10::irange(N)) {
float grad = std::fma(weight_decay, paramIn[i], gradIn[i]);
float moment = momentOut[i] = decay * momentIn[i] + grad * grad;
float effective_lr = effectiveLROut[i] =
lr[0] / (std::sqrt(moment) + epsilon);
paramOut[i] = paramIn[i] + effective_lr * grad;
}
}
template <typename Context>
void adagrad_update_output_effective_lr_and_update(
int N,
const float* paramIn,
const float* gradIn,
const float* momentIn,
float* paramOut,
float* momentOut,
float* effectiveLROut,
float* updateOut,
float epsilon,
float decay,
const float* lr,
Context* /*context*/,
float weight_decay = 0.f) {
for (const auto i : c10::irange(N)) {
float grad = std::fma(weight_decay, paramIn[i], gradIn[i]);
float moment = momentOut[i] = decay * momentIn[i] + grad * grad;
float effective_lr = effectiveLROut[i] =
lr[0] / (std::sqrt(moment) + epsilon);
float update = updateOut[i] = effective_lr * grad;
paramOut[i] = paramIn[i] + update;
}
}
template <class Context>
class AdagradOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
AdagradOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws),
epsilon_(this->template GetSingleArgument<float>("epsilon", 1e-5f)),
decay_(this->template GetSingleArgument<float>("decay", 1.0f)),
weight_decay_(
this->template GetSingleArgument<float>("weight_decay", 0.f)) {
VLOG(1) << "gradient optimization operator in use: "
<< "AdagradOp"
<< " weight_decay_=" << weight_decay_;
}
bool RunOnDevice() override {
CAFFE_ENFORCE_EQ(
Input(GRAD).numel(),
Input(MOMENT_1).numel(),
"PARAM size: ",
Input(PARAM).numel(),
", GRAD size: ",
Input(GRAD).numel(),
", MOMENT_1 size: ",
Input(MOMENT_1).numel(),
", LR size: ",
Input(LR).numel());
CAFFE_ENFORCE_EQ(Input(GRAD).numel(), Input(PARAM).numel());
Output(OUTPUT_PARAM)->ResizeLike(Input(PARAM));
Output(OUTPUT_MOMENT_1)->ResizeLike(Input(MOMENT_1));
if (OutputSize() == 2) {
adagrad_update<Context>(
Input(GRAD).numel(),
Input(PARAM).template data<float>(),
Input(GRAD).template data<float>(),
Input(MOMENT_1).template data<float>(),
Output(OUTPUT_PARAM)->template mutable_data<float>(),
Output(OUTPUT_MOMENT_1)->template mutable_data<float>(),
epsilon_,
decay_,
Input(LR).template data<float>(),
&context_,
weight_decay_);
} else if (OutputSize() == 3) {
Output(OUTPUT_EFFECTIVE_LR)->ResizeLike(Input(GRAD));
adagrad_update_output_effective_lr<Context>(
Input(GRAD).numel(),
Input(PARAM).template data<float>(),
Input(GRAD).template data<float>(),
Input(MOMENT_1).template data<float>(),
Output(OUTPUT_PARAM)->template mutable_data<float>(),
Output(OUTPUT_MOMENT_1)->template mutable_data<float>(),
Output(OUTPUT_EFFECTIVE_LR)->template mutable_data<float>(),
epsilon_,
decay_,
Input(LR).template data<float>(),
&context_,
weight_decay_);
} else {
Output(OUTPUT_EFFECTIVE_LR)->ResizeLike(Input(GRAD));
Output(OUTPUT_UPDATE)->ResizeLike(Input(GRAD));
adagrad_update_output_effective_lr_and_update<Context>(
Input(GRAD).numel(),
Input(PARAM).template data<float>(),
Input(GRAD).template data<float>(),
Input(MOMENT_1).template data<float>(),
Output(OUTPUT_PARAM)->template mutable_data<float>(),
Output(OUTPUT_MOMENT_1)->template mutable_data<float>(),
Output(OUTPUT_EFFECTIVE_LR)->template mutable_data<float>(),
Output(OUTPUT_UPDATE)->template mutable_data<float>(),
epsilon_,
decay_,
Input(LR).template data<float>(),
&context_,
weight_decay_);
}
return true;
}
protected:
float epsilon_;
float decay_;
float weight_decay_;
INPUT_TAGS(PARAM, MOMENT_1, GRAD, LR);
OUTPUT_TAGS(
OUTPUT_PARAM,
OUTPUT_MOMENT_1,
OUTPUT_EFFECTIVE_LR,
OUTPUT_UPDATE);
};
class SparseAdagradOp final : public Operator<CPUContext> {
public:
SparseAdagradOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<CPUContext>(operator_def, ws),
epsilon_(this->template GetSingleArgument<float>("epsilon", 1e-5f)),
weight_decay_(
this->template GetSingleArgument<float>("weight_decay", 0.f)) {
VLOG(1) << "gradient optimization operator in use: "
<< "SparseAdagradOp"
<< " weight_decay_=" << weight_decay_;
const float decay = this->template GetSingleArgument<float>("decay", 1.0);
    CAFFE_ENFORCE_EQ(
        decay, 1.0, "Decay is not supported for SparseAdagradOp");
}
bool RunOnDevice() override {
// Enforce shapes
// input(embedding/momentum) == outputs(embedding/momentum)
CAFFE_ENFORCE_EQ(
Input(PARAM).numel(),
Input(MOMENT_1).numel(),
"Input Param size: ",
Input(PARAM).numel(),
" Input Moment size: ",
Input(MOMENT_1).numel());
CAFFE_ENFORCE_EQ(Input(LR).numel(), 1);
CAFFE_ENFORCE_EQ(
Input(PARAM).size_from_dim(1),
Input(GRAD).size_from_dim(Input(INDICES).dim()));
return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(
this, Input(INDICES));
}
template <typename SIndex>
bool DoRunWithType() {
const auto* lr = Input(LR).template data<float>();
auto n = Input(INDICES).numel();
const auto* indices = Input(INDICES).template data<SIndex>();
const auto* gradIn = Input(GRAD).template data<float>();
auto* paramOut = Output(OUTPUT_PARAM)->template mutable_data<float>();
auto* momentOut = Output(OUTPUT_MOMENT_1)->template mutable_data<float>();
if (n == 0) {
return true;
}
auto block_size = Input(GRAD).numel() / n;
// input(grad) is compatible with size of indexes
CAFFE_ENFORCE_EQ(
Input(GRAD).numel() % n,
0,
"Incorrect gradient size:",
Input(GRAD).numel(),
" size of indexes:",
n);
#if defined(USE_FBGEMM) && !defined(__NVCC__)
VLOG(1) << "using fbgemm::GenerateSparseAdaGrad in SparseAdagradOp";
if (block_size != last_block_size_) {
last_block_size_ = block_size;
if (std::is_same<SIndex, std::int32_t>::value) {
kernel_i32_ = fbgemm::GenerateSparseAdaGrad<std::int32_t>(
block_size,
/*rowwise=*/false,
/*prefetch=*/16,
weight_decay_ != 0.0f);
} else {
CAFFE_ENFORCE((std::is_same<SIndex, std::int64_t>::value));
kernel_i64_ = fbgemm::GenerateSparseAdaGrad<std::int64_t>(
block_size,
/*rowwise=*/false,
/*prefetch=*/16,
weight_decay_ != 0.0f);
}
}
int num_rows_processed;
if (std::is_same<SIndex, std::int32_t>::value) {
num_rows_processed = kernel_i32_(
n,
Input(PARAM).numel(),
paramOut,
gradIn,
momentOut,
reinterpret_cast<const std::int32_t*>(indices),
epsilon_,
lr[0],
weight_decay_,
/*counter=*/nullptr,
/*counter_halflife=*/0);
} else {
num_rows_processed = kernel_i64_(
n,
Input(PARAM).numel(),
paramOut,
gradIn,
momentOut,
reinterpret_cast<const std::int64_t*>(indices),
epsilon_,
lr[0],
weight_decay_,
/*counter=*/nullptr,
/*counter_halflife=*/0);
}
if (num_rows_processed < n) {
CAFFE_ENFORCE_GE(
Input(PARAM).numel(),
(indices[num_rows_processed] + 1) * block_size,
this->debug_def().input(PARAM),
", out of bound, idx:",
indices[num_rows_processed],
" for input i:",
num_rows_processed,
" and block_size:",
block_size,
" max size:",
Input(PARAM).numel());
return false;
} else {
return true;
}
#endif
VLOG(1)
<< "using internal::adagrad_update_prefetch_inlined in SparseAdagradOp";
const auto* paramIn = Input(PARAM).template data<float>();
const auto* momentIn = Input(MOMENT_1).template data<float>();
std::vector<float> grad(block_size);
for (const auto i : c10::irange(n)) {
auto idx = indices[i];
auto offsetI = i * block_size;
auto offsetIdx = idx * block_size;
// Enforce:
// access within range
// gradient access within range
CAFFE_ENFORCE_GE(
Input(PARAM).numel(),
block_size + offsetIdx,
this->debug_def().input(PARAM),
", out of bound, idx:",
idx,
" for input i:",
i,
" and block size:",
block_size,
" max size:",
Input(PARAM).numel());
if (block_size == 1) {
float gi = std::fma(weight_decay_, paramIn[idx], gradIn[i]);
float hi = momentOut[idx] = momentIn[idx] + gi * gi;
paramOut[idx] = paramIn[idx] + lr[0] * gi / (std::sqrt(hi) + epsilon_);
} else {
        // Prefetch: also pass the row prefdist_T0 iterations ahead so the
        // update kernel can warm the cache for its param/moment rows.
const int prefdist_T0 = 16;
int i_pref = (i < n - prefdist_T0) ? i + prefdist_T0 : i;
std::size_t idx_pref = indices[i_pref];
internal::adagrad_update_prefetch_inlined(
block_size,
paramIn + offsetIdx,
¶mIn[idx_pref * block_size],
gradIn + offsetI,
momentIn + offsetIdx,
&momentIn[idx_pref * block_size],
paramOut + offsetIdx,
¶mOut[idx_pref * block_size],
momentOut + offsetIdx,
&momentOut[idx_pref * block_size],
epsilon_,
lr[0],
weight_decay_);
}
}
return true;
}
protected:
float epsilon_;
const float weight_decay_;
#if defined(USE_FBGEMM) && !defined(__NVCC__)
fbgemm::SparseAdaGradSignature<std::int32_t>::Type kernel_i32_;
fbgemm::SparseAdaGradSignature<std::int64_t>::Type kernel_i64_;
std::int64_t last_block_size_{-1};
#endif
INPUT_TAGS(PARAM, MOMENT_1, INDICES, GRAD, LR);
OUTPUT_TAGS(OUTPUT_PARAM, OUTPUT_MOMENT_1);
};
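// Row-wise variant: MOMENT_1 keeps a single accumulator per embedding row
// (see the PARAM.sizes()[0] == MOMENT_1.numel() check below) instead of one
// per element, shrinking the optimizer state by a factor of block_size.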
template <class Context>
class RowWiseSparseAdagradOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
RowWiseSparseAdagradOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws),
epsilon_(this->template GetSingleArgument<float>("epsilon", 1e-5f)),
weight_decay_(
this->template GetSingleArgument<float>("weight_decay", 0.f)),
counter_halflife_(
this->template GetSingleArgument<int64_t>("counter_halflife", -1)) {
VLOG(1) << "gradient optimization operator in use: "
<< "RowWiseSparseAdagradOp"
<< " weight_decay_=" << weight_decay_
<< " counter_halflife=" << counter_halflife_;
}
bool RunOnDevice() override {
// Enforce shapes
CAFFE_ENFORCE_EQ(Input(PARAM).sizes()[0], Input(MOMENT_1).numel());
CAFFE_ENFORCE_EQ(Input(LR).numel(), 1);
CAFFE_ENFORCE_EQ(
Input(PARAM).size_from_dim(1),
Input(GRAD).size_from_dim(Input(INDICES).dim()));
return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(
this, Input(INDICES));
}
template <typename SIndex>
bool DoRunWithType() {
const auto* lr = Input(LR).template data<float>();
auto* param = Output(OUTPUT_PARAM)->template mutable_data<float>();
auto* moment = Output(OUTPUT_MOMENT_1)->template mutable_data<float>();
const auto* indices = Input(INDICES).template data<SIndex>();
const auto* gradIn = Input(GRAD).template data<float>();
const auto* count = counter_halflife_ == -1
? nullptr
: Input(COUNTER).template data<double>();
auto n = Input(INDICES).numel();
if (n == 0) {
return true;
}
auto block_size = Input(GRAD).numel() / n;
// Enforce:
// Input(embedding/momentum) == outputs(embedding/momentum)
CAFFE_ENFORCE_EQ(
Input(PARAM).numel() / block_size,
Input(MOMENT_1).numel(),
"Input Param size: ",
Input(PARAM).numel(),
" Block size: ",
block_size,
" Input Moment size: ",
Input(MOMENT_1).numel());
  // Input(GRAD) must be evenly divisible by the number of indices
CAFFE_ENFORCE_EQ(
Input(GRAD).numel() % n,
0,
"Incorrect gradient size:",
Input(GRAD).numel(),
" size of indexes:",
n);
#if defined(USE_FBGEMM) && !defined(__NVCC__)
VLOG(1) << "using fbgemm::GenerateSparseAdaGrad in RowWiseSparseAdagradOp";
if (block_size != last_block_size_) {
last_block_size_ = block_size;
if (std::is_same<SIndex, std::int32_t>::value) {
kernel_i32_ = fbgemm::GenerateSparseAdaGrad<std::int32_t>(
block_size,
/*rowwise=*/true,
/*prefetch=*/16,
weight_decay_ != 0.0f);
} else {
CAFFE_ENFORCE((std::is_same<SIndex, std::int64_t>::value));
kernel_i64_ = fbgemm::GenerateSparseAdaGrad<std::int64_t>(
block_size,
/*rowwise=*/true,
/*prefetch=*/16,
weight_decay_ != 0.0f);
}
}
int num_rows_processed;
if (std::is_same<SIndex, std::int32_t>::value) {
num_rows_processed = kernel_i32_(
n,
Input(PARAM).numel(),
param,
gradIn,
moment,
reinterpret_cast<const std::int32_t*>(indices),
epsilon_,
lr[0],
weight_decay_,
(counter_halflife_ > 0) ? count : nullptr,
counter_halflife_);
} else {
num_rows_processed = kernel_i64_(
n,
Input(PARAM).numel(),
param,
gradIn,
moment,
reinterpret_cast<const std::int64_t*>(indices),
epsilon_,
lr[0],
weight_decay_,
(counter_halflife_ > 0) ? count : nullptr,
counter_halflife_);
}
if (num_rows_processed < n) {
// Enforce:
// access within range
CAFFE_ENFORCE_GE(
Input(PARAM).numel(),
(indices[num_rows_processed] + 1) * block_size,
this->debug_def().input(PARAM),
", out of bound, idx:",
indices[num_rows_processed],
" for input i:",
num_rows_processed,
" and block size:",
block_size,
" max size:",
Input(PARAM).numel());
return false;
} else {
return true;
}
#else
VLOG(1) << "using plain adagrad updates in RowWiseSparseAdagradOp";
for (const auto i : c10::irange(n)) {
auto idx = indices[i];
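      // Frequency-adjusted weight decay: count[idx] is a decayed hit count,
      // so rarely visited rows get a proportionally larger effective decay
      // (counter_halflife_ / count[idx]) to compensate for fewer updates.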
float freq = (counter_halflife_ > 0 && count[idx] > 0)
? counter_halflife_ / count[idx]
: 1.0;
if (block_size == 1) {
float gi = std::fma(weight_decay_ * freq, param[idx], gradIn[i]);
float hi = moment[idx] = moment[idx] + gi * gi;
param[idx] = param[idx] + lr[0] * gi / (std::sqrt(hi) + epsilon_);
} else {
auto offsetI = i * block_size;
auto offsetIdx = idx * block_size;
#ifndef NDEBUG
CAFFE_ENFORCE_GE(
Input(PARAM).numel(),
block_size + offsetIdx,
this->debug_def().input(PARAM),
", out of bound, idx:",
idx,
" for input i:",
i,
" and block size:",
block_size);
CAFFE_ENFORCE_GE(
Input(GRAD).numel(),
block_size + offsetI,
this->debug_def().input(GRAD),
", out of bound idx, idx:",
idx,
" for input i:",
i);
#endif
float* w = param + offsetIdx;
const float* g = gradIn + offsetI;
float* h = moment + idx;
float hs = 0.;
for (const auto j : c10::irange(block_size)) {
float gj = std::fma(weight_decay_ * freq, w[j], g[j]);
hs += gj * gj;
}
float hi = h[0] = h[0] + hs / block_size;
float step = lr[0] / (std::sqrt(hi) + epsilon_);
for (const auto j : c10::irange(block_size)) {
float gj = std::fma(weight_decay_ * freq, w[j], g[j]);
w[j] = w[j] + gj * step;
}
}
}
return true;
#endif // defined(USE_FBGEMM) && !defined(__NVCC__)
}
protected:
float epsilon_;
const float weight_decay_;
const int64_t counter_halflife_;
#if defined(USE_FBGEMM) && !defined(__NVCC__)
fbgemm::SparseAdaGradSignature<std::int32_t>::Type kernel_i32_;
fbgemm::SparseAdaGradSignature<std::int64_t>::Type kernel_i64_;
std::int64_t last_block_size_{-1};
#endif
INPUT_TAGS(PARAM, MOMENT_1, INDICES, GRAD, LR, COUNTER);
OUTPUT_TAGS(OUTPUT_PARAM, OUTPUT_MOMENT_1);
};
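// ---------------------------------------------------------------------------
// Editorial sketch (not part of the original operator): the row-wise variant
// keeps one accumulator per row, fed with the mean squared gradient of the
// row, so every element of a row shares a single adaptive step size. Names
// are hypothetical; std::sqrt is available as in the code above.
inline void rowwise_adagrad_row_sketch(
    float* w, const float* g, float& h, int block_size,
    float lr, float epsilon) {
  float sum_sq = 0.f;
  for (int j = 0; j < block_size; ++j) {
    sum_sq += g[j] * g[j];
  }
  h += sum_sq / block_size; // single moment entry for the whole row
  const float step = lr / (std::sqrt(h) + epsilon);
  for (int j = 0; j < block_size; ++j) {
    w[j] += step * g[j]; // additive step, matching caffe2's sign convention
  }
}
// ---------------------------------------------------------------------------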
} // namespace caffe2
| 18,035
| 30.366957
| 80
|
h
|
null |
pytorch-main/caffe2/sgd/clip_tensor_op.h
|
#ifndef CAFFE2_OPERATORS_CLIP_TENSOR_OP_H_
#define CAFFE2_OPERATORS_CLIP_TENSOR_OP_H_
#include <vector>
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
#include "caffe2/core/tensor.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
template <typename Context>
class ClipTensorByScalingOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
ClipTensorByScalingOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws) {
threshold_ = this->template GetSingleArgument<float>("threshold", 0.0);
CAFFE_ENFORCE_GT(threshold_, 0, "Threshold must be greater than 0");
}
bool RunOnDevice() override {
const auto& input_tensor = Input(0);
CAFFE_ENFORCE_GT(input_tensor.numel(), 0);
const auto& val = Input(1);
CAFFE_ENFORCE_EQ(val.numel(), 1);
const auto* input_tensor_data = input_tensor.template data<float>();
const auto* val_data = val.template data<float>();
auto* clipped = Output(0, input_tensor.sizes(), at::dtype<float>());
float* clipped_tensor_data = clipped->template mutable_data<float>();
    // Use a local copy so that the optional run-time multiplier does not
    // permanently compound into the stored threshold across invocations.
    float threshold = threshold_;
    if (InputSize() > 2) {
      const auto& additional_threshold = Input(2);
      CAFFE_ENFORCE_EQ(additional_threshold.numel(), 1);
      threshold *= *(additional_threshold.template data<float>());
    }
    if (*val_data > threshold) {
      float ratio = threshold / *val_data;
math::Scale<float, float, Context>(
clipped->numel(),
ratio,
input_tensor_data,
clipped_tensor_data,
&context_);
} else {
if (input_tensor_data != clipped_tensor_data) {
clipped->CopyFrom(input_tensor, /*async*/ true);
}
}
return true;
}
private:
float threshold_;
};
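// Editorial note (not part of the original operator): the effective rule is
//   clipped = input * min(1, threshold / val),
// i.e. the tensor is rescaled only when val (typically a norm of the tensor)
// exceeds the threshold. For example, threshold = 5 and val = 10 halves every
// element, while val = 2 leaves the tensor untouched.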
} // namespace caffe2
#endif // CAFFE2_OPERATORS_CLIP_TENSOR_OP_H_
| 1,853
| 26.671642
| 75
|
h
|
null |
pytorch-main/caffe2/sgd/decay_adagrad_op.h
|
#pragma once
#include "caffe2/core/operator.h"
#include "caffe2/utils/eigen_utils.h"
namespace caffe2 {
template <typename Context>
void decay_adagrad_compute(
int N,
const float* w,
const float* g,
const float* m,
const float* v,
float* nw,
float* nm,
float* nv,
float beta1,
float beta2,
float eps_hat,
float weight_decay,
float c,
const float* lr,
Context* /*context*/) {
ConstEigenVectorArrayMap<float> w_arr(w, N);
ConstEigenVectorArrayMap<float> g_arr(g, N);
ConstEigenVectorArrayMap<float> m_arr(m, N);
ConstEigenVectorArrayMap<float> v_arr(v, N);
EigenVectorArrayMap<float> nw_arr(nw, N);
EigenVectorArrayMap<float> nm_arr(nm, N);
EigenVectorArrayMap<float> nv_arr(nv, N);
nm_arr = m_arr * beta1 + g_arr * (1.0f - beta1);
nv_arr = v_arr + g_arr.square();
nw_arr = w_arr + *lr * (nm_arr / c / (nv_arr.sqrt() + eps_hat) + weight_decay * w_arr);
}
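// Editorial example (not part of the original file, values hypothetical): a
// single-element pass through the update above with w=1, g=0.5, m=0, v=0,
// beta1=0.9, eps_hat=1e-5, weight_decay=0, c=1, lr=0.1:
//   nm = 0.9 * 0 + 0.1 * 0.5 = 0.05
//   nv = 0 + 0.5^2 = 0.25
//   nw = 1 + 0.1 * (0.05 / 1 / (sqrt(0.25) + 1e-5)) ~= 1.01
// Note that beta2 is accepted but unused by this kernel; the second moment is
// the plain AdaGrad sum of squared gradients.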
template <typename T, class Context>
class DecayAdagradOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
DecayAdagradOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws),
beta1_(this->template GetSingleArgument<float>("beta1", 0.9f)),
beta2_(this->template GetSingleArgument<float>("beta2", 0.999f)),
epsilon_(this->template GetSingleArgument<float>("epsilon", 1e-5f)),
weight_decay_(this->template GetSingleArgument<float>("weight_decay", 0.0f)),
bias_correction_first_(this->template GetSingleArgument<bool>("bias_correction_first", true)) {}
bool RunOnDevice() override {
    // ITER lives on the CPU
CAFFE_ENFORCE(OperatorBase::InputIsTensorType(ITER, CPU));
CAFFE_ENFORCE(Input(LR).numel() == 1);
CAFFE_ENFORCE(Input(GRAD).numel() == Input(PARAM).numel());
CAFFE_ENFORCE(Input(GRAD).numel() == Input(MOMENT_1).numel());
CAFFE_ENFORCE(Input(GRAD).numel() == Input(MOMENT_2).numel());
Output(OUTPUT_PARAM)->ResizeLike(Input(PARAM));
Output(OUTPUT_MOMENT_1)->ResizeLike(Input(MOMENT_1));
Output(OUTPUT_MOMENT_2)->ResizeLike(Input(MOMENT_2));
const auto iter =
OperatorBase::Input<Tensor>(ITER, CPU).template data<int64_t>()[0];
const auto t = iter + 1;
    const auto c = bias_correction_first_ ? (T(1.) - std::pow(beta1_, t)) : 1.0;
decay_adagrad_compute<Context>(
Input(GRAD).numel(),
Input(PARAM).template data<T>(),
Input(GRAD).template data<T>(),
Input(MOMENT_1).template data<T>(),
Input(MOMENT_2).template data<T>(),
Output(OUTPUT_PARAM)->template mutable_data<T>(),
Output(OUTPUT_MOMENT_1)->template mutable_data<T>(),
Output(OUTPUT_MOMENT_2)->template mutable_data<T>(),
beta1_,
beta2_,
epsilon_,
weight_decay_,
c,
Input(LR).template data<T>(),
&context_);
return true;
}
protected:
T beta1_{0.9};
T beta2_{0.999};
T epsilon_{1e-8};
T weight_decay_{0.0};
bool bias_correction_first_{true};
INPUT_TAGS(PARAM, MOMENT_1, MOMENT_2, GRAD, LR, ITER);
OUTPUT_TAGS(OUTPUT_PARAM, OUTPUT_MOMENT_1, OUTPUT_MOMENT_2);
};
} // namespace caffe2
| 3,215
| 32.5
| 104
|
h
|
null |
pytorch-main/caffe2/sgd/fp16_momentum_sgd_op.h
|
#pragma once
#include "caffe2/core/operator.h"
#include "caffe2/core/timer.h"
namespace caffe2 {
template <class Context>
void fp16_momentum_sgd_update(
int N,
const at::Half* g,
const at::Half* m,
at::Half* ng,
at::Half* nm,
const float* lr,
float momentum,
bool nesterov,
float weight_decay,
bool fp32_update,
at::Half* param,
Context* /*context*/) {}
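// NB: the generic template above is an intentionally empty stub; real
// implementations are expected to come from per-device specializations
// (e.g., the CUDA build), which is why the body is blank.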
template <typename T, class Context>
class FP16MomentumSGDUpdateOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
FP16MomentumSGDUpdateOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws),
momentum_(this->template GetSingleArgument<float>("momentum", 0.0)),
weight_decay_(
this->template GetSingleArgument<float>("weight_decay", 0.0)),
nesterov_(this->template GetSingleArgument<int>("nesterov", 0)),
// when set, fp32_update will read in the fp16 data but
// perform all the compute in fp32 precision.
fp32_update_(this->template GetSingleArgument<int>("fp32_update", 0)) {}
bool RunOnDevice() override {
auto device_type = Context::GetDeviceType();
    // GRAD and MOMENTUM must live on this operator's device
CAFFE_ENFORCE(OperatorBase::InputIsTensorType(GRAD, device_type));
CAFFE_ENFORCE(OperatorBase::InputIsTensorType(MOMENTUM, device_type));
CAFFE_ENFORCE(Input(LR).size() == 1);
CAFFE_ENFORCE(Input(GRAD).size() == Input(MOMENTUM).size());
Output(OUTPUT_GRAD)->ResizeLike(Input(GRAD));
Output(OUTPUT_MOMENTUM)->ResizeLike(Input(MOMENTUM));
fp16_momentum_sgd_update<Context>(
Input(GRAD).size(),
Input(GRAD).template data<T>(),
Input(MOMENTUM).template data<T>(),
Output(OUTPUT_GRAD)->template mutable_data<T>(),
Output(OUTPUT_MOMENTUM)->template mutable_data<T>(),
Input(LR).template data<float>(),
momentum_,
nesterov_,
weight_decay_,
fp32_update_,
Output(OUTPUT_PARAM)->template mutable_data<T>(),
&context_);
return true;
}
protected:
float momentum_{0.9};
float weight_decay_{0.0};
bool nesterov_;
bool fp32_update_;
INPUT_TAGS(GRAD, MOMENTUM, LR, PARAM);
OUTPUT_TAGS(OUTPUT_GRAD, OUTPUT_MOMENTUM, OUTPUT_PARAM);
};
} // namespace caffe2
| 2,292
| 30.410959
| 80
|
h
|
null |
pytorch-main/caffe2/sgd/fp32_momentum_sgd_op.h
|
#pragma once
#include "caffe2/core/operator.h"
#include "caffe2/core/timer.h"
namespace caffe2 {
template <class Context>
void fp32_momentum_sgd_update(
int N,
const float* g,
const float* m,
float* ng,
float* nm,
const float* lr,
float momentum,
bool nesterov,
float weight_decay,
float* param,
Context* /*context*/) {}
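// NB: as with the fp16 variant, this generic template is an empty stub that
// per-device specializations are expected to replace.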
template <typename T, class Context>
class FP32MomentumSGDUpdateOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
FP32MomentumSGDUpdateOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws),
momentum_(this->template GetSingleArgument<float>("momentum", 0.0)),
weight_decay_(
this->template GetSingleArgument<float>("weight_decay", 0.0)),
nesterov_(this->template GetSingleArgument<int>("nesterov", 0)) {}
bool RunOnDevice() override {
auto device_type = Context::GetDeviceType();
    // GRAD and MOMENTUM must live on this operator's device
CAFFE_ENFORCE(OperatorBase::InputIsTensorType(GRAD, device_type));
CAFFE_ENFORCE(OperatorBase::InputIsTensorType(MOMENTUM, device_type));
CAFFE_ENFORCE(Input(LR).size() == 1);
CAFFE_ENFORCE(Input(GRAD).size() == Input(MOMENTUM).size());
Output(OUTPUT_GRAD)->ResizeLike(Input(GRAD));
Output(OUTPUT_MOMENTUM)->ResizeLike(Input(MOMENTUM));
fp32_momentum_sgd_update<Context>(
Input(GRAD).size(),
Input(GRAD).template data<T>(),
Input(MOMENTUM).template data<T>(),
Output(OUTPUT_GRAD)->template mutable_data<T>(),
Output(OUTPUT_MOMENTUM)->template mutable_data<T>(),
Input(LR).template data<float>(),
momentum_,
nesterov_,
weight_decay_,
Output(OUTPUT_PARAM)->template mutable_data<T>(),
&context_);
return true;
}
protected:
float momentum_{0.9};
float weight_decay_{0.0};
bool nesterov_;
INPUT_TAGS(GRAD, MOMENTUM, LR, PARAM);
OUTPUT_TAGS(OUTPUT_GRAD, OUTPUT_MOMENTUM, OUTPUT_PARAM);
};
} // namespace caffe2
| 2,015
| 29.089552
| 76
|
h
|
null |
pytorch-main/caffe2/sgd/ftrl_op.h
|
#pragma once
#include "caffe2/core/operator.h"
namespace caffe2 {
template <typename T>
struct FtrlParams {
explicit FtrlParams(OperatorBase* op)
: alphaInv(1.0 / op->GetSingleArgument<float>("alpha", 0.005f)),
beta(op->GetSingleArgument<float>("beta", 1.0f)),
lambda1(op->GetSingleArgument<float>("lambda1", 0.001f)),
lambda2(op->GetSingleArgument<float>("lambda2", 0.001f)) {}
T alphaInv;
T beta;
T lambda1;
T lambda2;
};
// TODO(dzhulgakov): implement GPU version if necessary
template <typename T, class Context>
class FtrlOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
FtrlOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws), params_(this) {
CAFFE_ENFORCE(
!HasArgument("alpha") || ALPHA >= InputSize(),
"Cannot specify alpha by both input and argument");
}
bool RunOnDevice() override;
protected:
FtrlParams<T> params_;
INPUT_TAGS(VAR, N_Z, GRAD, ALPHA);
OUTPUT_TAGS(OUTPUT_VAR, OUTPUT_N_Z);
};
template <typename T>
class SparseFtrlOp final : public Operator<CPUContext> {
public:
SparseFtrlOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<CPUContext>(operator_def, ws), params_(this) {
CAFFE_ENFORCE(
!HasArgument("alpha") || ALPHA >= InputSize(),
"Cannot specify alpha by both input and argument");
}
bool RunOnDevice() override {
// run time learning rate override
if (ALPHA < InputSize()) {
CAFFE_ENFORCE_EQ(Input(ALPHA).numel(), 1, "alpha should be real-valued");
params_.alphaInv = 1.0 / *(Input(ALPHA).template data<T>());
}
// Use run-time polymorphism
auto& indices = Input(INDICES);
if (indices.template IsType<int32_t>()) {
DoRun<int32_t>();
} else if (indices.template IsType<int64_t>()) {
DoRun<int64_t>();
} else {
LOG(FATAL) << "Unsupported type of INDICES in SparseFtrlOp: "
<< indices.dtype().name();
}
return true;
}
protected:
FtrlParams<T> params_;
INPUT_TAGS(VAR, N_Z, INDICES, GRAD, ALPHA);
OUTPUT_TAGS(OUTPUT_VAR, OUTPUT_N_Z);
private:
template <typename SIndex>
void DoRun();
};
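// ---------------------------------------------------------------------------
// Editorial sketch (not part of the original header): the standard
// FTRL-Proximal per-coordinate rule that these parameters feed. The actual
// kernels are defined in ftrl_op.cc, so treat this as an illustration only;
// the name is hypothetical and <cmath> is assumed available.
inline void ftrl_coordinate_sketch(
    float& w, float& n, float& z, float g,
    float alphaInv, float beta, float lambda1, float lambda2) {
  const float sigma = (std::sqrt(n + g * g) - std::sqrt(n)) * alphaInv;
  z += g - sigma * w; // proximal accumulator
  n += g * g;         // squared-gradient accumulator
  if (std::fabs(z) <= lambda1) {
    w = 0.f; // L1 shrinkage zeroes small coordinates
  } else {
    w = -(z - std::copysign(lambda1, z)) /
        ((beta + std::sqrt(n)) * alphaInv + lambda2);
  }
}
// ---------------------------------------------------------------------------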
} // namespace caffe2
| 2,219
| 27.101266
| 79
|
h
|
null |
pytorch-main/caffe2/sgd/gftrl_op.h
|
#pragma once
#include "caffe2/core/operator.h"
namespace caffe2 {
template <typename T>
struct GFtrlParams {
explicit GFtrlParams(OperatorBase* op)
: alphaInv(1.0 / op->GetSingleArgument<float>("alpha", 0.005f)),
beta(op->GetSingleArgument<float>("beta", 1.0f)),
lambda1(op->GetSingleArgument<float>("lambda1", 0.001f)),
lambda2(op->GetSingleArgument<float>("lambda2", 0.001f)) {}
T alphaInv;
T beta;
T lambda1;
T lambda2;
};
template <typename T, class Context>
class GFtrlOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
GFtrlOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws), params_(this) {
CAFFE_ENFORCE(
!HasArgument("alpha") || ALPHA >= InputSize(),
"Cannot specify alpha by both input and argument");
}
bool RunOnDevice() override;
protected:
GFtrlParams<T> params_;
INPUT_TAGS(VAR, N_Z, GRAD, ALPHA);
OUTPUT_TAGS(OUTPUT_VAR, OUTPUT_N_Z);
};
} // namespace caffe2
| 1,028
| 25.384615
| 70
|
h
|
null |
pytorch-main/caffe2/sgd/iter_op.h
|
#ifndef CAFFE2_SGD_ITER_OP_H_
#define CAFFE2_SGD_ITER_OP_H_
#include <limits>
#include <mutex>
#include "caffe2/core/blob_serialization.h"
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
#include "caffe2/core/stats.h"
namespace caffe2 {
inline void IncrementIter(TensorCPU* output) {
CAFFE_ENFORCE_EQ(
output->numel(),
1,
"The output of IterOp exists, but not of the right size.");
int64_t* iter = output->template mutable_data<int64_t>();
CAFFE_ENFORCE(*iter >= 0, "Previous iteration number is negative.");
CAFFE_ENFORCE(
*iter < std::numeric_limits<int64_t>::max(), "Overflow will happen!");
(*iter)++;
}
// IterOp runs an iteration counter. I cannot think of a case where we would
// need to access the iter variable on device, so this will always produce a
// tensor on the CPU side. If the blob already exists and is a tensor<int64_t>
// object, we will simply increment it (this emulates the case when we want to
// resume training). Otherwise we will have the iter starting with 0.
template <class Context>
class IterOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
IterOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws) {}
bool RunOnDevice() override {
if (InputSize() == 0) {
VLOG(1) << "[Input size is zero]";
if (!OperatorBase::OutputIsTensorType(0, CPU)) {
// This is the first run; set the iter to start with 0.
LOG(ERROR) << "You are using an old definition of IterOp that will "
"be deprecated soon. More specifically, IterOp now "
"requires an explicit in-place input and output.";
VLOG(1) << "Initializing iter counter.";
auto* output = OperatorBase::OutputTensor(
0, {1}, at::dtype<int64_t>().device(CPU));
output->template mutable_data<int64_t>()[0] = 0;
}
}
IncrementIter(OperatorBase::Output<Tensor>(0, CPU));
return true;
}
};
template <class Context>
class AtomicIterOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
AtomicIterOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws),
stats_(std::string("atomic_iter/stats/") + operator_def.input(1)) {}
bool RunOnDevice() override {
auto& mutex = OperatorBase::Input<std::unique_ptr<std::mutex>>(0);
std::lock_guard<std::mutex> lg(*mutex);
IncrementIter(OperatorBase::Output<Tensor>(0, CPU));
// NOLINTNEXTLINE(clang-diagnostic-unused-variable)
CAFFE_EVENT(stats_, num_iter);
return true;
}
private:
struct AtomicIterOpStats {
CAFFE_STAT_CTOR(AtomicIterOpStats);
CAFFE_EXPORTED_STAT(num_iter);
} stats_;
};
class MutexSerializer : public BlobSerializerBase {
public:
/**
* Serializes a std::unique_ptr<std::mutex>. Note that this blob has to
* contain std::unique_ptr<std::mutex>, otherwise this function produces a
* fatal error.
*/
void Serialize(
const void* pointer,
TypeMeta typeMeta,
const string& name,
BlobSerializerBase::SerializationAcceptor acceptor) override;
};
class MutexDeserializer : public BlobDeserializerBase {
public:
void Deserialize(const BlobProto& proto, Blob* blob) override;
};
} // namespace caffe2
#endif // CAFFE2_SGD_ITER_OP_H_
| 3,379
| 30.886792
| 78
|
h
|
null |
pytorch-main/caffe2/sgd/lars_op.h
|
#ifndef CAFFE2_OPERATORS_LARS_OP_H_
#define CAFFE2_OPERATORS_LARS_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
template <typename T, class Context>
class LarsOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
LarsOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws),
offset_(this->template GetSingleArgument<float>("offset", 0.5)),
lr_min_(this->template GetSingleArgument<float>("lr_min", 0.02)) {}
bool RunOnDevice() override {
auto& X = Input(0);
auto& dX = Input(1);
CAFFE_ENFORCE(
dX.numel() == X.numel(), "Gradient size doesn't match parameter size.");
CAFFE_ENFORCE_GE(offset_, 0);
CAFFE_ENFORCE_GE(lr_min_, 0);
auto& wd = Input(2);
auto& trust = Input(3);
auto& lr_max = Input(4);
auto* lr_rescaled = Output(0, vector<int64_t>{1}, at::dtype<T>());
ReinitializeTensor(&X_norm_tensor_, {1}, at::dtype<T>().device(Context::GetDeviceType()));
T* X_norm_ = X_norm_tensor_.template mutable_data<T>();
ReinitializeTensor(&dX_norm_tensor_, {1}, at::dtype<T>().device(Context::GetDeviceType()));
T* dX_norm_ = dX_norm_tensor_.template mutable_data<T>();
ComputeNorms(
dX.numel(),
X.template data<T>(),
dX.template data<T>(),
X_norm_,
dX_norm_);
ComputeLearningRate(
wd.template data<T>(),
trust.template data<T>(),
lr_max.template data<T>(),
offset_,
lr_min_,
X_norm_,
dX_norm_,
lr_rescaled->template mutable_data<T>());
return true;
}
private:
// Compute the l2 norm of X_data and dX_data
void ComputeNorms(
int64_t N,
const T* X_data,
const T* dX_data,
T* X_norm,
T* dX_norm) {
math::SumSqr(N, X_data, X_norm, &context_);
math::Sqrt(1, X_norm, X_norm, &context_);
math::SumSqr(N, dX_data, dX_norm, &context_);
math::Sqrt(1, dX_norm, dX_norm, &context_);
}
// Compute the learning rate and apply clipping
void ComputeLearningRate(
const T* wd,
const T* trust,
const T* lr_max,
T offset,
T lr_min,
T* X_norm,
T* dX_norm,
T* lr_rescaled);
T offset_;
T lr_min_;
Tensor X_norm_tensor_;
Tensor dX_norm_tensor_;
};
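// Editorial note (not part of the original header): LARS turns the two norms
// above into a per-layer trust ratio. One common formulation, hedged here as
// an assumption since the exact expression lives in ComputeLearningRate in
// lars_op.cc, is roughly
//   lr_rescaled = clamp(trust / (||dX|| / ||X|| + wd + offset), lr_min, lr_max)
// so layers whose gradients are large relative to their weights take smaller
// steps.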
} // namespace caffe2
#endif // CAFFE2_OPERATORS_LARS_OP_H_
| 2,479
| 25.382979
| 95
|
h
|
null |
pytorch-main/caffe2/sgd/learning_rate_adaption_op.h
|
#pragma once
#include <cfloat>
#include <cmath>
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
template <typename Context>
void lr_update(
int n,
const float* grad,
const float* effgrad,
const float* lr,
float* nlr,
float lr_alpha,
bool normalized_lr_adaption,
Context* /*context*/) {
float x = 0;
float y = 0, z = 0;
const float kEps = 1e-12f;
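  // x accumulates <grad, effgrad>; when normalizing, y and z accumulate the
  // squared norms of grad and effgrad, so that x / (y * z) below is the
  // cosine similarity between the two directions.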
for (const auto i : c10::irange(n)) {
x += grad[i] * effgrad[i];
if (normalized_lr_adaption) {
y += grad[i] * grad[i];
z += effgrad[i] * effgrad[i];
}
}
if (normalized_lr_adaption) {
y = fmax(std::sqrt(y), kEps);
z = fmax(std::sqrt(z), kEps);
nlr[0] = lr[0] * (1 - lr_alpha * x / (y * z));
} else {
nlr[0] = lr[0] - lr_alpha * x;
}
}
template <typename T, class Context>
class LearningRateAdaptionOp final : public Operator<Context> {
public:
LearningRateAdaptionOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws),
lr_alpha_(this->template GetSingleArgument<float>("lr_alpha", 0.01f)),
normalized_lr_adaption_(this->template GetSingleArgument<bool>(
"normalized_lr_adaption",
true)) {}
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override {
CAFFE_ENFORCE(Input(LR).numel() == 1);
CAFFE_ENFORCE(Input(GRAD).numel() == Input(EFFGRAD).numel());
Output(OUTPUT_LR)->ResizeLike(Input(LR));
lr_update<Context>(
Input(GRAD).numel(),
Input(GRAD).template data<T>(),
Input(EFFGRAD).template data<T>(),
Input(LR).template data<T>(),
Output(OUTPUT_LR)->template mutable_data<T>(),
lr_alpha_,
normalized_lr_adaption_,
&context_);
return true;
}
protected:
T lr_alpha_{1e-2};
bool normalized_lr_adaption_{true};
INPUT_TAGS(LR, GRAD, EFFGRAD);
OUTPUT_TAGS(OUTPUT_LR);
};
} // namespace caffe2
| 1,981
| 25.426667
| 78
|
h
|
null |
pytorch-main/caffe2/sgd/learning_rate_functors.h
|
#ifndef CAFFE2_SGD_LEARNING_RATE_FUNCTORS_H_
#define CAFFE2_SGD_LEARNING_RATE_FUNCTORS_H_
#include <cmath>
#include <list>
#include <map>
#ifdef _MSC_VER
#ifndef _USE_MATH_DEFINES
#define _USE_MATH_DEFINES
#endif
#include <math.h>
#endif // _MSC_VER
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
namespace caffe2 {
// LearningRateFunctor is a functor that when fed with an iter number, produces
// the learning rate for the corresponding iteration.
template <typename T>
class LearningRateFunctor {
public:
virtual ~LearningRateFunctor() {}
virtual T operator()(const int64_t iter) const = 0;
};
// Fixed: not changing the learning rate at all.
template <typename T>
class FixedLearningRate : public LearningRateFunctor<T> {
public:
T operator()(const int64_t /*iter*/) const override {
return 1.;
}
};
// Alter: alternate learning rate with active_period and inactive_period.
// Update for a duration of active_period, then stop for a duration of
// inactive_period if active_first, and vice versa.
template <typename T>
class AlternateLearningRate : public LearningRateFunctor<T> {
public:
AlternateLearningRate(
const int64_t active_period,
const int64_t inactive_period,
const bool active_first)
: active_period_(active_period),
inactive_period_(inactive_period),
active_first_(active_first) {}
T operator()(const int64_t iter) const override {
if (iter % (active_period_ + inactive_period_) <
(active_first_ ? active_period_ : inactive_period_)) {
return active_first_ ? 1. : 0.;
} else {
return active_first_ ? 0. : 1.;
    }
  }
int64_t active_period_;
int64_t inactive_period_;
bool active_first_;
};
// Step: return gamma ^ (floor(iter / step))
template <typename T>
class StepLearningRate : public LearningRateFunctor<T> {
public:
StepLearningRate(const int stepsize, const T gamma)
: stepsize_(stepsize), gamma_(gamma) {}
T operator()(const int64_t iter) const override {
return std::pow(gamma_, static_cast<T>(iter / stepsize_));
}
int stepsize_;
T gamma_;
};
// Exp: return gamma ^ iter
template <typename T>
class ExpLearningRate : public LearningRateFunctor<T> {
public:
explicit ExpLearningRate(const T gamma) : gamma_(gamma) {}
T operator()(const int64_t iter) const override {
return std::pow(gamma_, static_cast<T>(iter));
}
T gamma_;
};
// Gate: return multiplier_1 if before num_iter, else multiplier_2
template <typename T>
class GateLearningRate : public LearningRateFunctor<T> {
public:
GateLearningRate(
const T multiplier_1,
const T multiplier_2,
const int64_t num_iter)
: multiplier_1_(multiplier_1),
multiplier_2_(multiplier_2),
num_iter_(num_iter) {}
T operator()(const int64_t iter) const override {
if (iter >= int64_t(num_iter_)) {
return T(multiplier_2_);
}
return T(multiplier_1_);
}
T multiplier_1_;
T multiplier_2_;
uint64_t num_iter_;
};
// Inv: return (1 + gamma * iter) ^ (-power)
template <typename T>
class InvLearningRate : public LearningRateFunctor<T> {
public:
InvLearningRate(const T gamma, const T power)
: gamma_(gamma), power_(power) {}
T operator()(const int64_t iter) const override {
return std::pow(T(1) + gamma_ * iter, -power_);
}
T gamma_;
T power_;
};
// Poly: return (1 - iter/max_iter) ^ (power)
template <typename T>
class PolyLearningRate : public LearningRateFunctor<T> {
public:
PolyLearningRate(const T power, const int64_t max_iter)
: power_(power), max_iter_(max_iter) {}
T operator()(const int64_t iter) const override {
return std::pow(1 - T(iter) / T(max_iter_), power_);
}
T power_;
uint64_t max_iter_;
};
// LinearWarmup: ramp linearly from start_multiplier to 1 over num_iter steps,
// i.e. return min(start_multiplier + (1 - start_multiplier) * iter / num_iter, 1)
template <typename T>
class LinearWarmupLearningRate : public LearningRateFunctor<T> {
public:
LinearWarmupLearningRate(const T start_multiplier, const int64_t num_iter)
: start_multiplier_(start_multiplier), num_iter_(num_iter) {}
T operator()(const int64_t iter) const override {
if (iter >= int64_t(num_iter_)) {
return 1.;
}
return start_multiplier_ +
(1. - start_multiplier_) * T(iter) / T(num_iter_);
}
T start_multiplier_;
uint64_t num_iter_;
};
// ConstantWarmup: return multiplier when iter < num_iter, and 1 otherwise
template <typename T>
class ConstantWarmupLearningRate : public LearningRateFunctor<T> {
public:
ConstantWarmupLearningRate(const T multiplier, const int64_t num_iter)
: multiplier_(multiplier), num_iter_(num_iter) {}
T operator()(const int64_t iter) const override {
if (iter >= int64_t(num_iter_)) {
return 1.;
}
return T(multiplier_);
}
T multiplier_;
uint64_t num_iter_;
};
// PieceWarmup: piecewise-constant multiplier: m1 before n1 iterations, m2
// before n2, and m3 afterwards
template <typename T>
class PieceWarmupLearningRate : public LearningRateFunctor<T> {
public:
PieceWarmupLearningRate(
const T m1,
const int64_t n1,
const T m2,
const int64_t n2,
const T m3)
      : m1_(m1), m2_(m2), m3_(m3), n1_(n1), n2_(n2) {}
T operator()(const int64_t iter) const override {
if (iter < int64_t(n1_)) {
return m1_;
} else if (iter < int64_t(n2_)) {
return m2_;
}
return m3_;
}
T m1_, m2_, m3_;
uint64_t n1_, n2_;
};
// hill: the learning rate changes according to following 3 stages
// 1) linear warmup (increasing) at first num_iter steps from start_multiplier
// 2) inverse shrink (decreasing) afterwards (gamma, power)
// 3) lower bounded by end_multiplier
template <typename T>
class HillLearningRate : public LearningRateFunctor<T> {
public:
HillLearningRate(
const int64_t num_iter,
const T start_multiplier,
const T gamma,
const T power,
const T end_multiplier)
: linear_warmup_lr_(start_multiplier, num_iter),
inv_lr_(gamma, power),
num_iter_(num_iter),
end_multiplier_(end_multiplier) {}
T operator()(const int64_t iter) const override {
if (iter < num_iter_) {
return linear_warmup_lr_(iter);
} else {
return std::max(end_multiplier_, inv_lr_(iter - num_iter_));
}
}
LinearWarmupLearningRate<T> linear_warmup_lr_;
InvLearningRate<T> inv_lr_;
int64_t num_iter_;
T end_multiplier_;
};
// slope: the learning rate changes according to 2 stages
// 1) constantWarmup with multiplier_1
// 2) linearly shrink to multiplier_2:
// max{
// multiplier_1 + (iter - num_iter_1) * (multiplier_2 - multiplier_1) / (num_iter_2 - num_iter_1),
// multiplier_2
// }
template <typename T>
class SlopeLearningRate : public LearningRateFunctor<T> {
public:
SlopeLearningRate(
const int64_t num_iter_1,
const T multiplier_1,
      const int64_t num_iter_2,
const T multiplier_2)
: num_iter_1_(num_iter_1),
multiplier_1_(multiplier_1),
num_iter_2_(num_iter_2),
multiplier_2_(multiplier_2) {}
T operator()(const int64_t iter) const override {
if (iter < num_iter_1_) {
return multiplier_1_;
} else {
return std::max(
multiplier_2_,
multiplier_1_ + (iter - num_iter_1_) * (multiplier_2_ - multiplier_1_) / (num_iter_2_ - num_iter_1_)
);
}
}
int64_t num_iter_1_;
T multiplier_1_;
int64_t num_iter_2_;
T multiplier_2_;
};
template <typename T>
class CompositeLearningRateItem {
public:
CompositeLearningRateItem(
int64_t num_iter,
float lr_scale,
LearningRateFunctor<T>* policy)
: num_iter_(num_iter), lr_scale_(lr_scale), policy_(policy) {}
int64_t num_iter_;
float lr_scale_;
LearningRateFunctor<T>* policy_;
};
// composite: the learning policy changes according to current iteration #
template <typename T>
class CompositeLearningRate : public LearningRateFunctor<T> {
public:
CompositeLearningRate(
const std::list<CompositeLearningRateItem<T>>& sub_policies) {
TORCH_DCHECK_GT(sub_policies.size(), 0);
int64_t num_iter_start = 1;
for (auto it = sub_policies.begin(); it != sub_policies.end(); ++it) {
TORCH_DCHECK_GT(it->num_iter_, 0);
sub_policies_[num_iter_start].reset(it->policy_);
sub_policy_lr_scales_[num_iter_start] = it->lr_scale_;
num_iter_start += it->num_iter_;
}
}
T operator()(const int64_t iter) const override {
auto sub_policy = sub_policies_.upper_bound(iter);
DCHECK(sub_policy != sub_policies_.begin());
--sub_policy;
auto sub_policy_lr_scale = sub_policy_lr_scales_.upper_bound(iter);
DCHECK(sub_policy_lr_scale != sub_policy_lr_scales_.begin());
--sub_policy_lr_scale;
return ((*sub_policy->second)(iter)) * (sub_policy_lr_scale->second);
}
private:
std::map<int64_t, std::unique_ptr<LearningRateFunctor<T>>> sub_policies_;
std::map<int64_t, float> sub_policy_lr_scales_;
};
// Cyclical: return a learning rate with period 2 * stepsize and
// lower bound base_lr, upper bound max_lr.
// See https://arxiv.org/pdf/1506.01186.pdf
template <typename T>
class CyclicalLearningRate : public LearningRateFunctor<T> {
public:
CyclicalLearningRate(
const T base_lr,
const T max_lr,
const int stepsize,
const T decay)
: base_lr_(base_lr),
max_lr_(max_lr),
stepsize_(stepsize),
decay_(decay) {}
T operator()(const int64_t iter) const override {
int64_t cycle = static_cast<int>((iter / (2 * stepsize_)) + 1);
T x = std::abs(static_cast<T>(iter) / stepsize_ - 2 * cycle + 1);
return 1 +
(T(std::abs(max_lr_)) / T(std::abs(base_lr_)) - 1) * std::max(T(0.0), (1 - x)) *
std::pow(decay_, static_cast<int>(iter / (2 * stepsize_)));
}
T base_lr_;
T max_lr_;
int stepsize_;
T decay_;
};
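// Editorial example (values hypothetical): with base_lr = 0.001,
// max_lr = 0.005, stepsize = 2, decay = 1 and iter = 3:
//   cycle = floor(3 / 4) + 1 = 1
//   x = |3 / 2 - 2 * 1 + 1| = 0.5
//   multiplier = 1 + (0.005 / 0.001 - 1) * max(0, 1 - 0.5) = 3
// so the effective rate is base_lr * 3 = 0.003, tracing the triangular wave.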
// Cosine: return a learning rate with a cosine schedule
// lower bound min_lr, upper bound max_lr.
// See https://arxiv.org/pdf/1608.03983.pdf
template <typename T>
class CosineLearningRate : public LearningRateFunctor<T> {
public:
CosineLearningRate(
const T min_lr,
const T max_lr,
const int64_t period,
const T t_mult,
const T lr_shrink)
: min_lr_(min_lr),
max_lr_(max_lr),
period_(period),
t_mult_(t_mult),
lr_shrink_(lr_shrink) {}
T operator()(const int64_t iter) const override {
T i, t_i, t_curr;
if (t_mult_ != 1.0) {
      // the period grows by a factor of t_mult after every restart
i = floor(
log(1 - double(iter) / double(period_) * (1.0 - t_mult_)) /
log(t_mult_));
t_i = pow(t_mult_, i) * period_;
t_curr = iter - (1.0 - pow(t_mult_, i)) / (1.0 - t_mult_) * period_;
} else {
// fixed period
i = floor(double(iter) / double(period_));
t_i = period_;
t_curr = iter - t_i * i;
}
T lr_shrink = pow(lr_shrink_, i);
T min_lr = min_lr_ * lr_shrink;
T max_lr = max_lr_ * lr_shrink;
T final_lr =
min_lr + 0.5 * (max_lr - min_lr) * (1 + cos(M_PI * t_curr / t_i));
return final_lr;
}
T min_lr_;
T max_lr_;
int64_t period_;
T t_mult_;
T lr_shrink_;
};
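// Editorial example (values hypothetical): with min_lr = 0.1, max_lr = 0.5,
// period = 50, t_mult = 1, lr_shrink = 1 and iter = 25:
//   i = 0, t_i = 50, t_curr = 25
//   multiplier = 0.1 + 0.5 * (0.5 - 0.1) * (1 + cos(pi / 2)) = 0.3
// i.e. halfway through a cycle the rate sits midway between the two bounds.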
// constantThenLinearWarmup: first use a constant multiplier
// and then ramp up to the global lr
template <typename T>
class ConstantThenLinearWarmupLearningRate : public LearningRateFunctor<T> {
public:
ConstantThenLinearWarmupLearningRate(
const T start_warmup_multiplier,
const int64_t constant_warmup_num_iter,
const int64_t linear_warmup_num_iter)
: constant_warmup_num_iter_(constant_warmup_num_iter),
linear_warmup_num_iter_(linear_warmup_num_iter),
constant_warmup_lr_(start_warmup_multiplier, constant_warmup_num_iter),
linear_warmup_lr_(start_warmup_multiplier, linear_warmup_num_iter) {}
T operator()(const int64_t iter) const override {
if (iter < constant_warmup_num_iter_) {
return constant_warmup_lr_(iter);
} else if (iter < constant_warmup_num_iter_ + linear_warmup_num_iter_) {
return linear_warmup_lr_(iter - constant_warmup_num_iter_);
} else {
return 1.0;
}
}
int64_t constant_warmup_num_iter_;
int64_t linear_warmup_num_iter_;
ConstantWarmupLearningRate<T> constant_warmup_lr_;
LinearWarmupLearningRate<T> linear_warmup_lr_;
};
// CompositeCosineLearningRate: first use a constant multiplier, then ramp up
// to the global lr, then switch to a cosine schedule
template <typename T>
class CompositeCosineLearningRate : public LearningRateFunctor<T> {
public:
CompositeCosineLearningRate(
const T start_warmup_multiplier,
const int64_t constant_warmup_num_iter,
const int64_t linear_warmup_num_iter,
const T cosine_min_lr,
const T cosine_max_lr,
const int64_t cosine_period,
      const T cosine_t_mult,
const T cosine_lr_shrink)
: constant_warmup_num_iter_(constant_warmup_num_iter),
linear_warmup_num_iter_(linear_warmup_num_iter),
constant_then_linear_warmup_lr_(
start_warmup_multiplier,
constant_warmup_num_iter,
linear_warmup_num_iter),
cosine_lr_(
cosine_min_lr,
cosine_max_lr,
cosine_period,
            cosine_t_mult,
cosine_lr_shrink) {}
T operator()(const int64_t iter) const override {
if (iter < constant_warmup_num_iter_ + linear_warmup_num_iter_) {
return constant_then_linear_warmup_lr_(iter);
}
return cosine_lr_(
iter - constant_warmup_num_iter_ - linear_warmup_num_iter_);
}
int64_t constant_warmup_num_iter_;
int64_t linear_warmup_num_iter_;
ConstantThenLinearWarmupLearningRate<T> constant_then_linear_warmup_lr_;
CosineLearningRate<T> cosine_lr_;
};
// CompositeCyclicalLearningRate: first use a constant multiplier, then ramp up
// to the global lr, then switch to a cyclical schedule
template <typename T>
class CompositeCyclicalLearningRate : public LearningRateFunctor<T> {
public:
CompositeCyclicalLearningRate(
const T base_lr,
const T start_warmup_multiplier,
const int64_t constant_warmup_num_iter,
const int64_t linear_warmup_num_iter,
const T cyclical_max_lr,
const int cyclical_step_size,
const T cyclical_decay)
: constant_warmup_num_iter_(constant_warmup_num_iter),
linear_warmup_num_iter_(linear_warmup_num_iter),
constant_then_linear_warmup_lr_(
start_warmup_multiplier,
constant_warmup_num_iter,
linear_warmup_num_iter),
cyclical_lr_(
base_lr,
cyclical_max_lr,
cyclical_step_size,
cyclical_decay) {}
T operator()(const int64_t iter) const override {
if (iter < constant_warmup_num_iter_ + linear_warmup_num_iter_) {
return constant_then_linear_warmup_lr_(iter);
}
return cyclical_lr_(
iter - constant_warmup_num_iter_ - linear_warmup_num_iter_);
}
int64_t constant_warmup_num_iter_;
int64_t linear_warmup_num_iter_;
ConstantThenLinearWarmupLearningRate<T> constant_then_linear_warmup_lr_;
CyclicalLearningRate<T> cyclical_lr_;
};
} // namespace caffe2
#endif // CAFFE2_SGD_LEARNING_RATE_FUNCTORS_H_
| 15,158
| 29.873727
| 108
|
h
|
null |
pytorch-main/caffe2/sgd/learning_rate_op.h
|
#ifndef CAFFE2_SGD_LEARNING_RATE_OP_H_
#define CAFFE2_SGD_LEARNING_RATE_OP_H_
#include <cfloat>
#include <cmath>
#include "caffe2/core/context.h"
#include "caffe2/core/export_caffe2_op_to_c10.h"
#include <c10/util/irange.h>
#include "caffe2/core/operator.h"
#include "caffe2/sgd/learning_rate_functors.h"
C10_DECLARE_EXPORT_CAFFE2_OP_TO_C10(LearningRate);
namespace caffe2 {
template <typename T, class Context>
class LearningRateOp final : public Operator<Context> {
public:
template <class... Args>
LearningRateOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
functor_(nullptr),
base_lr_(this->template GetSingleArgument<float>("base_lr", FLT_MAX)) {
CAFFE_ENFORCE_NE(base_lr_, FLT_MAX, "Base learning rate must be set.");
const string policy =
this->template GetSingleArgument<string>("policy", "");
CAFFE_ENFORCE(policy.size(), "Must specify a learning rate policy.");
functor_.reset(createLearningRateFunctor(policy));
}
USE_OPERATOR_CONTEXT_FUNCTIONS;
bool RunOnDevice() override {
int64_t iter =
OperatorBase::Input<Tensor>(0, CPU).template data<int64_t>()[0];
T learning_rate = base_lr_ * (*functor_)(iter);
// Write to output.
auto* output = Output(0);
output->Resize(vector<int64_t>());
context_.template CopyFromCPU<T>(
1, &learning_rate, Output(0)->template mutable_data<T>());
return true;
}
private:
unique_ptr<LearningRateFunctor<T>> functor_;
T base_lr_;
LearningRateFunctor<T>* createLearningRateFunctor(
const string& policy,
const string& arg_prefix = "") {
if (policy == "fixed") {
return new FixedLearningRate<T>();
} else if (policy == "alter") {
bool active_first = this->template GetSingleArgument<bool>(
arg_prefix + "active_first", true);
int64_t active_period = this->template GetSingleArgument<int64_t>(
arg_prefix + "active_period", -1);
int64_t inactive_period = this->template GetSingleArgument<int64_t>(
arg_prefix + "inactive_period", -1);
TORCH_DCHECK_GE(active_period, 0);
TORCH_DCHECK_GE(inactive_period, 0);
return new AlternateLearningRate<T>(
active_period, inactive_period, active_first);
} else if (policy == "hill") {
int64_t num_iter =
this->template GetSingleArgument<int64_t>(arg_prefix + "num_iter", 0);
TORCH_DCHECK_GT(num_iter, 0);
T start_multiplier = this->template GetSingleArgument<float>(
arg_prefix + "start_multiplier", 0.);
TORCH_DCHECK_GE(start_multiplier, 0); // start_multiplier in range [0, 1]
TORCH_DCHECK_LE(start_multiplier, 1);
T gamma =
this->template GetSingleArgument<float>(arg_prefix + "gamma", 0);
TORCH_DCHECK_GT(gamma, 0);
T power =
this->template GetSingleArgument<float>(arg_prefix + "power", 0);
TORCH_DCHECK_GT(power, 0);
T end_multiplier = this->template GetSingleArgument<float>(
arg_prefix + "end_multiplier", 0);
TORCH_DCHECK_GE(end_multiplier, 0); // end_multiplier in range [0, 1]
TORCH_DCHECK_LE(end_multiplier, 1);
return new HillLearningRate<T>(
num_iter, start_multiplier, gamma, power, end_multiplier);
} else if (policy == "slope") {
int64_t num_iter_1 = this->template GetSingleArgument<int64_t>(
arg_prefix + "num_iter_1", 0);
TORCH_DCHECK_GT(num_iter_1, 0);
T multiplier_1 = this->template GetSingleArgument<float>(
arg_prefix + "multiplier_1", 0.);
int64_t num_iter_2 = this->template GetSingleArgument<int64_t>(
arg_prefix + "num_iter_2", 0);
T multiplier_2 = this->template GetSingleArgument<float>(
arg_prefix + "multiplier_2", 0.);
TORCH_DCHECK_GT(num_iter_2, num_iter_1);
return new SlopeLearningRate<T>(
num_iter_1, multiplier_1, num_iter_2, multiplier_2);
} else if (policy == "step") {
int stepsize =
this->template GetSingleArgument<int>(arg_prefix + "stepsize", 0);
T gamma =
this->template GetSingleArgument<float>(arg_prefix + "gamma", 0);
TORCH_DCHECK_GT(stepsize, 0);
TORCH_DCHECK_GT(gamma, 0);
return new StepLearningRate<T>(stepsize, gamma);
} else if (policy == "exp") {
T gamma =
this->template GetSingleArgument<float>(arg_prefix + "gamma", 0);
TORCH_DCHECK_GT(gamma, 0);
return new ExpLearningRate<T>(gamma);
} else if (policy == "gate") {
T multiplier_1 = this->template GetSingleArgument<float>(
arg_prefix + "multiplier_1", 1);
T multiplier_2 = this->template GetSingleArgument<float>(
arg_prefix + "multiplier_2", 1);
int num_iter =
this->template GetSingleArgument<int>(arg_prefix + "num_iter", 0);
// no constraint on the range of multiplier_1 and multiplier_2
return new GateLearningRate<T>(multiplier_1, multiplier_2, num_iter);
} else if (policy == "inv") {
T gamma =
this->template GetSingleArgument<float>(arg_prefix + "gamma", 0);
T power =
this->template GetSingleArgument<float>(arg_prefix + "power", 0);
TORCH_DCHECK_GT(gamma, 0);
TORCH_DCHECK_GT(power, 0);
return new InvLearningRate<T>(gamma, power);
} else if (policy == "poly") {
int max_iter =
this->template GetSingleArgument<int>(arg_prefix + "max_iter", -1);
T power =
this->template GetSingleArgument<float>(arg_prefix + "power", 0);
TORCH_DCHECK_GT(power, 0);
return new PolyLearningRate<T>(power, max_iter);
} else if (policy == "linearWarmup") {
T start_multiplier = this->template GetSingleArgument<float>(
arg_prefix + "start_multiplier", 0.);
int num_iter =
this->template GetSingleArgument<int>(arg_prefix + "num_iter", 0);
TORCH_DCHECK_GE(start_multiplier, 0);
return new LinearWarmupLearningRate<T>(start_multiplier, num_iter);
} else if (policy == "constantWarmup") {
T multiplier = this->template GetSingleArgument<float>(
arg_prefix + "multiplier", 0.5);
int num_iter =
this->template GetSingleArgument<int>(arg_prefix + "num_iter", 0);
TORCH_DCHECK_GT(multiplier, 0);
return new ConstantWarmupLearningRate<T>(multiplier, num_iter);
} else if (policy == "pieceWarmup") {
T m1 = this->template GetSingleArgument<float>(arg_prefix + "m1", 0.5);
int64_t n1 =
this->template GetSingleArgument<int64_t>(arg_prefix + "n1", 0);
T m2 = this->template GetSingleArgument<float>(arg_prefix + "m2", 0.5);
int64_t n2 =
this->template GetSingleArgument<int64_t>(arg_prefix + "n2", 0);
T m3 = this->template GetSingleArgument<float>(arg_prefix + "m3", 0.5);
return new PieceWarmupLearningRate<T>(m1, n1, m2, n2, m3);
} else if (policy == "composite") {
std::vector<int> sub_policy_num_iters =
this->template GetRepeatedArgument<int>("sub_policy_num_iters");
std::list<CompositeLearningRateItem<T>> sub_policies;
CAFFE_ENFORCE_GT(
sub_policy_num_iters.size(),
0,
"Must specify at least one sub learning rate policy.");
for (const auto i : c10::irange(sub_policy_num_iters.size())) {
CAFFE_ENFORCE_GT(
sub_policy_num_iters[i],
0,
"The number of iterations for sub learning rate policy should be positive.");
std::stringstream sub_policy_arg_prefix;
sub_policy_arg_prefix << "sub_policy_" << i << "_";
const string sub_policy_arg_prefix_str = sub_policy_arg_prefix.str();
const string sub_policy = this->template GetSingleArgument<string>(
sub_policy_arg_prefix_str + "policy", "");
if (sub_policy == "composite") {
CAFFE_THROW(
"Defining composite LR policy as a subpolicy of composite LR "
"policy is not allowed.");
}
const float scale_lr = this->template GetSingleArgument<float>(
sub_policy_arg_prefix_str + "lr_scale", 1.0);
sub_policies.push_back(CompositeLearningRateItem<T>(
sub_policy_num_iters[i],
scale_lr,
createLearningRateFunctor(sub_policy, sub_policy_arg_prefix_str)));
}
return new CompositeLearningRate<T>(sub_policies);
} else if (policy == "cyclical") {
T max_lr =
this->template GetSingleArgument<float>(arg_prefix + "max_lr", 0.005);
int stepsize =
this->template GetSingleArgument<int>(arg_prefix + "stepsize", 0);
T decay =
this->template GetSingleArgument<float>(arg_prefix + "decay", 1.0);
TORCH_DCHECK_GT(stepsize, 0);
TORCH_DCHECK_GE(max_lr, base_lr_);
return new CyclicalLearningRate<T>(base_lr_, max_lr, stepsize, decay);
} else if (policy == "constantThenLinearWarmup") {
T start_warmup_multiplier = this->template GetSingleArgument<float>(
arg_prefix + "start_warmup_multiplier", 0.1);
int64_t constant_warmup_num_iter = this->template GetSingleArgument<int64_t>(
arg_prefix + "constant_warmup_num_iter", 10000000);
int64_t linear_warmup_num_iter = this->template GetSingleArgument<int64_t>(
arg_prefix + "linear_warmup_num_iter", 10000000);
return new ConstantThenLinearWarmupLearningRate<T>(
start_warmup_multiplier,
constant_warmup_num_iter,
linear_warmup_num_iter);
} else if (policy == "compositeCyclical") {
T start_warmup_multiplier = this->template GetSingleArgument<float>(
arg_prefix + "start_warmup_multiplier", 0.1);
int64_t constant_warmup_num_iter = this->template GetSingleArgument<int64_t>(
arg_prefix + "constant_warmup_num_iter", 10000000);
int64_t linear_warmup_num_iter = this->template GetSingleArgument<int64_t>(
arg_prefix + "linear_warmup_num_iter", 10000000);
T cyclical_max_lr = this->template GetSingleArgument<float>(
arg_prefix + "cyclical_max_lr", 0.05);
int cyclical_step_size = this->template GetSingleArgument<int>(
arg_prefix + "cyclical_step_size", 1000000);
T cyclical_decay = this->template GetSingleArgument<float>(
arg_prefix + "cyclical_decay", 1.0);
TORCH_DCHECK_GE(cyclical_max_lr, base_lr_);
return new CompositeCyclicalLearningRate<T>(
base_lr_,
start_warmup_multiplier,
constant_warmup_num_iter,
linear_warmup_num_iter,
cyclical_max_lr,
cyclical_step_size,
cyclical_decay);
} else if (policy == "cosine") {
T max_lr =
this->template GetSingleArgument<float>(arg_prefix + "max_lr", 0.5);
T min_lr =
this->template GetSingleArgument<float>(arg_prefix + "min_lr", 0.1);
int64_t period =
this->template GetSingleArgument<int>(arg_prefix + "period", 50);
T t_mult =
this->template GetSingleArgument<float>(arg_prefix + "t_mult", 1.0);
T lr_shrink = this->template GetSingleArgument<float>(
arg_prefix + "lr_shrink", 0.99);
TORCH_DCHECK_GE(max_lr, min_lr);
return new CosineLearningRate<T>(
min_lr, max_lr, period, t_mult, lr_shrink);
} else if (policy == "compositeCosine") {
T start_warmup_multiplier = this->template GetSingleArgument<float>(
arg_prefix + "start_warmup_multiplier", 0.1);
int64_t constant_warmup_num_iter = this->template GetSingleArgument<int64_t>(
arg_prefix + "constant_warmup_num_iter", 10000000);
int64_t linear_warmup_num_iter = this->template GetSingleArgument<int64_t>(
arg_prefix + "linear_warmup_num_iter", 10000000);
T cosine_max_lr = this->template GetSingleArgument<float>(
arg_prefix + "cosine_max_lr", 0.5);
T cosine_min_lr = this->template GetSingleArgument<float>(
arg_prefix + "cosine_min_lr", 0.1);
int64_t cosine_period = this->template GetSingleArgument<int>(
arg_prefix + "cosine_period", 50);
T cosine_t_mult = this->template GetSingleArgument<float>(
arg_prefix + "cosine_t_mult", 1.0);
T cosine_lr_shrink = this->template GetSingleArgument<float>(
arg_prefix + "cosine_lr_shrink", 0.99);
TORCH_DCHECK_GE(cosine_max_lr, cosine_min_lr);
return new CompositeCosineLearningRate<T>(
start_warmup_multiplier,
constant_warmup_num_iter,
linear_warmup_num_iter,
cosine_min_lr,
cosine_max_lr,
cosine_period,
cosine_t_mult,
cosine_lr_shrink);
} else {
CAFFE_THROW("Unknown learning rate policy: ", policy);
      return nullptr;
}
}
};
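// ---------------------------------------------------------------------------
// Editorial sketch (not part of the original operator): what the op computes
// for the "step" policy once the caffe2 plumbing is stripped away. The name
// is hypothetical; std::pow comes from <cmath> via the functors header.
inline float step_policy_lr_sketch(
    int64_t iter, float base_lr, int stepsize, float gamma) {
  // The op multiplies base_lr by the functor's output; for "step" that is
  // gamma ^ floor(iter / stepsize).
  return base_lr * std::pow(gamma, static_cast<float>(iter / stepsize));
}
// ---------------------------------------------------------------------------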
} // namespace caffe2
#endif // CAFFE2_SGD_LEARNING_RATE_OP_H_
| 12,880
| 44.355634
| 89
|
h
|
null |
pytorch-main/caffe2/sgd/momentum_sgd_op.h
|
#pragma once
#include "caffe2/core/operator.h"
namespace caffe2 {
template <typename Context>
void momentum_sgd_update(
const int N,
const float* g,
const float* m,
float* ng,
float* nm,
const float* lr,
const float momentum,
const bool nesterov,
float* param,
Context* /*context*/) {
const float LR = lr[0];
for (const auto i : c10::irange(N)) {
if (!nesterov) {
const float adjusted_gradient = LR * g[i] + momentum * m[i];
nm[i] = adjusted_gradient;
ng[i] = adjusted_gradient;
} else {
const float mi = m[i];
const float mi_new = momentum * mi + LR * g[i];
nm[i] = mi_new;
ng[i] = (1 + momentum) * mi_new - momentum * mi;
}
if (param) {
param[i] -= ng[i];
}
}
}
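// Editorial note (not part of the original file): the Nesterov branch uses
// the standard NAG rewrite. With v' = momentum * v + lr * g, applying the
// update at the look-ahead point is equivalent to stepping by
//   (1 + momentum) * v' - momentum * v,
// which is exactly what ng receives in the loop above.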
template <typename T, class Context>
class MomentumSGDOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
MomentumSGDOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws),
momentum_(this->template GetSingleArgument<T>("momentum", 0.0)),
nesterov_(this->template GetSingleArgument<bool>("nesterov", false)) {}
bool RunOnDevice() override {
auto device_type = Context::GetDeviceType();
    // GRAD and MOMENTUM must live on this operator's device
CAFFE_ENFORCE(OperatorBase::InputIsTensorType(GRAD, device_type));
CAFFE_ENFORCE(OperatorBase::InputIsTensorType(MOMENTUM, device_type));
CAFFE_ENFORCE(Input(LR).numel() == 1);
CAFFE_ENFORCE(Input(GRAD).numel() == Input(MOMENTUM).numel());
Output(OUTPUT_GRAD)->ResizeLike(Input(GRAD));
Output(OUTPUT_MOMENTUM)->ResizeLike(Input(MOMENTUM));
momentum_sgd_update<Context>(
Input(GRAD).numel(),
Input(GRAD).template data<T>(),
Input(MOMENTUM).template data<T>(),
Output(OUTPUT_GRAD)->template mutable_data<T>(),
Output(OUTPUT_MOMENTUM)->template mutable_data<T>(),
Input(LR).template data<T>(),
momentum_,
nesterov_,
        nullptr,
&context_);
return true;
}
protected:
T momentum_{0.9};
bool nesterov_;
INPUT_TAGS(GRAD, MOMENTUM, LR);
OUTPUT_TAGS(OUTPUT_GRAD, OUTPUT_MOMENTUM);
};
template <typename T, class Context>
class MomentumSGDUpdateOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
MomentumSGDUpdateOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws),
momentum_(this->template GetSingleArgument<T>("momentum", 0.0)),
nesterov_(this->template GetSingleArgument<bool>("nesterov", false)) {}
bool RunOnDevice() override {
auto device_type = Context::GetDeviceType();
    // GRAD and MOMENTUM must live on this operator's device
CAFFE_ENFORCE(OperatorBase::InputIsTensorType(GRAD, device_type));
CAFFE_ENFORCE(OperatorBase::InputIsTensorType(MOMENTUM, device_type));
CAFFE_ENFORCE_EQ(Input(LR).numel(), 1);
CAFFE_ENFORCE_EQ(Input(GRAD).numel(), Input(MOMENTUM).numel());
Output(OUTPUT_GRAD)->ResizeLike(Input(GRAD));
Output(OUTPUT_MOMENTUM)->ResizeLike(Input(MOMENTUM));
momentum_sgd_update<Context>(
Input(GRAD).numel(),
Input(GRAD).template data<T>(),
Input(MOMENTUM).template data<T>(),
Output(OUTPUT_GRAD)->template mutable_data<T>(),
Output(OUTPUT_MOMENTUM)->template mutable_data<T>(),
Input(LR).template data<T>(),
momentum_,
nesterov_,
Output(OUTPUT_PARAM)->template mutable_data<T>(),
&context_);
return true;
}
protected:
T momentum_{0.9};
bool nesterov_;
INPUT_TAGS(GRAD, MOMENTUM, LR, PARAM);
OUTPUT_TAGS(OUTPUT_GRAD, OUTPUT_MOMENTUM, OUTPUT_PARAM);
};
template <typename T, class Context>
class SparseMomentumSGDUpdateOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
SparseMomentumSGDUpdateOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws),
momentum_(this->template GetSingleArgument<T>("momentum", 0.0)),
nesterov_(this->template GetSingleArgument<bool>("nesterov", false)) {}
bool RunOnDevice() override {
// Resize [potentially] out-of-place blobs
Output(OUTPUT_GRAD)->ResizeLike(Input(GRAD));
// Enforce shapes
CAFFE_ENFORCE_EQ(Input(LR).numel(), 1);
CAFFE_ENFORCE_EQ(Input(PARAM).numel(), Input(MOMENTUM).numel());
CAFFE_ENFORCE_EQ(
Input(PARAM).size_from_dim(1),
Input(GRAD).size_from_dim(Input(INDICES).dim()));
return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(
this, Input(INDICES));
}
template <typename SIndex>
bool DoRunWithType() {
auto block_size = Input(PARAM).numel() / Input(PARAM).size(0);
auto n = Input(GRAD).numel() / block_size;
const auto* gradIn = Input(GRAD).template data<T>();
const auto* momentumIn = Input(MOMENTUM).template data<T>();
const auto* lr = Input(LR).template data<T>();
const auto* indices = Input(INDICES).template data<SIndex>();
auto* gradOut = Output(OUTPUT_GRAD)->template mutable_data<T>();
auto* momentumOut = Output(OUTPUT_MOMENTUM)->template mutable_data<T>();
auto* paramOut = Output(OUTPUT_PARAM)->template mutable_data<T>();
for (const auto i : c10::irange(n)) {
auto idx = indices[i];
auto offsetI = i * block_size;
auto offsetIdx = idx * block_size;
CAFFE_ENFORCE(offsetIdx + block_size <= Input(PARAM).numel());
CAFFE_ENFORCE(offsetI + block_size <= Input(GRAD).numel());
momentum_sgd_update<Context>(
block_size,
gradIn + offsetI,
momentumIn + offsetIdx,
gradOut + offsetI,
momentumOut + offsetIdx,
lr,
momentum_,
nesterov_,
paramOut + offsetIdx,
&context_);
}
return true;
}
protected:
T momentum_;
bool nesterov_;
INPUT_TAGS(GRAD, MOMENTUM, LR, PARAM, INDICES);
OUTPUT_TAGS(OUTPUT_GRAD, OUTPUT_MOMENTUM, OUTPUT_PARAM);
};
} // namespace caffe2
| 6,049
| 31.352941
| 79
|
h
|
null |
pytorch-main/caffe2/sgd/rmsprop_op.h
|
#pragma once
#include "caffe2/core/common_omp.h"
#include "caffe2/core/operator.h"
namespace caffe2 {
template <typename Context>
void rmsprop_update(
int N,
const float* g,
const float* ms,
const float* mom,
float* ng,
float* nms,
float* nmom,
float decay,
float momentum,
float epsilon,
const float* lr,
Context* context);
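// ---------------------------------------------------------------------------
// Editorial sketch (not part of the original header): the conventional
// RMSProp update these arguments suggest. The real kernel lives in
// rmsprop_op.cc, so treat the exact form here as an assumption; the name is
// hypothetical and std::sqrt is assumed available via <cmath>.
inline void rmsprop_step_sketch(
    float g, float& ms, float& mom, float& ng,
    float decay, float momentum, float epsilon, float lr) {
  ms = decay * ms + (1.f - decay) * g * g; // running mean of squared gradient
  mom = momentum * mom + lr * g / std::sqrt(ms + epsilon);
  ng = mom; // the adjusted gradient handed back to the solver
}
// ---------------------------------------------------------------------------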
template <typename T, class Context>
class RmsPropOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
RmsPropOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws),
decay_(this->template GetSingleArgument<float>("decay", 0.9f)),
momentum_(this->template GetSingleArgument<float>("momentum", 0.0f)),
epsilon_(this->template GetSingleArgument<float>("epsilon", 1e-5f)) {}
bool RunOnDevice() override {
CAFFE_ENFORCE(Input(LR).numel() == 1);
CAFFE_ENFORCE(Input(GRAD).numel() == Input(MEAN_SQUARES).numel());
    CAFFE_ENFORCE(Input(GRAD).numel() == Input(MOMENTUM).numel());
    Output(OUTPUT_GRAD)->ResizeLike(Input(GRAD));
Output(OUTPUT_MEAN_SQUARES)->ResizeLike(Input(MEAN_SQUARES));
Output(OUTPUT_MOMENTUM)->ResizeLike(Input(MOMENTUM));
rmsprop_update<Context>(
Input(GRAD).numel(),
Input(GRAD).template data<T>(),
Input(MEAN_SQUARES).template data<T>(),
Input(MOMENTUM).template data<T>(),
Output(OUTPUT_GRAD)->template mutable_data<T>(),
Output(OUTPUT_MEAN_SQUARES)->template mutable_data<T>(),
Output(OUTPUT_MOMENTUM)->template mutable_data<T>(),
decay_,
momentum_,
epsilon_,
Input(LR).template data<T>(),
&context_);
return true;
}
protected:
T decay_{0.9};
T momentum_{0.0};
T epsilon_{1e-8};
INPUT_TAGS(GRAD, MEAN_SQUARES, MOMENTUM, LR);
OUTPUT_TAGS(OUTPUT_GRAD, OUTPUT_MEAN_SQUARES, OUTPUT_MOMENTUM);
};
} // namespace caffe2
| 1,980
| 29.953125
| 78
|
h
|
null |
pytorch-main/caffe2/sgd/rowwise_counter.h
|
#pragma once
#include "caffe2/core/operator.h"
namespace caffe2 {
class RowWiseCounterOp final : public Operator<CPUContext> {
public:
RowWiseCounterOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<CPUContext>(operator_def, ws),
counter_halflife_(
this->template GetSingleArgument<int64_t>("counter_halflife", -1)),
counter_neg_log_rho_(0.0) {
if (counter_halflife_ > 0) {
counter_neg_log_rho_ = std::log(2.0) / counter_halflife_;
}
}
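  // With a positive half-life h, each idle iteration multiplies a row's count
  // by exp(-ln(2) / h) = 2^(-1 / h), so a row untouched for h iterations has
  // its count halved before the +1 for the current visit is added below.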
bool RunOnDevice() override {
CAFFE_ENFORCE_EQ(Input(PREV_ITER).numel(), Input(COUNTER).numel());
CAFFE_ENFORCE_EQ(Input(ITER).numel(), 1);
return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(
this, Input(INDICES));
}
template <typename SIndex>
bool DoRunWithType() {
auto* prev_iter =
Output(OUTPUT_PREV_ITER)->template mutable_data<int64_t>();
auto* counter = Output(OUTPUT_COUNTER)->template mutable_data<double>();
const int64_t curr_iter = Input(ITER).template data<int64_t>()[0];
const auto* indices = Input(INDICES).template data<SIndex>();
auto n = Input(INDICES).numel();
if (n == 0) {
return true;
}
if (counter_halflife_ <= 0) {
return true;
}
for (const auto i : c10::irange(n)) {
const std::size_t idx = indices[i];
      CAFFE_ENFORCE_GT(
Input(COUNTER).numel(),
idx,
this->debug_def().input(COUNTER),
", out of bound, idx:",
idx,
" for input i:",
i,
" max size:",
Input(COUNTER).numel());
const int64_t iter_delta =
std::max<int64_t>(0, curr_iter - prev_iter[idx]);
counter[idx] =
1.0 + std::exp(-iter_delta * counter_neg_log_rho_) * counter[idx];
prev_iter[idx] = std::max<int64_t>(curr_iter, prev_iter[idx]);
}
return true;
}
protected:
int64_t counter_halflife_;
double counter_neg_log_rho_;
INPUT_TAGS(PREV_ITER, COUNTER, INDICES, ITER);
OUTPUT_TAGS(OUTPUT_PREV_ITER, OUTPUT_COUNTER);
};
} // namespace caffe2
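// Hedged standalone sketch of the decayed-count update in DoRunWithType()
// above: with rho = 2^(-1 / counter_halflife), each row's count is
//   counter <- 1 + rho^iter_delta * counter
// so an idle gap of exactly one halflife halves the old count before the
// new hit is added.
#include <cmath>
#include <cstdint>

inline double decayed_count(
    double old_count, int64_t iter_delta, int64_t halflife) {
  const double neg_log_rho = std::log(2.0) / static_cast<double>(halflife);
  return 1.0 +
      std::exp(-static_cast<double>(iter_delta) * neg_log_rho) * old_count;
}
// decayed_count(10.0, h, h) is approximately 6.0 for any halflife h.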
| 2,090
| 27.643836
| 79
|
h
|
null |
pytorch-main/caffe2/sgd/storm_op.h
|
#pragma once
#include "caffe2/core/operator.h"
namespace caffe2 {
template <typename Context>
void storm_update(
const int N,
const float* paramIn,
const float* momentIn,
const float* gradSqSumIn,
const float* gradIn,
const float* lr,
float* paramOut,
float* momentOut,
float* gradSqSumOut,
const float momentum,
const float beta,
Context* /*context*/) {
float gradSqSumTmp = 0.0;
for (const auto i : c10::irange(N)) {
const float gi = gradIn[i];
gradSqSumTmp += gi * gi;
}
gradSqSumOut[0] = gradSqSumIn[0] + gradSqSumTmp;
const float nlr = lr[0] * std::pow(beta + gradSqSumOut[0], -1.0 / 3.0);
const float alpha = momentum * nlr * nlr;
for (const auto i : c10::irange(N)) {
const float gi = gradIn[i];
const float mi = momentIn[i];
float new_mi = momentOut[i] = gi + (1.0 - alpha) * (mi - gi);
paramOut[i] = paramIn[i] + nlr * new_mi;
}
}
template <class Context>
class StormOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
StormOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws),
OP_SINGLE_ARG(float, "momentum", momentum_, 10.0),
OP_SINGLE_ARG(float, "beta", beta_, 0.1) {}
bool RunOnDevice() override {
// Enforce shapes
CAFFE_ENFORCE_EQ(Input(GRAD).numel(), Input(PARAM).numel());
CAFFE_ENFORCE_EQ(Input(GRAD).numel(), Input(MOMENT).numel());
CAFFE_ENFORCE_EQ(Input(GRADSQSUM).numel(), 1);
CAFFE_ENFORCE_EQ(Input(LR).numel(), 1);
// Resize [potentially] out-of-place blobs
Output(OUTPUT_PARAM)->ResizeLike(Input(PARAM));
Output(OUTPUT_MOMENT)->ResizeLike(Input(MOMENT));
    Output(OUTPUT_GRADSQSUM)->ResizeLike(Input(GRADSQSUM));
storm_update<Context>(
Input(GRAD).numel(),
Input(PARAM).template data<float>(),
Input(MOMENT).template data<float>(),
Input(GRADSQSUM).template data<float>(),
Input(GRAD).template data<float>(),
Input(LR).template data<float>(),
Output(OUTPUT_PARAM)->template mutable_data<float>(),
Output(OUTPUT_MOMENT)->template mutable_data<float>(),
        Output(OUTPUT_GRADSQSUM)->template mutable_data<float>(),
momentum_,
beta_,
&context_);
return true;
}
protected:
const float momentum_;
const float beta_;
INPUT_TAGS(PARAM, MOMENT, GRADSQSUM, GRAD, LR);
  OUTPUT_TAGS(OUTPUT_PARAM, OUTPUT_MOMENT, OUTPUT_GRADSQSUM);
};
template <class Context>
class SparseStormOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
SparseStormOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws),
OP_SINGLE_ARG(float, "momentum", momentum_, 10.0),
OP_SINGLE_ARG(float, "beta", beta_, 0.1) {}
bool RunOnDevice() override {
// Enforce shapes
CAFFE_ENFORCE_EQ(Input(PARAM).numel(), Input(MOMENT).numel());
CAFFE_ENFORCE_EQ(Input(GRADSQSUM).numel(), 1);
CAFFE_ENFORCE_EQ(Input(LR).numel(), 1);
CAFFE_ENFORCE_EQ(
Input(PARAM).size_from_dim(1),
Input(GRAD).size_from_dim(Input(INDICES).dim()));
return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(
this, Input(INDICES));
}
template <typename SIndex>
bool DoRunWithType() {
const auto* paramIn = Input(PARAM).template data<float>();
const auto* momentIn = Input(MOMENT).template data<float>();
const auto* gradSqSumIn = Input(GRADSQSUM).template data<float>();
const auto* gradIn = Input(GRAD).template data<float>();
const auto* indices = Input(INDICES).template data<SIndex>();
const auto* lr = Input(LR).template data<float>();
auto* paramOut = Output(OUTPUT_PARAM)->template mutable_data<float>();
auto* momentOut = Output(OUTPUT_MOMENT)->template mutable_data<float>();
auto* gradSqSumOut =
        Output(OUTPUT_GRADSQSUM)->template mutable_data<float>();
auto n = Input(INDICES).numel();
if (n == 0) {
return true;
}
float gradSqSumTmp = 0.0;
for (const auto i : c10::irange(Input(GRAD).numel())) {
const float gi = gradIn[i];
gradSqSumTmp += gi * gi;
}
gradSqSumOut[0] = gradSqSumIn[0] + gradSqSumTmp;
const float nlr = lr[0] * std::pow(beta_ + gradSqSumOut[0], -1.0 / 3.0);
const float alpha = momentum_ * nlr * nlr;
const auto block_size = Input(GRAD).numel() / n;
for (const auto i : c10::irange(n)) {
auto idx = indices[i];
if (block_size == 1) {
const float gi = gradIn[i];
const float mi = momentIn[idx];
float new_mi = momentOut[idx] = gi + (1.0 - alpha) * (mi - gi);
paramOut[idx] = paramIn[idx] + nlr * new_mi;
} else {
auto offsetI = i * block_size;
auto offsetIdx = idx * block_size;
#ifndef NDEBUG
CAFFE_ENFORCE_GE(
Input(PARAM).numel(),
block_size + offsetIdx,
this->debug_def().input(PARAM),
", out of bound, idx:",
idx,
" for input i:",
i,
" and block size:",
block_size);
CAFFE_ENFORCE_GE(
Input(GRAD).numel(),
block_size + offsetI,
this->debug_def().input(GRAD),
", out of bound idx, idx:",
idx,
" for input i:",
i);
#endif
for (const auto j : c10::irange(block_size)) {
const float gi = gradIn[offsetI + j];
const float mi = momentIn[offsetIdx + j];
float new_mi = momentOut[offsetIdx + j] =
gi + (1.0 - alpha) * (mi - gi);
paramOut[offsetIdx + j] = paramIn[offsetIdx + j] + nlr * new_mi;
}
}
}
return true;
}
protected:
const float momentum_;
const float beta_;
INPUT_TAGS(PARAM, MOMENT, GRADSQSUM, GRAD, INDICES, LR);
  OUTPUT_TAGS(OUTPUT_PARAM, OUTPUT_MOMENT, OUTPUT_GRADSQSUM);
};
} // namespace caffe2
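// Hedged numeric sketch of the STORM step sizes computed above: the adaptive
// learning rate shrinks as the running sum of squared gradient norms grows,
//   nlr   = lr * (beta + sum_t ||g_t||^2)^(-1/3)
//   alpha = momentum * nlr^2
// and the momentum buffer blends the current gradient with its history,
//   m <- g + (1 - alpha) * (m - g).
// All names below are local to this illustration.
#include <cmath>

struct StormScalars {
  float nlr;
  float alpha;
};

inline StormScalars storm_scalars(
    float lr, float beta, float momentum, float grad_sq_sum) {
  StormScalars s;
  s.nlr = lr * std::pow(beta + grad_sq_sum, -1.0f / 3.0f);
  s.alpha = momentum * s.nlr * s.nlr;
  return s;
}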
| 5,950
| 31.167568
| 76
|
h
|
null |
pytorch-main/caffe2/sgd/weight_scale_op.h
|
/**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "caffe2/core/operator.h"
#include <cstring>
#include <stdlib.h>
#include <time.h>
namespace caffe2 {
template <typename T, class Context>
void weight_scale_update(
int N,
const T* w,
const T scale,
int64_t iter,
int64_t stepsize,
int64_t update_upper_bound,
T* nw,
Context* context) {
  const auto w_size = N * sizeof(T);
if (iter % stepsize != 0 || iter >= update_upper_bound) {
memcpy(nw, w, w_size);
return;
}
// perform the weight scaling
caffe2::math::Scale<T, T, Context>(N, scale, w, nw, context);
}
template <class Context>
class WeightScaleOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
WeightScaleOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws),
stepsize_(OperatorBase::GetSingleArgument<int64_t>(
"stepsize",
std::numeric_limits<int64_t>::max())),
update_upper_bound_(OperatorBase::GetSingleArgument<int64_t>(
"upper_bound_iter",
std::numeric_limits<int64_t>::max())),
scale_(this->template GetSingleArgument<float>("scale", 1.0f)) {}
bool RunOnDevice() override {
Output(OUTPUT_WEIGHTS)->ResizeLike(Input(WEIGHTS));
return DispatchHelper<TensorTypes<float>>::call(this, Input(WEIGHTS));
}
template <typename T>
bool DoRunWithType() {
const auto iter =
OperatorBase::Input<Tensor>(ITER, CPU).template data<int64_t>()[0] + 1;
weight_scale_update<T, Context>(
Input(WEIGHTS).size(),
Input(WEIGHTS).template data<T>(),
scale_,
iter,
stepsize_,
update_upper_bound_,
Output(OUTPUT_WEIGHTS)->template mutable_data<T>(),
&context_);
return true;
}
protected:
int64_t stepsize_;
int64_t update_upper_bound_;
float scale_;
INPUT_TAGS(WEIGHTS, ITER);
OUTPUT_TAGS(OUTPUT_WEIGHTS);
};
} // namespace caffe2
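// Hedged sketch of the schedule implemented by weight_scale_update(): the
// weights are multiplied by `scale` only on iterations that are exact
// multiples of `stepsize` and still below `update_upper_bound`; every other
// iteration is a plain copy, so after k scaling events a weight w becomes
// w * scale^k. A standalone predicate for "does this iteration scale?":
#include <cstdint>

inline bool applies_weight_scale(
    int64_t iter, int64_t stepsize, int64_t update_upper_bound) {
  return iter % stepsize == 0 && iter < update_upper_bound;
}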
| 2,552
| 27.366667
| 79
|
h
|
null |
pytorch-main/caffe2/sgd/wngrad_op.h
|
#pragma once
#include "caffe2/core/operator.h"
namespace caffe2 {
template <typename Context>
void wngrad_update(
int N,
const float* w,
const float* g,
const float* h,
float* nw,
float* nh,
float epsilon,
const float* lr,
Context* /*context*/) {
for (const auto i : c10::irange(N)) {
float gi = g[i];
nw[i] = w[i] + lr[0] * gi / (h[0] + epsilon);
}
float nhTmp = 0.0;
for (const auto i : c10::irange(N)) {
float gi = g[i];
nhTmp += gi * gi;
}
nhTmp /= (h[0] + epsilon);
nh[0] = h[0] + nhTmp;
}
template <typename Context>
void wngrad_update_output_effective_lr(
int N,
const float* paramIn,
const float* gradIn,
const float* seqBIn,
float* paramOut,
float* seqBOut,
float* effectiveLROut,
float epsilon,
const float* lr,
Context* /*context*/) {
effectiveLROut[0] = lr[0] / (seqBIn[0] + epsilon);
float seqBTmp = 0.0;
for (const auto i : c10::irange(N)) {
float gi = gradIn[i];
seqBTmp += gi * gi;
}
seqBTmp /= (seqBIn[0] + epsilon);
seqBOut[0] = seqBIn[0] + seqBTmp;
for (const auto i : c10::irange(N)) {
float grad = gradIn[i];
paramOut[i] = paramIn[i] + effectiveLROut[0] * grad;
}
}
template <typename Context>
void wngrad_update_output_effective_lr_and_update(
int N,
const float* paramIn,
const float* gradIn,
const float* seqBIn,
float* paramOut,
float* seqBOut,
float* effectiveLROut,
float* updateOut,
float epsilon,
const float* lr,
Context* /*context*/) {
effectiveLROut[0] = lr[0] / (seqBIn[0] + epsilon);
float seqBTmp = 0.0;
for (const auto i : c10::irange(N)) {
float gi = gradIn[i];
seqBTmp += gi * gi;
}
seqBTmp /= (seqBIn[0] + epsilon);
seqBOut[0] = seqBIn[0] + seqBTmp;
for (const auto i : c10::irange(N)) {
float grad = gradIn[i];
float update = updateOut[i] = effectiveLROut[0] * grad;
paramOut[i] = paramIn[i] + update;
}
}
template <typename T, class Context>
class WngradOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
WngradOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws),
epsilon_(this->template GetSingleArgument<T>("epsilon", 1e-5f)) {}
bool RunOnDevice() override {
CAFFE_ENFORCE_EQ(
Input(GRAD).numel(),
Input(PARAM).numel(),
"PARAM size: ",
Input(PARAM).numel(),
", GRAD size: ",
Input(GRAD).numel(),
", SEQ_B size: ",
Input(SEQ_B).numel(),
", LR size: ",
Input(LR).numel());
Output(OUTPUT_PARAM)->ResizeLike(Input(PARAM));
Output(OUTPUT_SEQ_B)->ResizeLike(Input(SEQ_B));
if (OutputSize() == 2) {
wngrad_update<Context>(
Input(GRAD).numel(),
Input(PARAM).template data<T>(),
Input(GRAD).template data<T>(),
Input(SEQ_B).template data<T>(),
Output(OUTPUT_PARAM)->template mutable_data<T>(),
Output(OUTPUT_SEQ_B)->template mutable_data<T>(),
epsilon_,
Input(LR).template data<T>(),
&context_);
} else if (OutputSize() == 3) {
Output(OUTPUT_EFFECTIVE_LR)->ResizeLike(Input(SEQ_B));
wngrad_update_output_effective_lr<Context>(
Input(GRAD).numel(),
Input(PARAM).template data<T>(),
Input(GRAD).template data<T>(),
Input(SEQ_B).template data<T>(),
Output(OUTPUT_PARAM)->template mutable_data<T>(),
Output(OUTPUT_SEQ_B)->template mutable_data<T>(),
Output(OUTPUT_EFFECTIVE_LR)->template mutable_data<T>(),
epsilon_,
Input(LR).template data<T>(),
&context_);
} else {
Output(OUTPUT_EFFECTIVE_LR)->ResizeLike(Input(SEQ_B));
Output(OUTPUT_UPDATE)->ResizeLike(Input(GRAD));
wngrad_update_output_effective_lr_and_update<Context>(
Input(GRAD).numel(),
Input(PARAM).template data<T>(),
Input(GRAD).template data<T>(),
Input(SEQ_B).template data<T>(),
Output(OUTPUT_PARAM)->template mutable_data<T>(),
Output(OUTPUT_SEQ_B)->template mutable_data<T>(),
Output(OUTPUT_EFFECTIVE_LR)->template mutable_data<T>(),
Output(OUTPUT_UPDATE)->template mutable_data<T>(),
epsilon_,
Input(LR).template data<T>(),
&context_);
}
return true;
}
protected:
T epsilon_;
INPUT_TAGS(PARAM, SEQ_B, GRAD, LR);
OUTPUT_TAGS(OUTPUT_PARAM, OUTPUT_SEQ_B, OUTPUT_EFFECTIVE_LR, OUTPUT_UPDATE);
};
template <typename T, class Context>
class SparseWngradOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
SparseWngradOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws),
epsilon_(this->template GetSingleArgument<float>("epsilon", 1e-5f)) {}
bool RunOnDevice() override {
// Enforce shapes
CAFFE_ENFORCE_EQ(Input(SEQ_B).numel(), 1);
CAFFE_ENFORCE_EQ(Input(LR).numel(), 1);
CAFFE_ENFORCE_EQ(
Input(PARAM).size_from_dim(1),
Input(GRAD).size_from_dim(Input(INDICES).dim()));
return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(
this, Input(INDICES));
}
template <typename SIndex>
bool DoRunWithType() {
const auto* lr = Input(LR).template data<T>();
const auto* indices = Input(INDICES).template data<SIndex>();
const auto* gradIn = Input(GRAD).template data<T>();
const auto* paramIn = Input(PARAM).template data<T>();
const auto* seqBIn = Input(SEQ_B).template data<T>();
auto* paramOut = Output(OUTPUT_PARAM)->template mutable_data<T>();
auto* seqBOut = Output(OUTPUT_SEQ_B)->template mutable_data<T>();
auto n = Input(INDICES).numel();
if (n == 0) {
return true;
}
auto block_size = Input(GRAD).numel() / n;
for (const auto i : c10::irange(n)) {
auto idx = indices[i];
if (block_size == 1) {
float gi = gradIn[i];
paramOut[idx] = paramIn[idx] + lr[0] * gi / (seqBIn[0] + epsilon_);
} else {
auto offsetI = i * block_size;
auto offsetIdx = idx * block_size;
#ifndef NDEBUG
CAFFE_ENFORCE_GE(
Input(PARAM).numel(),
block_size + offsetIdx,
this->debug_def().input(PARAM),
", out of bound, idx:",
idx,
" for input i:",
i,
" and block size:",
block_size);
CAFFE_ENFORCE_GE(
Input(GRAD).numel(),
block_size + offsetI,
this->debug_def().input(GRAD),
", out of bound idx, idx:",
idx,
" for input i:",
i);
#endif
for (const auto j : c10::irange(block_size)) {
float gi = gradIn[offsetI + j];
paramOut[offsetIdx + j] =
paramIn[offsetIdx + j] + lr[0] * gi / (seqBIn[0] + epsilon_);
}
}
}
float seqBTmp = 0.0;
for (const auto i : c10::irange(Input(GRAD).numel())) {
float gi = gradIn[i];
seqBTmp += gi * gi;
}
    seqBTmp /= (seqBIn[0] + epsilon_); // keep consistent with the dense kernels
seqBOut[0] = seqBTmp + seqBIn[0];
return true;
}
protected:
T epsilon_;
INPUT_TAGS(PARAM, SEQ_B, INDICES, GRAD, LR);
OUTPUT_TAGS(OUTPUT_PARAM, OUTPUT_SEQ_B);
};
} // namespace caffe2
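// Hedged scalar sketch of the Wngrad bookkeeping shared by the kernels
// above: a single scalar b ("seq_b") accumulates normalized gradient energy
// and sets the effective step size,
//   effective_lr = lr / (b + eps)
//   b           <- b + (sum_i g_i^2) / (b + eps)
//   w           <- w + effective_lr * g   // sign matches the kernels above
inline float wngrad_seq_b_step(const float* g, int n, float b, float eps) {
  float sq = 0.0f;
  for (int i = 0; i < n; ++i) {
    sq += g[i] * g[i];
  }
  return b + sq / (b + eps);
}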
| 7,335
| 28.461847
| 78
|
h
|
null |
pytorch-main/caffe2/sgd/yellowfin_op.h
|
// YellowFin: An automatic tuner for momentum SGD
// (https://arxiv.org/abs/1706.03471)
// The YellowFinOp tunes the learning rate and momentum, and performs momentum
// SGD steps. A separate learning rate and momentum are maintained for each
// matrix of parameters.
#pragma once
#include <cmath>
#include <cstring>
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
template <typename T, class Context>
class YellowFinOp final : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
YellowFinOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws),
curv_win_width_(
this->template GetSingleArgument<int>("curv_win_width", 20)),
nesterov_(this->template GetSingleArgument<int>("nesterov", false)),
zero_debias_(
this->template GetSingleArgument<bool>("zero_debias", true)),
epsilon_(this->template GetSingleArgument<T>("epsilon", 1e-6f)),
beta_(this->template GetSingleArgument<T>("beta", 0.999f)) {}
protected:
// GetLrMu and MomentumSgdUpdate have different implementations for GPU and
// CPU. All other methods are generic.
void GetLrMu();
void MomentumSgdUpdate();
void AfterApply() {
// g
MovingAverage(D_, grad_, g_avg_, g_avg_out_, g_deb_);
// g2
math::Mul(D_, grad_, grad_, aux_vector_, &context_);
MovingAverage(D_, aux_vector_, g2_avg_, g2_avg_out_, g2_deb_);
// g_norm2
math::Dot(D_, grad_, grad_, g_norm2_, &context_);
math::Maximum(1, epsilon_, g_norm2_, g_norm2_, &context_);
MovingAverage(1, g_norm2_, g_norm2_avg_, g_norm2_avg_out_, g_norm2_deb_);
// g_norm
math::Sqrt(1, g_norm2_, g_norm_, &context_);
MovingAverage(1, g_norm_, g_norm_avg_, g_norm_avg_out_, g_norm_deb_);
math::Maximum(1, epsilon_, g_norm_deb_, g_norm_deb_, &context_);
// Curvature range: g_norm2_min, g_norm2_max
math::CopyVector(curv_win_width_, curv_win_, curv_win_out_, &context_);
T* curv_win_cell = curv_win_out_ + (iter_ - 1) % curv_win_width_;
math::Log(1, g_norm2_, curv_win_cell, &context_);
int valid_end = std::min(curv_win_width_, iter_);
math::ReduceMin(
valid_end, curv_win_out_, g_norm2_min_, &scratch_tensor_, &context_);
math::ReduceMax(
valid_end, curv_win_out_, g_norm2_max_, &scratch_tensor_, &context_);
MovingAverage(
1,
g_norm2_min_,
g_norm2_min_avg_,
g_norm2_min_avg_out_,
g_norm2_min_deb_);
MovingAverage(
1,
g_norm2_max_,
g_norm2_max_avg_,
g_norm2_max_avg_out_,
g_norm2_max_deb_);
math::Exp(1, g_norm2_min_deb_, g_norm2_min_deb_, &context_);
math::Exp(1, g_norm2_max_deb_, g_norm2_max_deb_, &context_);
math::Maximum(1, epsilon_, g_norm2_min_deb_, g_norm2_min_deb_, &context_);
math::Maximum(1, epsilon_, g_norm2_max_deb_, g_norm2_max_deb_, &context_);
// Gradient variance
math::Dot(D_, g_deb_, g_deb_, aux_scalar_, &context_);
math::Sub(1, g_norm2_deb_, aux_scalar_, variance_, &context_);
math::Maximum(1, epsilon_, variance_, variance_, &context_);
// Distance to opt
math::Div(1, g_norm_avg_out_, g_norm2_avg_out_, distance_, &context_);
MovingAverage(
1, distance_, distance_avg_, distance_avg_out_, distance_deb_);
if (iter_ > 1) {
GetLrMu();
}
}
void MovingAverage(
const int N,
const T* elt,
const T* avg,
T* new_avg,
T* debias_avg) {
const T one = 1;
math::Scale(N, beta_, avg, new_avg, &context_);
math::Axpy(N, one - beta_, elt, new_avg, &context_);
math::Scale(N, debias_factor_, new_avg, debias_avg, &context_);
}
T ZeroDebiasFactor() {
if (zero_debias_) {
const T one = 1;
return one / (one - std::pow(beta_, iter_));
} else {
return 1;
}
}
public:
bool RunOnDevice() override {
    // Iter lives on the CPU
#define CAFFE2_YF_READ_INPUT(INPUT_NAME, VAR_NAME) \
const auto& VAR_NAME##_tensor = Input(INPUT_NAME); \
VAR_NAME##_ = VAR_NAME##_tensor.template data<T>();
CAFFE2_YF_READ_INPUT(PARAM, param)
CAFFE2_YF_READ_INPUT(MOMENT, moment)
CAFFE2_YF_READ_INPUT(LR_AVG, lr_avg)
CAFFE2_YF_READ_INPUT(MU_AVG, mu_avg)
CAFFE2_YF_READ_INPUT(CURV_WIN, curv_win)
CAFFE2_YF_READ_INPUT(G_AVG, g_avg)
CAFFE2_YF_READ_INPUT(G2_AVG, g2_avg)
CAFFE2_YF_READ_INPUT(SCALARS_MEMORY, scalars_memory)
CAFFE2_YF_READ_INPUT(GRAD, grad)
#undef CAFFE2_YF_READ_INPUT
CAFFE_ENFORCE(OperatorBase::InputIsTensorType(ITER, CPU));
CAFFE_ENFORCE_EQ(lr_avg_tensor.numel(), 1);
CAFFE_ENFORCE_EQ(mu_avg_tensor.numel(), 1);
CAFFE_ENFORCE_EQ(param_tensor.dim(), moment_tensor.dim());
CAFFE_ENFORCE_EQ(param_tensor.dim(), g_avg_tensor.dim());
CAFFE_ENFORCE_EQ(param_tensor.dim(), g2_avg_tensor.dim());
CAFFE_ENFORCE_EQ(param_tensor.dim(), grad_tensor.dim());
for (const auto i : c10::irange(param_tensor.dim())) {
CAFFE_ENFORCE_EQ(param_tensor.dim32(i), moment_tensor.dim32(i));
CAFFE_ENFORCE_EQ(param_tensor.dim32(i), g_avg_tensor.dim32(i));
CAFFE_ENFORCE_EQ(param_tensor.dim32(i), g2_avg_tensor.dim32(i));
CAFFE_ENFORCE_EQ(param_tensor.dim32(i), grad_tensor.dim32(i));
}
iter_ = OperatorBase::Input<Tensor>(ITER, CPU).template data<int64_t>()[0];
D_ = param_tensor.numel();
// Input data - persistent memory for internal scalars
// Note: Memory for these scalars is being allocated during initialization
// of the network. If you want to add / remove a scalar, make a
// suitable change of memory size in the initialization.
const T* memory_it = scalars_memory_ - 1;
g_norm_avg_ = ++memory_it;
g_norm2_avg_ = ++memory_it;
g_norm2_min_avg_ = ++memory_it;
g_norm2_max_avg_ = ++memory_it;
distance_avg_ = ++memory_it;
// Output data
#define CAFFE2_YF_READ_OUTPUT(OUTPUT_NAME, VAR_NAME) \
auto VAR_NAME##_out_tensor = \
Output(OUTPUT_##OUTPUT_NAME, VAR_NAME##_tensor.sizes(), at::dtype<T>()); \
VAR_NAME##_out_ = VAR_NAME##_out_tensor->template mutable_data<T>();
CAFFE2_YF_READ_OUTPUT(PARAM, param)
CAFFE2_YF_READ_OUTPUT(MOMENT, moment)
CAFFE2_YF_READ_OUTPUT(LR_AVG, lr_avg)
CAFFE2_YF_READ_OUTPUT(MU_AVG, mu_avg)
CAFFE2_YF_READ_OUTPUT(CURV_WIN, curv_win)
CAFFE2_YF_READ_OUTPUT(G_AVG, g_avg)
CAFFE2_YF_READ_OUTPUT(G2_AVG, g2_avg)
CAFFE2_YF_READ_OUTPUT(SCALARS_MEMORY, scalars_memory)
#undef CAFFE2_YF_READ_OUTPUT
T* out_memory_it = scalars_memory_out_ - 1;
g_norm_avg_out_ = ++out_memory_it;
g_norm2_avg_out_ = ++out_memory_it;
g_norm2_min_avg_out_ = ++out_memory_it;
g_norm2_max_avg_out_ = ++out_memory_it;
distance_avg_out_ = ++out_memory_it;
#define CAFFE2_YF_INIT_VECTOR(NAME) \
ReinitializeTensor(&NAME##_tensor_, {D_}, at::dtype<T>().device(Context::GetDeviceType())); \
NAME##_ = NAME##_tensor_.template mutable_data<T>();
CAFFE2_YF_INIT_VECTOR(aux_vector)
CAFFE2_YF_INIT_VECTOR(g_deb)
CAFFE2_YF_INIT_VECTOR(g2_deb)
CAFFE2_YF_INIT_VECTOR(g_deb2)
#undef CAFFE2_YF_INIT_VECTOR
#define CAFFE2_YF_INIT_SCALAR(NAME) \
ReinitializeTensor(&NAME##_tensor_, {1}, at::dtype<T>().device(Context::GetDeviceType())); \
NAME##_ = NAME##_tensor_.template mutable_data<T>();
CAFFE2_YF_INIT_SCALAR(aux_scalar)
CAFFE2_YF_INIT_SCALAR(distance)
CAFFE2_YF_INIT_SCALAR(distance_deb)
CAFFE2_YF_INIT_SCALAR(g_norm)
CAFFE2_YF_INIT_SCALAR(g_norm_deb)
CAFFE2_YF_INIT_SCALAR(g_norm2)
CAFFE2_YF_INIT_SCALAR(g_norm2_max)
CAFFE2_YF_INIT_SCALAR(g_norm2_max_deb)
CAFFE2_YF_INIT_SCALAR(g_norm2_min)
CAFFE2_YF_INIT_SCALAR(g_norm2_min_deb)
CAFFE2_YF_INIT_SCALAR(g_norm2_deb)
CAFFE2_YF_INIT_SCALAR(lr)
CAFFE2_YF_INIT_SCALAR(lr_deb)
CAFFE2_YF_INIT_SCALAR(mu_deb)
CAFFE2_YF_INIT_SCALAR(mu)
CAFFE2_YF_INIT_SCALAR(variance)
#undef CAFFE2_YF_INIT_SCALAR
debias_factor_ = ZeroDebiasFactor();
MomentumSgdUpdate();
AfterApply();
return true;
}
protected:
int curv_win_width_;
bool nesterov_;
bool zero_debias_;
T epsilon_;
T beta_;
T debias_factor_;
int D_;
// Temporary memory on device, listed all variables used in calculations
#define CAFFE2_YF_DEFINE_TENSOR(NAME) \
Tensor NAME##_tensor_; \
T* NAME##_;
CAFFE2_YF_DEFINE_TENSOR(aux_vector)
CAFFE2_YF_DEFINE_TENSOR(g_deb)
CAFFE2_YF_DEFINE_TENSOR(g2_deb)
CAFFE2_YF_DEFINE_TENSOR(g_deb2)
CAFFE2_YF_DEFINE_TENSOR(aux_scalar)
CAFFE2_YF_DEFINE_TENSOR(distance)
CAFFE2_YF_DEFINE_TENSOR(distance_deb)
CAFFE2_YF_DEFINE_TENSOR(g_norm)
CAFFE2_YF_DEFINE_TENSOR(g_norm_deb)
CAFFE2_YF_DEFINE_TENSOR(g_norm2)
CAFFE2_YF_DEFINE_TENSOR(g_norm2_deb)
CAFFE2_YF_DEFINE_TENSOR(g_norm2_max)
CAFFE2_YF_DEFINE_TENSOR(g_norm2_max_deb)
CAFFE2_YF_DEFINE_TENSOR(g_norm2_min)
CAFFE2_YF_DEFINE_TENSOR(g_norm2_min_deb)
CAFFE2_YF_DEFINE_TENSOR(lr)
CAFFE2_YF_DEFINE_TENSOR(lr_deb)
CAFFE2_YF_DEFINE_TENSOR(mu)
CAFFE2_YF_DEFINE_TENSOR(mu_deb)
CAFFE2_YF_DEFINE_TENSOR(variance)
Tensor scratch_tensor_{Context::GetDeviceType()};
#undef CAFFE2_YF_DEFINE_TENSOR
// Input tensors' data
const T* param_;
const T* moment_;
const T* lr_avg_;
const T* mu_avg_;
const T* curv_win_;
const T* g_avg_;
const T* g2_avg_;
const T* scalars_memory_;
const T* grad_;
int iter_;
// Scalar data from scalars_memory_ input tensor
const T* g_norm_avg_;
const T* g_norm2_avg_;
const T* g_norm2_min_avg_;
const T* g_norm2_max_avg_;
const T* distance_avg_;
// Output tensors' data
T* param_out_;
T* moment_out_;
T* lr_avg_out_;
T* mu_avg_out_;
T* curv_win_out_;
T* g_avg_out_;
T* g2_avg_out_;
T* scalars_memory_out_;
// Scalar data from scalars_memory_ output tensor
T* g_norm_avg_out_;
T* g_norm2_avg_out_;
T* g_norm2_min_avg_out_;
T* g_norm2_max_avg_out_;
T* distance_avg_out_;
INPUT_TAGS(
PARAM,
MOMENT,
LR_AVG,
MU_AVG,
CURV_WIN,
G_AVG,
G2_AVG,
SCALARS_MEMORY,
GRAD,
ITER);
OUTPUT_TAGS(
OUTPUT_PARAM,
OUTPUT_MOMENT,
OUTPUT_LR_AVG,
OUTPUT_MU_AVG,
OUTPUT_CURV_WIN,
OUTPUT_G_AVG,
OUTPUT_G2_AVG,
OUTPUT_SCALARS_MEMORY);
};
} // namespace caffe2
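// Hedged scalar sketch of the zero-debiased moving average implemented by
// MovingAverage() and ZeroDebiasFactor() above (the same bias correction
// Adam uses): a raw EMA started at zero underestimates the running mean by
// a factor (1 - beta^t) after t updates, so the debiased read-out divides
// that factor back out.
#include <cmath>

inline float debiased_ema_step(
    float avg, // raw EMA carried in
    float elt, // new observation
    float beta, // decay, e.g. 0.999
    int t, // 1-based update count
    float* new_avg) { // raw EMA carried out
  *new_avg = beta * avg + (1.0f - beta) * elt;
  return *new_avg / (1.0f - std::pow(beta, static_cast<float>(t)));
}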
| 10,292
| 30.965839
| 98
|
h
|
null |
pytorch-main/caffe2/share/contrib/zstd/quant_decomp_zstd_op.h
|
#ifndef QUANT_DECOMP_OP_H_
#define QUANT_DECOMP_OP_H_
#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
namespace caffe2 {
// Decompresses a set of tensors compressed using zstd;
// see quant_decomp_op_test.py for how to compress.
class QuantDecompZstdOp final : public Operator<CPUContext> {
public:
USE_OPERATOR_FUNCTIONS(CPUContext);
QuantDecompZstdOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<CPUContext>(operator_def, ws) {}
~QuantDecompZstdOp() {}
bool RunOnDevice() override;
};
} // namespace caffe2
#endif // QUANT_DECOMP_OP_H_
| 589
| 23.583333
| 67
|
h
|
null |
pytorch-main/caffe2/transforms/common_subexpression_elimination.h
|
#pragma once
#include "caffe2/core/common.h"
#include "caffe2/core/transform.h"
#include "caffe2/proto/caffe2_pb.h"
#include "caffe2/utils/proto_utils.h"
namespace caffe2 {
/**
* Common Subexpression Elimination
*
 * This transform looks for specific operators (denoted by allowed_ops_),
* and removes unnecessary repetition of that operator.
*
 * Consider some operator X that reads from blob b_, written to by W.
 * X_a and X_b read the output of X. Now suppose another operator Y is the
 * same type as X, has the same arguments as X, and reads from the same
 * input b_, written to by W. Its output is therefore the same as X's.
 * Y_a, Y_b, and Y_c read from Y.
*
* Then, we can eliminate the common subexpressions X and Y, and merge them to
* Z, where X_a, X_b, Y_a, Y_b, and Y_c all read from Z.
*
*
* TODO(benz): Fix the error to not match nodes that write to external output.
*/
class TORCH_API CommonSubexpressionEliminationTransform : public Transform {
public:
CommonSubexpressionEliminationTransform() {
SetPatternMatchType(SORTED_WRT_EXECUTION_ORDER);
}
protected:
bool PatternRule(
const transform::Graph& g,
const std::vector<int>& subgraph,
int idx) override;
bool ValidatorRule(
const transform::Graph& g,
const std::vector<int>& subgraph) override;
bool ReplaceRule(const std::vector<int>& subgraph, transform::Graph* g_ptr)
override;
private:
bool IsAllowed(string op_type) {
return allowed_ops_.count(op_type);
}
std::set<string> allowed_ops_ = {"LearningRate", "FC"};
};
} // namespace caffe2
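// Hedged usage sketch (assumes the ApplyTo(NetDef) entry point declared on
// the Transform base class in caffe2/core/transform.h): given a NetDef that
// contains duplicated LearningRate/FC subexpressions, the transform returns
// a rewritten NetDef with the duplicates merged.
inline caffe2::NetDef RunCommonSubexpressionElimination(
    const caffe2::NetDef& net) {
  caffe2::CommonSubexpressionEliminationTransform cse;
  return cse.ApplyTo(net);
}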
| 1,584
| 28.90566
| 80
|
h
|
null |
pytorch-main/caffe2/transforms/conv_to_nnpack_transform.h
|
#pragma once
#include "caffe2/core/common.h"
#include "caffe2/proto/caffe2_pb.h"
#include "caffe2/transforms/single_op_transform.h"
#include "caffe2/utils/proto_utils.h"
namespace caffe2 {
class TORCH_API ConvToNNPackTransform : public SingleOpTransform {
protected:
// Specify what the op needs to be to match the pattern.
bool MatchOperator(const OperatorDef& op) override {
return (
op.type() == "Conv" && op.device_option().device_type() == PROTO_CPU &&
op.engine() != "NNPACK");
}
// Specify how the operator should be replaced.
void ReplaceOperator(OperatorDef* op) override {
op->set_engine("NNPACK");
}
};
} // namespace caffe2
| 678
| 25.115385
| 79
|
h
|
null |
pytorch-main/caffe2/transforms/pattern_net_transform.h
|
#pragma once
#include "caffe2/core/common.h"
#include "caffe2/core/transform.h"
#include "caffe2/proto/caffe2_pb.h"
#include "caffe2/utils/proto_utils.h"
namespace caffe2 {
/**
* PatternNetTransform allows you to create transforms using a simple
* interface.
*
* Simply provide a Pattern NetDef and a Replace NetDef,
* and this Transform will find subgraphs which fit the pattern net,
* and replace it with the replace net.
*/
class TORCH_API PatternNetTransform : public Transform {
public:
PatternNetTransform(const NetDef& pattern_net, const NetDef& replace_net)
: p_(transform::Graph(pattern_net)), r_(transform::Graph(replace_net)) {
// external input and output must match!
CAFFE_ENFORCE(
p_.external_input() == r_.external_input(),
"External inputs do not match!");
CAFFE_ENFORCE(
p_.external_output() == r_.external_output(),
"External outputs do not match!");
ordered_ops_ = GetPatternTraversalOrder(p_);
inverse_ops_.resize(ordered_ops_.size());
for (const auto i : c10::irange(ordered_ops_.size())) {
inverse_ops_[ordered_ops_[i]] = i;
}
}
void EnableArgumentMatching() {
argument_match_ = true;
}
void DisableArgumentMatching() {
argument_match_ = false;
}
protected:
/**
   * We want the final result of the subgraph to match the PatternNet in the
* order of ordered_ops, operator by operator.
*
* [[[ ie. g.node(subgraph[i]) should match p.node(ordered_ops[i]) ]]]
*
* PatternRule for PatternNetTransform does the following:
*
* When trying to insert node idx into subgraph[p_idx],
   * we need to see if the edges between idx and the
* subgraph match the edges between p[ordered_ops[idx]]
* and p[ordered_ops[0]...ordered_ops[p_idx-1]].
*/
bool PatternRule(
const transform::Graph& g,
const std::vector<int>& subgraph,
int idx) override;
/**
* ValidatorRule for PatternNetTransform does the following:
*
* Checks if the size of subgraph and p.size() are the same. That's it!
*/
bool ValidatorRule(
const transform::Graph& g,
const std::vector<int>& subgraph) override;
/**
* ReplaceRule for PatternNet Transform does the following:
*
* 1) Figure out edge renamings for edges going into/out of the subgraph.
* That is, for each blob in the pattern graph, what is it called in the
* matched subgraph?
*
* 2) Remove the matched subgraph.
*
* 3) Append the replace graph's operators to the graph's operators, and use
* the renamings to rename the blob names.
*
* 4) Create all the children/parent relationships within the replaced graph,
* and stitch together the inputs and outputs into the rest of the graph,
* matching the removed subgraph.
*/
bool ReplaceRule(const std::vector<int>& subgraph, transform::Graph* g_ptr)
override;
private:
/**
* This returns a permutation of the Pattern Net's operators.
* The permutation satisfies this property:
* - For any index i, order(i) is a neighbor of some node from
* {order(1), ..., order(i-1)}.
*
* Why is this important? Consider the following case:
* PatternNet: 0 ---> 2 <--- 1
*
* When we have matched onto [0], and trying to add [1] to our subgraph,
* we cannot, since PatternMatch only considers neighbors of the current
* subgraph as a candidate next node.
*
* Therefore, we must present the subgraph in an order such that each node is
* a neighbor of its prefix subgraph. One ordering for the above example is
* [0, 2, 1].
*/
std::vector<int> GetPatternTraversalOrder(const transform::Graph& g);
// Graph of Pattern NetDef
transform::Graph p_;
// The Traversal Order of the Pattern Net's Operators
// This is a permutation of the numbers from {0, ..., p.size()-1}
std::vector<int> ordered_ops_;
// The Inverse of the Traversal Order of the Pattern Net's Operators
// That is, inverse_ops[ordered_ops[i]] == i is always true.
std::vector<int> inverse_ops_;
// Graph of Replace NetDef
transform::Graph r_;
// This flag determines if the transform will match operator arguments.
bool argument_match_ = false;
const string TransformBlobWrapper(const string& blob_name) {
return "transform/" + blob_name + "_" + c10::to_string(ssa_id_);
}
int ssa_id_ = 0;
};
} // namespace caffe2
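// Hedged usage sketch (assumes the ApplyTo(NetDef) entry point declared on
// the Transform base class): single-op pattern and replace nets with
// matching external inputs/outputs. Op and blob names are illustrative.
#include <string>

inline caffe2::NetDef MakeSingleOpNet(
    const std::string& type,
    const std::string& in,
    const std::string& out) {
  caffe2::NetDef net;
  auto* op = net.add_op();
  op->set_type(type);
  op->add_input(in);
  op->add_output(out);
  net.add_external_input(in);
  net.add_external_output(out);
  return net;
}
// caffe2::PatternNetTransform rewrite(MakeSingleOpNet("OldOp", "x", "y"),
//                                     MakeSingleOpNet("NewOp", "x", "y"));
// caffe2::NetDef transformed = rewrite.ApplyTo(net);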
| 4,399
| 31.835821
| 79
|
h
|
null |
pytorch-main/caffe2/transforms/single_op_transform.h
|
#pragma once
#include "caffe2/core/common.h"
#include "caffe2/core/transform.h"
#include "caffe2/proto/caffe2_pb.h"
#include "caffe2/utils/proto_utils.h"
namespace caffe2 {
/**
* Single Op Transform Base class
*
* A transform which is applied to a single node, in place.
*
* Transforms which derive from SingleOpTransform need to override:
* ReplaceOperator and MatchOperator.
*/
class TORCH_API SingleOpTransform : public Transform {
protected:
bool PatternRule(
const transform::Graph& g,
const std::vector<int>& subgraph,
int idx) override;
bool ValidatorRule(
const transform::Graph& g,
const std::vector<int>& subgraph) override;
bool ReplaceRule(const std::vector<int>& subgraph, transform::Graph* g_ptr)
override;
// Specify what the op needs to be to match the pattern.
virtual bool MatchOperator(const OperatorDef& op) = 0;
// Specify how the operator should be replaced.
virtual void ReplaceOperator(OperatorDef* op) = 0;
};
} // namespace caffe2
| 1,021
| 25.894737
| 77
|
h
|
null |
pytorch-main/caffe2/utils/bench_utils.h
|
/**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef CAFFE2_UTILS_BENCH_UTILS_H_
#define CAFFE2_UTILS_BENCH_UTILS_H_
#include <stdint.h>
#include <c10/macros/Export.h>
namespace caffe2 {
TORCH_API uint32_t wipe_cache();
} // namespace caffe2
#endif // CAFFE2_UTILS_BENCH_UTILS_H_
| 854
| 26.580645
| 75
|
h
|
null |
pytorch-main/caffe2/utils/cast.h
|
#pragma once
#include <algorithm>
#include <cctype>
#include <caffe2/utils/proto_utils.h>
namespace caffe2 {
namespace cast {
inline TensorProto_DataType GetCastDataType(const ArgumentHelper& helper, std::string arg) {
TensorProto_DataType to;
if (helper.HasSingleArgumentOfType<string>(arg)) {
string s = helper.GetSingleArgument<string>(arg, "float");
std::transform(s.begin(), s.end(), s.begin(), ::toupper);
#ifndef CAFFE2_USE_LITE_PROTO
CAFFE_ENFORCE(TensorProto_DataType_Parse(s, &to), "Unknown 'to' argument: ", s);
#else
// Manually implement in the lite proto case.
#define X(t) \
if (s == #t) { \
return TensorProto_DataType_##t; \
}
X(FLOAT);
X(INT32);
X(BYTE);
X(STRING);
X(BOOL);
X(UINT8);
X(INT8);
X(UINT16);
X(INT16);
X(INT64);
X(FLOAT16);
X(DOUBLE);
#undef X
CAFFE_THROW("Unhandled type argument: ", s);
#endif
} else {
to = static_cast<TensorProto_DataType>(
helper.GetSingleArgument<int>(arg, TensorProto_DataType_FLOAT));
}
return to;
}
} // namespace cast
} // namespace caffe2
| 1,110
| 21.22
| 92
|
h
|
null |
pytorch-main/caffe2/utils/cpu_neon.h
|
#ifndef CAFFE2_UTILS_CPU_NEON_H_
#define CAFFE2_UTILS_CPU_NEON_H_
// Provides a variety of ARM NEON-specific utility functions
#if defined(__ARM_NEON__) || defined(__ARM_NEON)
#include <arm_neon.h>
namespace caffe2 {
template <typename T>
inline bool isPointerAligned(T* p, size_t align) {
return (reinterpret_cast<uintptr_t>(p) % align == 0);
}
inline float32x4_t vert_sum_f32(float32x4_t v0,
float32x4_t v1,
float32x4_t v2,
float32x4_t v3) {
v0 = vaddq_f32(v0, v1);
v2 = vaddq_f32(v2, v3);
return vaddq_f32(v0, v2);
}
inline float horizontal_sum_f32(float32x4_t v0,
float32x4_t v1,
float32x4_t v2,
float32x4_t v3) {
v0 = vert_sum_f32(v0, v1, v2, v3);
float32x2_t v = vadd_f32(vget_high_f32(v0), vget_low_f32(v0));
return vget_lane_f32(vpadd_f32(v, v), 0);
}
// Load/store functions that assume alignment
inline float32x4_t vld1q_f32_aligned(const float* p) {
return vld1q_f32((const float*)
__builtin_assume_aligned(p, sizeof(float32x4_t)));
}
inline void vst1q_f32_aligned(float* p, float32x4_t v) {
vst1q_f32((float*) __builtin_assume_aligned(p, sizeof(float32x4_t)), v);
}
inline void vst4_u8_aligned(uint8_t* p, uint8x8x4_t v) {
vst4_u8((uint8_t*)
__builtin_assume_aligned(p, sizeof(uint8x8x4_t)), v);
}
} // namespace caffe2
#endif // defined(__ARM_NEON__) || defined(__ARM_NEON)
#endif // CAFFE2_UTILS_CPU_NEON_H_
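// Hedged usage sketch (compiles only under ARM NEON): summing 16 consecutive
// floats with the helpers above; `p` must be 16-byte aligned for the
// *_aligned loads.
#if defined(__ARM_NEON__) || defined(__ARM_NEON)
inline float sum16_aligned(const float* p) {
  return caffe2::horizontal_sum_f32(
      caffe2::vld1q_f32_aligned(p),
      caffe2::vld1q_f32_aligned(p + 4),
      caffe2::vld1q_f32_aligned(p + 8),
      caffe2::vld1q_f32_aligned(p + 12));
}
#endif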
| 1,578
| 28.240741
| 74
|
h
|
null |
pytorch-main/caffe2/utils/eigen_utils.h
|
// Copyright 2004-present Facebook. All Rights Reserved.
#ifndef CAFFE2_OPERATORS_UTILS_EIGEN_H_
#define CAFFE2_OPERATORS_UTILS_EIGEN_H_
#include "Eigen/Core"
#include "Eigen/Dense"
#include <c10/util/Logging.h>
#include <c10/util/irange.h>
namespace caffe2 {
// Common Eigen types that we will often use
template <typename T>
using EigenMatrixMap =
Eigen::Map<Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic>>;
template <typename T>
using EigenArrayMap =
Eigen::Map<Eigen::Array<T, Eigen::Dynamic, Eigen::Dynamic>>;
template <typename T>
using EigenVectorMap = Eigen::Map<Eigen::Matrix<T, Eigen::Dynamic, 1>>;
template <typename T>
using EigenVectorArrayMap = Eigen::Map<Eigen::Array<T, Eigen::Dynamic, 1>>;
template <typename T>
using ConstEigenMatrixMap =
Eigen::Map<const Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic>>;
template <typename T>
using ConstEigenArrayMap =
Eigen::Map<const Eigen::Array<T, Eigen::Dynamic, Eigen::Dynamic>>;
template <typename T>
using ConstEigenVectorMap =
Eigen::Map<const Eigen::Matrix<T, Eigen::Dynamic, 1>>;
template <typename T>
using ConstEigenVectorArrayMap =
Eigen::Map<const Eigen::Array<T, Eigen::Dynamic, 1>>;
using EigenOuterStride = Eigen::OuterStride<Eigen::Dynamic>;
using EigenInnerStride = Eigen::InnerStride<Eigen::Dynamic>;
using EigenStride = Eigen::Stride<Eigen::Dynamic, Eigen::Dynamic>;
template <typename T>
using EigenOuterStridedMatrixMap = Eigen::
Map<Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic>, 0, EigenOuterStride>;
template <typename T>
using EigenOuterStridedArrayMap = Eigen::
Map<Eigen::Array<T, Eigen::Dynamic, Eigen::Dynamic>, 0, EigenOuterStride>;
template <typename T>
using ConstEigenOuterStridedMatrixMap = Eigen::Map<
const Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic>,
0,
EigenOuterStride>;
template <typename T>
using ConstEigenOuterStridedArrayMap = Eigen::Map<
const Eigen::Array<T, Eigen::Dynamic, Eigen::Dynamic>,
0,
EigenOuterStride>;
template <typename T>
using EigenStridedMatrixMap = Eigen::
Map<Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic>, 0, EigenStride>;
template <typename T>
using EigenStridedArrayMap =
Eigen::Map<Eigen::Array<T, Eigen::Dynamic, Eigen::Dynamic>, 0, EigenStride>;
template <typename T>
using ConstEigenStridedMatrixMap = Eigen::
Map<const Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic>, 0, EigenStride>;
template <typename T>
using ConstEigenStridedArrayMap = Eigen::
Map<const Eigen::Array<T, Eigen::Dynamic, Eigen::Dynamic>, 0, EigenStride>;
// 1-d array
template <typename T>
using EArrXt = Eigen::Array<T, Eigen::Dynamic, 1>;
using EArrXf = Eigen::ArrayXf;
using EArrXd = Eigen::ArrayXd;
using EArrXi = Eigen::ArrayXi;
using EArrXb = EArrXt<bool>;
using EArrXI32 = EArrXt<int32_t>;
using EArrXU16 = EArrXt<uint16_t>;
using EArrXU8 = EArrXt<uint8_t>;
using EArr3U8 = Eigen::Array<uint8_t, 3, 1>;
// 2-d array, column major
template <typename T>
using EArrXXt = Eigen::Array<T, Eigen::Dynamic, Eigen::Dynamic>;
using EArrXXf = Eigen::ArrayXXf;
using EArrXXI32 = EArrXXt<int32_t>;
using EArrXXU16 = EArrXXt<uint16_t>;
using EArrXXU8 = EArrXXt<uint8_t>;
using EArrXXi = EArrXXt<int>;
// 2-d array, row major
template <typename T>
using ERArrXXt =
Eigen::Array<T, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>;
using ERArrXXf = ERArrXXt<float>;
using ERArrXXI32t = ERArrXXt<int32_t>;
using ERArrXXU16t = ERArrXXt<uint16_t>;
using ERArrXXU8t = ERArrXXt<uint8_t>;
using ERArrXXi = ERArrXXt<int>;
using ERArrXXi64t = ERArrXXt<int64_t>;
using ERArrXXi32t = ERArrXXt<int32_t>;
// 1-d vector
template <typename T>
using EVecXt = Eigen::Matrix<T, Eigen::Dynamic, 1>;
using EVecXd = Eigen::VectorXd;
using EVecXf = Eigen::VectorXf;
// 1-d row vector
using ERVecXd = Eigen::RowVectorXd;
using ERVecXf = Eigen::RowVectorXf;
// 2-d matrix, column major
template <typename T>
using EMatXt = Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic>;
using EMatXd = Eigen::MatrixXd;
using EMatXf = Eigen::MatrixXf;
using EMatXU8 = EMatXt<uint8_t>;
using EMatXU16 = EMatXt<uint16_t>;
// 2-d matrix, row major
template <typename T>
using ERMatXt =
Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>;
using ERMatXd = ERMatXt<double>;
using ERMatXf = ERMatXt<float>;
using ERMatXU8 = ERMatXt<uint8_t>;
namespace utils {
template <typename T>
Eigen::Map<const EArrXt<T>> AsEArrXt(const std::vector<T>& arr) {
return {arr.data(), static_cast<int>(arr.size())};
}
template <typename T>
Eigen::Map<EArrXt<T>> AsEArrXt(std::vector<T>& arr) {
return {arr.data(), static_cast<int>(arr.size())};
}
// return a sub array of 'array' based on indices 'indices'
template <class Derived, class Derived1, class Derived2>
void GetSubArray(
const Eigen::ArrayBase<Derived>& array,
const Eigen::ArrayBase<Derived1>& indices,
Eigen::ArrayBase<Derived2>* out_array) {
CAFFE_ENFORCE_EQ(array.cols(), 1);
// using T = typename Derived::Scalar;
out_array->derived().resize(indices.size());
for (const auto i : c10::irange(indices.size())) {
TORCH_DCHECK_LT(indices[i], array.size());
(*out_array)[i] = array[indices[i]];
}
}
// return a sub array of 'array' based on indices 'indices'
template <class Derived, class Derived1>
EArrXt<typename Derived::Scalar> GetSubArray(
const Eigen::ArrayBase<Derived>& array,
const Eigen::ArrayBase<Derived1>& indices) {
using T = typename Derived::Scalar;
EArrXt<T> ret(indices.size());
GetSubArray(array, indices, &ret);
return ret;
}
// return a sub array of 'array' based on indices 'indices'
template <class Derived>
EArrXt<typename Derived::Scalar> GetSubArray(
const Eigen::ArrayBase<Derived>& array,
const std::vector<int>& indices) {
return GetSubArray(array, AsEArrXt(indices));
}
// return 2d sub array of 'array' based on row indices 'row_indices'
template <class Derived, class Derived1, class Derived2>
void GetSubArrayRows(
const Eigen::ArrayBase<Derived>& array2d,
const Eigen::ArrayBase<Derived1>& row_indices,
Eigen::ArrayBase<Derived2>* out_array) {
out_array->derived().resize(row_indices.size(), array2d.cols());
for (const auto i : c10::irange(row_indices.size())) {
    TORCH_DCHECK_LT(row_indices[i], array2d.rows());
out_array->row(i) =
array2d.row(row_indices[i]).template cast<typename Derived2::Scalar>();
}
}
// return indices of 1d array for elements evaluated to true
template <class Derived>
std::vector<int> GetArrayIndices(const Eigen::ArrayBase<Derived>& array) {
std::vector<int> ret;
for (const auto i : c10::irange(array.size())) {
if (array[i]) {
ret.push_back(i);
}
}
return ret;
}
} // namespace utils
} // namespace caffe2
#endif
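// Hedged usage sketch of the index-gathering helpers above; values and
// indices are illustrative.
#include <vector>

inline caffe2::EArrXf GatherExample() {
  caffe2::EArrXf values(5);
  values << 10.f, 20.f, 30.f, 40.f, 50.f;
  const std::vector<int> idx = {4, 0, 2};
  return caffe2::utils::GetSubArray(values, idx); // [50, 10, 30]
}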
| 6,718
| 31.616505
| 80
|
h
|
null |
pytorch-main/caffe2/utils/filler.h
|
#ifndef CAFFE2_FILLER_H_
#define CAFFE2_FILLER_H_
#include <algorithm>
#include <limits>
#include <sstream>
#include "caffe2/core/logging.h"
#include "caffe2/core/tensor.h"
#include "caffe2/utils/math.h"
namespace caffe2 {
// TODO: replace filler distribution enum with a better abstraction
enum FillerDistribution { FD_UNIFORM, FD_FIXEDSUM, FD_SYNTHETIC };
class TensorFiller {
public:
template <class Type, class Context>
void Fill(Tensor* tensor, Context* context) const {
CAFFE_ENFORCE(context, "context is null");
CAFFE_ENFORCE(tensor, "tensor is null");
    // Note: lowest() (not min()), so that negative floating-point minima
    // survive the clamp.
    auto min = (min_ < (double)std::numeric_limits<Type>::lowest())
        ? std::numeric_limits<Type>::lowest()
        : static_cast<Type>(min_);
auto max = (max_ > (double)std::numeric_limits<Type>::max())
? std::numeric_limits<Type>::max()
: static_cast<Type>(max_);
CAFFE_ENFORCE_LE(min, max);
Tensor temp_tensor(shape_, Context::GetDeviceType());
std::swap(*tensor, temp_tensor);
Type* data = tensor->template mutable_data<Type>();
// select distribution
switch (dist_) {
case FD_UNIFORM: {
math::RandUniform<Type, Context>(
tensor->numel(), min, max, data, context);
break;
}
case FD_FIXEDSUM: {
auto fixed_sum = static_cast<Type>(fixed_sum_);
CAFFE_ENFORCE_LE(min * tensor->numel(), fixed_sum);
CAFFE_ENFORCE_GE(max * tensor->numel(), fixed_sum);
math::RandFixedSum<Type, Context>(
tensor->numel(), min, max, fixed_sum_, data, context);
break;
}
case FD_SYNTHETIC: {
math::RandSyntheticData<Type, Context>(
tensor->numel(), min, max, data, context);
break;
}
}
}
TensorFiller& Dist(FillerDistribution dist) {
dist_ = dist;
return *this;
}
template <class Type>
TensorFiller& Min(Type min) {
min_ = (double)min;
return *this;
}
template <class Type>
TensorFiller& Max(Type max) {
max_ = (double)max;
return *this;
}
template <class Type>
TensorFiller& FixedSum(Type fixed_sum) {
dist_ = FD_FIXEDSUM;
fixed_sum_ = (double)fixed_sum;
return *this;
}
// A helper function to construct the lengths vector for sparse features
  // We try to pad at least one index per batch unless the total_length is 0
template <class Type>
TensorFiller& SparseLengths(Type total_length) {
return FixedSum(total_length)
.Min(std::min(static_cast<Type>(1), total_length))
.Max(total_length);
}
// a helper function to construct the segments vector for sparse features
template <class Type>
TensorFiller& SparseSegments(Type max_segment) {
CAFFE_ENFORCE(dist_ != FD_FIXEDSUM);
return Min(0).Max(max_segment).Dist(FD_SYNTHETIC);
}
TensorFiller& Shape(const std::vector<int64_t>& shape) {
shape_ = shape;
return *this;
}
template <class Type>
TensorFiller(const std::vector<int64_t>& shape, Type fixed_sum)
: shape_(shape), dist_(FD_FIXEDSUM), fixed_sum_((double)fixed_sum) {}
TensorFiller(const std::vector<int64_t>& shape)
: shape_(shape), dist_(FD_UNIFORM), fixed_sum_(0) {}
TensorFiller() : TensorFiller(std::vector<int64_t>()) {}
std::string DebugString() const {
std::stringstream stream;
stream << "shape = [" << shape_ << "]; min = " << min_
<< "; max = " << max_;
switch (dist_) {
case FD_FIXEDSUM:
stream << "; dist = FD_FIXEDSUM";
break;
case FD_SYNTHETIC:
stream << "; dist = FD_SYNTHETIC";
break;
default:
stream << "; dist = FD_UNIFORM";
break;
}
return stream.str();
}
private:
std::vector<int64_t> shape_;
// TODO: type is unknown until a user starts to fill data;
// cast everything to double for now.
double min_ = 0.0;
double max_ = 1.0;
FillerDistribution dist_;
double fixed_sum_;
};
} // namespace caffe2
#endif // CAFFE2_FILLER_H_
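// Hedged usage sketch: build a lengths vector for sparse features whose
// entries sum to a fixed total (CPUContext assumed; the helper name and
// shapes are illustrative).
#include "caffe2/core/context.h"

inline void FillSparseLengthsExample(
    caffe2::Tensor* lengths,
    int64_t rows,
    int32_t total_length,
    caffe2::CPUContext* context) {
  caffe2::TensorFiller filler(std::vector<int64_t>{rows});
  // Pads at least one index per row unless total_length is 0.
  filler.SparseLengths(total_length);
  filler.Fill<int32_t>(lengths, context);
}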
| 3,927
| 26.858156
| 75
|
h
|
null |
pytorch-main/caffe2/utils/fixed_divisor.h
|
#ifndef CAFFE2_UTILS_FIXED_DIVISOR_H_
#define CAFFE2_UTILS_FIXED_DIVISOR_H_
#include <cstdint>
#include <cstdio>
#include <cstdlib>
// See Note [hip-clang differences to hcc]
#if defined(__CUDA_ARCH__) || defined(__HIP_ARCH__) || defined(__HIP__) || \
(defined(__clang__) && defined(__CUDA__))
#define FIXED_DIVISOR_DECL inline __host__ __device__
#else
#define FIXED_DIVISOR_DECL inline
#endif
namespace caffe2 {
// Utility class for quickly calculating quotients and remainders for
// a known integer divisor
template <typename T>
class FixedDivisor {};
// Works for any positive divisor, 1 to INT_MAX. One 64-bit
// multiplication and one 64-bit shift are used to calculate the
// result.
template <>
class FixedDivisor<std::int32_t> {
public:
FixedDivisor() = default;
explicit FixedDivisor(const std::int32_t d) : d_(d) {
#if !defined(USE_ROCM)
CalcSignedMagic();
#endif // USE_ROCM
}
FIXED_DIVISOR_DECL std::int32_t d() const {
return d_;
}
#if !defined(USE_ROCM)
FIXED_DIVISOR_DECL std::uint64_t magic() const {
return magic_;
}
FIXED_DIVISOR_DECL int shift() const {
return shift_;
}
#endif // USE_ROCM
/// Calculates `q = n / d`.
FIXED_DIVISOR_DECL std::int32_t Div(const std::int32_t n) const {
#if defined(USE_ROCM)
return n / d_;
#else // USE_ROCM
// In lieu of a mulhi instruction being available, perform the
// work in uint64
return (int32_t)((magic_ * (uint64_t)n) >> shift_);
#endif // USE_ROCM
}
/// Calculates `r = n % d`.
FIXED_DIVISOR_DECL std::int32_t Mod(const std::int32_t n) const {
return n - d_ * Div(n);
}
/// Calculates `q = n / d` and `r = n % d` together.
FIXED_DIVISOR_DECL void
DivMod(const std::int32_t n, std::int32_t* q, int32_t* r) const {
*q = Div(n);
*r = n - d_ * *q;
}
private:
#if !defined(USE_ROCM)
// Calculates magic multiplicative value and shift amount for calculating `q =
// n / d` for signed 32-bit integers.
// Implementation taken from Hacker's Delight section 10.
void CalcSignedMagic() {
if (d_ == 1) {
magic_ = UINT64_C(0x1) << 32;
shift_ = 32;
return;
}
const std::uint32_t two31 = UINT32_C(0x80000000);
const std::uint32_t ad = std::abs(d_);
const std::uint32_t t = two31 + ((uint32_t)d_ >> 31);
const std::uint32_t anc = t - 1 - t % ad; // Absolute value of nc.
std::uint32_t p = 31; // Init. p.
std::uint32_t q1 = two31 / anc; // Init. q1 = 2**p/|nc|.
std::uint32_t r1 = two31 - q1 * anc; // Init. r1 = rem(2**p, |nc|).
std::uint32_t q2 = two31 / ad; // Init. q2 = 2**p/|d|.
std::uint32_t r2 = two31 - q2 * ad; // Init. r2 = rem(2**p, |d|).
std::uint32_t delta = 0;
do {
++p;
q1 <<= 1; // Update q1 = 2**p/|nc|.
r1 <<= 1; // Update r1 = rem(2**p, |nc|).
if (r1 >= anc) { // (Must be an unsigned
++q1; // comparison here).
r1 -= anc;
}
q2 <<= 1; // Update q2 = 2**p/|d|.
r2 <<= 1; // Update r2 = rem(2**p, |d|).
if (r2 >= ad) { // (Must be an unsigned
++q2; // comparison here).
r2 -= ad;
}
delta = ad - r2;
} while (q1 < delta || (q1 == delta && r1 == 0));
std::int32_t magic = q2 + 1;
if (d_ < 0) {
magic = -magic;
}
shift_ = p;
magic_ = (std::uint64_t)(std::uint32_t)magic;
}
#endif // USE_ROCM
std::int32_t d_ = 1;
#if !defined(USE_ROCM)
std::uint64_t magic_;
int shift_;
#endif // USE_ROCM
};
} // namespace caffe2
#endif // CAFFE2_UTILS_FIXED_DIVISOR_H_
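// Hedged usage sketch: precompute the division constants once, then convert
// flat indices to (row, col) pairs without hardware division, a common
// pattern in elementwise kernels. Names are illustrative.
#include <cstdint>

inline void Unravel2D(
    std::int32_t flat,
    const caffe2::FixedDivisor<std::int32_t>& cols,
    std::int32_t* row,
    std::int32_t* col) {
  cols.DivMod(flat, row, col); // row = flat / C, col = flat % C
}
// caffe2::FixedDivisor<std::int32_t> cols(7);
// Unravel2D(23, cols, &row, &col); // row == 3, col == 2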
| 3,532
| 25.56391
| 80
|
h
|
null |
pytorch-main/caffe2/utils/knob_patcher.h
|
#pragma once
#include <memory>
#include <c10/util/string_view.h>
namespace caffe2 {
/**
* Patch the value of a knob during a unit test.
*
* This forces the knob to the specified value for as long as the KnobPatcher
* object exists. When the KnobPatcher object is destroyed the knob will revert
* to its previous value.
*/
class KnobPatcher {
public:
KnobPatcher(c10::string_view name, bool value);
~KnobPatcher();
KnobPatcher(KnobPatcher&&) noexcept;
KnobPatcher& operator=(KnobPatcher&&) noexcept;
KnobPatcher(const KnobPatcher&) = delete;
KnobPatcher& operator=(const KnobPatcher&) = delete;
private:
class PatchState;
std::unique_ptr<PatchState> state_;
};
} // namespace caffe2
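// Hedged usage sketch in a test: the override lasts exactly as long as the
// patcher object ("my_feature_rollout" is an illustrative knob name).
#include <functional>

inline void RunWithKnobEnabled(const std::function<void()>& body) {
  caffe2::KnobPatcher patch("my_feature_rollout", true);
  body(); // code under test sees the knob forced to true
} // patch destroyed here; the knob reverts to its previous value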
| 715
| 20.69697
| 80
|
h
|
null |
pytorch-main/caffe2/utils/map_utils.h
|
#pragma once
#include <utility> // std::forward
namespace caffe2 {
// Get value from map given key. Return supplied default value if not found
// This is a stripped down version from folly:
// https://github.com/facebook/folly/blob/5a07e203d79324b68d69f294fa38e43b9671e9b1/folly/MapUtil.h#L35-L45
template <
class Map,
typename Key = typename Map::key_type,
typename Value = typename Map::mapped_type>
typename Map::mapped_type
get_default(const Map& map, const Key& key, Value&& dflt) {
using M = typename Map::mapped_type;
auto pos = map.find(key);
return (pos != map.end()) ? (pos->second) : M(std::forward<Value>(dflt));
}
} // namespace caffe2
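// Hedged usage sketch: a default-valued lookup that never inserts into the
// map (names and values are illustrative).
#include <map>
#include <string>

inline int LookupPort(const std::map<std::string, int>& ports) {
  return caffe2::get_default(ports, std::string("http-alt"), 8080);
}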
| 635
| 30.8
| 106
|
h
|
null |
pytorch-main/caffe2/utils/math-detail.h
|
#ifndef CAFFE2_UTILS_MATH_DETAIL_H_
#define CAFFE2_UTILS_MATH_DETAIL_H_
namespace caffe2 {
class CPUContext;
namespace math {
namespace detail {
// proxy to a class because of partial specialization limitations for functions
template<typename T, class Context, int FixedSize>
struct ScaleImpl {
inline void operator()(
const int N,
const float alpha,
const T* x,
T* y,
Context* context) {
Scale(N, alpha, x, y, context);
}
};
// Put light-weight implementations in .h file to enable inlining
template<typename T>
struct ScaleImpl<T, CPUContext, 1> {
inline void operator()(
const int N,
const float alpha,
const T* x,
T* y,
CPUContext* /*context*/) {
TORCH_DCHECK_EQ(N, 1);
*y = *x * alpha;
}
};
template<typename T, class Context, int FixedSize>
struct AxpyImpl {
inline void operator()(
const int N,
const float alpha,
const T* x,
T* y,
Context* context) {
Axpy(N, alpha, x, y, context);
}
};
// Put light-weight implementations in .h file to enable inlining
template<typename T>
struct AxpyImpl<T, CPUContext, 1> {
inline void operator()(
const int N,
const float alpha,
const T* x,
T* y,
CPUContext* /*context*/) {
TORCH_DCHECK_EQ(N, 1);
*y += *x * alpha;
}
};
} // namespace detail
template <typename T, class Context, int FixedSize>
inline void ScaleFixedSize(
const int N,
const float alpha,
const T* x,
T* y,
Context* context) {
detail::ScaleImpl<T, Context, FixedSize>()(N, alpha, x, y, context);
}
template <typename T, class Context, int FixedSize>
inline void AxpyFixedSize(
const int N,
const float alpha,
const T* x,
T* y,
Context* context) {
detail::AxpyImpl<T, Context, FixedSize>()(N, alpha, x, y, context);
}
} // namespace math
} // namespace caffe2
#endif // CAFFE2_UTILS_MATH_DETAIL_H_
| 1,928
| 20.197802
| 79
|
h
|
null |
pytorch-main/caffe2/utils/math.h
|
#ifndef CAFFE2_UTILS_MATH_H_
#define CAFFE2_UTILS_MATH_H_
// This is a simple translation from the old Caffe math interfaces. We aim to
// keep it simple, so that all platforms can support it fairly easily.
// We include the cblas header here so that we can obtain the macros from cblas.
extern "C" {
#include "caffe2/utils/cblas.h"
}
#ifdef CAFFE2_USE_ACCELERATE
#include <Accelerate/Accelerate.h>
#endif // CAFFE2_USE_ACCELERATE
#include "caffe2/core/common.h"
#include "caffe2/core/types.h"
#include "caffe2/utils/math/broadcast.h"
#include "caffe2/utils/math/elementwise.h"
#include "caffe2/utils/math/reduce.h"
#include "caffe2/utils/math/transpose.h"
#include "caffe2/utils/math/utils.h"
namespace caffe2 {
// TODO: Change dims related arguments to int64_t?
class Tensor;
// An empty class as a placeholder for a math function that has no specific
// engine specified.
class TORCH_API DefaultEngine {};
namespace math {
#define C10_DECLARE_COMPARE_OP(Comp) \
template <typename T, class Context, bool kBroadcast1st = false> \
void Rowwise##Comp( \
const int rows, \
const int cols, \
const T* A, \
const T* B, \
bool* C, \
Context* context); \
\
template <typename T, class Context, bool kBroadcast1st = false> \
void Colwise##Comp( \
const int rows, \
const int cols, \
const T* A, \
const T* B, \
bool* C, \
Context* context); \
\
template <typename T, class Context> \
void Comp( \
const int A_ndim, \
const int* A_dims, \
const int B_ndim, \
const int* B_dims, \
const T* A, \
const T* B, \
bool* C, \
Context* context);
C10_DECLARE_COMPARE_OP(EQ)
C10_DECLARE_COMPARE_OP(NE)
C10_DECLARE_COMPARE_OP(LT)
C10_DECLARE_COMPARE_OP(LE)
C10_DECLARE_COMPARE_OP(GT)
C10_DECLARE_COMPARE_OP(GE)
#undef C10_DECLARE_COMPARE_OP
#define C10_DECLARE_BINARY_OP(Func) \
template <typename T, class Context, bool kBroadcast1st = false> \
void Rowwise##Func( \
const int rows, \
const int cols, \
const T* A, \
const T* B, \
T* C, \
Context* context); \
\
template <typename T, class Context, bool kBroadcast1st = false> \
void Colwise##Func( \
const int rows, \
const int cols, \
const T* A, \
const T* B, \
T* C, \
Context* context); \
\
template <typename T, class Context> \
void Func( \
const int A_ndim, \
const int* A_dims, \
const int B_ndim, \
const int* B_dims, \
const T* A, \
const T* B, \
T* C, \
Context* context);
C10_DECLARE_BINARY_OP(Add)
C10_DECLARE_BINARY_OP(Sub)
C10_DECLARE_BINARY_OP(Mul)
C10_DECLARE_BINARY_OP(Div)
C10_DECLARE_BINARY_OP(And)
C10_DECLARE_BINARY_OP(Or)
C10_DECLARE_BINARY_OP(Xor)
C10_DECLARE_BINARY_OP(BitwiseAnd)
C10_DECLARE_BINARY_OP(BitwiseOr)
C10_DECLARE_BINARY_OP(BitwiseXor)
#undef C10_DECLARE_BINARY_OP
// Broadcasts X with X_dims to Y with Y_dims.
template <typename T, class Context>
TORCH_API void Broadcast(
const int X_ndim,
const int* X_dims,
const int Y_ndim,
const int* Y_dims,
const T alpha,
const T* X,
T* Y,
Context* context,
bool allow_broadcast_fastpath=false);
// Computes inv_std from variance.
template <typename T, class Context>
TORCH_API void InvStd(
const int N,
const T epsilon,
const T* var,
T* inv_std,
Context* context);
// Adds batch sub-tensors elementwise to output. Stripe is the stripe length
// and N is the number of elements to add (size of Y).
template <typename T, class Context>
TORCH_API void AddStripedBatch(
const int N,
const T* first,
T* y,
const int stripe,
const int batch,
Context* context);
// Compute the row-wise max of a N*D matrix X, and write it to a N
// dimensional vector y.
template <typename T, class Context>
TORCH_API void
RowwiseMax(const int N, const int D, const T* x, T* y, Context* context);
// Compute the column-wise max of a N*D matrix X, and write it to a D
// dimensional vector y.
template <typename T, class Context>
TORCH_API void
ColwiseMax(const int N, const int D, const T* x, T* y, Context* context);
// Elemwise maximum of vector x and scalar alpha. y[i] = max(x[i], alpha)
template <typename T, class Context>
TORCH_API void
Maximum(const int N, const float alpha, const T* x, T* y, Context* context);
// Decaf gemm provides a simpler interface to the gemm functions, with the
// limitation that the data has to be contiguous in memory.
template <typename T, class Context, class Engine = DefaultEngine>
TORCH_API void Gemm(
const CBLAS_TRANSPOSE trans_A,
const CBLAS_TRANSPOSE trans_B,
const int M,
const int N,
const int K,
const float alpha,
const T* A,
const T* B,
const float beta,
T* C,
Context* context,
TensorProto::DataType math_type = TensorProto_DataType_FLOAT);
// We also provide a gemm that has explicit lda, ldb and ldc specified.
// In most cases you probably want to use the function above, though.
template <typename T, class Context, class Engine = DefaultEngine>
TORCH_API void GemmEx(
const CBLAS_TRANSPOSE trans_A,
const CBLAS_TRANSPOSE trans_B,
const int M,
const int N,
const int K,
const T alpha,
const T* A,
const int lda,
const T* B,
const int ldb,
const T beta,
T* C,
const int ldc,
Context* context);
// GemmBatched provides a simple abstraction over batched gemm library routines
template <typename T, class Context, class Engine = DefaultEngine>
TORCH_API void GemmBatched(
const CBLAS_TRANSPOSE trans_A,
const CBLAS_TRANSPOSE trans_B,
const int batch_size,
const int M,
const int N,
const int K,
const float alpha,
const T** A,
const T** B,
const float beta,
T** C,
Context* context,
TensorProto::DataType math_type = TensorProto_DataType_FLOAT);
template <typename T, class Context, class Engine = DefaultEngine>
TORCH_API void GemmStridedBatched(
const CBLAS_TRANSPOSE trans_A,
const CBLAS_TRANSPOSE trans_B,
const int batch_size,
const int M,
const int N,
const int K,
const float alpha,
const T* A,
const int A_stride,
const T* B,
const int B_stride,
const float beta,
T* C,
const int C_stride,
Context* context,
TensorProto::DataType math_type = TensorProto_DataType_FLOAT);
// Gemv always takes in an M*N matrix A, and depending on whether we set TransA
// to Trans, the output is:
// CblasNoTrans: x is an N dim vector and y is an M dim vector.
// CblasTrans: x is an M dim vector and y is an N dim vector.
template <typename T, class Context, class Engine = DefaultEngine>
TORCH_API void Gemv(
const CBLAS_TRANSPOSE trans_A,
const int M,
const int N,
const float alpha,
const T* A,
const T* x,
const float beta,
T* y,
Context* context,
TensorProto::DataType math_type = TensorProto_DataType_FLOAT);
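// Usage sketch (illustrative buffers): with CblasNoTrans and a 2x3 matrix A,
// x must hold 3 elements and y receives 2, computing y = alpha * A * x + beta * y:
//   math::Gemv<float, CPUContext>(CblasNoTrans, 2, 3, 1.0f, A, x, 0.0f, y, &ctx);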
template <typename T, class Context>
TORCH_API void
RandUniform(const size_t n, const T a, const T b, T* r, Context* context);
// Generate n values that sum up to a fixed sum, subject to the restriction
// a <= x <= b for each generated x
template <typename T, class Context>
TORCH_API void RandFixedSum(
const size_t n,
const T a,
const T b,
const T sum,
T* r,
Context* context);
template <typename T, class Context>
TORCH_API void RandUniformUnique(
const size_t n,
const T a,
const T b,
T* r,
const size_t m,
const T* avoid,
Context* context);
// Generate n values from a synthetic data distribution, defined by unique
// accesses and stack distances
template <typename T, class Context>
TORCH_API void
RandSyntheticData(const size_t n, const T a, const T b, T* r, Context* context);
template <typename T, class Context>
TORCH_API void
RandGaussian(const size_t n, const T mean, const T std, T* r, Context* context);
// Computes the dot product of vectors a and b, and writes the result to a single value y.
template <typename T, class Context>
TORCH_API void
Dot(const int N, const T* a, const T* b, T* y, Context* context);
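// Usage sketch (illustrative values):
//   float a[3] = {1, 2, 3}, b[3] = {4, 5, 6}, y;
//   math::Dot<float, CPUContext>(3, a, b, &y, &ctx);  // y == 32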
// Sum of vector x, and writes the result to a single value y.
template <typename T, class Context>
TORCH_API void Sum(
const int N,
const T* x,
T* y,
Context* context,
Tensor* scratch_ptr = nullptr);
// Sum of squares of vector x, and writes the result to a single value y.
template <typename T, class Context>
TORCH_API void SumSqr(
const int N,
const T* x,
T* y,
Context* context,
Tensor* scratch_ptr = nullptr);
// Select does index selection on the rows of an N*D matrix x, and gives the
// N-dimensional vector y that contains the selected data.
template <typename T, class Context>
TORCH_API void Select(
const int N,
const int D,
const T* x,
const int* idx,
T* y,
Context* context);
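// Worked example (illustrative): with N = 2, D = 3 and idx = {2, 0}, the
// expected result is y[0] = x[0 * 3 + 2] and y[1] = x[1 * 3 + 0], i.e. one
// element picked per row.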
// groups must be 1 for GPU
// For NHWC order with groups > 1, the result will be laid out in
// NHW G RS C/G order so that data within the same group is contiguous.
// For NCHW order, groups makes no difference because we do Im2Col
// for each N, and C is the slowest-moving dimension among CHW.
template <typename T, class Context, StorageOrder kOrder>
TORCH_API void Im2Col(
const int channels,
const int height,
const int width,
const int kernel_h,
const int kernel_w,
const int dilation_h,
const int dilation_w,
const int pad_t,
const int pad_l,
const int pad_b,
const int pad_r,
const int stride_h,
const int stride_w,
const T* img_data,
T* col_data,
Context* context,
const int groups = 1);
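// For reference (standard convolution shape arithmetic, stated here for
// convenience), the spatial output size implied by these parameters is:
//   output_h = (height + pad_t + pad_b - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1
//   output_w = (width  + pad_l + pad_r - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1
// so in the NCHW case col_data holds channels * kernel_h * kernel_w rows of
// output_h * output_w values.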
// groups must be 1 for GPU
template <typename T, class Context, StorageOrder kOrder>
TORCH_API void Im2ColNd(
const int N,
const int img_size,
const int col_size,
const int* img_shape,
const int* col_shape,
const int* kernel_shape,
const int* stride,
const int* dilation,
const int* pad,
const T* img_data,
T* col_data,
Context* context,
const int groups = 1);
// groups must be 1 for GPU
// For NHWC order with groups > 1, the result will be laid out in
// NHW G RS C/G order so that data within the same group is contiguous.
// For NCHW order, groups makes no difference because we do Im2Col
// for each N, and C is the slowest-moving dimension among CHW.
template <typename T, class Context, StorageOrder kOrder>
TORCH_API void Col2Im(
const int channels,
const int height,
const int width,
const int patch_h,
const int patch_w,
const int dilation_h,
const int dilation_w,
const int pad_t,
const int pad_l,
const int pad_b,
const int pad_r,
const int stride_h,
const int stride_w,
const T* col_data,
T* img_data,
Context* context,
const int groups = 1);
// groups must be 1 for GPU
// For NHWC order with groups > 1, the result will be laid out in
// NHW G RS C/G order so that data within the same group is contiguous.
// For NCHW order, groups makes no difference because we do Im2Col
// for each N, and C is the slowest-moving dimension among CHW.
template <typename T, class Context, StorageOrder kOrder>
TORCH_API void Col2ImNd(
const int N,
const int img_size,
const int col_size,
const int* img_shape,
const int* col_shape,
const int* kernel_shape,
const int* stride,
const int* dilation,
const int* pad,
const T* col_data,
T* img_data,
Context* context,
const int groups = 1);
// Applies a per-channel bias value to each channel of the input
// image. image_size is H * W
template <typename T, class Context>
TORCH_API void BiasCHW(
const T* bias,
const T* bias_multiplier,
const int bias_channels,
const int image_size,
T* image,
Context* context);
template <class Context>
TORCH_API void CopyMatrix(
const size_t item_size,
const int M,
const int N,
const void* A,
const int lda,
void* B,
const int ldb,
Context* context,
TypeMeta::Copy copy = nullptr);
template <typename T, class Context>
TORCH_API void CopyMatrix(
const int M,
const int N,
const T* A,
const int lda,
T* B,
const int ldb,
Context* context);
template <typename T, class Context>
TORCH_API void CopyMatrix(
const int M,
const int N,
const T* A,
const int A_outer_stride,
const int A_inner_stride,
T* B,
const int B_outer_stride,
const int B_inner_stride,
Context* context);
template <typename T, class Context>
TORCH_API void CopyVector(const int N, const T* A, T* B, Context* context);
} // namespace math
} // namespace caffe2
#include "caffe2/utils/math-detail.h"
#endif // CAFFE2_UTILS_MATH_H_
| 15,251 | 31.589744 | 80 | h |

null | pytorch-main/caffe2/utils/murmur_hash3.h |
//-----------------------------------------------------------------------------
// MurmurHash3 was written by Austin Appleby, and is placed in the public
// domain. The author hereby disclaims copyright to this source code.
#pragma once
//-----------------------------------------------------------------------------
// Platform-specific functions and macros
// Microsoft Visual Studio
#if defined(_MSC_VER) && (_MSC_VER < 1600)
typedef unsigned char uint8_t;
typedef unsigned int uint32_t;
typedef unsigned __int64 uint64_t;
// Other compilers
#else // defined(_MSC_VER)
#include <stdint.h>
#endif // !defined(_MSC_VER)
namespace caffe2 {
void MurmurHash3_x86_32(const void* key, int len, uint32_t seed, void* out);
void MurmurHash3_x86_128(const void* key, int len, uint32_t seed, void* out);
void MurmurHash3_x64_128(const void* key, int len, uint32_t seed, void* out);
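// Usage sketch (seed choice is arbitrary):
//   std::string key = "caffe2";
//   uint32_t h;
//   MurmurHash3_x86_32(key.data(), static_cast<int>(key.size()), 0, &h);
// The 128-bit variants write 16 bytes to `out`, e.g. into a uint64_t[2].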
} // namespace caffe2
| 909 | 25 | 79 | h |

null | pytorch-main/caffe2/utils/proto_utils.h |
#ifndef CAFFE2_UTILS_PROTO_UTILS_H_
#define CAFFE2_UTILS_PROTO_UTILS_H_
#ifdef CAFFE2_USE_LITE_PROTO
#include <google/protobuf/message_lite.h>
#else // CAFFE2_USE_LITE_PROTO
#include <google/protobuf/message.h>
#endif // !CAFFE2_USE_LITE_PROTO
#include <c10/util/Logging.h>
#include <c10/util/string_view.h>
#include <c10/util/irange.h>
#include "caffe2/utils/proto_wrap.h"
#include "caffe2/proto/caffe2_pb.h"
#ifndef C10_ANDROID
#define CAFFE2_ENABLE_REDUCED_STRINGS_IN_ARGUMENT_LOOKUP
#define CAFFE2_ARG_MAP_FIND(map, key) map.find(key)
#else
#define CAFFE2_ARG_MAP_FIND(map, key) map.find(std::string(key))
#endif
namespace caffe2 {
using std::string;
using ::google::protobuf::MessageLite;
// A wrapper function to return the device name string for use in blob
// serialization / deserialization. This should have a one-to-one
// correspondence with caffe2/proto/caffe2.proto: enum DeviceType.
//
// Note that we can't use DeviceType_Name, because that is only available in
// protobuf-full, and some platforms (like mobile) may want to use
// protobuf-lite instead.
TORCH_API std::string DeviceTypeName(const int32_t& d);
TORCH_API int DeviceId(const DeviceOption& option);
// Returns if the two DeviceOptions are pointing to the same device.
TORCH_API bool IsSameDevice(const DeviceOption& lhs, const DeviceOption& rhs);
TORCH_API bool IsCPUDeviceType(int device_type);
TORCH_API bool IsGPUDeviceType(int device_type);
// Common interfaces that read file contents into a string.
TORCH_API bool ReadStringFromFile(const char* filename, string* str);
TORCH_API bool WriteStringToFile(const string& str, const char* filename);
// Common interfaces that are supported by both lite and full protobuf.
TORCH_API bool ReadProtoFromBinaryFile(const char* filename, MessageLite* proto);
inline bool ReadProtoFromBinaryFile(const string filename, MessageLite* proto) {
return ReadProtoFromBinaryFile(filename.c_str(), proto);
}
TORCH_API void WriteProtoToBinaryFile(const MessageLite& proto, const char* filename);
inline void WriteProtoToBinaryFile(const MessageLite& proto,
const string& filename) {
return WriteProtoToBinaryFile(proto, filename.c_str());
}
#ifdef CAFFE2_USE_LITE_PROTO
namespace TextFormat {
inline bool ParseFromString(const string& spec, MessageLite* proto) {
LOG(FATAL) << "If you are running lite version, you should not be "
<< "calling any text-format protobuffers.";
return false;
}
} // namespace TextFormat
TORCH_API string ProtoDebugString(const MessageLite& proto);
TORCH_API bool ParseProtoFromLargeString(const string& str, MessageLite* proto);
// Text format MessageLite wrappers: these functions do nothing but allow
// things to compile. They will produce a runtime error if you use
// MessageLite but still want text support.
inline bool ReadProtoFromTextFile(
const char* /*filename*/,
MessageLite* /*proto*/) {
LOG(FATAL) << "If you are running lite version, you should not be "
<< "calling any text-format protobuffers.";
return false; // Just to suppress compiler warning.
}
inline bool ReadProtoFromTextFile(const string filename, MessageLite* proto) {
return ReadProtoFromTextFile(filename.c_str(), proto);
}
inline void WriteProtoToTextFile(
const MessageLite& /*proto*/,
const char* /*filename*/,
bool throwIfError = true) {
LOG(FATAL) << "If you are running lite version, you should not be "
<< "calling any text-format protobuffers.";
}
inline void WriteProtoToTextFile(const MessageLite& proto,
const string& filename,
bool throwIfError = true) {
return WriteProtoToTextFile(proto, filename.c_str(), throwIfError);
}
inline bool ReadProtoFromFile(const char* filename, MessageLite* proto) {
return (ReadProtoFromBinaryFile(filename, proto) ||
ReadProtoFromTextFile(filename, proto));
}
inline bool ReadProtoFromFile(const string& filename, MessageLite* proto) {
return ReadProtoFromFile(filename.c_str(), proto);
}
#else // CAFFE2_USE_LITE_PROTO
using ::google::protobuf::Message;
namespace TextFormat {
TORCH_API bool ParseFromString(const string& spec, Message* proto);
} // namespace TextFormat
TORCH_API string ProtoDebugString(const Message& proto);
TORCH_API bool ParseProtoFromLargeString(const string& str, Message* proto);
TORCH_API bool ReadProtoFromTextFile(const char* filename, Message* proto);
inline bool ReadProtoFromTextFile(const string filename, Message* proto) {
return ReadProtoFromTextFile(filename.c_str(), proto);
}
TORCH_API void WriteProtoToTextFile(const Message& proto, const char* filename, bool throwIfError = true);
inline void WriteProtoToTextFile(const Message& proto, const string& filename, bool throwIfError = true) {
return WriteProtoToTextFile(proto, filename.c_str(), throwIfError);
}
// Read Proto from a file, letting the code figure out if it is text or binary.
inline bool ReadProtoFromFile(const char* filename, Message* proto) {
return (ReadProtoFromBinaryFile(filename, proto) ||
ReadProtoFromTextFile(filename, proto));
}
inline bool ReadProtoFromFile(const string& filename, Message* proto) {
return ReadProtoFromFile(filename.c_str(), proto);
}
#endif // CAFFE2_USE_LITE_PROTO
template <
class IterableInputs = std::initializer_list<string>,
class IterableOutputs = std::initializer_list<string>,
class IterableArgs = std::initializer_list<Argument>>
OperatorDef CreateOperatorDef(
const string& type,
const string& name,
const IterableInputs& inputs,
const IterableOutputs& outputs,
const IterableArgs& args,
const DeviceOption& device_option = DeviceOption(),
const string& engine = "") {
OperatorDef def;
def.set_type(type);
def.set_name(name);
for (const string& in : inputs) {
def.add_input(in);
}
for (const string& out : outputs) {
def.add_output(out);
}
for (const Argument& arg : args) {
def.add_arg()->CopyFrom(arg);
}
if (device_option.has_device_type()) {
def.mutable_device_option()->CopyFrom(device_option);
}
if (engine.size()) {
def.set_engine(engine);
}
return def;
}
// A simplified version of the full CreateOperatorDef above, for use when you
// do not need to specify args.
template <
class IterableInputs = std::initializer_list<string>,
class IterableOutputs = std::initializer_list<string>>
inline OperatorDef CreateOperatorDef(
const string& type,
const string& name,
const IterableInputs& inputs,
const IterableOutputs& outputs,
const DeviceOption& device_option = DeviceOption(),
const string& engine = "") {
return CreateOperatorDef(
type,
name,
inputs,
outputs,
std::vector<Argument>(),
device_option,
engine);
}
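// Usage sketch (operator type and blob names are illustrative):
//   OperatorDef op = CreateOperatorDef("Relu", "relu1", {"X"}, {"Y"});
// The brace lists bind to the IterableInputs/IterableOutputs defaults.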
TORCH_API bool HasOutput(const OperatorDef& op, const std::string& output);
TORCH_API bool HasInput(const OperatorDef& op, const std::string& input);
/**
* @brief A helper class to index into arguments.
*
* This helper helps us to more easily index into a set of arguments
* that are present in the operator. To save memory, the argument helper
* does not copy the operator def, so one would need to make sure that the
* lifetime of the OperatorDef object outlives that of the ArgumentHelper.
*/
class C10_EXPORT ArgumentHelper {
public:
template <typename Def>
static bool HasArgument(const Def& def, c10::string_view name) {
return ArgumentHelper(def).HasArgument(name);
}
template <typename Def, typename T>
static T GetSingleArgument(
const Def& def,
c10::string_view name,
const T& default_value) {
return ArgumentHelper(def).GetSingleArgument<T>(name, default_value);
}
template <typename Def, typename T>
static bool HasSingleArgumentOfType(const Def& def, c10::string_view name) {
return ArgumentHelper(def).HasSingleArgumentOfType<T>(name);
}
template <typename Def, typename T>
static std::vector<T> GetRepeatedArgument(
const Def& def,
c10::string_view name,
const std::vector<T>& default_value = std::vector<T>()) {
return ArgumentHelper(def).GetRepeatedArgument<T>(name, default_value);
}
template <typename Def, typename MessageType>
static MessageType GetMessageArgument(const Def& def, c10::string_view name) {
return ArgumentHelper(def).GetMessageArgument<MessageType>(name);
}
template <typename Def, typename MessageType>
static std::vector<MessageType> GetRepeatedMessageArgument(
const Def& def,
c10::string_view name) {
return ArgumentHelper(def).GetRepeatedMessageArgument<MessageType>(name);
}
template <typename Def>
static bool RemoveArgument(Def& def, int index) {
if (index >= def.arg_size()) {
return false;
}
if (index < def.arg_size() - 1) {
def.mutable_arg()->SwapElements(index, def.arg_size() - 1);
}
def.mutable_arg()->RemoveLast();
return true;
}
explicit ArgumentHelper(const OperatorDef& def);
explicit ArgumentHelper(const NetDef& netdef);
bool HasArgument(c10::string_view name) const;
template <typename T>
T GetSingleArgument(c10::string_view name, const T& default_value) const;
template <typename T>
bool HasSingleArgumentOfType(c10::string_view name) const;
template <typename T>
std::vector<T> GetRepeatedArgument(
c10::string_view name,
const std::vector<T>& default_value = std::vector<T>()) const;
template <typename MessageType>
MessageType GetMessageArgument(c10::string_view name) const {
auto it = CAFFE2_ARG_MAP_FIND(arg_map_, name);
CAFFE_ENFORCE(it != arg_map_.end(), "Cannot find parameter named ", name);
MessageType message;
if (it->second.has_s()) {
CAFFE_ENFORCE(
message.ParseFromString(it->second.s()),
"Failed to parse content from the string");
} else {
VLOG(1) << "Return empty message for parameter " << name;
}
return message;
}
template <typename MessageType>
std::vector<MessageType> GetRepeatedMessageArgument(c10::string_view name) const {
auto it = CAFFE2_ARG_MAP_FIND(arg_map_, name);
CAFFE_ENFORCE(it != arg_map_.end(), "Cannot find parameter named ", name);
std::vector<MessageType> messages(it->second.strings_size());
for (int i = 0; i < messages.size(); ++i) {
CAFFE_ENFORCE(
messages[i].ParseFromString(it->second.strings(i)),
"Failed to parse content from the string");
}
return messages;
}
private:
std::map<string, Argument
#ifdef CAFFE2_ENABLE_REDUCED_STRINGS_IN_ARGUMENT_LOOKUP
, std::less<>
#endif
> arg_map_;
};
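// Usage sketch (argument names are hypothetical):
//   ArgumentHelper helper(op_def);
//   const float eps = helper.GetSingleArgument<float>("epsilon", 1e-5f);
//   const auto axes = helper.GetRepeatedArgument<int>("axes");
// Per the lifetime caveat above, op_def must outlive helper.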
// **** Arguments Utils *****
// Helper methods to get an argument from OperatorDef or NetDef given argument
// name. Throws if argument does not exist.
TORCH_API const Argument& GetArgument(const OperatorDef& def, c10::string_view name);
TORCH_API const Argument& GetArgument(const NetDef& def, c10::string_view name);
// Helper methods to get an argument from OperatorDef or NetDef given argument
// name. Returns nullptr if argument does not exist.
TORCH_API const Argument* GetArgumentPtr(const OperatorDef& def, c10::string_view name);
TORCH_API const Argument* GetArgumentPtr(const NetDef& def, c10::string_view name);
// Helper methods to query a boolean argument flag from OperatorDef or NetDef
// given argument name. If argument does not exist, return default value.
// Throws if argument exists but the type is not boolean.
TORCH_API bool GetFlagArgument(
const OperatorDef& def,
c10::string_view name,
bool default_value = false);
TORCH_API bool GetFlagArgument(
const NetDef& def,
c10::string_view name,
bool default_value = false);
TORCH_API Argument* GetMutableArgument(
const string& name,
const bool create_if_missing,
OperatorDef* def);
TORCH_API Argument* GetMutableArgument(
const string& name,
const bool create_if_missing,
NetDef* def);
template <typename T>
TORCH_API Argument MakeArgument(const string& name, const T& value);
template <typename T, typename Def>
inline void AddArgument(const string& name, const T& value, Def* def) {
GetMutableArgument(name, true, def)->CopyFrom(MakeArgument(name, value));
}
// **** End Arguments Utils *****
bool inline operator==(const DeviceOption& dl, const DeviceOption& dr) {
return IsSameDevice(dl, dr);
}
// Given a net, modify the external inputs/outputs if necessary so that
// the following conditions are met
// - No duplicate external inputs
// - No duplicate external outputs
// - Going through list of ops in order, all op inputs must be outputs
// from other ops, or registered as external inputs.
// - All external outputs must be outputs of some operators.
TORCH_API void cleanupExternalInputsAndOutputs(NetDef* net);
} // namespace caffe2
namespace std {
template <>
struct hash<caffe2::DeviceOption> {
typedef caffe2::DeviceOption argument_type;
typedef std::size_t result_type;
result_type operator()(argument_type const& device_option) const {
std::string serialized;
CAFFE_ENFORCE(device_option.SerializeToString(&serialized));
return std::hash<std::string>{}(serialized);
}
};
} // namespace std
#endif // CAFFE2_UTILS_PROTO_UTILS_H_
| 13,283 | 33.59375 | 106 | h |

null | pytorch-main/caffe2/utils/signal_handler.h |
#pragma once
#include <c10/util/signal_handler.h>
namespace caffe2 {
#if defined(C10_SUPPORTS_FATAL_SIGNAL_HANDLERS)
class TORCH_API C2FatalSignalHandler : public c10::FatalSignalHandler {
public:
void fatalSignalHandlerPostProcess() override;
static C2FatalSignalHandler& getInstance();
private:
explicit C2FatalSignalHandler();
};
// This works by setting up certain fatal signal handlers. Previous fatal
// signal handlers will still be called when the signal is raised. Defaults
// to being off.
TORCH_API void setPrintStackTracesOnFatalSignal(bool print);
TORCH_API bool printStackTracesOnFatalSignal();
#endif // defined(C10_SUPPORTS_FATAL_SIGNAL_HANDLERS)
} // namespace caffe2
| 698 | 26.96 | 75 | h |

null | pytorch-main/caffe2/utils/simple_queue.h |
#ifndef CAFFE2_UTILS_SIMPLE_QUEUE_H_
#define CAFFE2_UTILS_SIMPLE_QUEUE_H_
#include <condition_variable> // NOLINT
#include <mutex> // NOLINT
#include <queue>
#include <c10/util/Logging.h>
namespace caffe2 {
// This is a very simple queue that Yangqing wrote when bottlefeeding the baby,
// so don't take it seriously. What it provides is a minimal thread-safe queue
// that allows me to run a network as a DAG.
//
// A usual work pattern looks like this: one or multiple producers push jobs
// into this queue, and one or multiple workers pops jobs from this queue. If
// nothing is in the queue but NoMoreJobs() is not called yet, the pop calls
// will wait. If NoMoreJobs() has been called, pop calls will return false,
// which serves as a message to the workers that they should exit.
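// A minimal sketch of that pattern (types and values are illustrative):
//   SimpleQueue<int> queue;
//   // Producer:
//   queue.Push(42);
//   queue.NoMoreJobs();  // close the queue once all jobs are pushed
//   // Worker:
//   int job;
//   while (queue.Pop(&job)) { /* process job */ }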
template <typename T>
class SimpleQueue {
public:
SimpleQueue() : no_more_jobs_(false) {}
// Pops a value and writes it to the value pointer. If there is nothing in the
// queue, this will wait till a value is inserted into the queue. If there are
// no more jobs to pop, the function returns false. Otherwise, it returns
// true.
bool Pop(T* value) {
std::unique_lock<std::mutex> mutex_lock(mutex_);
while (queue_.size() == 0 && !no_more_jobs_) cv_.wait(mutex_lock);
if (queue_.size() == 0 && no_more_jobs_) return false;
*value = queue_.front();
queue_.pop();
return true;
}
int size() {
std::unique_lock<std::mutex> mutex_lock(mutex_);
return queue_.size();
}
// Push pushes a value to the queue.
void Push(const T& value) {
{
std::lock_guard<std::mutex> mutex_lock(mutex_);
CAFFE_ENFORCE(!no_more_jobs_, "Cannot push to a closed queue.");
queue_.push(value);
}
cv_.notify_one();
}
// NoMoreJobs() marks the close of this queue. It also notifies all waiting
// Pop() calls so that they either check out remaining jobs, or return false.
// After NoMoreJobs() is called, this queue is considered closed - no more
// Push() functions are allowed, and once existing items are all checked out
// by the Pop() functions, any more Pop() function will immediately return
// false with nothing set to the value.
void NoMoreJobs() {
{
std::lock_guard<std::mutex> mutex_lock(mutex_);
no_more_jobs_ = true;
}
cv_.notify_all();
}
private:
std::mutex mutex_;
std::condition_variable cv_;
std::queue<T> queue_;
bool no_more_jobs_{};
// We do not allow copy constructors.
SimpleQueue(const SimpleQueue& /*src*/) {}
};
} // namespace caffe2
#endif // CAFFE2_UTILS_SIMPLE_QUEUE_H_
| 2,602 | 31.5375 | 80 | h |

null | pytorch-main/caffe2/utils/smart_tensor_printer.h |
#pragma once
#include "caffe2/core/tensor.h"
namespace caffe2 {
// This is a wrapper around the TensorPrinter that doesn't require the user to
// explicitly specify the type of the tensor while calling the Print() method.
// It also supports a convenience function with a default constructed printer
// as a static method.
class TORCH_API SmartTensorPrinter {
public:
// The proliferation of constructors gives feature parity with TensorPrinter
// without repeating the default arguments explicitly, in case they change in
// the future.
SmartTensorPrinter() = default;
explicit SmartTensorPrinter(const std::string& tensor_name);
SmartTensorPrinter(
const std::string& tensor_name,
const std::string& file_name);
SmartTensorPrinter(
const std::string& tensor_name,
const std::string& file_name,
int limit);
void Print(const Tensor& tensor);
void PrintMeta(const Tensor& tensor) {
tensorPrinter_.PrintMeta(tensor);
}
// Uses a default constructed SmartTensorPrinter
static void PrintTensor(const Tensor& tensor);
// Uses a default constructed SmartTensorPrinter
void PrintTensorMeta(const Tensor& tensor) {
DefaultTensorPrinter().PrintMeta(tensor);
}
private:
// Returns a thread local default constructed TensorPrinter
static SmartTensorPrinter& DefaultTensorPrinter();
TensorPrinter tensorPrinter_;
};
}
| 1,402 | 26.509804 | 80 | h |

null | pytorch-main/caffe2/utils/string_utils.h |
#pragma once
#include <algorithm>
#include <memory>
#include <string>
#include <vector>
#include <c10/macros/Export.h>
namespace caffe2 {
TORCH_API std::vector<std::string>
split(char separator, const std::string& string, bool ignore_empty = false);
TORCH_API std::string trim(const std::string& str);
TORCH_API size_t editDistance(
const std::string& s1,
const std::string& s2,
size_t max_distance = 0);
TORCH_API inline bool StartsWith(
const std::string& str,
const std::string& prefix) {
return str.length() >= prefix.length() &&
std::mismatch(prefix.begin(), prefix.end(), str.begin()).first ==
prefix.end();
}
TORCH_API inline bool EndsWith(
const std::string& full,
const std::string& ending) {
if (full.length() >= ending.length()) {
return (
0 ==
full.compare(full.length() - ending.length(), ending.length(), ending));
} else {
return false;
}
}
TORCH_API int32_t editDistanceHelper(
const char* s1,
size_t s1_len,
const char* s2,
size_t s2_len,
std::vector<size_t>& current,
std::vector<size_t>& previous,
std::vector<size_t>& previous1,
size_t max_distance);
} // namespace caffe2
| 1,206 | 22.211538 | 80 | h |

null | pytorch-main/caffe2/utils/zmq_helper.h |
#ifndef CAFFE2_UTILS_ZMQ_HELPER_H_
#define CAFFE2_UTILS_ZMQ_HELPER_H_
#include <zmq.h>
#include "caffe2/core/logging.h"
namespace caffe2 {
class ZmqContext {
public:
explicit ZmqContext(int io_threads) : ptr_(zmq_ctx_new()) {
CAFFE_ENFORCE(ptr_ != nullptr, "Failed to create zmq context.");
int rc = zmq_ctx_set(ptr_, ZMQ_IO_THREADS, io_threads);
CAFFE_ENFORCE_EQ(rc, 0);
rc = zmq_ctx_set(ptr_, ZMQ_MAX_SOCKETS, ZMQ_MAX_SOCKETS_DFLT);
CAFFE_ENFORCE_EQ(rc, 0);
}
~ZmqContext() {
int rc = zmq_ctx_destroy(ptr_);
CAFFE_ENFORCE_EQ(rc, 0);
}
void* ptr() { return ptr_; }
private:
void* ptr_;
C10_DISABLE_COPY_AND_ASSIGN(ZmqContext);
};
class ZmqMessage {
public:
ZmqMessage() {
int rc = zmq_msg_init(&msg_);
CAFFE_ENFORCE_EQ(rc, 0);
}
~ZmqMessage() {
int rc = zmq_msg_close(&msg_);
CAFFE_ENFORCE_EQ(rc, 0);
}
zmq_msg_t* msg() { return &msg_; }
void* data() { return zmq_msg_data(&msg_); }
size_t size() { return zmq_msg_size(&msg_); }
private:
zmq_msg_t msg_;
C10_DISABLE_COPY_AND_ASSIGN(ZmqMessage);
};
class ZmqSocket {
public:
explicit ZmqSocket(int type)
: context_(1), ptr_(zmq_socket(context_.ptr(), type)) {
CAFFE_ENFORCE(ptr_ != nullptr, "Failed to create zmq socket.");
}
~ZmqSocket() {
int rc = zmq_close(ptr_);
CAFFE_ENFORCE_EQ(rc, 0);
}
void Bind(const string& addr) {
int rc = zmq_bind(ptr_, addr.c_str());
CAFFE_ENFORCE_EQ(rc, 0);
}
void Unbind(const string& addr) {
int rc = zmq_unbind(ptr_, addr.c_str());
CAFFE_ENFORCE_EQ(rc, 0);
}
void Connect(const string& addr) {
int rc = zmq_connect(ptr_, addr.c_str());
CAFFE_ENFORCE_EQ(rc, 0);
}
void Disconnect(const string& addr) {
int rc = zmq_disconnect(ptr_, addr.c_str());
CAFFE_ENFORCE_EQ(rc, 0);
}
  int Send(const string& msg, int flags) {
    // zmq_send returns the number of bytes sent on success, or -1 on failure,
    // so we must check for a non-negative result rather than a non-zero one.
    int nbytes = zmq_send(ptr_, msg.c_str(), msg.size(), flags);
    if (nbytes >= 0) {
      return nbytes;
    } else if (zmq_errno() == EAGAIN) {
      return 0;
    } else {
      LOG(FATAL) << "Cannot send zmq message. Error number: "
                 << zmq_errno();
      return 0;
    }
  }
int SendTillSuccess(const string& msg, int flags) {
CAFFE_ENFORCE(msg.size(), "You cannot send an empty message.");
int nbytes = 0;
do {
nbytes = Send(msg, flags);
} while (nbytes == 0);
return nbytes;
}
int Recv(ZmqMessage* msg) {
int nbytes = zmq_msg_recv(msg->msg(), ptr_, 0);
if (nbytes >= 0) {
return nbytes;
} else if (zmq_errno() == EAGAIN || zmq_errno() == EINTR) {
return 0;
} else {
LOG(FATAL) << "Cannot receive zmq message. Error number: "
<< zmq_errno();
return 0;
}
}
int RecvTillSuccess(ZmqMessage* msg) {
int nbytes = 0;
do {
nbytes = Recv(msg);
} while (nbytes == 0);
return nbytes;
}
private:
ZmqContext context_;
void* ptr_;
};
} // namespace caffe2
#endif // CAFFE2_UTILS_ZMQ_HELPER_H_
| 3,020 | 20.891304 | 68 | h |

null | pytorch-main/caffe2/utils/math/elementwise.h |
#ifndef CAFFE2_UTILS_MATH_ELEMENTWISE_H_
#define CAFFE2_UTILS_MATH_ELEMENTWISE_H_
#include "caffe2/core/common.h"
#include "caffe2/core/types.h"
namespace caffe2 {
namespace math {
template <typename T, class Context>
TORCH_API void Exp(int N, const T* X, T* Y, Context* context);
template <typename T, class Context>
TORCH_API void Log(int N, const T* X, T* Y, Context* context);
template <typename T, class Context>
TORCH_API void Log1p(int N, const T* X, T* Y, Context* context);
template <typename T, class Context>
TORCH_API void Sin(int N, const T* X, T* Y, Context* context);
template <typename T, class Context>
TORCH_API void Asin(int N, const T* X, T* Y, Context* context);
template <typename T, class Context>
TORCH_API void Cos(int N, const T* X, T* Y, Context* context);
template <typename T, class Context>
TORCH_API void Acos(int N, const T* X, T* Y, Context* context);
template <typename T, class Context>
TORCH_API void Tan(int N, const T* X, T* Y, Context* context);
template <typename T, class Context>
TORCH_API void Atan(int N, const T* X, T* Y, Context* context);
template <typename T, class Context>
TORCH_API void Sinh(int N, const T* X, T* Y, Context* context);
template <typename T, class Context>
TORCH_API void Cosh(int N, const T* X, T* Y, Context* context);
template <typename T, class Context>
TORCH_API void SinCos(int N, const T* X, T* S, T* C, Context* context);
template <typename T, class Context>
TORCH_API void Tanh(int N, const T* X, T* Y, Context* context);
template <typename T, class Context>
TORCH_API void Abs(int N, const T* X, T* Y, Context* context);
template <typename T, class Context>
TORCH_API void Sqr(int N, const T* X, T* Y, Context* context);
template <typename T, class Context>
TORCH_API void Sqrt(int N, const T* X, T* Y, Context* context);
template <typename T, class Context>
TORCH_API void Rsqrt(int N, const T* X, T* Y, Context* context);
template <typename T, class Context>
TORCH_API void Cube(int N, const T* X, T* Y, Context* context);
template <typename T, class Context>
TORCH_API void Cbrt(int N, const T* X, T* Y, Context* context);
template <typename T, class Context>
TORCH_API void Neg(int N, const T* X, T* Y, Context* context);
template <typename T, class Context>
TORCH_API void Sign(int N, const T* X, T* Y, Context* context);
template <typename T, class Context>
TORCH_API void Not(int N, const T* X, T* Y, Context* context);
template <typename T, class Context>
TORCH_API void Powx(int N, const T* A, const T b, T* Y, Context* context);
template <typename T, class Context>
TORCH_API void Inv(int N, const T* X, T* Y, Context* context);
template <typename T, class Context>
TORCH_API void Erf(int N, const T* X, T* Y, Context* context);
template <typename T, class Context>
TORCH_API void CdfNorm(int N, const T* X, T* Y, Context* context);
template <typename T, class Context>
TORCH_API void Set(std::int64_t N, T alpha, T* X, Context* context);
template <typename TAlpha, typename TData, class Context>
TORCH_API void
Scale(std::int64_t N, TAlpha alpha, const TData* X, TData* Y, Context* context);
// Different from the Scale function above, if alpha is passed in as a pointer,
// we will assume that it lives on the Context device, for example on GPU.
template <typename TAlpha, typename TData, class Context>
TORCH_API void Scale(
std::int64_t N,
const TAlpha* alpha,
const TData* X,
TData* Y,
Context* context);
template <typename T, class Context>
TORCH_API void Add(int N, const T* A, const T* B, T* C, Context* context);
template <typename T, class Context>
TORCH_API void Sub(int N, const T* A, const T* B, T* C, Context* context);
template <typename T, class Context>
TORCH_API void Mul(int N, const T* A, const T* B, T* C, Context* context);
template <typename T, class Context>
TORCH_API void Div(int N, const T* A, const T* B, T* C, Context* context);
template <typename T, class Context>
TORCH_API void Min(int N, const T* A, const T* B, T* C, Context* context);
template <typename T, class Context>
TORCH_API void Max(int N, const T* A, const T* B, T* C, Context* context);
template <typename T, class Context>
TORCH_API void And(int N, const T* A, const T* B, T* C, Context* context);
template <typename T, class Context>
TORCH_API void Or(int N, const T* A, const T* B, T* C, Context* context);
template <typename T, class Context>
TORCH_API void Xor(int N, const T* A, const T* B, T* C, Context* context);
template <typename T, class Context>
TORCH_API void
BitwiseAnd(int N, const T* A, const T* B, T* C, Context* context);
template <typename T, class Context>
TORCH_API void
BitwiseOr(int N, const T* A, const T* B, T* C, Context* context);
template <typename T, class Context>
TORCH_API void
BitwiseXor(int N, const T* A, const T* B, T* C, Context* context);
template <typename T, class Context>
TORCH_API void EQ(int N, const T* A, const T* B, bool* C, Context* context);
template <typename T, class Context>
TORCH_API void NE(int N, const T* A, const T* B, bool* C, Context* context);
template <typename T, class Context>
TORCH_API void LT(int N, const T* A, const T* B, bool* C, Context* context);
template <typename T, class Context>
TORCH_API void LE(int N, const T* A, const T* B, bool* C, Context* context);
template <typename T, class Context>
TORCH_API void GT(int N, const T* A, const T* B, bool* C, Context* context);
template <typename T, class Context>
TORCH_API void GE(int N, const T* A, const T* B, bool* C, Context* context);
template <typename TAlpha, typename TData, class Context>
TORCH_API void
Axpy(std::int64_t N, TAlpha alpha, const TData* X, TData* Y, Context* context);
// Different from the Axpy function above, if alpha is passed in
// as a pointer, we will assume that it lives on the Context device,
// for example on GPU.
template <typename TAlpha, typename TData, class Context>
TORCH_API void Axpy(
std::int64_t N,
const TAlpha* alpha,
const TData* X,
TData* Y,
Context* context);
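// In both overloads the computation is the standard axpy update,
//   Y[i] = alpha * X[i] + Y[i]  for i in [0, N),
// differing only in whether alpha is a host value or a Context-device pointer.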
template <typename TAlpha, typename TData, class Context>
TORCH_API void Axpby(
std::int64_t N,
TAlpha alpha,
const TData* X,
TAlpha beta,
TData* Y,
Context* context);
template <typename TAlpha, typename TData, class Context>
TORCH_API void Axpby(
std::int64_t N,
const TAlpha* alpha,
const TData* X,
const TAlpha* beta,
TData* Y,
Context* context);
} // namespace math
} // namespace caffe2
#endif // CAFFE2_UTILS_MATH_ELEMENTWISE_H_
| 6,455 | 39.099379 | 80 | h |

null | pytorch-main/caffe2/utils/math/half_utils.h |
#ifndef CAFFE2_UTILS_MATH_HALF_UTILS_H_
#define CAFFE2_UTILS_MATH_HALF_UTILS_H_
#include "caffe2/core/common.h"
#include "caffe2/core/types.h"
#include "caffe2/utils/conversions.h"
#include "caffe2/utils/math/utils.h"
namespace caffe2 {
namespace math {
namespace utils {
struct HalfAddFunctor {
MATH_UTILS_DECL at::Half operator()(const at::Half a, const at::Half b)
const {
return convert::To<float, at::Half>(
convert::To<at::Half, float>(a) + convert::To<at::Half, float>(b));
}
};
struct HalfSubFunctor {
MATH_UTILS_DECL at::Half operator()(const at::Half a, const at::Half b)
const {
return convert::To<float, at::Half>(
convert::To<at::Half, float>(a) - convert::To<at::Half, float>(b));
}
};
struct HalfMulFunctor {
MATH_UTILS_DECL at::Half operator()(const at::Half a, const at::Half b)
const {
return convert::To<float, at::Half>(
convert::To<at::Half, float>(a) * convert::To<at::Half, float>(b));
}
};
struct HalfDivFunctor {
MATH_UTILS_DECL at::Half operator()(const at::Half a, const at::Half b)
const {
return convert::To<float, at::Half>(
convert::To<at::Half, float>(a) / convert::To<at::Half, float>(b));
}
};
} // namespace utils
} // namespace math
} // namespace caffe2
#endif // CAFFE2_UTILS_MATH_HALF_UTILS_H_
| 1,329 | 25.6 | 75 | h |

null | pytorch-main/caffe2/utils/math/reduce.h |
#ifndef CAFFE2_UTILS_MATH_REDUCE_H_
#define CAFFE2_UTILS_MATH_REDUCE_H_
#include "caffe2/core/common.h"
#include "caffe2/core/types.h"
namespace caffe2 {
class Tensor;
namespace math {
template <typename T, class Context>
TORCH_API void
ReduceMin(const int N, const T* X, T* y, Tensor* scratch_ptr, Context* context);
template <typename T, class Context>
TORCH_API void
ReduceMax(const int N, const T* X, T* y, Tensor* scratch_ptr, Context* context);
// In all of the reduce functions, X_dims and Y_dims should have ndim elements.
// Each dimension of Y_dims must match the corresponding dimension of X_dims or
// must be equal to 1. The dimensions equal to 1 indicate the dimensions of X to
// be reduced.
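// Worked example (illustrative): with ndim = 2, X_dims = {2, 3} and
// Y_dims = {2, 1}, the second dimension is reduced, so ReduceSum would write
// Y[i] = alpha * (X[i][0] + X[i][1] + X[i][2]) for each row i.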
// Y = alpha * ReduceMin(X)
template <typename T, class Context>
TORCH_API void ReduceMin(
const int ndim,
const int* X_dims,
const int* Y_dims,
const T alpha,
const T* X,
T* Y,
Context* context,
bool allow_broadcast_fastpath=false);
// Y = alpha * ReduceMax(X)
template <typename T, class Context>
TORCH_API void ReduceMax(
const int ndim,
const int* X_dims,
const int* Y_dims,
const T alpha,
const T* X,
T* Y,
Context* context,
bool allow_broadcast_fastpath=false);
// Y = alpha * ReduceSum(X)
template <typename T, class Context>
TORCH_API void ReduceSum(
const int ndim,
const int* X_dims,
const int* Y_dims,
const T alpha,
const T* X,
T* Y,
Context* context,
bool allow_broadcast_fastpath=false);
// Y = alpha * ReduceMean(X)
template <typename T, class Context>
TORCH_API void ReduceMean(
const int ndim,
const int* X_dims,
const int* Y_dims,
const T alpha,
const T* X,
T* Y,
Context* context,
bool allow_broadcast_fastpath=false);
// Y = alpha * ReduceL1(X)
template <typename T, class Context>
TORCH_API void ReduceL1(
const int ndim,
const int* X_dims,
const int* Y_dims,
const T alpha,
const T* X,
T* Y,
Context* context,
bool allow_broadcast_fastpath=false);
// Y = alpha * ReduceL2(X)
template <typename T, class Context>
TORCH_API void ReduceL2(
const int ndim,
const int* X_dims,
const int* Y_dims,
const T alpha,
const T* X,
T* Y,
Context* context,
bool allow_broadcast_fastpath=false);
// Computes mean and variance over axes.
template <typename T, class Context>
TORCH_API void Moments(
const int ndims,
const int* X_dims,
const int* Y_dims,
const T* X,
T* mean,
T* var,
Context* context,
bool allow_broadcast_fastpath=false);
} // namespace math
} // namespace caffe2
#endif // CAFFE2_UTILS_MATH_REDUCE_H_
| 2,672 | 22.243478 | 80 | h |

null | pytorch-main/caffe2/utils/math/transpose.h |
#ifndef CAFFE2_UTILS_MATH_TRANSPOSE_H_
#define CAFFE2_UTILS_MATH_TRANSPOSE_H_
#include "caffe2/core/common.h"
#include "caffe2/core/types.h"
namespace caffe2 {
namespace math {
// Transpose tensor X with dims by axes and write the result to tensor Y.
template <typename TIndex, typename TData, class Context>
TORCH_API void Transpose(
int ndim,
const TIndex* dims,
const int* axes,
const TData* X,
TData* Y,
Context* context);
template <typename T, class Context>
TORCH_API void
NCHW2NHWC(int N, int C, int HxW, const T* X, T* Y, Context* context);
template <typename T, class Context>
TORCH_API void
NHWC2NCHW(int N, int C, int HxW, const T* X, T* Y, Context* context);
} // namespace math
} // namespace caffe2
#endif // CAFFE2_UTILS_MATH_TRANSPOSE_H_
| 785 | 23.5625 | 73 | h |

null | pytorch-main/caffe2/utils/math/utils.h |
#ifndef CAFFE2_UTILS_MATH_UTILS_H_
#define CAFFE2_UTILS_MATH_UTILS_H_
#include <vector>
#include "caffe2/core/common.h"
#if defined(__CUDA_ARCH__) || defined(__HIP_DEVICE_COMPILE__) || \
defined(__HIP__) || (defined(__clang__) && defined(__CUDA__))
#define MATH_UTILS_DECL inline __host__ __device__
#else
#define MATH_UTILS_DECL inline
#endif
namespace caffe2 {
namespace math {
namespace utils {
template <typename T>
MATH_UTILS_DECL T Not(const T x) {
return !x;
}
template <typename T>
MATH_UTILS_DECL T Sign(const T x) {
return x > 0 ? T(1) : (x < 0 ? T(-1) : T(0));
}
template <typename T>
MATH_UTILS_DECL T Negate(const T x) {
return -x;
}
template <typename T>
MATH_UTILS_DECL T Inv(const T x) {
return T(1) / x;
}
template <typename T>
MATH_UTILS_DECL T Square(const T x) {
return x * x;
}
template <typename T>
MATH_UTILS_DECL T Cube(const T x) {
return x * x * x;
}
// Uses a cast from int to unsigned int to check whether the value of
// parameter a is greater than or equal to zero and lower than the value of
// parameter b. Parameter b is signed and always positive, so its value is
// always lower than 0x800..., whereas casting a negative value of a converts
// it to a value higher than 0x800...
// The cast thus allows us to use one condition instead of two.
MATH_UTILS_DECL bool IsAGeZeroAndALtB(const int a, const int b) {
return static_cast<unsigned int>(a) < static_cast<unsigned int>(b);
}
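// Worked example (assuming 32-bit int): for a = -1 and b = 8,
// static_cast<unsigned int>(-1) == 0xFFFFFFFF, which is not below 8u, so the
// check fails as desired; for a = 3 it compares 3u < 8u, which passes.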
// Increase the index digits by one based on dims.
template <typename TIndex>
TORCH_API void
IncreaseIndexInDims(int ndim, const TIndex* dims, TIndex* index);
// Get index value from dims and index digits.
template <typename TIndex>
TORCH_API TIndex
GetIndexFromDims(const int n, const TIndex* dims, const TIndex* index);
// Checks if the input permutation is an identity permutation.
TORCH_API bool IsIdentityPermutation(const int n, const int* perm);
TORCH_API bool
CheckReduceDims(const int ndim, const int* X_dims, const int* Y_dims);
TORCH_API bool IsRowwiseReduce(
const int ndim,
const int* X_dims,
const int* Y_dims,
int* rows,
int* cols);
TORCH_API bool IsColwiseReduce(
const int ndim,
const int* X_dims,
const int* Y_dims,
int* rows,
int* cols);
TORCH_API bool IsBothEndsReduce(
const int ndim,
const int* X_dims,
const int* Y_dims,
int* pre,
int* mid,
int* nxt);
// Computes the broadcast binary operation dims.
template <typename TIndex>
TORCH_API void ComputeBroadcastBinaryOpDims(
const int A_ndim,
const TIndex* A_dims,
const int B_ndim,
const TIndex* B_dims,
TIndex* A_broadcast_dims,
TIndex* B_broadcast_dims,
TIndex* C_broadcast_dims);
TORCH_API bool IsRowwiseBroadcastBinaryOp(
const int ndim,
const int* A_dims,
const int* B_dims,
int* rows,
int* cols,
bool* broadcast_1st);
TORCH_API bool IsColwiseBroadcastBinaryOp(
const int ndim,
const int* A_dims,
const int* B_dims,
int* rows,
int* cols,
bool* broadcast_1st);
TORCH_API bool IsBothEndsBroadcastBinaryOp(
const int ndim,
const int* A_dims,
const int* B_dims,
int* pre,
int* mid,
int* nxt,
bool* broadcast_1st);
TORCH_API bool IsBatchTranspose2D(const int ndim, const int* axes);
TORCH_API void ComputeTransposeAxesForReduceOp(
const int num_dims,
const int num_reduce_axes,
const int* reduce_axes,
int* transpose_axes);
TORCH_API void
ComputeTransposeAxesForReduceOp(const int ndim, const int* dims, int* axes);
template <typename TIndex>
TORCH_API void ComputeTransposedStrides(
int ndim,
const TIndex* dims,
const int* axes,
TIndex* strides);
} // namespace utils
// Calculates ceil(a / b). User must be careful to ensure that there
// is no overflow or underflow in the calculation.
template <typename T>
constexpr T DivUp(const T a, const T b) {
return (a + b - T(1)) / b;
}
// Rounds a up to the next highest multiple of b. User must be careful
// to ensure that there is no overflow or underflow in the calculation
// of DivUp.
template <typename T>
constexpr T RoundUp(const T a, const T b) {
return DivUp<T>(a, b) * b;
}
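// Worked examples: DivUp(10, 3) == 4 and RoundUp(10, 3) == 12, while
// DivUp(9, 3) == 3 and RoundUp(9, 3) == 9 (already a multiple).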
// Returns floor(log2(n)) for a positive integer type
template <typename T>
constexpr int IntegerLog2(T n, int p = 0) {
return (n <= 1) ? p : IntegerLog2(n / 2, p + 1);
}
// Returns true if the given integer type is a power-of-2 (positive only)
template <typename T>
constexpr bool IntegerIsPowerOf2(T v) {
  return (v && !(v & (v - 1)));
}
// Returns the next highest power-of-2 for an integer type
template <typename T>
constexpr T IntegerNextHighestPowerOf2(T v) {
  return (IntegerIsPowerOf2(v) ? T(2) * v : (T(1) << (IntegerLog2(v) + 1)));
}
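// Worked examples: IntegerLog2(8) == 3 and IntegerLog2(10) == 3 (floor);
// IntegerNextHighestPowerOf2(5) == 8, and an exact power such as 8 maps to 16.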
} // namespace math
} // namespace caffe2
#endif // CAFFE2_UTILS_MATH_UTILS_H_
| 4,643 | 23.834225 | 76 | h |

null | pytorch-main/caffe2/utils/threadpool/ThreadPool.h |
#ifndef CAFFE2_UTILS_THREADPOOL_H_
#define CAFFE2_UTILS_THREADPOOL_H_
#include "ThreadPoolCommon.h"
#include <atomic>
#include <functional>
#include <memory>
#include <mutex>
#include <vector>
#include "caffe2/core/common.h"
//
// A work-stealing threadpool loosely based off of pthreadpool
//
namespace caffe2 {
struct Task;
class WorkersPool;
constexpr size_t kCacheLineSize = 64;
// A threadpool with the given number of threads.
// NOTE: the kCacheLineSize alignment is present only for cache
// performance, and is not strictly enforced (for example, when
// the object is created on the heap). Thus, in order to avoid
// misaligned intrinsics, no SSE instructions shall be involved in
// the ThreadPool implementation.
// Note: alignas is disabled because some compilers do not deal with
// TORCH_API and alignas annotations at the same time.
class TORCH_API /*alignas(kCacheLineSize)*/ ThreadPool {
public:
static ThreadPool* createThreadPool(int numThreads);
static std::unique_ptr<ThreadPool> defaultThreadPool();
virtual ~ThreadPool() = default;
// Returns the number of threads currently in use
virtual int getNumThreads() const = 0;
virtual void setNumThreads(size_t numThreads) = 0;
// Sets the minimum work size (range) for which to invoke the
// threadpool; work sizes smaller than this will just be run on the
// main (calling) thread
void setMinWorkSize(size_t size) {
std::lock_guard<std::mutex> guard(executionMutex_);
minWorkSize_ = size;
}
size_t getMinWorkSize() const {
return minWorkSize_;
}
virtual void run(const std::function<void(int, size_t)>& fn, size_t range) = 0;
// Run an arbitrary function in a thread-safe manner accessing the Workers
// Pool
virtual void withPool(const std::function<void(WorkersPool*)>& fn) = 0;
protected:
static size_t defaultNumThreads_;
mutable std::mutex executionMutex_;
size_t minWorkSize_;
};
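// Usage sketch (the lambda body and names are illustrative):
//   pool->run(
//       [&](int thread_id, size_t item) { out[item] = f(in[item]); },
//       num_items);
// Ranges smaller than getMinWorkSize() run inline on the calling thread.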
size_t getDefaultNumThreads();
} // namespace caffe2
#endif // CAFFE2_UTILS_THREADPOOL_H_
| 2,014 | 28.202899 | 81 | h |