source
stringlengths
3
92
c
stringlengths
26
2.25M
data.h
/*!
 * Copyright (c) 2015-2022 by Contributors
 * \file data.h
 * \brief The input data structure of xgboost.
 * \author Tianqi Chen
 */
#ifndef XGBOOST_DATA_H_
#define XGBOOST_DATA_H_

#include <dmlc/base.h>
#include <dmlc/data.h>
#include <dmlc/serializer.h>
#include <xgboost/base.h>
#include <xgboost/host_device_vector.h>
#include <xgboost/linalg.h>
#include <xgboost/span.h>
#include <xgboost/string_view.h>

#include <algorithm>
#include <memory>
#include <numeric>
#include <string>
#include <utility>
#include <vector>

namespace xgboost {
// forward declare dmatrix.
class DMatrix;

/*! \brief data type accepted by xgboost interface */
enum class DataType : uint8_t {
  kFloat32 = 1,
  kDouble = 2,
  kUInt32 = 3,
  kUInt64 = 4,
  kStr = 5
};

/*! \brief Kind of a feature: plain numeric or categorical. */
enum class FeatureType : uint8_t { kNumerical = 0, kCategorical = 1 };

/*!
 * \brief Meta information about dataset, always sit in memory.
 */
class MetaInfo {
 public:
  /*! \brief number of data fields in MetaInfo */
  static constexpr uint64_t kNumField = 12;

  /*! \brief number of rows in the data */
  uint64_t num_row_{0};  // NOLINT
  /*! \brief number of columns in the data */
  uint64_t num_col_{0};  // NOLINT
  /*! \brief number of nonzero entries in the data */
  uint64_t num_nonzero_{0};  // NOLINT
  /*! \brief label of each instance */
  linalg::Tensor<float, 2> labels;
  /*!
   * \brief the index of begin and end of a group
   *  needed when the learning task is ranking.
   */
  std::vector<bst_group_t> group_ptr_;  // NOLINT
  /*! \brief weights of each instance, optional */
  HostDeviceVector<bst_float> weights_;  // NOLINT
  /*!
   * \brief initialized margins,
   * if specified, xgboost will start from this init margin
   * can be used to specify initial prediction to boost from.
   */
  linalg::Tensor<float, 2> base_margin_;  // NOLINT
  /*!
   * \brief lower bound of the label, to be used for survival analysis (censored regression)
   */
  HostDeviceVector<bst_float> labels_lower_bound_;  // NOLINT
  /*!
   * \brief upper bound of the label, to be used for survival analysis (censored regression)
   */
  HostDeviceVector<bst_float> labels_upper_bound_;  // NOLINT

  /*!
   * \brief Name of type for each feature provided by users. Eg. "int"/"float"/"i"/"q"
   */
  std::vector<std::string> feature_type_names;
  /*!
   * \brief Name for each feature.
   */
  std::vector<std::string> feature_names;
  /*
   * \brief Type of each feature.  Automatically set when feature_type_names is specified.
   */
  HostDeviceVector<FeatureType> feature_types;
  /*
   * \brief Weight of each feature, used to define the probability of each feature being
   *        selected when using column sampling.
   */
  HostDeviceVector<float> feature_weights;

  /*! \brief default constructor */
  MetaInfo() = default;
  MetaInfo(MetaInfo&& that) = default;
  MetaInfo& operator=(MetaInfo&& that) = default;
  // Copy assignment is deliberately disabled; MetaInfo is move-only to avoid
  // accidental deep copies of potentially large host/device buffers.
  MetaInfo& operator=(MetaInfo const& that) = delete;

  /*!
   * \brief Validate all metainfo.
   */
  void Validate(int32_t device) const;

  /*! \brief Slice the meta info, keeping only the rows listed in \p ridxs. */
  MetaInfo Slice(common::Span<int32_t const> ridxs) const;
  /*!
   * \brief Get weight of each instances.
   * \param i Instance index.
   * \return The weight.
   */
  inline bst_float GetWeight(size_t i) const {
    return weights_.Size() != 0 ? weights_.HostVector()[i] : 1.0f;
  }
  /*! \brief get sorted indexes (argsort) of labels by absolute value (used by cox loss) */
  inline const std::vector<size_t>& LabelAbsSort() const {
    if (label_order_cache_.size() == labels.Size()) {
      return label_order_cache_;
    }
    label_order_cache_.resize(labels.Size());
    std::iota(label_order_cache_.begin(), label_order_cache_.end(), 0);
    const auto& l = labels.Data()->HostVector();
    XGBOOST_PARALLEL_SORT(label_order_cache_.begin(), label_order_cache_.end(),
                          [&l](size_t i1, size_t i2) {return std::abs(l[i1]) < std::abs(l[i2]);});
    return label_order_cache_;
  }
  /*! \brief clear all the information */
  void Clear();
  /*!
   * \brief Load the Meta info from binary stream.
   * \param fi The input stream
   */
  void LoadBinary(dmlc::Stream* fi);
  /*!
   * \brief Save the Meta info to binary stream
   * \param fo The output stream.
   */
  void SaveBinary(dmlc::Stream* fo) const;
  /*!
   * \brief Set information in the meta info.
   * \param key The key of the information.
   * \param dptr The data pointer of the source array.
   * \param dtype The type of the source data.
   * \param num Number of elements in the source array.
   */
  void SetInfo(const char* key, const void* dptr, DataType dtype, size_t num);
  /*!
   * \brief Set information in the meta info with array interface.
   * \param key The key of the information.
   * \param interface_str String representation of json format array interface.
   */
  void SetInfo(StringView key, StringView interface_str);

  void GetInfo(char const* key, bst_ulong* out_len, DataType dtype,
               const void** out_dptr) const;

  void SetFeatureInfo(const char *key, const char **info, const bst_ulong size);
  void GetFeatureInfo(const char *field, std::vector<std::string>* out_str_vecs) const;

  /*
   * \brief Extend with other MetaInfo.
   *
   * \param that The other MetaInfo object.
   *
   * \param accumulate_rows Whether rows need to be accumulated in this function.  If
   *                        client code knows number of rows in advance, set this
   *                        parameter to false.
   * \param check_column    Whether the extend method should check the consistency of
   *                        columns.
   */
  void Extend(MetaInfo const& that, bool accumulate_rows, bool check_column);

 private:
  // Dispatch targets for SetInfo, chosen by where the array-interface data lives.
  void SetInfoFromHost(StringView key, Json arr);
  void SetInfoFromCUDA(StringView key, Json arr);

  /*! \brief argsort of labels */
  mutable std::vector<size_t> label_order_cache_;
};

/*! \brief Element from a sparse vector */
struct Entry {
  /*! \brief feature index */
  bst_feature_t index;
  /*! \brief feature value */
  bst_float fvalue;
  /*! \brief default constructor */
  Entry() = default;
  /*!
   * \brief constructor with index and value
   * \param index The feature or row index.
   * \param fvalue The feature value.
   */
  XGBOOST_DEVICE Entry(bst_feature_t index, bst_float fvalue) : index(index), fvalue(fvalue) {}
  /*! \brief reversely compare feature values */
  inline static bool CmpValue(const Entry& a, const Entry& b) {
    return a.fvalue < b.fvalue;
  }
  inline bool operator==(const Entry& other) const {
    return (this->index == other.index && this->fvalue == other.fvalue);
  }
};

/*!
 * \brief Parameters for constructing batches.
 */
struct BatchParam {
  /*! \brief The GPU device to use. */
  int gpu_id {-1};
  /*! \brief Maximum number of bins per feature for histograms. */
  int max_bin{0};
  /*! \brief Hessian, used for sketching with future approx implementation. */
  common::Span<float> hess;
  /*! \brief Whether should DMatrix regenerate the batch.  Only used for GHistIndex. */
  bool regen {false};

  BatchParam() = default;
  BatchParam(int32_t device, int32_t max_bin)
      : gpu_id{device}, max_bin{max_bin} {}
  /**
   * \brief Get batch with sketch weighted by hessian.  The batch will be regenerated if
   *        the span is changed, so caller should keep the span for each iteration.
   */
  BatchParam(int32_t device, int32_t max_bin, common::Span<float> hessian,
             bool regenerate = false)
      : gpu_id{device}, max_bin{max_bin}, hess{hessian}, regen{regenerate} {}

  // Two parameters compare unequal when they would produce different batches.
  // The hessian span is compared by data pointer (identity), not by content.
  bool operator!=(const BatchParam& other) const {
    if (hess.empty() && other.hess.empty()) {
      return gpu_id != other.gpu_id || max_bin != other.max_bin;
    }
    return gpu_id != other.gpu_id || max_bin != other.max_bin ||
           hess.data() != other.hess.data();
  }
};

/*! \brief Non-owning host-side view of a SparsePage (CSR). */
struct HostSparsePageView {
  using Inst = common::Span<Entry const>;

  common::Span<bst_row_t const> offset;
  common::Span<Entry const> data;

  /*! \brief Return the instance (row) at index \p i as a span of entries. */
  Inst operator[](size_t i) const {
    auto size = *(offset.data() + i + 1) - *(offset.data() + i);
    return {data.data() + *(offset.data() + i),
            static_cast<Inst::index_type>(size)};
  }

  // CSR offset array has n_rows + 1 elements; an empty offset means no rows.
  size_t Size() const { return offset.size() == 0 ? 0 : offset.size() - 1; }
};

/*!
 * \brief In-memory storage unit of sparse batch, stored in CSR format.
 */
class SparsePage {
 public:
  // Offset for each row.
  HostDeviceVector<bst_row_t> offset;
  /*! \brief the data of the segments */
  HostDeviceVector<Entry> data;

  size_t base_rowid {0};

  /*! \brief an instance of sparse vector in the batch */
  using Inst = common::Span<Entry const>;

  HostSparsePageView GetView() const {
    return {offset.ConstHostSpan(), data.ConstHostSpan()};
  }

  /*! \brief constructor */
  SparsePage() {
    this->Clear();
  }

  /*! \return Number of instances in the page. */
  inline size_t Size() const {
    return offset.Size() == 0 ? 0 : offset.Size() - 1;
  }

  /*! \return estimation of memory cost of this page */
  inline size_t MemCostBytes() const {
    return offset.Size() * sizeof(size_t) + data.Size() * sizeof(Entry);
  }

  /*! \brief clear the page */
  inline void Clear() {
    base_rowid = 0;
    auto& offset_vec = offset.HostVector();
    offset_vec.clear();
    // CSR invariant: offsets always start with a leading zero.
    offset_vec.push_back(0);
    data.HostVector().clear();
  }

  /*! \brief Set the base row id for this page. */
  inline void SetBaseRowId(size_t row_id) {
    base_rowid = row_id;
  }

  SparsePage GetTranspose(int num_columns, int32_t n_threads) const;

  // Sort the entries of every segment by value; used on column pages.
  void SortRows() {
    auto ncol = static_cast<bst_omp_uint>(this->Size());
    dmlc::OMPException exc;
#pragma omp parallel for schedule(dynamic, 1)
    for (bst_omp_uint i = 0; i < ncol; ++i) {
      exc.Run([&]() {
        if (this->offset.HostVector()[i] < this->offset.HostVector()[i + 1]) {
          std::sort(
              this->data.HostVector().begin() + this->offset.HostVector()[i],
              this->data.HostVector().begin() + this->offset.HostVector()[i + 1],
              Entry::CmpValue);
        }
      });
    }
    exc.Rethrow();
  }

  /**
   * \brief Pushes external data batch onto this page
   *
   * \tparam  AdapterBatchT
   * \param batch
   * \param missing
   * \param nthread
   *
   * \return  The maximum number of columns encountered in this input batch.  Useful when
   *          pushing many adapter batches to work out the total number of columns.
   */
  template <typename AdapterBatchT>
  uint64_t Push(const AdapterBatchT& batch, float missing, int nthread);

  /*!
   * \brief Push a sparse page
   * \param batch the row page
   */
  void Push(const SparsePage &batch);
  /*!
   * \brief Push a SparsePage stored in CSC format
   * \param batch The row batch to be pushed
   */
  void PushCSC(const SparsePage& batch);
};

class CSCPage: public SparsePage {
 public:
  CSCPage() : SparsePage() {}
  explicit CSCPage(SparsePage page) : SparsePage(std::move(page)) {}
};

class SortedCSCPage : public SparsePage {
 public:
  SortedCSCPage() : SparsePage() {}
  explicit SortedCSCPage(SparsePage page) : SparsePage(std::move(page)) {}
};

class EllpackPageImpl;
/*!
 * \brief A page stored in ELLPACK format.
 *
 * This class uses the PImpl idiom (https://en.cppreference.com/w/cpp/language/pimpl) to avoid
 * including CUDA-specific implementation details in the header.
 */
class EllpackPage {
 public:
  /*!
   * \brief Default constructor.
   *
   * This is used in the external memory case.  An empty ELLPACK page is constructed with its
   * content set later by the reader.
   */
  EllpackPage();

  /*!
   * \brief Constructor from an existing DMatrix.
   *
   * This is used in the in-memory case.  The ELLPACK page is constructed from an existing DMatrix
   * in CSR format.
   */
  explicit EllpackPage(DMatrix* dmat, const BatchParam& param);

  /*! \brief Destructor. */
  ~EllpackPage();

  EllpackPage(EllpackPage&& that);

  /*! \return Number of instances in the page. */
  size_t Size() const;

  /*! \brief Set the base row id for this page. */
  void SetBaseRowId(size_t row_id);

  const EllpackPageImpl* Impl() const { return impl_.get(); }
  EllpackPageImpl* Impl() { return impl_.get(); }

 private:
  std::unique_ptr<EllpackPageImpl> impl_;
};

class GHistIndexMatrix;

template<typename T>
class BatchIteratorImpl {
 public:
  using iterator_category = std::forward_iterator_tag;  // NOLINT
  virtual ~BatchIteratorImpl() = default;
  virtual const T& operator*() const = 0;
  virtual BatchIteratorImpl& operator++() = 0;
  virtual bool AtEnd() const = 0;
  virtual std::shared_ptr<T const> Page() const = 0;
};

template<typename T>
class BatchIterator {
 public:
  using iterator_category = std::forward_iterator_tag;  // NOLINT
  explicit BatchIterator(BatchIteratorImpl<T>* impl) { impl_.reset(impl); }
  explicit BatchIterator(std::shared_ptr<BatchIteratorImpl<T>> impl) { impl_ = impl; }

  BatchIterator &operator++() {
    CHECK(impl_ != nullptr);
    ++(*impl_);
    return *this;
  }

  const T& operator*() const {
    CHECK(impl_ != nullptr);
    return *(*impl_);
  }

  // Note: comparison ignores the right-hand side; it only checks whether this
  // iterator has reached the end, which is sufficient for range-based for loops.
  bool operator!=(const BatchIterator&) const {
    CHECK(impl_ != nullptr);
    return !impl_->AtEnd();
  }

  bool AtEnd() const {
    CHECK(impl_ != nullptr);
    return impl_->AtEnd();
  }

  std::shared_ptr<T const> Page() const {
    return impl_->Page();
  }

 private:
  std::shared_ptr<BatchIteratorImpl<T>> impl_;
};

template<typename T>
class BatchSet {
 public:
  explicit BatchSet(BatchIterator<T> begin_iter) : begin_iter_(std::move(begin_iter)) {}
  BatchIterator<T> begin() { return begin_iter_; }  // NOLINT
  BatchIterator<T> end() { return BatchIterator<T>(nullptr); }  // NOLINT

 private:
  BatchIterator<T> begin_iter_;
};

struct XGBAPIThreadLocalEntry;

/*!
 * \brief Internal data structure used by XGBoost during training.
 */
class DMatrix {
 public:
  /*! \brief default constructor */
  DMatrix()  = default;
  /*! \brief meta information of the dataset */
  virtual MetaInfo& Info() = 0;
  virtual void SetInfo(const char *key, const void *dptr, DataType dtype,
                       size_t num) {
    this->Info().SetInfo(key, dptr, dtype, num);
  }
  virtual void SetInfo(const char* key, std::string const& interface_str) {
    this->Info().SetInfo(key, StringView{interface_str});
  }
  /*! \brief meta information of the dataset */
  virtual const MetaInfo& Info() const = 0;

  /*! \brief Get thread local memory for returning data from DMatrix. */
  XGBAPIThreadLocalEntry& GetThreadLocal() const;
  /**
   * \brief Gets batches.  Use range based for loop over BatchSet to access individual batches.
   */
  template<typename T>
  BatchSet<T> GetBatches(const BatchParam& param = {});
  template <typename T>
  bool PageExists() const;

  // the following are column meta data, should be able to answer them fast.
  /*! \return Whether the data columns single column block. */
  virtual bool SingleColBlock() const = 0;
  /*! \brief virtual destructor */
  virtual ~DMatrix();

  /*! \brief Whether the matrix is dense. */
  bool IsDense() const {
    return Info().num_nonzero_ == Info().num_row_ * Info().num_col_;
  }

  /*!
   * \brief Load DMatrix from URI.
   * \param uri The URI of input.
   * \param silent Whether print information during loading.
   * \param load_row_split Flag to read in part of rows, divided among the workers in distributed
   *        mode.
   * \param file_format The format type of the file, used for dmlc::Parser::Create.
   *        By default "auto" will be able to load in both local binary file.
   * \param page_size Page size for external memory.
   * \return The created DMatrix.
   */
  static DMatrix* Load(const std::string& uri,
                       bool silent,
                       bool load_row_split,
                       const std::string& file_format = "auto");

  /**
   * \brief Creates a new DMatrix from an external data adapter.
   *
   * \tparam  AdapterT  Type of the adapter.
   * \param [in,out]  adapter       View onto an external data.
   * \param           missing       Values to count as missing.
   * \param           nthread       Number of threads for construction.
   * \param           cache_prefix  (Optional) The cache prefix for external memory.
   * \param           page_size     (Optional) Size of the page.
   *
   * \return  a Created DMatrix.
   */
  template <typename AdapterT>
  static DMatrix* Create(AdapterT* adapter, float missing, int nthread,
                         const std::string& cache_prefix = "");

  /**
   * \brief Create a new Quantile based DMatrix used for histogram based algorithm.
   *
   * \tparam  DataIterHandle         External iterator type, defined in C API.
   * \tparam  DMatrixHandle          DMatrix handle, defined in C API.
   * \tparam  DataIterResetCallback  Callback for reset, prototype defined in C API.
   * \tparam  XGDMatrixCallbackNext  Callback for next, prototype defined in C API.
   *
   * \param iter    External data iterator
   * \param proxy   A handle to ProxyDMatrix
   * \param reset   Callback for reset
   * \param next    Callback for next
   * \param missing Value that should be treated as missing.
   * \param nthread number of threads used for initialization.
   * \param max_bin Maximum number of bins.
   *
   * \return A created quantile based DMatrix.
   */
  template <typename DataIterHandle, typename DMatrixHandle,
            typename DataIterResetCallback, typename XGDMatrixCallbackNext>
  static DMatrix *Create(DataIterHandle iter, DMatrixHandle proxy,
                         DataIterResetCallback *reset,
                         XGDMatrixCallbackNext *next, float missing,
                         int nthread, int max_bin);

  /**
   * \brief Create an external memory DMatrix with callbacks.
   *
   * \tparam  DataIterHandle         External iterator type, defined in C API.
   * \tparam  DMatrixHandle          DMatrix handle, defined in C API.
   * \tparam  DataIterResetCallback  Callback for reset, prototype defined in C API.
   * \tparam  XGDMatrixCallbackNext  Callback for next, prototype defined in C API.
   *
   * \param iter    External data iterator
   * \param proxy   A handle to ProxyDMatrix
   * \param reset   Callback for reset
   * \param next    Callback for next
   * \param missing Value that should be treated as missing.
   * \param nthread number of threads used for initialization.
   * \param cache   Prefix of cache file path.
   *
   * \return A created external memory DMatrix.
   */
  template <typename DataIterHandle, typename DMatrixHandle,
            typename DataIterResetCallback, typename XGDMatrixCallbackNext>
  static DMatrix *Create(DataIterHandle iter, DMatrixHandle proxy,
                         DataIterResetCallback *reset,
                         XGDMatrixCallbackNext *next, float missing,
                         int32_t nthread, std::string cache);

  virtual DMatrix *Slice(common::Span<int32_t const> ridxs) = 0;
  /*! \brief Number of rows per page in external memory.  Approximately 100MB per page for
   *  dataset with 100 features. */
  static const size_t kPageSize = 32UL << 12UL;

 protected:
  virtual BatchSet<SparsePage> GetRowBatches() = 0;
  virtual BatchSet<CSCPage> GetColumnBatches() = 0;
  virtual BatchSet<SortedCSCPage> GetSortedColumnBatches() = 0;
  virtual BatchSet<EllpackPage> GetEllpackBatches(const BatchParam& param) = 0;
  virtual BatchSet<GHistIndexMatrix> GetGradientIndex(const BatchParam& param) = 0;

  virtual bool EllpackExists() const = 0;
  virtual bool SparsePageExists() const = 0;
};

template<>
inline BatchSet<SparsePage> DMatrix::GetBatches(const BatchParam&) {
  return GetRowBatches();
}

template<>
inline bool DMatrix::PageExists<EllpackPage>() const {
  return this->EllpackExists();
}

template<>
inline bool DMatrix::PageExists<SparsePage>() const {
  return this->SparsePageExists();
}

template<>
inline BatchSet<CSCPage> DMatrix::GetBatches(const BatchParam&) {
  return GetColumnBatches();
}

template<>
inline BatchSet<SortedCSCPage> DMatrix::GetBatches(const BatchParam&) {
  return GetSortedColumnBatches();
}

template<>
inline BatchSet<EllpackPage> DMatrix::GetBatches(const BatchParam& param) {
  return GetEllpackBatches(param);
}

template<>
inline BatchSet<GHistIndexMatrix> DMatrix::GetBatches(const BatchParam& param) {
  return GetGradientIndex(param);
}
}  // namespace xgboost

namespace dmlc {
DMLC_DECLARE_TRAITS(is_pod, xgboost::Entry, true);

namespace serializer {

// Entry is serialized field-by-field so the on-disk layout is independent of
// struct padding.
template <>
struct Handler<xgboost::Entry> {
  inline static void Write(Stream* strm, const xgboost::Entry& data) {
    strm->Write(data.index);
    strm->Write(data.fvalue);
  }

  inline static bool Read(Stream* strm, xgboost::Entry* data) {
    return strm->Read(&data->index) && strm->Read(&data->fvalue);
  }
};
}  // namespace serializer
}  // namespace dmlc
#endif  // XGBOOST_DATA_H_
3d7pt.c
/* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 8; tile_size[1] = 8; tile_size[2] = 32; tile_size[3] = 32; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt-1; t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k]) + beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] + A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + 
A[t%2][i][j][k + 1]); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays (Causing performance degradation /* for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); */ return 0; }
conv_dw_hcl_x86.c
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * License); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*
 * Copyright (c) 2021, OPEN AI LAB
 * Author: qtang@openailab.com
 */

#include "convolution_param.h"

#include "conv_dw_kernel_x86.h"

#include "graph/tensor.h"
#include "graph/node.h"
#include "graph/graph.h"
#include "utility/sys_port.h"
#include "utility/float.h"
#include "utility/log.h"
#include "device/cpu/cpu_node.h"
#include "device/cpu/cpu_graph.h"
#include "device/cpu/cpu_module.h"

#include <math.h>
#include <string.h>

/* Copy one int8 channel plane into a larger plane, surrounding it with the
 * border value 'v'.
 *
 * input  : in_h x in_w source plane
 * output : out_h x out_w destination plane
 * top    : number of border rows above the copied region
 * left   : number of border columns to the left of the copied region
 * v      : fill value for all border pixels (0 for zero-padding)
 *
 * The bottom/right border sizes are implied by out_h/out_w.
 */
static void pad_int8(int8_t* input, int8_t* output, int in_h, int in_w, int out_h, int out_w, int top, int left,
                     int8_t v)
{
    int8_t* ptr = input;
    int8_t* outptr = output;

    int y = 0;
    /* fill top border rows */
    for (; y < top; y++)
    {
        int x = 0;
        for (; x < out_w; x++)
            outptr[x] = v;
        outptr += out_w;
    }
    /* center rows: left border, payload, right border */
    for (; y < (top + in_h); y++)
    {
        int x = 0;
        for (; x < left; x++)
            outptr[x] = v;
        if (in_w < 12)
        {
            /* short rows: plain loop beats memcpy call overhead */
            for (; x < (left + in_w); x++)
                outptr[x] = ptr[x - left];
        }
        else
        {
            memcpy(outptr + left, ptr, in_w * sizeof(int8_t));
            x += in_w;
        }
        for (; x < out_w; x++)
            outptr[x] = v;
        ptr += in_w;
        outptr += out_w;
    }
    /* fill bottom border rows */
    for (; y < out_h; y++)
    {
        int x = 0;
        for (; x < out_w; x++)
            outptr[x] = v;
        outptr += out_w;
    }
}

/* Shared implementation for the 3x3 depthwise int8 convolution, stride 1 or 2.
 *
 * Pipeline:
 *   1. zero-pad each input channel (pad_h0/pad_w0 on both sides),
 *   2. accumulate the 3x3 dot product per output pixel into int32,
 *   3. add the per-channel int32 bias (if any) and dequantize to fp32
 *      using input_scale * kernel_scales[channel],
 *   4. apply the optional activation (activation == 0: ReLU,
 *      activation > 0: ReLU6, negative: none),
 *   5. requantize to int8 with output_scale, saturating to [-127, 127].
 *
 * Returns 0 on success, -1 on allocation failure.
 */
static int convdw3x3_int8_sse_common(struct tensor* input_tensor, struct tensor* weight_tensor,
                                     struct tensor* bias_tensor, struct tensor* output_tensor,
                                     struct conv_param* param, int num_thread, int stride)
{
    int inch = input_tensor->dims[1];
    int inh = input_tensor->dims[2];
    int inw = input_tensor->dims[3];

    int outch = output_tensor->dims[1];
    int outh = output_tensor->dims[2];
    int outw = output_tensor->dims[3];
    int out_hw = outh * outw;
    int out_size = output_tensor->elem_num;

    int pad_w = param->pad_w0;
    int pad_h = param->pad_h0;

    int32_t* output_int32 = (int32_t*)sys_malloc(out_size * sizeof(int32_t));
    float* output_fp32 = (float*)sys_malloc(out_size * sizeof(float));
    if (!output_int32 || !output_fp32)
    {
        if (output_int32)
            sys_free(output_int32);
        if (output_fp32)
            sys_free(output_fp32);
        return -1;
    }
    memset(output_int32, 0, out_size * sizeof(int32_t));

    int8_t* output_int8 = output_tensor->data;
    int8_t* input_int8 = input_tensor->data;
    int32_t* bias_int32 = NULL;
    if (bias_tensor)
        bias_int32 = bias_tensor->data;

    /* quantization scales: per-tensor for input/output, per-channel weights */
    float input_scale = input_tensor->scale;
    float* kernel_scales = weight_tensor->scale_list;
    float output_scale = output_tensor->scale;

    const signed char* kernel = weight_tensor->data;

    /* explicit zero padding (skipped when both paddings are zero) */
    int inh_tmp = inh + pad_h + pad_h;
    int inw_tmp = inw + pad_w + pad_w;
    int8_t* input_tmp = NULL;
    if (inh_tmp == inh && inw_tmp == inw)
        input_tmp = input_int8;
    else
    {
        input_tmp = (int8_t*)sys_malloc(inh_tmp * inw_tmp * inch * sizeof(int8_t));
        if (!input_tmp)
        {
            sys_free(output_int32);
            sys_free(output_fp32);
            return -1;
        }
#pragma omp parallel for num_threads(num_thread)
        for (int g = 0; g < inch; g++)
        {
            int8_t* pad_in = input_int8 + g * inh * inw;
            int8_t* pad_out = input_tmp + g * inh_tmp * inw_tmp;
            pad_int8(pad_in, pad_out, inh, inw, inh_tmp, inw_tmp, pad_h, pad_w, 0);
        }
    }

    /* pointer advance from the end of one output row to the start of the
     * next input row: stride * (inw_tmp - outw)
     * (equals 2 for stride 1 and 2*inw_tmp - 2*outw for stride 2, matching
     * the 3x3 output-shape relation outw = (inw_tmp - 3) / stride + 1) */
    int tailstep = stride * (inw_tmp - outw);

    /* 3x3 depthwise accumulation, one output channel per task */
#pragma omp parallel for num_threads(num_thread)
    for (int p = 0; p < outch; p++)
    {
        int32_t* outptr0 = output_int32 + p * out_hw;
        const int8_t* kernel0 = (const int8_t*)kernel + p * 9;

        int8_t* img0 = input_tmp + p * inw_tmp * inh_tmp;
        int8_t* r0 = img0;                /* kernel row 0 */
        int8_t* r1 = img0 + inw_tmp;      /* kernel row 1 */
        int8_t* r2 = img0 + inw_tmp * 2;  /* kernel row 2 */

        for (int i = 0; i < outh; i++)
        {
            for (int remain = outw; remain > 0; remain--)
            {
                int sum0 = 0;
                sum0 += (int)r0[0] * kernel0[0];
                sum0 += (int)r0[1] * kernel0[1];
                sum0 += (int)r0[2] * kernel0[2];
                sum0 += (int)r1[0] * kernel0[3];
                sum0 += (int)r1[1] * kernel0[4];
                sum0 += (int)r1[2] * kernel0[5];
                sum0 += (int)r2[0] * kernel0[6];
                sum0 += (int)r2[1] * kernel0[7];
                sum0 += (int)r2[2] * kernel0[8];

                *outptr0 += sum0;

                r0 += stride;
                r1 += stride;
                r2 += stride;
                outptr0++;
            }
            r0 += tailstep;
            r1 += tailstep;
            r2 += tailstep;
        }
    }

    /* add bias and dequantize int32 -> fp32 */
#pragma omp parallel for num_threads(num_thread)
    for (int i = 0; i < outch; i++)
    {
        for (int j = 0; j < out_hw; j++)
        {
            int output_off = i * out_hw + j;
            int32_t acc = output_int32[output_off];
            if (bias_int32)
                acc += bias_int32[i];
            output_fp32[output_off] = (float)acc * input_scale * kernel_scales[i];
        }
    }

    /* activation: 0 -> ReLU, > 0 -> ReLU6, negative -> none */
    if (param->activation >= 0)
    {
#pragma omp parallel for num_threads(num_thread)
        for (int i = 0; i < outch; i++)
        {
            for (int j = 0; j < out_hw; j++)
            {
                int output_off = i * out_hw + j;
                float v = output_fp32[output_off];
                if (v < 0)
                    v = 0;
                if (param->activation > 0 && v > 6)
                    v = 6;
                output_fp32[output_off] = v;
            }
        }
    }

    /* requantize fp32 -> int8, saturating to [-127, 127] */
#pragma omp parallel for num_threads(num_thread)
    for (int i = 0; i < outch; i++)
    {
        for (int j = 0; j < out_hw; j++)
        {
            int output_off = i * out_hw + j;
            int32_t data_i32 = (int32_t)(round(output_fp32[output_off] / output_scale));
            if (data_i32 > 127)
                data_i32 = 127;
            else if (data_i32 < -127)
                data_i32 = -127;
            output_int8[output_off] = (int8_t)data_i32;
        }
    }

    sys_free(output_int32);
    sys_free(output_fp32);
    if (input_tmp != input_int8)
        sys_free(input_tmp);

    return 0;
}

/* 3x3 stride-1 depthwise int8 convolution. */
static int convdw3x3s1_int8_sse(struct tensor* input_tensor, struct tensor* weight_tensor,
                                struct tensor* bias_tensor, struct tensor* output_tensor, struct conv_param* param,
                                int num_thread)
{
    return convdw3x3_int8_sse_common(input_tensor, weight_tensor, bias_tensor, output_tensor, param, num_thread, 1);
}

/* 3x3 stride-2 depthwise int8 convolution. */
static int convdw3x3s2_int8_sse(struct tensor* input_tensor, struct tensor* weight_tensor,
                                struct tensor* bias_tensor, struct tensor* output_tensor, struct conv_param* param,
                                int num_thread)
{
    return convdw3x3_int8_sse_common(input_tensor, weight_tensor, bias_tensor, output_tensor, param, num_thread, 2);
}

/* Dispatch the int8 depthwise kernel by stride.  Only strides 1 and 2 are
 * implemented (which is what score() accepts); returns -1 otherwise. */
static int conv_dw_run_int8(struct tensor* input_tensor, struct tensor* weight_tensor, struct tensor* bias_tensor,
                            struct tensor* output_tensor, struct conv_param* param, int num_thread)
{
    int ret = -1;
    switch (param->stride_h)
    {
        case 1:
            ret = convdw3x3s1_int8_sse(input_tensor, weight_tensor, bias_tensor, output_tensor, param, num_thread);
            break;
        case 2:
            ret = convdw3x3s2_int8_sse(input_tensor, weight_tensor, bias_tensor, output_tensor, param, num_thread);
            break;
        default:
            TLOG_ERR("Direct Convolution Int8 not support the stride %d\n", param->stride_h);
    }

    return ret;
}

/* node_ops.run: fetch the node's tensors and dispatch to the fp32 or int8
 * depthwise implementation depending on the graph's execution mode. */
static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct node* ir_node = exec_node->ir_node;
    struct graph* ir_graph = ir_node->graph;
    struct tensor* input_tensor;
    struct tensor* weight_tensor;
    struct tensor* bias_tensor = NULL;
    struct tensor* output_tensor = NULL;
    int num_thread = exec_graph->num_thread;
    int cpu_affinity = exec_graph->cpu_affinity;

    /* set the input data and shape again, in case of reshape or dynamic shape */
    input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
    weight_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[1]);
    if (ir_node->input_num > 2)
        bias_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[2]);
    output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);

    struct conv_param* conv_param = (struct conv_param*)ir_node->op.param_mem;
    struct conv_priv_info* conv_priv_info = (struct conv_priv_info*)exec_node->ops_priv;

    int ret = -1;
    if (exec_graph->mode == TENGINE_MODE_FP32)
        ret = conv_dw_run(input_tensor, weight_tensor, bias_tensor, output_tensor, conv_priv_info, conv_param,
                          num_thread, cpu_affinity);
    else if (exec_graph->mode == TENGINE_MODE_INT8)
        ret = conv_dw_run_int8(input_tensor, weight_tensor, bias_tensor, output_tensor, conv_param, num_thread);
    else
    {
        TLOG_ERR("hcl conv run failed\n");
        return -1;
    }

    return ret;
}

/* node_ops.init_node: no per-node state is needed for this kernel. */
static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}

/* node_ops.release_node: nothing was allocated in init_node. */
static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}

/* node_ops.score: claim the node only when it is exactly the depthwise case
 * this file implements — fp32/int8, batch 1, grouped with one channel per
 * group, square 3x3 kernel, symmetric padding, no dilation, stride 1 or 2. */
static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct node* exec_node)
{
    struct conv_param* param = (struct conv_param*)exec_node->op.param_mem;
    struct node* ir_node = exec_node;
    struct graph* ir_graph = ir_node->graph;
    struct tensor* input_tensor;
    struct tensor* output_tensor;

    int group = param->group;
    int kernel_h = param->kernel_h;
    int kernel_w = param->kernel_w;
    int stride_h = param->stride_h;
    int stride_w = param->stride_w;
    int dilation_h = param->dilation_h;
    int dilation_w = param->dilation_w;
    int pad_h0 = param->pad_h0;
    int pad_w0 = param->pad_w0;
    int pad_h1 = param->pad_h1;
    int pad_w1 = param->pad_w1;

    input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
    output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);

    int in_c = input_tensor->dims[1] / group;
    int out_c = output_tensor->dims[1] / group;

    /* todo support uint8 */
    if (!(input_tensor->data_type == TENGINE_DT_FP32 || input_tensor->data_type == TENGINE_DT_INT8))
        return 0;

    if (kernel_h != kernel_w || input_tensor->dims[0] > 1)
        return 0;

    if (param->group > 1 && in_c == 1 && out_c == 1 && pad_h0 == pad_h1 && pad_w0 == pad_w1 && dilation_h == 1
        && dilation_w == 1 && kernel_h == 3 && kernel_w == 3
        && ((stride_h == 1 && stride_w == 1) || (stride_h == 2 && stride_w == 2)))
        return OPS_SCORE_BEST;
    else
        return 0;
}

static struct node_ops hcl_node_ops = {.prerun = NULL,
                                       .run = run,
                                       .reshape = NULL,
                                       .postrun = NULL,
                                       .init_node = init_node,
                                       .release_node = release_node,
                                       .score = score};

int register_conv_dw_hcl_x86_op(void* arg)
{
    return register_builtin_node_ops(OP_CONV, &hcl_node_ops);
}

int unregister_conv_dw_hcl_x86_op(void* arg)
{
    unregister_builtin_node_ops(OP_CONV, &hcl_node_ops);
    return 0;
}
GB_binop__isgt_int32.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__isgt_int32)
// A.*B function (eWiseMult):       GB (_AemultB_08__isgt_int32)
// A.*B function (eWiseMult):       GB (_AemultB_02__isgt_int32)
// A.*B function (eWiseMult):       GB (_AemultB_04__isgt_int32)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__isgt_int32)
// A*D function (colscale):         GB (_AxD__isgt_int32)
// D*A function (rowscale):         GB (_DxB__isgt_int32)
// C+=B function (dense accum):     GB (_Cdense_accumB__isgt_int32)
// C+=b function (dense accum):     GB (_Cdense_accumb__isgt_int32)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__isgt_int32)
// C=scalar+B                       GB (_bind1st__isgt_int32)
// C=scalar+B'                      GB (_bind1st_tran__isgt_int32)
// C=A+scalar                       GB (_bind2nd__isgt_int32)
// C=A'+scalar                      GB (_bind2nd_tran__isgt_int32)

// C type:   int32_t
// A type:   int32_t
// B,b type: int32_t
// BinaryOp: cij = (aij > bij)
// (ISGT: "is greater than" — the result is the int32_t value 1 or 0, not bool)

#define GB_ATYPE \
    int32_t

#define GB_BTYPE \
    int32_t

#define GB_CTYPE \
    int32_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    int32_t aij = GBX (Ax, pA, A_iso)

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    int32_t bij = GBX (Bx, pB, B_iso)

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int32_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x > y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ISGT || GxB_NO_INT32 || GxB_NO_ISGT_INT32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// ISGT is not in that list, so this kernel is compiled out for this operator.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__isgt_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__isgt_int32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__isgt_int32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int32_t
        int32_t bwork = (*((int32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable: the block above always returns (generated code)
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__isgt_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *restrict Cx = (int32_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__isgt_int32)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *restrict Cx = (int32_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__isgt_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspaces freed by GB_FREE_WORK below
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__isgt_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__isgt_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        // (GB_BINOP_FLIP is 0 for ISGT, so this branch is compiled.)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__isgt_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__isgt_int32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__isgt_int32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t   x = (*((int32_t *) x_input)) ;
    int32_t *Bx = (int32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap Bb
        if (!GBB (Bb, p)) continue ;
        int32_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x > bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__isgt_int32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t *Ax = (int32_t *) Ax_input ;
    int32_t   y = (*((int32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap Ab
        if (!GBB (Ab, p)) continue ;
        int32_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij > y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)               \
{                                       \
    int32_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (x > aij) ;               \
}

GrB_Info GB (_bind1st_tran__isgt_int32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t x = (*((const int32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any subsequent use (generated boilerplate)
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int32_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)               \
{                                       \
    int32_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (aij > y) ;               \
}

GrB_Info GB (_bind2nd_tran__isgt_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t y = (*((const int32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
sort.h
/* * (C) Copyright 2013 ECMWF. * * This software is licensed under the terms of the Apache Licence Version 2.0 * which can be obtained at http://www.apache.org/licenses/LICENSE-2.0. * In applying this licence, ECMWF does not waive the privileges and immunities * granted to it by virtue of its status as an intergovernmental organisation * nor does it submit to any jurisdiction. */ #pragma once #include <algorithm> #include <functional> #include <iterator> #include "atlas/library/config.h" #include "atlas/parallel/omp/omp.h" #if ATLAS_HAVE_OMP #include <omp.h> #define ATLAS_HAVE_OMP_SORTING 1 #else #define ATLAS_HAVE_OMP_SORTING 0 #endif // Bug in Cray 8.5 or below results in segmentation fault in atlas_test_omp_sort #if ATLAS_HAVE_OMP_SORTING && defined(_CRAYC) #if _RELEASE <= 8 && _RELEASE_MINOR < 6 #undef ATLAS_HAVE_OMP_SORTING #define ATLAS_HAVE_OMP_SORTING 0 #endif #endif namespace atlas { namespace omp { /** * sort * ==== * * 1) template <typename RandomAccessIterator> * void sort ( RandomAccessIterator first, RandomAccessIterator last ); * * 2) template <typename RandomAccessIterator, typename Compare> * void sort ( RandomAccessIterator first, RandomAccessIterator last, Compare comp ); * * Sort elements in range * Sorts the elements in the range [first,last) into ascending order. * * The elements are compared using operator< for the first version, and comp for the second. * * Equivalent elements are not guaranteed to keep their original relative order (see stable_sort). * * Parameters * ---------- * first, last * Random-access iterators to the initial and final positions of the sequence to be sorted. The range used is [first,last), * which contains all the elements between first and last, including the element pointed by first but not the element pointed by last. * RandomAccessIterator shall point to a type for which swap is properly defined and which is both move-constructible and move-assignable. 
* comp * Binary function that accepts two elements in the range as arguments, and returns a value convertible to bool. * The value returned indicates whether the element passed as first argument is considered to go before the second in the specific strict weak ordering it defines. * The function shall not modify any of its arguments. * This can either be a function pointer or a function object. * * * * merge_blocks * ============ * * 1) template<typename RandomAccessIterator, typename RandomAccessIterator2> * void merge_blocks( RandomAccessIterator first, RandomAccessIterator last, * RandomAccessIterator2 blocks_size_first, RandomAccessIterator2 blocks_size_last ); * * * 1) template<typename RandomAccessIterator, typename RandomAccessIterator2, typename Compare > * void merge_blocks( RandomAccessIterator first, RandomAccessIterator last, * RandomAccessIterator2 blocks_size_first, RandomAccessIterator2 blocks_size_last, * Compare compare ); * * Sort elements in range [first + *blocks_first, first + *blocks_last) using a merge sort * where each block in range [blocks_first,blocks_last) is already sorted. 
* * Parameters * ---------- * first, last * Random-access iterators for bounding the sequence to be sorted * blocks_begin, blocks_end * Random-access iterators that define offsets from paramter "first" of blocks that are already sorted */ namespace detail { #if ATLAS_HAVE_OMP_SORTING template <typename RandomAccessIterator, typename Compare> void merge_sort_recursive(const RandomAccessIterator& iterator, size_t begin, size_t end, Compare compare) { auto size = end - begin; if (size >= 256) { auto mid = begin + size / 2; //#pragma omp taskgroup // --> it would be preferred to use taskgroup and taskyield instead of taskwait, // but this leads to segfaults on Cray (cce/8.5.8) { #if ATLAS_OMP_TASK_UNTIED_SUPPORTED #pragma omp task shared(iterator) untied if (size >= (1 << 15)) #else #pragma omp task shared(iterator) #endif merge_sort_recursive(iterator, begin, mid, compare); #if ATLAS_OMP_TASK_UNTIED_SUPPORTED #pragma omp task shared(iterator) untied if (size >= (1 << 15)) #else #pragma omp task shared(iterator) #endif merge_sort_recursive(iterator, mid, end, compare); //#pragma omp taskyield #pragma omp taskwait } std::inplace_merge(iterator + begin, iterator + mid, iterator + end, compare); } else { std::sort(iterator + begin, iterator + end, compare); } } #endif #if ATLAS_HAVE_OMP_SORTING template <typename RandomAccessIterator, typename Indexable, typename Compare> void merge_blocks_recursive(const RandomAccessIterator& iterator, const Indexable& blocks, size_t blocks_begin, size_t blocks_end, Compare compare) { if (blocks_end <= blocks_begin + 1) { // recursion done, go back out return; } size_t blocks_mid = (blocks_begin + blocks_end) / 2; //#pragma omp taskgroup // --> it would be preferred to use taskgroup and taskyield instead of taskwait, // but this leads to segfaults on Cray (cce/8.5.8) { #pragma omp task shared(iterator, blocks) merge_blocks_recursive(iterator, blocks, blocks_begin, blocks_mid, compare); #pragma omp task shared(iterator, blocks) 
merge_blocks_recursive(iterator, blocks, blocks_mid, blocks_end, compare); //#pragma omp taskyield #pragma omp taskwait } auto begin = iterator + blocks[blocks_begin]; auto mid = iterator + blocks[blocks_mid]; auto end = iterator + blocks[blocks_end]; std::inplace_merge(begin, mid, end, compare); } #endif template <typename RandomAccessIterator, typename Indexable, typename Compare> void merge_blocks_recursive_seq(RandomAccessIterator& iterator, const Indexable& blocks, size_t blocks_begin, size_t blocks_end, Compare compare) { if (blocks_end <= blocks_begin + 1) { // recursion done, go back out return; } size_t blocks_mid = (blocks_begin + blocks_end) / 2; { merge_blocks_recursive_seq(iterator, blocks, blocks_begin, blocks_mid, compare); merge_blocks_recursive_seq(iterator, blocks, blocks_mid, blocks_end, compare); } auto begin = iterator + blocks[blocks_begin]; auto mid = iterator + blocks[blocks_mid]; auto end = iterator + blocks[blocks_end]; std::inplace_merge(begin, mid, end, compare); } } // namespace detail template <typename RandomAccessIterator, typename Compare> void sort(RandomAccessIterator first, RandomAccessIterator last, Compare compare) { #if ATLAS_HAVE_OMP_SORTING if (atlas_omp_get_max_threads() > 1) { #pragma omp parallel #pragma omp single detail::merge_sort_recursive(first, 0, std::distance(first, last), compare); } else { std::sort(first, last, compare); } #else std::sort(first, last, compare); #endif } template <typename RandomAccessIterator> void sort(RandomAccessIterator first, RandomAccessIterator last) { using value_type = typename std::iterator_traits<RandomAccessIterator>::value_type; ::atlas::omp::sort(first, last, std::less<value_type>()); } template <typename RandomAccessIterator, typename RandomAccessIterator2, typename Compare> void merge_blocks(RandomAccessIterator first, RandomAccessIterator last, RandomAccessIterator2 blocks_size_first, RandomAccessIterator2 blocks_size_last, Compare compare) { using size_type = typename 
std::iterator_traits<RandomAccessIterator2>::value_type; size_type nb_blocks = std::distance(blocks_size_first, blocks_size_last); std::vector<size_type> blocks_displs(nb_blocks + 1); blocks_displs[0] = 0; for (size_t i = 1; i < blocks_displs.size(); ++i) { blocks_displs[i] = blocks_displs[i - 1] + blocks_size_first[i - 1]; } #if ATLAS_HAVE_OMP_SORTING if (atlas_omp_get_max_threads() > 1) { #pragma omp parallel #pragma omp single detail::merge_blocks_recursive(first, blocks_displs, 0, nb_blocks, compare); } else { detail::merge_blocks_recursive_seq(first, blocks_displs, 0, nb_blocks, compare); } #else detail::merge_blocks_recursive_seq(first, blocks_displs, 0, nb_blocks, compare); #endif } template <typename RandomAccessIterator, typename RandomAccessIterator2> void merge_blocks(RandomAccessIterator first, RandomAccessIterator last, RandomAccessIterator2 blocks_size_first, RandomAccessIterator2 blocks_size_last) { using value_type = typename std::iterator_traits<RandomAccessIterator>::value_type; ::atlas::omp::merge_blocks(first, last, blocks_size_first, blocks_size_last, std::less<value_type>()); } } // namespace omp } // namespace atlas #undef ATLAS_OMP_TASK_UNTIED_SUPPORTED #undef ATLAS_HAVE_OMP_SORTING
cache.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC AAA CCCC H H EEEEE % % C A A C H H E % % C AAAAA C HHHHH EEE % % C A A C H H E % % CCCC A A CCCC H H EEEEE % % % % % % MagickCore Pixel Cache Methods % % % % Software Design % % Cristy % % July 1999 % % % % % % Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "magick/studio.h" #include "magick/blob.h" #include "magick/blob-private.h" #include "magick/cache.h" #include "magick/cache-private.h" #include "magick/color-private.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/composite-private.h" #include "magick/distribute-cache-private.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/geometry.h" #include "magick/list.h" #include "magick/log.h" #include "magick/magick.h" #include "magick/memory_.h" #include "magick/memory-private.h" #include "magick/nt-base-private.h" #include "magick/option.h" #include "magick/pixel.h" #include "magick/pixel-accessor.h" #include "magick/pixel-private.h" #include "magick/policy.h" #include "magick/quantum.h" #include "magick/random_.h" #include "magick/registry.h" #include "magick/resource_.h" #include "magick/semaphore.h" #include "magick/splay-tree.h" #include "magick/string_.h" #include "magick/string-private.h" #include "magick/thread-private.h" #include "magick/utility.h" #include "magick/utility-private.h" #if defined(MAGICKCORE_ZLIB_DELEGATE) #include "zlib.h" #endif /* Define declarations. */ #define CacheTick(offset,extent) QuantumTick((MagickOffsetType) offset,extent) #define IsFileDescriptorLimitExceeded() (GetMagickResource(FileResource) > \ GetMagickResourceLimit(FileResource) ? MagickTrue : MagickFalse) /* Typedef declarations. */ typedef struct _MagickModulo { ssize_t quotient, remainder; } MagickModulo; /* Forward declarations. 
*/ #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif static Cache GetImagePixelCache(Image *,const MagickBooleanType,ExceptionInfo *) magick_hot_spot; static const IndexPacket *GetVirtualIndexesFromCache(const Image *); static const PixelPacket *GetVirtualPixelCache(const Image *,const VirtualPixelMethod,const ssize_t, const ssize_t,const size_t,const size_t,ExceptionInfo *), *GetVirtualPixelsCache(const Image *); static MagickBooleanType GetOneAuthenticPixelFromCache(Image *,const ssize_t,const ssize_t, PixelPacket *,ExceptionInfo *), GetOneVirtualPixelFromCache(const Image *,const VirtualPixelMethod, const ssize_t,const ssize_t,PixelPacket *,ExceptionInfo *), OpenPixelCache(Image *,const MapMode,ExceptionInfo *), OpenPixelCacheOnDisk(CacheInfo *,const MapMode), ReadPixelCacheIndexes(CacheInfo *magick_restrict,NexusInfo *magick_restrict, ExceptionInfo *), ReadPixelCachePixels(CacheInfo *magick_restrict,NexusInfo *magick_restrict, ExceptionInfo *), SyncAuthenticPixelsCache(Image *,ExceptionInfo *), WritePixelCacheIndexes(CacheInfo *,NexusInfo *magick_restrict, ExceptionInfo *), WritePixelCachePixels(CacheInfo *,NexusInfo *magick_restrict, ExceptionInfo *); static PixelPacket *GetAuthenticPixelsCache(Image *,const ssize_t,const ssize_t,const size_t, const size_t,ExceptionInfo *), *QueueAuthenticPixelsCache(Image *,const ssize_t,const ssize_t,const size_t, const size_t,ExceptionInfo *), *SetPixelCacheNexusPixels(const CacheInfo *,const MapMode, const RectangleInfo *,const MagickBooleanType,NexusInfo *,ExceptionInfo *) magick_hot_spot; #if defined(MAGICKCORE_OPENCL_SUPPORT) static void CopyOpenCLBuffer(CacheInfo *magick_restrict); #endif #if defined(__cplusplus) || defined(c_plusplus) } #endif /* Global declarations. 
*/
/* Serializes global cache bookkeeping (component init/terminus, disk epoch). */
static SemaphoreInfo
  *cache_semaphore = (SemaphoreInfo *) NULL;

/* Tri-state: -1 = undetermined, otherwise cached anonymous-memory policy. */
static ssize_t
  cache_anonymous_memory = (-1);

static time_t
  cache_epoch = 0;

#if defined(MAGICKCORE_OPENCL_SUPPORT)
/*
  Release every OpenCL event, the event list, its semaphore, and the device
  buffer held by `info`, then free `info` itself.  Always returns NULL so the
  caller can reset its pointer in one expression.
*/
static inline OpenCLCacheInfo *RelinquishOpenCLCacheInfo(MagickCLEnv clEnv,
  OpenCLCacheInfo *info)
{
  ssize_t
    i;

  for (i=0; i < (ssize_t) info->event_count; i++)
    clEnv->library->clReleaseEvent(info->events[i]);
  info->events=(cl_event *) RelinquishMagickMemory(info->events);
  DestroySemaphoreInfo(&info->events_semaphore);
  if (info->buffer != (cl_mem) NULL)
    {
      clEnv->library->clReleaseMemObject(info->buffer);
      info->buffer=(cl_mem) NULL;
    }
  return((OpenCLCacheInfo *) RelinquishMagickMemory(info));
}

/*
  Event-completion callback: frees the pixel buffer once no queued OpenCL
  command still references it.  If any recorded event is still executing, the
  callback re-arms itself on that event and returns; cleanup happens on the
  final completion.
*/
static void CL_API_CALL RelinquishPixelCachePixelsDelayed(
  cl_event magick_unused(event),cl_int magick_unused(event_command_exec_status),
  void *user_data)
{
  MagickCLEnv
    clEnv;

  OpenCLCacheInfo
    *info;

  PixelPacket
    *pixels;

  ssize_t
    i;

  magick_unreferenced(event);
  magick_unreferenced(event_command_exec_status);
  info=(OpenCLCacheInfo *) user_data;
  clEnv=GetDefaultOpenCLEnv();
  /* Scan newest-to-oldest; the newest event is the most likely still busy. */
  for (i=(ssize_t)info->event_count-1; i >= 0; i--)
  {
    cl_int
      event_status;

    cl_uint
      status;

    status=clEnv->library->clGetEventInfo(info->events[i],
      CL_EVENT_COMMAND_EXECUTION_STATUS,sizeof(cl_int),&event_status,NULL);
    if ((status == CL_SUCCESS) && (event_status > CL_COMPLETE))
      {
        /*
          Still running: defer the release until this event completes.
        */
        clEnv->library->clSetEventCallback(info->events[i],CL_COMPLETE,
          &RelinquishPixelCachePixelsDelayed,info);
        return;
      }
  }
  /* All events complete: release resources; keep `pixels` past info's death. */
  pixels=info->pixels;
  RelinquishMagickResource(MemoryResource,info->length);
  (void) RelinquishOpenCLCacheInfo(clEnv,info);
  (void) RelinquishAlignedMemory(pixels);
}

/*
  Kick off (possibly deferred) release of the cache's OpenCL buffer.  Returns
  MagickTrue if an OpenCL buffer existed and release was initiated.
*/
static MagickBooleanType RelinquishOpenCLBuffer(
  CacheInfo *magick_restrict cache_info)
{
  MagickCLEnv
    clEnv;

  assert(cache_info != (CacheInfo *) NULL);
  if (cache_info->opencl == (OpenCLCacheInfo *) NULL)
    return(MagickFalse);
  RelinquishPixelCachePixelsDelayed((cl_event) NULL,0,cache_info->opencl);
  return(MagickTrue);
}

/*
  Snapshot the event list under the events semaphore so callers can wait on a
  consistent copy.  On allocation failure *event_count is forced to 0 and NULL
  is returned; the caller must free the returned array.
*/
static cl_event *CopyOpenCLEvents(OpenCLCacheInfo *opencl_info,
  cl_uint *event_count)
{
  cl_event
    *events;

  register size_t
    i;

  assert(opencl_info != (OpenCLCacheInfo *) NULL);
  events=(cl_event *) NULL;
  LockSemaphoreInfo(opencl_info->events_semaphore);
  *event_count=opencl_info->event_count;
  if (*event_count > 0)
    {
      events=AcquireQuantumMemory(*event_count,sizeof(*events));
      if (events == (cl_event *) NULL)
        *event_count=0;
      else
        {
          for (i=0; i < opencl_info->event_count; i++)
            events[i]=opencl_info->events[i];
        }
    }
  UnlockSemaphoreInfo(opencl_info->events_semaphore);
  return(events);
}
#endif

#if defined(MAGICKCORE_OPENCL_SUPPORT)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   A d d O p e n C L E v e n t                                               %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AddOpenCLEvent() adds an event to the list of operations the next operation
%  should wait for.
%
%  The format of the AddOpenCLEvent() method is:
%
%      void AddOpenCLEvent(const Image *image,cl_event event)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o event: the event that should be added.
%
*/
/*
  Retain `event` and append it to the cache's wait list.  If the retain fails
  we cannot safely keep a reference, so we block until the event completes
  instead of recording it.
*/
extern MagickPrivate void AddOpenCLEvent(const Image *image,cl_event event)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickCLEnv
    clEnv;

  assert(image != (const Image *) NULL);
  assert(event != (cl_event) NULL);
  cache_info=(CacheInfo *)image->cache;
  assert(cache_info->opencl != (OpenCLCacheInfo *) NULL);
  clEnv=GetDefaultOpenCLEnv();
  if (clEnv->library->clRetainEvent(event) != CL_SUCCESS)
    {
      /* Could not take a reference: fall back to a synchronous wait. */
      clEnv->library->clWaitForEvents(1,&event);
      return;
    }
  LockSemaphoreInfo(cache_info->opencl->events_semaphore);
  if (cache_info->opencl->events == (cl_event *) NULL)
    {
      /* First event: allocate a one-element list. */
      cache_info->opencl->events=AcquireMagickMemory(sizeof(
        *cache_info->opencl->events));
      cache_info->opencl->event_count=1;
    }
  else
    cache_info->opencl->events=ResizeQuantumMemory(cache_info->opencl->events,
      ++cache_info->opencl->event_count,sizeof(*cache_info->opencl->events));
  if (cache_info->opencl->events == (cl_event *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  cache_info->opencl->events[cache_info->opencl->event_count-1]=event;
  UnlockSemaphoreInfo(cache_info->opencl->events_semaphore);
}
#endif

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   A c q u i r e P i x e l C a c h e                                         %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquirePixelCache() acquires a pixel cache.
%
%  The format of the AcquirePixelCache() method is:
%
%      Cache AcquirePixelCache(const size_t number_threads)
%
%  A description of each parameter follows:
%
%    o number_threads: the number of nexus threads.
%
*/
/*
  Allocate and zero-initialize a CacheInfo, size its per-thread nexus array,
  honor the MAGICK_SYNCHRONIZE environment variable and the
  cache:synchronize policy (policy read last, so it takes precedence), and
  return it with a reference count of 1.
*/
MagickExport Cache AcquirePixelCache(const size_t number_threads)
{
  CacheInfo
    *magick_restrict cache_info;

  char
    *value;

  cache_info=(CacheInfo *) AcquireCriticalMemory(sizeof(*cache_info));
  (void) memset(cache_info,0,sizeof(*cache_info));
  cache_info->type=UndefinedCache;
  cache_info->mode=IOMode;
  cache_info->disk_mode=IOMode;
  cache_info->colorspace=sRGBColorspace;
  cache_info->channels=4;
  cache_info->file=(-1);
  cache_info->id=GetMagickThreadId();
  /*
    Nexus count is the max of the request, the OpenMP thread limit, and the
    thread resource policy, but never 0.
  */
  cache_info->number_threads=number_threads;
  if (GetOpenMPMaximumThreads() > cache_info->number_threads)
    cache_info->number_threads=GetOpenMPMaximumThreads();
  if (GetMagickResourceLimit(ThreadResource) > cache_info->number_threads)
    cache_info->number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  if (cache_info->number_threads == 0)
    cache_info->number_threads=1;
  cache_info->nexus_info=AcquirePixelCacheNexus(cache_info->number_threads);
  value=GetEnvironmentValue("MAGICK_SYNCHRONIZE");
  if (value != (const char *) NULL)
    {
      cache_info->synchronize=IsStringTrue(value);
      value=DestroyString(value);
    }
  value=GetPolicyValue("cache:synchronize");
  if (value != (const char *) NULL)
    {
      cache_info->synchronize=IsStringTrue(value);
      value=DestroyString(value);
    }
  cache_info->semaphore=AllocateSemaphoreInfo();
  cache_info->reference_count=1;
  cache_info->file_semaphore=AllocateSemaphoreInfo();
  cache_info->debug=IsEventLogging();
  cache_info->signature=MagickCoreSignature;
  return((Cache ) cache_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   A c q u i r e P i x e l C a c h e N e x u s                               %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquirePixelCacheNexus() allocates the NexusInfo structure.
%
%  The format of the AcquirePixelCacheNexus method is:
%
%      NexusInfo **AcquirePixelCacheNexus(const size_t number_threads)
%
%  A description of each parameter follows:
%
%    o number_threads: the number of nexus threads.
%
*/
/*
  Allocate one NexusInfo per thread: an aligned array of pointers plus a
  single contiguous slab of NexusInfo structures; each pointer i addresses
  slot i of the slab.  DestroyPixelCacheNexus() mirrors this layout (frees
  slot 0 then the aligned pointer array).
*/
MagickExport NexusInfo **AcquirePixelCacheNexus(const size_t number_threads)
{
  NexusInfo
    **magick_restrict nexus_info;

  register ssize_t
    i;

  nexus_info=(NexusInfo **) MagickAssumeAligned(AcquireAlignedMemory(
    number_threads,sizeof(*nexus_info)));
  if (nexus_info == (NexusInfo **) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  nexus_info[0]=(NexusInfo *) AcquireQuantumMemory(number_threads,
    sizeof(**nexus_info));
  if (nexus_info[0] == (NexusInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) memset(nexus_info[0],0,number_threads*sizeof(**nexus_info));
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    nexus_info[i]=(&nexus_info[0][i]);
    nexus_info[i]->signature=MagickCoreSignature;
  }
  return(nexus_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   A c q u i r e P i x e l C a c h e P i x e l s                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquirePixelCachePixels() returns the pixels associated with the specified
%  image.
%
%  The format of the AcquirePixelCachePixels() method is:
%
%      const void *AcquirePixelCachePixels(const Image *image,
%        MagickSizeType *length,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o length: the pixel cache length.
%
%    o exception: return any errors or warnings in this structure.
%
*/
/*
  Return a read-only pointer to the image's in-core pixels and report the
  cache length.  Only memory- and map-backed caches expose their pixels
  directly; any other cache type yields NULL with *length set to 0.
*/
MagickExport const void *AcquirePixelCachePixels(const Image *image,
  MagickSizeType *length,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  (void) exception;
  *length=0;
  if ((cache_info->type == MemoryCache) || (cache_info->type == MapCache))
    {
      *length=cache_info->length;
      return((const void *) cache_info->pixels);
    }
  return((const void *) NULL);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   C a c h e C o m p o n e n t G e n e s i s                                 %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CacheComponentGenesis() instantiates the cache component.
%
%  The format of the CacheComponentGenesis method is:
%
%      MagickBooleanType CacheComponentGenesis(void)
%
*/
MagickExport MagickBooleanType CacheComponentGenesis(void)
{
  /* Idempotent: only the first call allocates the global cache semaphore. */
  if (cache_semaphore != (SemaphoreInfo *) NULL)
    return(MagickTrue);
  cache_semaphore=AllocateSemaphoreInfo();
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   C a c h e C o m p o n e n t T e r m i n u s                               %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CacheComponentTerminus() destroys the cache component.
%
%  The format of the CacheComponentTerminus() method is:
%
%      CacheComponentTerminus(void)
%
*/
MagickExport void CacheComponentTerminus(void)
{
  /*
    Ensure the semaphore exists before destroying it so terminus is safe even
    if genesis never ran (or ran in another translation order).
  */
  if (cache_semaphore == (SemaphoreInfo *) NULL)
    ActivateSemaphoreInfo(&cache_semaphore);
  /* no op-- nothing to destroy */
  DestroySemaphoreInfo(&cache_semaphore);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   C l i p P i x e l C a c h e N e x u s                                     %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ClipPixelCacheNexus() clips the cache nexus as defined by the image clip
%  mask.  The method returns MagickTrue if the pixel region is clipped,
%  otherwise MagickFalse.
%
%  The format of the ClipPixelCacheNexus() method is:
%
%      MagickBooleanType ClipPixelCacheNexus(Image *image,NexusInfo *nexus_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o nexus_info: the cache nexus to clip.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType ClipPixelCacheNexus(Image *image,
  NexusInfo *nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickSizeType
    number_pixels;

  NexusInfo
    **magick_restrict clip_nexus,
    **magick_restrict image_nexus;

  register const PixelPacket
    *magick_restrict r;

  register IndexPacket
    *magick_restrict nexus_indexes,
    *magick_restrict indexes;

  register PixelPacket
    *magick_restrict p,
    *magick_restrict q;

  register ssize_t
    i;

  /*
    Apply clip mask.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* Nothing to do without a clip mask; colormapped images are not clipped. */
  if ((image->clip_mask == (Image *) NULL) ||
      (image->storage_class == PseudoClass))
    return(MagickTrue);
  cache_info=(CacheInfo *) image->cache;
  if (cache_info == (Cache) NULL)
    return(MagickFalse);
  image_nexus=AcquirePixelCacheNexus(1);
  clip_nexus=AcquirePixelCacheNexus(1);
  /* p: authentic (committed) pixels; q: nexus pixels being written;
     r: clip-mask pixels for the same region. */
  p=GetAuthenticPixelCacheNexus(image,nexus_info->region.x,nexus_info->region.y,
    nexus_info->region.width,nexus_info->region.height,image_nexus[0],
    exception);
  indexes=image_nexus[0]->indexes;
  q=nexus_info->pixels;
  nexus_indexes=nexus_info->indexes;
  r=GetVirtualPixelCacheNexus(image->clip_mask,MaskVirtualPixelMethod,
    nexus_info->region.x,nexus_info->region.y,nexus_info->region.width,
    nexus_info->region.height,clip_nexus[0],exception);
  number_pixels=(MagickSizeType) nexus_info->region.width*
    nexus_info->region.height;
  for (i=0; i < (ssize_t) number_pixels; i++)
  {
    double
      mask_alpha;

    /* Either fetch failing aborts the loop; failure is reported below. */
    if ((p == (PixelPacket *) NULL) || (r == (const PixelPacket *) NULL))
      break;
    mask_alpha=QuantumScale*GetPixelIntensity(image,r);
    if (fabs(mask_alpha) >= MagickEpsilon)
      {
        /* Composite the original pixel over the update, weighted by mask. */
        SetPixelRed(q,mask_alpha*MagickOver_((MagickRealType) p->red,
          (MagickRealType) GetPixelOpacity(p),(MagickRealType) q->red,
          (MagickRealType) GetPixelOpacity(q)));
        SetPixelGreen(q,mask_alpha*MagickOver_((MagickRealType) p->green,
          (MagickRealType) GetPixelOpacity(p),(MagickRealType) q->green,
          (MagickRealType) GetPixelOpacity(q)));
        SetPixelBlue(q,mask_alpha*MagickOver_((MagickRealType) p->blue,
          (MagickRealType) GetPixelOpacity(p),(MagickRealType) q->blue,
          (MagickRealType) GetPixelOpacity(q)));
        SetPixelOpacity(q,GetPixelOpacity(p));
        if (cache_info->active_index_channel != MagickFalse)
          SetPixelIndex(nexus_indexes+i,GetPixelIndex(indexes+i));
      }
    p++;
    q++;
    r++;
  }
  clip_nexus=DestroyPixelCacheNexus(clip_nexus,1);
  image_nexus=DestroyPixelCacheNexus(image_nexus,1);
  /* i < number_pixels means the loop broke early on a NULL fetch. */
  if (i < (ssize_t) number_pixels)
    return(MagickFalse);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   C l o n e P i x e l C a c h e                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ClonePixelCache() clones a pixel cache.
%
%  The format of the ClonePixelCache() method is:
%
%      Cache ClonePixelCache(const Cache cache)
%
%  A description of each parameter follows:
%
%    o cache: the pixel cache.
%
*/
/*
  Create a fresh cache that copies only the source's thread count and virtual
  pixel method — NOT its pixels; pixel data is cloned separately (see
  ClonePixelCacheRepository()).
*/
MagickExport Cache ClonePixelCache(const Cache cache)
{
  CacheInfo
    *magick_restrict clone_info;

  const CacheInfo
    *magick_restrict cache_info;

  assert(cache != NULL);
  cache_info=(const CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  clone_info=(CacheInfo *) AcquirePixelCache(cache_info->number_threads);
  clone_info->virtual_pixel_method=cache_info->virtual_pixel_method;
  return((Cache ) clone_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   C l o n e P i x e l C a c h e M e t h o d s                               %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ClonePixelCacheMethods() clones the pixel cache methods from one cache to
%  another.
%
%  The format of the ClonePixelCacheMethods() method is:
%
%      void ClonePixelCacheMethods(Cache clone,const Cache cache)
%
%  A description of each parameter follows:
%
%    o clone: Specifies a pointer to a Cache structure.
%
%    o cache: the pixel cache.
%
*/
/*
  Copy the cache method table from `cache` into `clone`.  Note the naming:
  `clone` is the destination (source_info) and `cache` supplies the methods.
*/
MagickExport void ClonePixelCacheMethods(Cache clone,const Cache cache)
{
  CacheInfo
    *magick_restrict cache_info,
    *magick_restrict source_info;

  assert(clone != (Cache) NULL);
  source_info=(CacheInfo *) clone;
  assert(source_info->signature == MagickCoreSignature);
  if (source_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      source_info->filename);
  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  source_info->methods=cache_info->methods;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   C l o n e P i x e l C a c h e R e p o s i t o r y                         %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ClonePixelCacheRepository() clones the source pixel cache to the destination
%  cache.
%
%  The format of the ClonePixelCacheRepository() method is:
%
%      MagickBooleanType ClonePixelCacheRepository(CacheInfo *cache_info,
%        CacheInfo *source_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o cache_info: the pixel cache.
%
%    o source_info: the source pixel cache.
%
%    o exception: return any errors or warnings in this structure.
%
*/
/*
  Byte-copy one disk cache file into another.  Both caches must already have
  identical morphology; success requires that exactly cache_info->length
  bytes were transferred.
*/
static MagickBooleanType ClonePixelCacheOnDisk(
  CacheInfo *magick_restrict cache_info,CacheInfo *magick_restrict clone_info,
  ExceptionInfo *exception)
{
  MagickSizeType
    extent;

  size_t
    quantum;

  ssize_t
    count;

  struct stat
    file_stats;

  unsigned char
    *buffer;

  /*
    Clone pixel cache on disk with identical morphology.
  */
  if ((OpenPixelCacheOnDisk(cache_info,ReadMode) == MagickFalse) ||
      (OpenPixelCacheOnDisk(clone_info,IOMode) == MagickFalse))
    return(MagickFalse);
  if ((lseek(cache_info->file,0,SEEK_SET) < 0) ||
      (lseek(clone_info->file,0,SEEK_SET) < 0))
    return(MagickFalse);
  /* Use one I/O chunk sized to the file (capped at MagickMaxBufferExtent). */
  quantum=(size_t) MagickMaxBufferExtent;
  if ((fstat(cache_info->file,&file_stats) == 0) && (file_stats.st_size > 0))
    quantum=(size_t) MagickMin(file_stats.st_size,MagickMaxBufferExtent);
  buffer=(unsigned char *) AcquireQuantumMemory(quantum,sizeof(*buffer));
  if (buffer == (unsigned char *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  extent=0;
  while ((count=read(cache_info->file,buffer,quantum)) > 0)
  {
    ssize_t
      number_bytes;

    number_bytes=write(clone_info->file,buffer,(size_t) count);
    if (number_bytes != count)
      break;
    extent+=number_bytes;
  }
  buffer=(unsigned char *) RelinquishMagickMemory(buffer);
  if (extent != cache_info->length)
    return(MagickFalse);
  return(MagickTrue);
}

/*
  Clone source pixels (and colormap indexes, when both caches carry them)
  into the destination cache, row by row, tolerating differing geometry or
  storage types.  Fast paths: identical morphology in memory (memcpy) or on
  disk (byte copy).
*/
static MagickBooleanType ClonePixelCacheRepository(
  CacheInfo *magick_restrict clone_info,CacheInfo *magick_restrict cache_info,
  ExceptionInfo *exception)
{
#define MaxCacheThreads  ((size_t) GetMagickResourceLimit(ThreadResource))
/* Throttle OpenMP: serial when !multithreaded, at most 2 threads when either
   cache is disk/distributed backed, otherwise scale with the row count. */
#define cache_number_threads(source,destination,chunk,multithreaded) \
  num_threads((multithreaded) == 0 ? 1 : \
    (((source)->type != MemoryCache) && ((source)->type != MapCache)) || \
    (((destination)->type != MemoryCache) && ((destination)->type != MapCache)) ? \
    MagickMax(MagickMin(GetMagickResourceLimit(ThreadResource),2),1) : \
    MagickMax(MagickMin((ssize_t) GetMagickResourceLimit(ThreadResource),(ssize_t) (chunk)/256),1))

  MagickBooleanType
    status;

  NexusInfo
    **magick_restrict cache_nexus,
    **magick_restrict clone_nexus;

  size_t
    length;

  ssize_t
    y;

  assert(cache_info != (CacheInfo *) NULL);
  assert(clone_info != (CacheInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  /* A ping cache has no pixels to clone. */
  if (cache_info->type == PingCache)
    return(MagickTrue);
  if ((cache_info->storage_class == clone_info->storage_class) &&
      (cache_info->colorspace == clone_info->colorspace) &&
      (cache_info->channels == clone_info->channels) &&
      (cache_info->columns == clone_info->columns) &&
      (cache_info->rows == clone_info->rows) &&
      (cache_info->active_index_channel == clone_info->active_index_channel))
    {
      /*
        Identical pixel cache morphology.
      */
      if (((cache_info->type == MemoryCache) ||
           (cache_info->type == MapCache)) &&
          ((clone_info->type == MemoryCache) ||
           (clone_info->type == MapCache)))
        {
          (void) memcpy(clone_info->pixels,cache_info->pixels,
            cache_info->columns*cache_info->rows*sizeof(*cache_info->pixels));
          if ((cache_info->active_index_channel != MagickFalse) &&
              (clone_info->active_index_channel != MagickFalse))
            (void) memcpy(clone_info->indexes,cache_info->indexes,
              cache_info->columns*cache_info->rows*
              sizeof(*cache_info->indexes));
          return(MagickTrue);
        }
      if ((cache_info->type == DiskCache) && (clone_info->type == DiskCache))
        return(ClonePixelCacheOnDisk(cache_info,clone_info,exception));
    }
  /*
    Mismatched pixel cache morphology.
  */
  cache_nexus=AcquirePixelCacheNexus(MaxCacheThreads);
  clone_nexus=AcquirePixelCacheNexus(MaxCacheThreads);
  /* Copy the overlapping row prefix only. */
  length=(size_t) MagickMin(cache_info->columns,clone_info->columns)*
    sizeof(*cache_info->pixels);
  status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    cache_number_threads(cache_info,clone_info,cache_info->rows,1)
#endif
  for (y=0; y < (ssize_t) cache_info->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    PixelPacket
      *pixels;

    RectangleInfo
      region;

    if (status == MagickFalse)
      continue;
    if (y >= (ssize_t) clone_info->rows)
      continue;
    region.width=cache_info->columns;
    region.height=1;
    region.x=0;
    region.y=y;
    pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,&region,MagickFalse,
      cache_nexus[id],exception);
    if (pixels == (PixelPacket *) NULL)
      continue;
    status=ReadPixelCachePixels(cache_info,cache_nexus[id],exception);
    if (status == MagickFalse)
      continue;
    region.width=clone_info->columns;
    pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,&region,MagickFalse,
      clone_nexus[id],exception);
    if (pixels == (PixelPacket *) NULL)
      continue;
    /* Zero the destination row first so columns beyond `length` are clean. */
    (void) memset(clone_nexus[id]->pixels,0,(size_t) clone_nexus[id]->length);
    (void) memcpy(clone_nexus[id]->pixels,cache_nexus[id]->pixels,length);
    status=WritePixelCachePixels(clone_info,clone_nexus[id],exception);
  }
  if ((cache_info->active_index_channel != MagickFalse) &&
      (clone_info->active_index_channel != MagickFalse))
    {
      /*
        Clone indexes.
      */
      length=(size_t) MagickMin(cache_info->columns,clone_info->columns)*
        sizeof(*cache_info->indexes);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    cache_number_threads(cache_info,clone_info,cache_info->rows,1)
#endif
      for (y=0; y < (ssize_t) cache_info->rows; y++)
      {
        const int
          id = GetOpenMPThreadId();

        PixelPacket
          *pixels;

        RectangleInfo
          region;

        if (status == MagickFalse)
          continue;
        if (y >= (ssize_t) clone_info->rows)
          continue;
        region.width=cache_info->columns;
        region.height=1;
        region.x=0;
        region.y=y;
        pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,&region,MagickFalse,
          cache_nexus[id],exception);
        if (pixels == (PixelPacket *) NULL)
          continue;
        status=ReadPixelCacheIndexes(cache_info,cache_nexus[id],exception);
        if (status == MagickFalse)
          continue;
        region.width=clone_info->columns;
        pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,&region,
          MagickFalse,clone_nexus[id],exception);
        if (pixels == (PixelPacket *) NULL)
          continue;
        (void) memcpy(clone_nexus[id]->indexes,cache_nexus[id]->indexes,length);
        status=WritePixelCacheIndexes(clone_info,clone_nexus[id],exception);
      }
    }
  cache_nexus=DestroyPixelCacheNexus(cache_nexus,MaxCacheThreads);
  clone_nexus=DestroyPixelCacheNexus(clone_nexus,MaxCacheThreads);
  if (cache_info->debug != MagickFalse)
    {
      char
        message[MaxTextExtent];

      (void) FormatLocaleString(message,MaxTextExtent,"%s => %s",
        CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type),
        CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) clone_info->type));
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   D e s t r o y I m a g e P i x e l C a c h e                               %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyImagePixelCache() deallocates memory associated with the pixel cache.
% % The format of the DestroyImagePixelCache() method is: % % void DestroyImagePixelCache(Image *image) % % A description of each parameter follows: % % o image: the image. % */ static void DestroyImagePixelCache(Image *image) { assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->cache != (void *) NULL) image->cache=DestroyPixelCache(image->cache); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e s t r o y I m a g e P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyImagePixels() deallocates memory associated with the pixel cache. % % The format of the DestroyImagePixels() method is: % % void DestroyImagePixels(Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport void DestroyImagePixels(Image *image) { CacheInfo *magick_restrict cache_info; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->methods.destroy_pixel_handler != (DestroyPixelHandler) NULL) { cache_info->methods.destroy_pixel_handler(image); return; } image->cache=DestroyPixelCache(image->cache); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e s t r o y P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyPixelCache() deallocates memory associated with the pixel cache. 
%
%  The format of the DestroyPixelCache() method is:
%
%      Cache DestroyPixelCache(Cache cache)
%
%  A description of each parameter follows:
%
%    o cache: the pixel cache.
%
*/
/*
  Close the cache's backing file descriptor and give back the file-handle
  resource.  Returns MagickFalse when no file was open or close() failed.
*/
static MagickBooleanType ClosePixelCacheOnDisk(CacheInfo *cache_info)
{
  int
    status;

  status=(-1);
  if (cache_info->file != -1)
    {
      status=close(cache_info->file);
      cache_info->file=(-1);
      RelinquishMagickResource(FileResource,1);
    }
  return(status == -1 ? MagickFalse : MagickTrue);
}

/*
  Release the pixel storage for every cache type and reset the cache to the
  Undefined state.  Each case also gives back the matching resource-limit
  accounting (memory/map/disk).
*/
static inline void RelinquishPixelCachePixels(CacheInfo *cache_info)
{
  switch (cache_info->type)
  {
    case MemoryCache:
    {
#if defined(MAGICKCORE_OPENCL_SUPPORT)
      /* If an OpenCL buffer owns the pixels, its (possibly deferred)
         release path frees them instead. */
      if (RelinquishOpenCLBuffer(cache_info) != MagickFalse)
        {
          cache_info->pixels=(PixelPacket *) NULL;
          break;
        }
#endif
      if (cache_info->mapped == MagickFalse)
        cache_info->pixels=(PixelPacket *) RelinquishAlignedMemory(
          cache_info->pixels);
      else
        (void) UnmapBlob(cache_info->pixels,(size_t) cache_info->length);
      RelinquishMagickResource(MemoryResource,cache_info->length);
      break;
    }
    case MapCache:
    {
      (void) UnmapBlob(cache_info->pixels,(size_t) cache_info->length);
      cache_info->pixels=(PixelPacket *) NULL;
      if ((cache_info->mode != ReadMode) && (cache_info->mode != PersistMode))
        (void) RelinquishUniqueFileResource(cache_info->cache_filename);
      *cache_info->cache_filename='\0';
      RelinquishMagickResource(MapResource,cache_info->length);
    }
    /* NOTE: intentional fall-through — a memory-mapped cache is backed by a
       disk file that must also be closed and its disk resource returned. */
    case DiskCache:
    {
      if (cache_info->file != -1)
        (void) ClosePixelCacheOnDisk(cache_info);
      if ((cache_info->mode != ReadMode) && (cache_info->mode != PersistMode))
        (void) RelinquishUniqueFileResource(cache_info->cache_filename);
      *cache_info->cache_filename='\0';
      RelinquishMagickResource(DiskResource,cache_info->length);
      break;
    }
    case DistributedCache:
    {
      *cache_info->cache_filename='\0';
      (void) RelinquishDistributePixelCache((DistributeCacheInfo *)
        cache_info->server_info);
      break;
    }
    default:
      break;
  }
  cache_info->type=UndefinedCache;
  cache_info->mapped=MagickFalse;
  cache_info->indexes=(IndexPacket *) NULL;
}

/*
  Drop one reference to the cache; when the count reaches zero, free the
  pixel storage, nexuses, semaphores, and the CacheInfo itself.  Always
  returns NULL so callers can reset their handle.
*/
MagickExport Cache DestroyPixelCache(Cache cache)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  LockSemaphoreInfo(cache_info->semaphore);
  cache_info->reference_count--;
  if (cache_info->reference_count != 0)
    {
      /* Other owners remain; nothing else to do. */
      UnlockSemaphoreInfo(cache_info->semaphore);
      return((Cache) NULL);
    }
  UnlockSemaphoreInfo(cache_info->semaphore);
  if (cache_info->debug != MagickFalse)
    {
      char
        message[MaxTextExtent];

      (void) FormatLocaleString(message,MaxTextExtent,"destroy %s",
        cache_info->filename);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  RelinquishPixelCachePixels(cache_info);
  if (cache_info->server_info != (DistributeCacheInfo *) NULL)
    cache_info->server_info=DestroyDistributeCacheInfo((DistributeCacheInfo *)
      cache_info->server_info);
  if (cache_info->nexus_info != (NexusInfo **) NULL)
    cache_info->nexus_info=DestroyPixelCacheNexus(cache_info->nexus_info,
      cache_info->number_threads);
  if (cache_info->random_info != (RandomInfo *) NULL)
    cache_info->random_info=DestroyRandomInfo(cache_info->random_info);
  if (cache_info->file_semaphore != (SemaphoreInfo *) NULL)
    DestroySemaphoreInfo(&cache_info->file_semaphore);
  if (cache_info->semaphore != (SemaphoreInfo *) NULL)
    DestroySemaphoreInfo(&cache_info->semaphore);
  /* Invalidate the signature so stale pointers trip the asserts above. */
  cache_info->signature=(~MagickCoreSignature);
  cache_info=(CacheInfo *) RelinquishMagickMemory(cache_info);
  cache=(Cache) NULL;
  return(cache);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   D e s t r o y P i x e l C a c h e N e x u s                               %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyPixelCacheNexus() destroys a pixel cache nexus.
%
%  The format of the DestroyPixelCacheNexus() method is:
%
%      NexusInfo **DestroyPixelCacheNexus(NexusInfo *nexus_info,
%        const size_t number_threads)
%
%  A description of each parameter follows:
%
%    o nexus_info: the nexus to destroy.
%
%    o number_threads: the number of nexus threads.
%
*/
/*
  Free a nexus's staging buffer (aligned heap memory or a mapped blob,
  depending on how it was acquired) and reset all of its pixel pointers.
*/
static inline void RelinquishCacheNexusPixels(NexusInfo *nexus_info)
{
  if (nexus_info->mapped == MagickFalse)
    (void) RelinquishAlignedMemory(nexus_info->cache);
  else
    (void) UnmapBlob(nexus_info->cache,(size_t) nexus_info->length);
  nexus_info->cache=(PixelPacket *) NULL;
  nexus_info->pixels=(PixelPacket *) NULL;
  nexus_info->indexes=(IndexPacket *) NULL;
  nexus_info->length=0;
  nexus_info->mapped=MagickFalse;
}

/*
  Tear down the per-thread nexus array created by AcquirePixelCacheNexus():
  release each nexus's buffer, then the contiguous NexusInfo slab (slot 0),
  then the aligned pointer array.  Returns NULL.
*/
MagickExport NexusInfo **DestroyPixelCacheNexus(NexusInfo **nexus_info,
  const size_t number_threads)
{
  register ssize_t
    i;

  assert(nexus_info != (NexusInfo **) NULL);
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    if (nexus_info[i]->cache != (PixelPacket *) NULL)
      RelinquishCacheNexusPixels(nexus_info[i]);
    nexus_info[i]->signature=(~MagickCoreSignature);
  }
  nexus_info[0]=(NexusInfo *) RelinquishMagickMemory(nexus_info[0]);
  nexus_info=(NexusInfo **) RelinquishAlignedMemory(nexus_info);
  return(nexus_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   G e t A u t h e n t i c I n d e x e s F r o m C a c h e                   %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetAuthenticIndexesFromCache() returns the indexes associated with the last
%  call to QueueAuthenticPixelsCache() or GetAuthenticPixelsCache().
%
%  The format of the GetAuthenticIndexesFromCache() method is:
%
%      IndexPacket *GetAuthenticIndexesFromCache(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
static IndexPacket *GetAuthenticIndexesFromCache(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    thread_id = GetOpenMPThreadId();

  /*
    Return the calling thread's nexus indexes, staged by the most recent
    authentic pixel access on this thread.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(thread_id < (int) cache_info->number_threads);
  return(cache_info->nexus_info[thread_id]->indexes);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t   A u t h e n t i c   I n d e x   Q u e u e                         %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetAuthenticIndexQueue() returns the authentic black channel or the colormap
%  indexes associated with the last call to QueueAuthenticPixels() or
%  GetVirtualPixels().  NULL is returned if the black channel or colormap
%  indexes are not available.
%
%  The format of the GetAuthenticIndexQueue() method is:
%
%      IndexPacket *GetAuthenticIndexQueue(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
% */ MagickExport IndexPacket *GetAuthenticIndexQueue(const Image *image) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->methods.get_authentic_indexes_from_handler != (GetAuthenticIndexesFromHandler) NULL) return(cache_info->methods.get_authentic_indexes_from_handler(image)); assert(id < (int) cache_info->number_threads); return(cache_info->nexus_info[id]->indexes); } #if defined(MAGICKCORE_OPENCL_SUPPORT) /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t A u t h e n t i c O p e n C L B u f f e r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticOpenCLBuffer() returns an OpenCL buffer used to execute OpenCL % operations. % % The format of the GetAuthenticOpenCLBuffer() method is: % % cl_mem GetAuthenticOpenCLBuffer(const Image *image) % % A description of each parameter follows: % % o image: the image. 
%
*/
MagickPrivate cl_mem GetAuthenticOpenCLBuffer(const Image *image,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  cl_context
    context;

  cl_int
    status;

  MagickCLEnv
    clEnv;

  assert(image != (const Image *) NULL);
  cache_info=(CacheInfo *)image->cache;
  /*
    An unset or shared cache is first resynchronized so this image owns a
    private, opened pixel cache before it is wrapped in an OpenCL buffer.
  */
  if ((cache_info->type == UndefinedCache) ||
      (cache_info->reference_count > 1))
    {
      SyncImagePixelCache((Image *) image,exception);
      cache_info=(CacheInfo *)image->cache;
    }
  /*
    Only an in-memory, non-mapped cache can back a CL_MEM_USE_HOST_PTR
    buffer; anything else (disk, mapped) cannot be shared with the device.
  */
  if ((cache_info->type != MemoryCache) || (cache_info->mapped != MagickFalse))
    return((cl_mem) NULL);
  LockSemaphoreInfo(cache_info->semaphore);
  clEnv=GetDefaultOpenCLEnv();
  if (cache_info->opencl == (OpenCLCacheInfo *) NULL)
    {
      /*
        Lazily create the OpenCL wrapper under the cache semaphore; the
        buffer aliases the host pixels rather than copying them.
      */
      assert(cache_info->pixels != NULL);
      context=GetOpenCLContext(clEnv);
      cache_info->opencl=(OpenCLCacheInfo *) AcquireCriticalMemory(
        sizeof(*cache_info->opencl));
      (void) memset(cache_info->opencl,0,sizeof(*cache_info->opencl));
      cache_info->opencl->events_semaphore=AllocateSemaphoreInfo();
      cache_info->opencl->length=cache_info->length;
      cache_info->opencl->pixels=cache_info->pixels;
      cache_info->opencl->buffer=clEnv->library->clCreateBuffer(context,
        CL_MEM_USE_HOST_PTR,cache_info->length,cache_info->pixels,&status);
      if (status != CL_SUCCESS)
        cache_info->opencl=RelinquishOpenCLCacheInfo(clEnv,cache_info->opencl);
    }
  /*
    Retain before unlocking so the buffer cannot be released out from under
    the caller; the caller owns one reference on success.
  */
  if (cache_info->opencl != (OpenCLCacheInfo *) NULL)
    clEnv->library->clRetainMemObject(cache_info->opencl->buffer);
  UnlockSemaphoreInfo(cache_info->semaphore);
  if (cache_info->opencl == (OpenCLCacheInfo *) NULL)
    return((cl_mem) NULL);
  return(cache_info->opencl->buffer);
}
#endif

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   G e t   A u t h e n t i c   P i x e l   C a c h e   N e x u s             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetAuthenticPixelCacheNexus() gets authentic pixels from the in-memory or
%  disk pixel cache as defined by the geometry parameters.
A pointer to the % pixels is returned if the pixels are transferred, otherwise a NULL is % returned. % % The format of the GetAuthenticPixelCacheNexus() method is: % % PixelPacket *GetAuthenticPixelCacheNexus(Image *image,const ssize_t x, % const ssize_t y,const size_t columns,const size_t rows, % NexusInfo *nexus_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o x,y,columns,rows: These values define the perimeter of a region of % pixels. % % o nexus_info: the cache nexus to return. % % o exception: return any errors or warnings in this structure. % */ MagickExport PixelPacket *GetAuthenticPixelCacheNexus(Image *image, const ssize_t x,const ssize_t y,const size_t columns,const size_t rows, NexusInfo *nexus_info,ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; PixelPacket *magick_restrict pixels; /* Transfer pixels from the cache. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); pixels=QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickTrue, nexus_info,exception); if (pixels == (PixelPacket *) NULL) return((PixelPacket *) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (nexus_info->authentic_pixel_cache != MagickFalse) return(pixels); if (ReadPixelCachePixels(cache_info,nexus_info,exception) == MagickFalse) return((PixelPacket *) NULL); if (cache_info->active_index_channel != MagickFalse) if (ReadPixelCacheIndexes(cache_info,nexus_info,exception) == MagickFalse) return((PixelPacket *) NULL); return(pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t A u t h e n t i c P i x e l s F r o m C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticPixelsFromCache() returns the pixels associated with the last % call to the QueueAuthenticPixelsCache() or GetAuthenticPixelsCache() 
methods. % % The format of the GetAuthenticPixelsFromCache() method is: % % PixelPacket *GetAuthenticPixelsFromCache(const Image image) % % A description of each parameter follows: % % o image: the image. % */ static PixelPacket *GetAuthenticPixelsFromCache(const Image *image) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); return(cache_info->nexus_info[id]->pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t A u t h e n t i c P i x e l Q u e u e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticPixelQueue() returns the authentic pixels associated with the % last call to QueueAuthenticPixels() or GetAuthenticPixels(). % % The format of the GetAuthenticPixelQueue() method is: % % PixelPacket *GetAuthenticPixelQueue(const Image image) % % A description of each parameter follows: % % o image: the image. 
%
*/
MagickExport PixelPacket *GetAuthenticPixelQueue(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  GetAuthenticPixelsFromHandler
    handler;

  const int
    thread_id = GetOpenMPThreadId();

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /*
    Defer to an installed cache-method handler when one is registered;
    otherwise read straight from this thread's nexus.
  */
  handler=cache_info->methods.get_authentic_pixels_from_handler;
  if (handler != (GetAuthenticPixelsFromHandler) NULL)
    return(handler(image));
  assert(thread_id < (int) cache_info->number_threads);
  return(cache_info->nexus_info[thread_id]->pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t   A u t h e n t i c   P i x e l s                                   %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetAuthenticPixels() obtains a pixel region for read/write access.  If the
%  region is successfully accessed, a pointer to a PixelPacket array
%  representing the region is returned, otherwise NULL is returned.
%
%  The returned pointer may point to a temporary working copy of the pixels
%  or it may point to the original pixels in memory.  Performance is maximized
%  if the selected region is part of one row, or one or more full rows, since
%  then there is opportunity to access the pixels in-place (without a copy)
%  if the image is in memory, or in a memory-mapped file.  The returned pointer
%  must *never* be deallocated by the user.
%
%  Pixels accessed via the returned pointer represent a simple array of type
%  PixelPacket.  If the image type is CMYK or if the storage class is
%  PseduoClass, call GetAuthenticIndexQueue() after invoking
%  GetAuthenticPixels() to obtain the black color component or colormap indexes
%  (of type IndexPacket) corresponding to the region.
Once the PixelPacket % (and/or IndexPacket) array has been updated, the changes must be saved back % to the underlying image using SyncAuthenticPixels() or they may be lost. % % The format of the GetAuthenticPixels() method is: % % PixelPacket *GetAuthenticPixels(Image *image,const ssize_t x, % const ssize_t y,const size_t columns,const size_t rows, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o x,y,columns,rows: These values define the perimeter of a region of % pixels. % % o exception: return any errors or warnings in this structure. % */ MagickExport PixelPacket *GetAuthenticPixels(Image *image,const ssize_t x, const ssize_t y,const size_t columns,const size_t rows, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->methods.get_authentic_pixels_handler != (GetAuthenticPixelsHandler) NULL) return(cache_info->methods.get_authentic_pixels_handler(image,x,y,columns, rows,exception)); assert(id < (int) cache_info->number_threads); return(GetAuthenticPixelCacheNexus(image,x,y,columns,rows, cache_info->nexus_info[id],exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t A u t h e n t i c P i x e l s C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticPixelsCache() gets pixels from the in-memory or disk pixel cache % as defined by the geometry parameters. A pointer to the pixels is returned % if the pixels are transferred, otherwise a NULL is returned. 
%
%  The format of the GetAuthenticPixelsCache() method is:
%
%      PixelPacket *GetAuthenticPixelsCache(Image *image,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y,columns,rows:  These values define the perimeter of a region of
%      pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static PixelPacket *GetAuthenticPixelsCache(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    thread_id = GetOpenMPThreadId();

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  /*
    Defensive release-build check; debug builds assert above instead.
  */
  if (cache_info == (Cache) NULL)
    return((PixelPacket *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  assert(thread_id < (int) cache_info->number_threads);
  return(GetAuthenticPixelCacheNexus(image,x,y,columns,rows,
    cache_info->nexus_info[thread_id],exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   G e t   I m a g e   E x t e n t                                           %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageExtent() returns the extent of the pixels associated with the
%  last call to QueueAuthenticPixels() or GetAuthenticPixels().
%
%  The format of the GetImageExtent() method is:
%
%      MagickSizeType GetImageExtent(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
% */ MagickExport MagickSizeType GetImageExtent(const Image *image) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); return(GetPixelCacheNexusExtent(cache_info,cache_info->nexus_info[id])); } #if defined(MAGICKCORE_OPENCL_SUPPORT) /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t O p e n C L E v e n t s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetOpenCLEvents() returns the events that the next operation should wait % for. The argument event_count is set to the number of events. % % The format of the GetOpenCLEvents() method is: % % const cl_event *GetOpenCLEvents(const Image *image, % cl_command_queue queue) % % A description of each parameter follows: % % o image: the image. % % o event_count: will be set to the number of events. 
%
*/
extern MagickPrivate cl_event *GetOpenCLEvents(const Image *image,
  cl_uint *event_count)
{
  CacheInfo
    *magick_restrict cache_info;

  cl_event
    *events;

  assert(image != (const Image *) NULL);
  assert(event_count != (cl_uint *) NULL);
  cache_info=(CacheInfo *) image->cache;
  /* No OpenCL wrapper on this cache means no outstanding events. */
  *event_count=0;
  events=(cl_event *) NULL;
  if (cache_info->opencl != (OpenCLCacheInfo *) NULL)
    events=CopyOpenCLEvents(cache_info->opencl,event_count);
  return(events);
}
#endif

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   G e t   I m a g e   P i x e l   C a c h e                                 %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImagePixelCache() ensures that there is only a single reference to the
%  pixel cache to be modified, updating the provided cache pointer to point to
%  a clone of the original pixel cache if necessary.
%
%  The format of the GetImagePixelCache method is:
%
%      Cache GetImagePixelCache(Image *image,const MagickBooleanType clone,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o clone: any value other than MagickFalse clones the cache pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static inline MagickBooleanType ValidatePixelCacheMorphology(
  const Image *magick_restrict image)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    Does the image match the pixel cache morphology?
  */
  cache_info=(CacheInfo *) image->cache;
  if ((image->storage_class != cache_info->storage_class) ||
      (image->colorspace != cache_info->colorspace) ||
      (image->channels != cache_info->channels) ||
      (image->columns != cache_info->columns) ||
      (image->rows != cache_info->rows) ||
      (cache_info->nexus_info == (NexusInfo **) NULL))
    return(MagickFalse);
  return(MagickTrue);
}

static Cache GetImagePixelCache(Image *image,const MagickBooleanType clone,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickBooleanType
    destroy,
    status;

  /*
    Throttle/time-limit state is shared across all images in the process;
    these statics are lazily initialized from the resource limits.
  */
  static MagickSizeType
    cache_timelimit = MagickResourceInfinity,
    cpu_throttle = MagickResourceInfinity,
    cycles = 0;

  status=MagickTrue;
  if (cpu_throttle == MagickResourceInfinity)
    cpu_throttle=GetMagickResourceLimit(ThrottleResource);
  /* Yield the CPU every 32nd call when a throttle limit is configured. */
  if ((cpu_throttle != 0) && ((cycles++ % 32) == 0))
    MagickDelay(cpu_throttle);
  if (cache_epoch == 0)
    {
      /*
        Set the expire time in seconds.
      */
      cache_epoch=time((time_t *) NULL);
      cache_timelimit=GetMagickResourceLimit(TimeResource);
    }
  if ((cache_timelimit != MagickResourceInfinity) &&
      ((MagickSizeType) (time((time_t *) NULL)-cache_epoch) >= cache_timelimit))
    {
      /*
        Time resource limit exceeded: close any disk cache file and abort via
        a fatal exception (this does not return).
      */
#if defined(ECANCELED)
      errno=ECANCELED;
#endif
      cache_info=(CacheInfo *) image->cache;
      if (cache_info->file != -1)
        (void) ClosePixelCacheOnDisk(cache_info);
      ThrowFatalException(ResourceLimitFatalError,"TimeLimitExceeded");
    }
  LockSemaphoreInfo(image->semaphore);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  CopyOpenCLBuffer(cache_info);
#endif
  destroy=MagickFalse;
  /*
    Double-checked locking: the shared/read-only test is repeated under the
    cache semaphore before committing to the clone.
  */
  if ((cache_info->reference_count > 1) || (cache_info->mode == ReadMode))
    {
      LockSemaphoreInfo(cache_info->semaphore);
      if ((cache_info->reference_count > 1) ||
          (cache_info->mode == ReadMode))
        {
          CacheInfo
            *clone_info;

          Image
            clone_image;

          /*
            Clone pixel cache.  A stack-local shallow copy of the image is
            used so OpenPixelCache() operates on the clone cache without
            disturbing the original image's fields.
          */
          clone_image=(*image);
          clone_image.semaphore=AllocateSemaphoreInfo();
          clone_image.reference_count=1;
          clone_image.cache=ClonePixelCache(cache_info);
          clone_info=(CacheInfo *) clone_image.cache;
          status=OpenPixelCache(&clone_image,IOMode,exception);
          if (status == MagickFalse)
            clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
          else
            {
              if (clone != MagickFalse)
                status=ClonePixelCacheRepository(clone_info,cache_info,
                  exception);
              if (status == MagickFalse)
                clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
              else
                {
                  /* Swap the image over to the private clone; the original
                     cache reference is dropped below, outside the lock. */
                  destroy=MagickTrue;
                  image->cache=clone_info;
                }
            }
          DestroySemaphoreInfo(&clone_image.semaphore);
        }
      UnlockSemaphoreInfo(cache_info->semaphore);
    }
  if (destroy != MagickFalse)
    cache_info=(CacheInfo *) DestroyPixelCache(cache_info);
  if (status != MagickFalse)
    {
      /*
        Ensure the image matches the pixel cache morphology.
      */
      image->type=UndefinedType;
      if (ValidatePixelCacheMorphology(image) == MagickFalse)
        {
          status=OpenPixelCache(image,IOMode,exception);
          cache_info=(CacheInfo *) image->cache;
          if (cache_info->file != -1)
            (void) ClosePixelCacheOnDisk(cache_info);
        }
    }
  UnlockSemaphoreInfo(image->semaphore);
  if (status == MagickFalse)
    return((Cache) NULL);
  return(image->cache);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   G e t   I m a g e   P i x e l   C a c h e   T y p e                       %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImagePixelCacheType() returns the pixel cache type: UndefinedCache,
%  DiskCache, MapCache, MemoryCache, or PingCache.
%
%  The format of the GetImagePixelCacheType() method is:
%
%      CacheType GetImagePixelCacheType(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/

/* Deprecated alias kept for binary compatibility. */
MagickExport CacheType GetPixelCacheType(const Image *image)
{
  return(GetImagePixelCacheType(image));
}

MagickExport CacheType GetImagePixelCacheType(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  return(cache_info->type);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t   O n e   A u t h e n t i c   P i x e l                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetOneAuthenticPixel() returns a single pixel at the specified (x,y)
%  location.  The image background color is returned if an error occurs.
%
%  The format of the GetOneAuthenticPixel() method is:
%
%      MagickBooleanType GetOneAuthenticPixel(const Image *image,
%        const ssize_t x,const ssize_t y,PixelPacket *pixel,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y:  These values define the location of the pixel to return.
%
%    o pixel: return a pixel at the specified (x,y) location.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetOneAuthenticPixel(Image *image,
  const ssize_t x,const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  GetOneAuthenticPixelFromHandler
    handler;

  PixelPacket
    *magick_restrict pixels;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /*
    Preload the background color so callers receive a defined pixel even on
    failure.
  */
  *pixel=image->background_color;
  handler=cache_info->methods.get_one_authentic_pixel_from_handler;
  if (handler != (GetOneAuthenticPixelFromHandler) NULL)
    return(handler(image,x,y,pixel,exception));
  pixels=GetAuthenticPixelsCache(image,x,y,1UL,1UL,exception);
  if (pixels == (PixelPacket *) NULL)
    return(MagickFalse);
  *pixel=(*pixels);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   G e t   O n e   A u t h e n t i c   P i x e l   F r o m   C a c h e       %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetOneAuthenticPixelFromCache() returns a single pixel at the specified
%  (x,y) location.  The image background color is returned if an error occurs.
%
%  The format of the GetOneAuthenticPixelFromCache() method is:
%
%      MagickBooleanType GetOneAuthenticPixelFromCache(const Image *image,
%        const ssize_t x,const ssize_t y,PixelPacket *pixel,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y:  These values define the location of the pixel to return.
%
%    o pixel: return a pixel at the specified (x,y) location.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType GetOneAuthenticPixelFromCache(Image *image,
  const ssize_t x,const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    thread_id = GetOpenMPThreadId();

  PixelPacket
    *magick_restrict pixels;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* Fall back to the background color when the 1x1 fetch fails. */
  *pixel=image->background_color;
  assert(thread_id < (int) cache_info->number_threads);
  pixels=GetAuthenticPixelCacheNexus(image,x,y,1UL,1UL,
    cache_info->nexus_info[thread_id],exception);
  if (pixels == (PixelPacket *) NULL)
    return(MagickFalse);
  *pixel=(*pixels);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t   O n e   V i r t u a l   M a g i c k   P i x e l                   %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetOneVirtualMagickPixel() returns a single pixel at the specified (x,y)
%  location.  The image background color is returned if an error occurs.  If
%  you plan to modify the pixel, use GetOneAuthenticPixel() instead.
%
%  The format of the GetOneVirtualMagickPixel() method is:
%
%      MagickBooleanType GetOneVirtualMagickPixel(const Image *image,
%        const ssize_t x,const ssize_t y,MagickPixelPacket *pixel,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y: these values define the location of the pixel to return.
%
%    o pixel: return a pixel at the specified (x,y) location.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetOneVirtualMagickPixel(const Image *image,
  const ssize_t x,const ssize_t y,MagickPixelPacket *pixel,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    thread_id = GetOpenMPThreadId();

  register const IndexPacket
    *magick_restrict indexes;

  register const PixelPacket
    *magick_restrict pixels;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(thread_id < (int) cache_info->number_threads);
  /*
    Fetch a 1x1 virtual region on this thread's nexus, then widen it into a
    MagickPixelPacket (pixel + index channel).
  */
  pixels=GetVirtualPixelCacheNexus(image,GetPixelCacheVirtualMethod(image),x,y,
    1UL,1UL,cache_info->nexus_info[thread_id],exception);
  GetMagickPixelPacket(image,pixel);
  if (pixels == (const PixelPacket *) NULL)
    return(MagickFalse);
  indexes=GetVirtualIndexesFromNexus(cache_info,
    cache_info->nexus_info[thread_id]);
  SetMagickPixelPacket(image,pixels,indexes,pixel);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t   O n e   V i r t u a l   M e t h o d   P i x e l                   %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetOneVirtualMethodPixel() returns a single pixel at the specified (x,y)
%  location as defined by specified pixel method.  The image background color
%  is returned if an error occurs.  If you plan to modify the pixel, use
%  GetOneAuthenticPixel() instead.
%
%  The format of the GetOneVirtualMethodPixel() method is:
%
%      MagickBooleanType GetOneVirtualMethodPixel(const Image *image,
%        const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
%        const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o virtual_pixel_method: the virtual pixel method.
%
%    o x,y:  These values define the location of the pixel to return.
%
%    o pixel: return a pixel at the specified (x,y) location.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetOneVirtualMethodPixel(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  PixelPacket *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  GetOneVirtualPixelFromHandler
    handler;

  const int
    thread_id = GetOpenMPThreadId();

  const PixelPacket
    *magick_restrict pixels;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* Defined result even on failure: start from the background color. */
  *pixel=image->background_color;
  handler=cache_info->methods.get_one_virtual_pixel_from_handler;
  if (handler != (GetOneVirtualPixelFromHandler) NULL)
    return(handler(image,virtual_pixel_method,x,y,pixel,exception));
  assert(thread_id < (int) cache_info->number_threads);
  pixels=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,1UL,1UL,
    cache_info->nexus_info[thread_id],exception);
  if (pixels == (const PixelPacket *) NULL)
    return(MagickFalse);
  *pixel=(*pixels);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t   O n e   V i r t u a l   P i x e l                                 %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetOneVirtualPixel() returns a single virtual pixel at the specified
%  (x,y) location.  The image background color is returned if an error occurs.
%  If you plan to modify the pixel, use GetOneAuthenticPixel() instead.
%
%  The format of the GetOneVirtualPixel() method is:
%
%      MagickBooleanType GetOneVirtualPixel(const Image *image,const ssize_t x,
%        const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y:  These values define the location of the pixel to return.
%
%    o pixel: return a pixel at the specified (x,y) location.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetOneVirtualPixel(const Image *image,
  const ssize_t x,const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  GetOneVirtualPixelFromHandler
    handler;

  const int
    thread_id = GetOpenMPThreadId();

  const PixelPacket
    *magick_restrict pixels;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* Defined result even on failure: start from the background color. */
  *pixel=image->background_color;
  handler=cache_info->methods.get_one_virtual_pixel_from_handler;
  if (handler != (GetOneVirtualPixelFromHandler) NULL)
    return(handler(image,GetPixelCacheVirtualMethod(image),x,y,pixel,
      exception));
  assert(thread_id < (int) cache_info->number_threads);
  pixels=GetVirtualPixelCacheNexus(image,GetPixelCacheVirtualMethod(image),x,y,
    1UL,1UL,cache_info->nexus_info[thread_id],exception);
  if (pixels == (const PixelPacket *) NULL)
    return(MagickFalse);
  *pixel=(*pixels);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   G e t   O n e   V i r t u a l   P i x e l   F r o m   C a c h e           %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetOneVirtualPixelFromCache() returns a single virtual pixel at the
%  specified (x,y) location.  The image background color is returned if an
%  error occurs.
%
%  The format of the GetOneVirtualPixelFromCache() method is:
%
%      MagickBooleanType GetOneVirtualPixelFromCache(const Image *image,
%        const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
%        const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o virtual_pixel_method: the virtual pixel method.
%
%    o x,y:  These values define the location of the pixel to return.
%
%    o pixel: return a pixel at the specified (x,y) location.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType GetOneVirtualPixelFromCache(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  PixelPacket *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    thread_id = GetOpenMPThreadId();

  const PixelPacket
    *magick_restrict pixels;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(thread_id < (int) cache_info->number_threads);
  /* Fall back to the background color when the 1x1 virtual fetch fails. */
  *pixel=image->background_color;
  pixels=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,1UL,1UL,
    cache_info->nexus_info[thread_id],exception);
  if (pixels == (const PixelPacket *) NULL)
    return(MagickFalse);
  *pixel=(*pixels);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   G e t   P i x e l   C a c h e   C h a n n e l s                           %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelCacheChannels() returns the number of pixel channels associated
%  with this instance of the pixel cache.
%
%  The format of the GetPixelCacheChannels() method is:
%
%      size_t GetPixelCacheChannels(Cache cache)
%
%  A description of each parameter follows:
%
%    o type: GetPixelCacheChannels returns DirectClass or PseudoClass.
%
%    o cache: the pixel cache.
%
*/
MagickExport size_t GetPixelCacheChannels(const Cache cache)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  return(cache_info->channels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   G e t   P i x e l   C a c h e   C o l o r s p a c e                       %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelCacheColorspace() returns the class type of the pixel cache.
%
%  The format of the GetPixelCacheColorspace() method is:
%
%      Colorspace GetPixelCacheColorspace(Cache cache)
%
%  A description of each parameter follows:
%
%    o cache: the pixel cache.
%
*/
MagickExport ColorspaceType GetPixelCacheColorspace(const Cache cache)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  return(cache_info->colorspace);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   G e t   P i x e l   C a c h e   F i l e n a m e                           %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelCacheFilename() returns the filename associated with the pixel
%  cache.
%
%  The format of the GetPixelCacheFilename() method is:
%
%      const char *GetPixelCacheFilename(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
% */ MagickExport const char *GetPixelCacheFilename(const Image *image) { CacheInfo *magick_restrict cache_info; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); return(cache_info->cache_filename); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e M e t h o d s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCacheMethods() initializes the CacheMethods structure. % % The format of the GetPixelCacheMethods() method is: % % void GetPixelCacheMethods(CacheMethods *cache_methods) % % A description of each parameter follows: % % o cache_methods: Specifies a pointer to a CacheMethods structure. % */ MagickExport void GetPixelCacheMethods(CacheMethods *cache_methods) { assert(cache_methods != (CacheMethods *) NULL); (void) memset(cache_methods,0,sizeof(*cache_methods)); cache_methods->get_virtual_pixel_handler=GetVirtualPixelCache; cache_methods->get_virtual_pixels_handler=GetVirtualPixelsCache; cache_methods->get_virtual_indexes_from_handler=GetVirtualIndexesFromCache; cache_methods->get_one_virtual_pixel_from_handler=GetOneVirtualPixelFromCache; cache_methods->get_authentic_pixels_handler=GetAuthenticPixelsCache; cache_methods->get_authentic_indexes_from_handler= GetAuthenticIndexesFromCache; cache_methods->get_authentic_pixels_from_handler=GetAuthenticPixelsFromCache; cache_methods->get_one_authentic_pixel_from_handler= GetOneAuthenticPixelFromCache; cache_methods->queue_authentic_pixels_handler=QueueAuthenticPixelsCache; cache_methods->sync_authentic_pixels_handler=SyncAuthenticPixelsCache; cache_methods->destroy_pixel_handler=DestroyImagePixelCache; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h 
e N e x u s E x t e n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCacheNexusExtent() returns the extent of the pixels associated with % the last call to SetPixelCacheNexusPixels() or GetPixelCacheNexusPixels(). % % The format of the GetPixelCacheNexusExtent() method is: % % MagickSizeType GetPixelCacheNexusExtent(const Cache cache, % NexusInfo *nexus_info) % % A description of each parameter follows: % % o nexus_info: the nexus info. % */ MagickExport MagickSizeType GetPixelCacheNexusExtent(const Cache cache, NexusInfo *nexus_info) { CacheInfo *magick_restrict cache_info; MagickSizeType extent; assert(cache != NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); extent=(MagickSizeType) nexus_info->region.width*nexus_info->region.height; if (extent == 0) return((MagickSizeType) cache_info->columns*cache_info->rows); return(extent); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCachePixels() returns the pixels associated with the specified image. % % The format of the GetPixelCachePixels() method is: % % void *GetPixelCachePixels(Image *image,MagickSizeType *length, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o length: the pixel cache length. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport void *GetPixelCachePixels(Image *image,MagickSizeType *length, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); assert(length != (MagickSizeType *) NULL); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); (void) exception; *length=cache_info->length; if ((cache_info->type != MemoryCache) && (cache_info->type != MapCache)) return((void *) NULL); return((void *) cache_info->pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e S t o r a g e C l a s s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCacheStorageClass() returns the class type of the pixel cache. % % The format of the GetPixelCacheStorageClass() method is: % % ClassType GetPixelCacheStorageClass(Cache cache) % % A description of each parameter follows: % % o type: GetPixelCacheStorageClass returns DirectClass or PseudoClass. % % o cache: the pixel cache. % */ MagickExport ClassType GetPixelCacheStorageClass(const Cache cache) { CacheInfo *magick_restrict cache_info; assert(cache != (Cache) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", cache_info->filename); return(cache_info->storage_class); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e T i l e S i z e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCacheTileSize() returns the pixel cache tile size. 
%
%  The format of the GetPixelCacheTileSize() method is:
%
%      void GetPixelCacheTileSize(const Image *image,size_t *width,
%        size_t *height)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o width: the optimize cache tile width in pixels.
%
%    o height: the optimize cache tile height in pixels.
%
*/
MagickExport void GetPixelCacheTileSize(const Image *image,size_t *width,
  size_t *height)
{
  size_t
    tile_bytes;

  /*
    Square tiles: a larger byte budget is used for disk-backed caches to
    amortize I/O; both dimensions are derived from the same byte count.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  tile_bytes=2048UL;
  if (GetImagePixelCacheType(image) == DiskCache)
    tile_bytes=8192UL;
  *width=tile_bytes/sizeof(PixelPacket);
  *height=(*width);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t   P i x e l   C a c h e   V i r t u a l   M e t h o d               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelCacheVirtualMethod() gets the "virtual pixels" method for the
%  pixel cache.  A virtual pixel is any pixel access that is outside the
%  boundaries of the image cache.
%
%  The format of the GetPixelCacheVirtualMethod() method is:
%
%      VirtualPixelMethod GetPixelCacheVirtualMethod(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
% */ MagickExport VirtualPixelMethod GetPixelCacheVirtualMethod(const Image *image) { CacheInfo *magick_restrict cache_info; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); return(cache_info->virtual_pixel_method); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t V i r t u a l I n d e x e s F r o m C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualIndexesFromCache() returns the indexes associated with the last % call to QueueAuthenticPixelsCache() or GetVirtualPixelCache(). % % The format of the GetVirtualIndexesFromCache() method is: % % IndexPacket *GetVirtualIndexesFromCache(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ static const IndexPacket *GetVirtualIndexesFromCache(const Image *image) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); return(GetVirtualIndexesFromNexus(cache_info,cache_info->nexus_info[id])); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t V i r t u a l I n d e x e s F r o m N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualIndexesFromNexus() returns the indexes associated with the % specified cache nexus. 
%
%  The format of the GetVirtualIndexesFromNexus() method is:
%
%      const IndexPacket *GetVirtualIndexesFromNexus(const Cache cache,
%        NexusInfo *nexus_info)
%
%  A description of each parameter follows:
%
%    o cache: the pixel cache.
%
%    o nexus_info: the cache nexus to return the colormap indexes.
%
*/
MagickExport const IndexPacket *GetVirtualIndexesFromNexus(const Cache cache,
  NexusInfo *nexus_info)
{
  CacheInfo
    *magick_restrict info;

  /*
    Hand back the nexus' colormap indexes; an uninitialized cache has none.
  */
  assert(cache != (Cache) NULL);
  info=(CacheInfo *) cache;
  assert(info->signature == MagickCoreSignature);
  if (info->storage_class != UndefinedClass)
    return(nexus_info->indexes);
  return((IndexPacket *) NULL);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t   V i r t u a l   I n d e x   Q u e u e                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualIndexQueue() returns the virtual black channel or the
%  colormap indexes associated with the last call to QueueAuthenticPixels() or
%  GetVirtualPixels().  NULL is returned if the black channel or colormap
%  indexes are not available.
%
%  The format of the GetVirtualIndexQueue() method is:
%
%      const IndexPacket *GetVirtualIndexQueue(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport const IndexPacket *GetVirtualIndexQueue(const Image *image)
{
  CacheInfo
    *magick_restrict info;

  const int
    tid = GetOpenMPThreadId();  /* selects this thread's nexus slot */

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  /*
    Prefer an installed handler (e.g. a remote/foreign cache); otherwise fall
    back to this thread's nexus.
  */
  if (info->methods.get_virtual_indexes_from_handler !=
      (GetVirtualIndexesFromHandler) NULL)
    return(info->methods.get_virtual_indexes_from_handler(image));
  assert(tid < (int) info->number_threads);
  return(GetVirtualIndexesFromNexus(info,info->nexus_info[tid]));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t   V i r t u a l   P i x e l   C a c h e   N e x u s                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualPixelCacheNexus() gets virtual pixels from the in-memory or disk
%  pixel cache as defined by the geometry parameters.   A pointer to the pixels
%  is returned if the pixels are transferred, otherwise a NULL is returned.
%
%  The format of the GetVirtualPixelCacheNexus() method is:
%
%      PixelPacket *GetVirtualPixelCacheNexus(const Image *image,
%        const VirtualPixelMethod method,const ssize_t x,const ssize_t y,
%        const size_t columns,const size_t rows,NexusInfo *nexus_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o virtual_pixel_method: the virtual pixel method.
%
%    o x,y,columns,rows: These values define the perimeter of a region of
%      pixels.
%
%    o nexus_info: the cache nexus to acquire.
%
%    o exception: return any errors or warnings in this structure.
% */ static ssize_t DitherMatrix[64] = { 0, 48, 12, 60, 3, 51, 15, 63, 32, 16, 44, 28, 35, 19, 47, 31, 8, 56, 4, 52, 11, 59, 7, 55, 40, 24, 36, 20, 43, 27, 39, 23, 2, 50, 14, 62, 1, 49, 13, 61, 34, 18, 46, 30, 33, 17, 45, 29, 10, 58, 6, 54, 9, 57, 5, 53, 42, 26, 38, 22, 41, 25, 37, 21 }; static inline ssize_t DitherX(const ssize_t x,const size_t columns) { ssize_t index; index=x+DitherMatrix[x & 0x07]-32L; if (index < 0L) return(0L); if (index >= (ssize_t) columns) return((ssize_t) columns-1L); return(index); } static inline ssize_t DitherY(const ssize_t y,const size_t rows) { ssize_t index; index=y+DitherMatrix[y & 0x07]-32L; if (index < 0L) return(0L); if (index >= (ssize_t) rows) return((ssize_t) rows-1L); return(index); } static inline ssize_t EdgeX(const ssize_t x,const size_t columns) { if (x < 0L) return(0L); if (x >= (ssize_t) columns) return((ssize_t) (columns-1)); return(x); } static inline ssize_t EdgeY(const ssize_t y,const size_t rows) { if (y < 0L) return(0L); if (y >= (ssize_t) rows) return((ssize_t) (rows-1)); return(y); } static inline ssize_t RandomX(RandomInfo *random_info,const size_t columns) { return((ssize_t) (columns*GetPseudoRandomValue(random_info))); } static inline ssize_t RandomY(RandomInfo *random_info,const size_t rows) { return((ssize_t) (rows*GetPseudoRandomValue(random_info))); } static inline MagickModulo VirtualPixelModulo(const ssize_t offset, const size_t extent) { MagickModulo modulo; /* Compute the remainder of dividing offset by extent. It returns not only the quotient (tile the offset falls in) but also the positive remainer within that tile such that 0 <= remainder < extent. This method is essentially a ldiv() using a floored modulo division rather than the normal default truncated modulo division. 
*/ modulo.quotient=offset/(ssize_t) extent; if ((offset < 0L) && (modulo.quotient != INT64_MIN)) modulo.quotient--; modulo.remainder=(ssize_t) (offset-(double) modulo.quotient*extent); return(modulo); } MagickExport const PixelPacket *GetVirtualPixelCacheNexus(const Image *image, const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y, const size_t columns,const size_t rows,NexusInfo *nexus_info, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; IndexPacket virtual_index; MagickOffsetType offset; MagickSizeType length, number_pixels; NexusInfo **magick_restrict virtual_nexus; PixelPacket *magick_restrict pixels, virtual_pixel; RectangleInfo region; register const IndexPacket *magick_restrict virtual_indexes; register const PixelPacket *magick_restrict p; register IndexPacket *magick_restrict indexes; register PixelPacket *magick_restrict q; register ssize_t u, v; /* Acquire pixels. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->type == UndefinedCache) return((const PixelPacket *) NULL); #if defined(MAGICKCORE_OPENCL_SUPPORT) CopyOpenCLBuffer(cache_info); #endif region.x=x; region.y=y; region.width=columns; region.height=rows; pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,&region, (image->clip_mask != (Image *) NULL) || (image->mask != (Image *) NULL) ? 
MagickTrue : MagickFalse,nexus_info,exception); if (pixels == (PixelPacket *) NULL) return((const PixelPacket *) NULL); offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+ nexus_info->region.x; length=(MagickSizeType) (nexus_info->region.height-1L)*cache_info->columns+ nexus_info->region.width-1L; number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows; if ((offset >= 0) && (((MagickSizeType) offset+length) < number_pixels)) if ((x >= 0) && ((ssize_t) (x+columns) <= (ssize_t) cache_info->columns) && (y >= 0) && ((ssize_t) (y+rows) <= (ssize_t) cache_info->rows)) { MagickBooleanType status; /* Pixel request is inside cache extents. */ if (nexus_info->authentic_pixel_cache != MagickFalse) return(pixels); status=ReadPixelCachePixels(cache_info,nexus_info,exception); if (status == MagickFalse) return((const PixelPacket *) NULL); if ((cache_info->storage_class == PseudoClass) || (cache_info->colorspace == CMYKColorspace)) { status=ReadPixelCacheIndexes(cache_info,nexus_info,exception); if (status == MagickFalse) return((const PixelPacket *) NULL); } return(pixels); } /* Pixel request is outside cache extents. 
*/ q=pixels; indexes=nexus_info->indexes; virtual_nexus=AcquirePixelCacheNexus(1); switch (virtual_pixel_method) { case BlackVirtualPixelMethod: { SetPixelRed(&virtual_pixel,0); SetPixelGreen(&virtual_pixel,0); SetPixelBlue(&virtual_pixel,0); SetPixelOpacity(&virtual_pixel,OpaqueOpacity); break; } case GrayVirtualPixelMethod: { SetPixelRed(&virtual_pixel,QuantumRange/2); SetPixelGreen(&virtual_pixel,QuantumRange/2); SetPixelBlue(&virtual_pixel,QuantumRange/2); SetPixelOpacity(&virtual_pixel,OpaqueOpacity); break; } case TransparentVirtualPixelMethod: { SetPixelRed(&virtual_pixel,0); SetPixelGreen(&virtual_pixel,0); SetPixelBlue(&virtual_pixel,0); SetPixelOpacity(&virtual_pixel,TransparentOpacity); break; } case MaskVirtualPixelMethod: case WhiteVirtualPixelMethod: { SetPixelRed(&virtual_pixel,QuantumRange); SetPixelGreen(&virtual_pixel,QuantumRange); SetPixelBlue(&virtual_pixel,QuantumRange); SetPixelOpacity(&virtual_pixel,OpaqueOpacity); break; } default: { virtual_pixel=image->background_color; break; } } virtual_index=0; for (v=0; v < (ssize_t) rows; v++) { ssize_t y_offset; y_offset=y+v; if ((virtual_pixel_method == EdgeVirtualPixelMethod) || (virtual_pixel_method == UndefinedVirtualPixelMethod)) y_offset=EdgeY(y_offset,cache_info->rows); for (u=0; u < (ssize_t) columns; u+=length) { ssize_t x_offset; x_offset=x+u; length=(MagickSizeType) MagickMin(cache_info->columns-x_offset,columns-u); if (((x_offset < 0) || (x_offset >= (ssize_t) cache_info->columns)) || ((y_offset < 0) || (y_offset >= (ssize_t) cache_info->rows)) || (length == 0)) { MagickModulo x_modulo, y_modulo; /* Transfer a single pixel. 
*/ length=(MagickSizeType) 1; switch (virtual_pixel_method) { case BackgroundVirtualPixelMethod: case ConstantVirtualPixelMethod: case BlackVirtualPixelMethod: case GrayVirtualPixelMethod: case TransparentVirtualPixelMethod: case MaskVirtualPixelMethod: case WhiteVirtualPixelMethod: { p=(&virtual_pixel); virtual_indexes=(&virtual_index); break; } case EdgeVirtualPixelMethod: default: { p=GetVirtualPixelCacheNexus(image,virtual_pixel_method, EdgeX(x_offset,cache_info->columns), EdgeY(y_offset,cache_info->rows),1UL,1UL,*virtual_nexus, exception); virtual_indexes=GetVirtualIndexesFromNexus(cache_info, *virtual_nexus); break; } case RandomVirtualPixelMethod: { if (cache_info->random_info == (RandomInfo *) NULL) cache_info->random_info=AcquireRandomInfo(); p=GetVirtualPixelCacheNexus(image,virtual_pixel_method, RandomX(cache_info->random_info,cache_info->columns), RandomY(cache_info->random_info,cache_info->rows),1UL,1UL, *virtual_nexus,exception); virtual_indexes=GetVirtualIndexesFromNexus(cache_info, *virtual_nexus); break; } case DitherVirtualPixelMethod: { p=GetVirtualPixelCacheNexus(image,virtual_pixel_method, DitherX(x_offset,cache_info->columns), DitherY(y_offset,cache_info->rows),1UL,1UL,*virtual_nexus, exception); virtual_indexes=GetVirtualIndexesFromNexus(cache_info, *virtual_nexus); break; } case TileVirtualPixelMethod: { x_modulo=VirtualPixelModulo(x_offset,cache_info->columns); y_modulo=VirtualPixelModulo(y_offset,cache_info->rows); p=GetVirtualPixelCacheNexus(image,virtual_pixel_method, x_modulo.remainder,y_modulo.remainder,1UL,1UL,*virtual_nexus, exception); virtual_indexes=GetVirtualIndexesFromNexus(cache_info, *virtual_nexus); break; } case MirrorVirtualPixelMethod: { x_modulo=VirtualPixelModulo(x_offset,cache_info->columns); if ((x_modulo.quotient & 0x01) == 1L) x_modulo.remainder=(ssize_t) cache_info->columns- x_modulo.remainder-1L; y_modulo=VirtualPixelModulo(y_offset,cache_info->rows); if ((y_modulo.quotient & 0x01) == 1L) 
y_modulo.remainder=(ssize_t) cache_info->rows- y_modulo.remainder-1L; p=GetVirtualPixelCacheNexus(image,virtual_pixel_method, x_modulo.remainder,y_modulo.remainder,1UL,1UL,*virtual_nexus, exception); virtual_indexes=GetVirtualIndexesFromNexus(cache_info, *virtual_nexus); break; } case CheckerTileVirtualPixelMethod: { x_modulo=VirtualPixelModulo(x_offset,cache_info->columns); y_modulo=VirtualPixelModulo(y_offset,cache_info->rows); if (((x_modulo.quotient ^ y_modulo.quotient) & 0x01) != 0L) { p=(&virtual_pixel); virtual_indexes=(&virtual_index); break; } p=GetVirtualPixelCacheNexus(image,virtual_pixel_method, x_modulo.remainder,y_modulo.remainder,1UL,1UL,*virtual_nexus, exception); virtual_indexes=GetVirtualIndexesFromNexus(cache_info, *virtual_nexus); break; } case HorizontalTileVirtualPixelMethod: { if ((y_offset < 0) || (y_offset >= (ssize_t) cache_info->rows)) { p=(&virtual_pixel); virtual_indexes=(&virtual_index); break; } x_modulo=VirtualPixelModulo(x_offset,cache_info->columns); y_modulo=VirtualPixelModulo(y_offset,cache_info->rows); p=GetVirtualPixelCacheNexus(image,virtual_pixel_method, x_modulo.remainder,y_modulo.remainder,1UL,1UL,*virtual_nexus, exception); virtual_indexes=GetVirtualIndexesFromNexus(cache_info, *virtual_nexus); break; } case VerticalTileVirtualPixelMethod: { if ((x_offset < 0) || (x_offset >= (ssize_t) cache_info->columns)) { p=(&virtual_pixel); virtual_indexes=(&virtual_index); break; } x_modulo=VirtualPixelModulo(x_offset,cache_info->columns); y_modulo=VirtualPixelModulo(y_offset,cache_info->rows); p=GetVirtualPixelCacheNexus(image,virtual_pixel_method, x_modulo.remainder,y_modulo.remainder,1UL,1UL,*virtual_nexus, exception); virtual_indexes=GetVirtualIndexesFromNexus(cache_info, *virtual_nexus); break; } case HorizontalTileEdgeVirtualPixelMethod: { x_modulo=VirtualPixelModulo(x_offset,cache_info->columns); p=GetVirtualPixelCacheNexus(image,virtual_pixel_method, x_modulo.remainder,EdgeY(y_offset,cache_info->rows),1UL,1UL, 
*virtual_nexus,exception); virtual_indexes=GetVirtualIndexesFromNexus(cache_info, *virtual_nexus); break; } case VerticalTileEdgeVirtualPixelMethod: { y_modulo=VirtualPixelModulo(y_offset,cache_info->rows); p=GetVirtualPixelCacheNexus(image,virtual_pixel_method, EdgeX(x_offset,cache_info->columns),y_modulo.remainder,1UL,1UL, *virtual_nexus,exception); virtual_indexes=GetVirtualIndexesFromNexus(cache_info, *virtual_nexus); break; } } if (p == (const PixelPacket *) NULL) break; *q++=(*p); if ((indexes != (IndexPacket *) NULL) && (virtual_indexes != (const IndexPacket *) NULL)) *indexes++=(*virtual_indexes); continue; } /* Transfer a run of pixels. */ p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x_offset,y_offset, (size_t) length,1UL,*virtual_nexus,exception); if (p == (const PixelPacket *) NULL) break; virtual_indexes=GetVirtualIndexesFromNexus(cache_info,*virtual_nexus); (void) memcpy(q,p,(size_t) length*sizeof(*p)); q+=length; if ((indexes != (IndexPacket *) NULL) && (virtual_indexes != (const IndexPacket *) NULL)) { (void) memcpy(indexes,virtual_indexes,(size_t) length* sizeof(*virtual_indexes)); indexes+=length; } } if (u < (ssize_t) columns) break; } /* Free resources. */ virtual_nexus=DestroyPixelCacheNexus(virtual_nexus,1); if (v < (ssize_t) rows) return((const PixelPacket *) NULL); return(pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t V i r t u a l P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualPixelCache() get virtual pixels from the in-memory or disk pixel % cache as defined by the geometry parameters. A pointer to the pixels % is returned if the pixels are transferred, otherwise a NULL is returned. 
%
%  The format of the GetVirtualPixelCache() method is:
%
%      const PixelPacket *GetVirtualPixelCache(const Image *image,
%        const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o virtual_pixel_method: the virtual pixel method.
%
%    o x,y,columns,rows: These values define the perimeter of a region of
%      pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static const PixelPacket *GetVirtualPixelCache(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  const size_t columns,const size_t rows,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();  /* selects this thread's nexus slot */

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  /* delegate to the per-thread cache nexus */
  return(GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,columns,rows,
    cache_info->nexus_info[id],exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t   V i r t u a l   P i x e l   Q u e u e                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualPixelQueue() returns the virtual pixels associated with the
%  last call to QueueAuthenticPixels() or GetVirtualPixels().
%
%  The format of the GetVirtualPixelQueue() method is:
%
%      const PixelPacket *GetVirtualPixelQueue(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
% */ MagickExport const PixelPacket *GetVirtualPixelQueue(const Image *image) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->methods.get_virtual_pixels_handler != (GetVirtualPixelsHandler) NULL) return(cache_info->methods.get_virtual_pixels_handler(image)); assert(id < (int) cache_info->number_threads); return(GetVirtualPixelsNexus(cache_info,cache_info->nexus_info[id])); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t V i r t u a l P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualPixels() returns an immutable pixel region. If the % region is successfully accessed, a pointer to it is returned, otherwise % NULL is returned. The returned pointer may point to a temporary working % copy of the pixels or it may point to the original pixels in memory. % Performance is maximized if the selected region is part of one row, or one % or more full rows, since there is opportunity to access the pixels in-place % (without a copy) if the image is in memory, or in a memory-mapped file. The % returned pointer must *never* be deallocated by the user. % % Pixels accessed via the returned pointer represent a simple array of type % PixelPacket. If the image type is CMYK or the storage class is PseudoClass, % call GetAuthenticIndexQueue() after invoking GetAuthenticPixels() to access % the black color component or to obtain the colormap indexes (of type % IndexPacket) corresponding to the region. % % If you plan to modify the pixels, use GetAuthenticPixels() instead. % % Note, the GetVirtualPixels() and GetAuthenticPixels() methods are not thread- % safe. 
In a threaded environment, use GetCacheViewVirtualPixels() or % GetCacheViewAuthenticPixels() instead. % % The format of the GetVirtualPixels() method is: % % const PixelPacket *GetVirtualPixels(const Image *image,const ssize_t x, % const ssize_t y,const size_t columns,const size_t rows, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o x,y,columns,rows: These values define the perimeter of a region of % pixels. % % o exception: return any errors or warnings in this structure. % */ MagickExport const PixelPacket *GetVirtualPixels(const Image *image, const ssize_t x,const ssize_t y,const size_t columns,const size_t rows, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->methods.get_virtual_pixel_handler != (GetVirtualPixelHandler) NULL) return(cache_info->methods.get_virtual_pixel_handler(image, GetPixelCacheVirtualMethod(image),x,y,columns,rows,exception)); assert(id < (int) cache_info->number_threads); return(GetVirtualPixelCacheNexus(image,GetPixelCacheVirtualMethod(image),x,y, columns,rows,cache_info->nexus_info[id],exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t V i r t u a l P i x e l s F r o m C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualPixelsCache() returns the pixels associated with the last call % to QueueAuthenticPixelsCache() or GetVirtualPixelCache(). % % The format of the GetVirtualPixelsCache() method is: % % PixelPacket *GetVirtualPixelsCache(const Image *image) % % A description of each parameter follows: % % o image: the image. 
% */ static const PixelPacket *GetVirtualPixelsCache(const Image *image) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); return(GetVirtualPixelsNexus(image->cache,cache_info->nexus_info[id])); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t V i r t u a l P i x e l s N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualPixelsNexus() returns the pixels associated with the specified % cache nexus. % % The format of the GetVirtualPixelsNexus() method is: % % const IndexPacket *GetVirtualPixelsNexus(const Cache cache, % NexusInfo *nexus_info) % % A description of each parameter follows: % % o cache: the pixel cache. % % o nexus_info: the cache nexus to return the colormap pixels. % */ MagickExport const PixelPacket *GetVirtualPixelsNexus(const Cache cache, NexusInfo *nexus_info) { CacheInfo *magick_restrict cache_info; assert(cache != (Cache) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->storage_class == UndefinedClass) return((PixelPacket *) NULL); return((const PixelPacket *) nexus_info->pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + M a s k P i x e l C a c h e N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MaskPixelCacheNexus() masks the cache nexus as defined by the image mask. % The method returns MagickTrue if the pixel region is masked, otherwise % MagickFalse. 
%
%  The format of the MaskPixelCacheNexus() method is:
%
%      MagickBooleanType MaskPixelCacheNexus(Image *image,
%        NexusInfo *nexus_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o nexus_info: the cache nexus to clip.
%
%    o exception: return any errors or warnings in this structure.
%
*/
/*
  Composite p over q with the given opacities; the blend is written to
  *composite.  The caller derives alpha from the mask pixel's intensity.
*/
static inline void ApplyPixelCompositeMask(const MagickPixelPacket *p,
  const MagickRealType alpha,const MagickPixelPacket *q,
  const MagickRealType beta,MagickPixelPacket *composite)
{
  double
    gamma;

  /* fully transparent source: the result is simply q */
  if (fabs(alpha-TransparentOpacity) < MagickEpsilon)
    {
      *composite=(*q);
      return;
    }
  gamma=1.0-QuantumScale*QuantumScale*alpha*beta;
  gamma=PerceptibleReciprocal(gamma);  /* guards against division by ~0 */
  composite->red=gamma*MagickOver_(p->red,alpha,q->red,beta);
  composite->green=gamma*MagickOver_(p->green,alpha,q->green,beta);
  composite->blue=gamma*MagickOver_(p->blue,alpha,q->blue,beta);
  /* the index (black) channel participates only when both are CMYK */
  if ((p->colorspace == CMYKColorspace) && (q->colorspace == CMYKColorspace))
    composite->index=gamma*MagickOver_(p->index,alpha,q->index,beta);
}

static MagickBooleanType MaskPixelCacheNexus(Image *image,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickPixelPacket
    alpha,
    beta;

  MagickSizeType
    number_pixels;

  NexusInfo
    **magick_restrict image_nexus,
    **magick_restrict mask_nexus;

  register const PixelPacket
    *magick_restrict r;

  register IndexPacket
    *magick_restrict nexus_indexes,
    *magick_restrict indexes;

  register PixelPacket
    *magick_restrict p,
    *magick_restrict q;

  register ssize_t
    i;

  /*
    Apply clip mask.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* nothing to do without a mask; PseudoClass images are not masked here */
  if ((image->mask == (Image *) NULL) || (image->storage_class == PseudoClass))
    return(MagickTrue);
  cache_info=(CacheInfo *) image->cache;
  if (cache_info == (Cache) NULL)
    return(MagickFalse);
  image_nexus=AcquirePixelCacheNexus(1);
  mask_nexus=AcquirePixelCacheNexus(1);
  /* p: the image's current pixels over the nexus region */
  p=GetAuthenticPixelCacheNexus(image,nexus_info->region.x,
    nexus_info->region.y,nexus_info->region.width,nexus_info->region.height,
    image_nexus[0],exception);
  indexes=image_nexus[0]->indexes;
  /* q: the nexus pixels being masked, updated in place */
  q=nexus_info->pixels;
  nexus_indexes=nexus_info->indexes;
  /* r: the mask pixels over the same region
     NOTE(review): errors here go to image->exception rather than the
     caller's exception parameter — confirm this is intentional. */
  r=GetVirtualPixelCacheNexus(image->mask,MaskVirtualPixelMethod,
    nexus_info->region.x,nexus_info->region.y,nexus_info->region.width,
    nexus_info->region.height,mask_nexus[0],&image->exception);
  GetMagickPixelPacket(image,&alpha);
  GetMagickPixelPacket(image,&beta);
  number_pixels=(MagickSizeType) nexus_info->region.width*
    nexus_info->region.height;
  for (i=0; i < (ssize_t) number_pixels; i++)
  {
    /* bail out of the loop (and report failure below) if either read failed */
    if ((p == (PixelPacket *) NULL) || (r == (const PixelPacket *) NULL))
      break;
    SetMagickPixelPacket(image,p,indexes+i,&alpha);
    SetMagickPixelPacket(image,q,nexus_indexes+i,&beta);
    /* blend the nexus pixel (beta) with the authentic pixel (alpha),
       weighting by the mask pixel's intensity; result lands in beta */
    ApplyPixelCompositeMask(&beta,GetPixelIntensity(image,r),&alpha,
      alpha.opacity,&beta);
    SetPixelRed(q,ClampToQuantum(beta.red));
    SetPixelGreen(q,ClampToQuantum(beta.green));
    SetPixelBlue(q,ClampToQuantum(beta.blue));
    SetPixelOpacity(q,ClampToQuantum(beta.opacity));
    if (cache_info->active_index_channel != MagickFalse)
      SetPixelIndex(nexus_indexes+i,GetPixelIndex(indexes+i));
    p++;
    q++;
    r++;
  }
  mask_nexus=DestroyPixelCacheNexus(mask_nexus,1);
  image_nexus=DestroyPixelCacheNexus(image_nexus,1);
  if (i < (ssize_t) number_pixels)
    return(MagickFalse);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   O p e n   P i x e l   C a c h e                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
OpenPixelCache() allocates the pixel cache.  This includes defining the cache
% dimensions, allocating space for the image pixels and optionally the
% colormap indexes, and memory mapping the cache if it is disk based.  The
% cache nexus array is initialized as well.
%
% The format of the OpenPixelCache() method is:
%
%      MagickBooleanType OpenPixelCache(Image *image,const MapMode mode,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o mode: ReadMode, WriteMode, or IOMode.
%
%    o exception: return any errors or warnings in this structure.
%
*/

static MagickBooleanType OpenPixelCacheOnDisk(CacheInfo *cache_info,
  const MapMode mode)
{
  int
    file;

  /*
    Open pixel cache on disk.
  */
  if ((cache_info->file != -1) && (cache_info->disk_mode == mode))
    return(MagickTrue);  /* cache already open and in the proper mode */
  /* An empty filename means no backing file exists yet: create one. */
  if (*cache_info->cache_filename == '\0')
    file=AcquireUniqueFileResource(cache_info->cache_filename);
  else
    switch (mode)
    {
      case ReadMode:
      {
        file=open_utf8(cache_info->cache_filename,O_RDONLY | O_BINARY,0);
        break;
      }
      case WriteMode:
      {
        /* Try exclusive create first; fall back to opening an existing file. */
        file=open_utf8(cache_info->cache_filename,O_WRONLY | O_CREAT |
          O_BINARY | O_EXCL,S_MODE);
        if (file == -1)
          file=open_utf8(cache_info->cache_filename,O_WRONLY | O_BINARY,S_MODE);
        break;
      }
      case IOMode:
      default:
      {
        file=open_utf8(cache_info->cache_filename,O_RDWR | O_CREAT | O_BINARY |
          O_EXCL,S_MODE);
        if (file == -1)
          file=open_utf8(cache_info->cache_filename,O_RDWR | O_BINARY,S_MODE);
        break;
      }
    }
  if (file == -1)
    return(MagickFalse);
  (void) AcquireMagickResource(FileResource,1);
  /* Close any previously open descriptor before adopting the new one. */
  if (cache_info->file != -1)
    (void) ClosePixelCacheOnDisk(cache_info);
  cache_info->file=file;
  cache_info->disk_mode=mode;
  return(MagickTrue);
}

static inline MagickOffsetType WritePixelCacheRegion(
  const CacheInfo *magick_restrict cache_info,const MagickOffsetType offset,
  const MagickSizeType length,const unsigned char *magick_restrict buffer)
{
  register MagickOffsetType
    i;

  ssize_t
    count;

#if !defined(MAGICKCORE_HAVE_PWRITE)
  if (lseek(cache_info->file,offset,SEEK_SET) < 0)
    return((MagickOffsetType) -1);
#endif
  count=0;
  /* Write in chunks, retrying on EINTR; returns the number of bytes written. */
  for (i=0; i < (MagickOffsetType) length; i+=count)
  {
#if !defined(MAGICKCORE_HAVE_PWRITE)
    count=write(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      SSIZE_MAX));
#else
    count=pwrite(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      SSIZE_MAX),(off_t) (offset+i));
#endif
    if (count <= 0)
      {
        count=0;
        if (errno != EINTR)
          break;
      }
  }
  return(i);
}

static MagickBooleanType SetPixelCacheExtent(Image *image,MagickSizeType length)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickOffsetType
    count,
    extent,
    offset;

  /*
    Grow the on-disk pixel cache file to at least `length' bytes.
  */
  cache_info=(CacheInfo *) image->cache;
  if (image->debug != MagickFalse)
    {
      char
        format[MaxTextExtent],
        message[MaxTextExtent];

      (void) FormatMagickSize(length,MagickFalse,format);
      (void) FormatLocaleString(message,MaxTextExtent,
        "extend %s (%s[%d], disk, %s)",cache_info->filename,
        cache_info->cache_filename,cache_info->file,format);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  offset=(MagickOffsetType) lseek(cache_info->file,0,SEEK_END);
  if (offset < 0)
    return(MagickFalse);
  if ((MagickSizeType) offset >= length)
    count=(MagickOffsetType) 1;
  else
    {
      /* Extend by writing one byte at the last requested position. */
      extent=(MagickOffsetType) length-1;
      count=WritePixelCacheRegion(cache_info,extent,1,(const unsigned char *)
        "");
      if (count != 1)
        return(MagickFalse);
#if defined(MAGICKCORE_HAVE_POSIX_FALLOCATE)
      if (cache_info->synchronize != MagickFalse)
        (void) posix_fallocate(cache_info->file,offset+1,extent-offset);
#endif
    }
  /* Rewind so subsequent reads/writes start at the beginning. */
  offset=(MagickOffsetType) lseek(cache_info->file,0,SEEK_SET);
  if (offset < 0)
    return(MagickFalse);
  return(MagickTrue);
}

static MagickBooleanType OpenPixelCache(Image *image,const MapMode mode,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info,
    source_info;

  char
    format[MaxTextExtent],
    message[MaxTextExtent];

  const char
    *hosts,
    *type;

  MagickSizeType
    length,
    number_pixels;

  MagickStatusType
    status;

  size_t
    columns,
    packet_size;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (cache_anonymous_memory < 0)
    {
      char
        *value;

      /*
        Does the security policy require anonymous mapping for pixel cache?
      */
      cache_anonymous_memory=0;
      value=GetPolicyValue("pixel-cache-memory");
      if (value == (char *) NULL)
        value=GetPolicyValue("cache:memory-map");
      if (LocaleCompare(value,"anonymous") == 0)
        {
#if defined(MAGICKCORE_HAVE_MMAP) && defined(MAP_ANONYMOUS)
          cache_anonymous_memory=1;
#else
          (void) ThrowMagickException(exception,GetMagickModule(),
            MissingDelegateError,"DelegateLibrarySupportNotBuiltIn",
            "'%s' (policy requires anonymous memory mapping)",image->filename);
#endif
        }
      value=DestroyString(value);
    }
  if ((image->columns == 0) || (image->rows == 0))
    ThrowBinaryException(CacheError,"NoPixelsDefinedInCache",image->filename);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* Enforce resource policy limits before committing any storage. */
  if ((AcquireMagickResource(WidthResource,image->columns) == MagickFalse) ||
      (AcquireMagickResource(HeightResource,image->rows) == MagickFalse))
    ThrowBinaryException(ImageError,"WidthOrHeightExceedsLimit",
      image->filename);
  length=GetImageListLength(image);
  if (AcquireMagickResource(ListLengthResource,length) == MagickFalse)
    ThrowBinaryException(ResourceLimitError,"ListLengthExceedsLimit",
      image->filename);
  /* Keep a copy of the current cache so its pixels can be cloned/released. */
  source_info=(*cache_info);
  source_info.file=(-1);
  (void) FormatLocaleString(cache_info->filename,MaxTextExtent,"%s[%.20g]",
    image->filename,(double) image->scene);
  cache_info->mode=mode;
  cache_info->rows=image->rows;
  cache_info->columns=image->columns;
  cache_info->channels=image->channels;
  cache_info->active_index_channel=((image->storage_class == PseudoClass) ||
    (image->colorspace == CMYKColorspace)) ? MagickTrue : MagickFalse;
  number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
  packet_size=sizeof(PixelPacket);
  if (cache_info->active_index_channel != MagickFalse)
    packet_size+=sizeof(IndexPacket);
  length=number_pixels*packet_size;
  /* Detect arithmetic overflow by reversing the computation. */
  columns=(size_t) (length/cache_info->rows/packet_size);
  if ((cache_info->columns != columns) || ((ssize_t) cache_info->columns < 0) ||
      ((ssize_t) cache_info->rows < 0))
    ThrowBinaryException(ResourceLimitError,"PixelCacheAllocationFailed",
      image->filename);
  cache_info->length=length;
  if (image->ping != MagickFalse)
    {
      /* Ping mode: record geometry only, allocate no pixels. */
      cache_info->storage_class=image->storage_class;
      cache_info->colorspace=image->colorspace;
      cache_info->type=PingCache;
      return(MagickTrue);
    }
  status=AcquireMagickResource(AreaResource,(MagickSizeType)
    cache_info->columns*cache_info->rows);
  if (cache_info->mode == PersistMode)
    status=MagickFalse;
  length=number_pixels*(sizeof(PixelPacket)+sizeof(IndexPacket));
  if ((status != MagickFalse) &&
      (length == (MagickSizeType) ((size_t) length)) &&
      ((cache_info->type == UndefinedCache) ||
       (cache_info->type == MemoryCache)))
    {
      status=AcquireMagickResource(MemoryResource,cache_info->length);
      if (status != MagickFalse)
        {
          status=MagickTrue;
          if (cache_anonymous_memory <= 0)
            {
              cache_info->mapped=MagickFalse;
              cache_info->pixels=(PixelPacket *) MagickAssumeAligned(
                AcquireAlignedMemory(1,(size_t) cache_info->length));
            }
          else
            {
              /* Policy requested anonymous mmap instead of heap memory. */
              cache_info->mapped=MagickTrue;
              cache_info->pixels=(PixelPacket *) MapBlob(-1,IOMode,0,(size_t)
                cache_info->length);
            }
          if (cache_info->pixels == (PixelPacket *) NULL)
            {
              /* Allocation failed; restore prior pixels and fall through. */
              cache_info->mapped=source_info.mapped;
              cache_info->pixels=source_info.pixels;
            }
          else
            {
              /*
                Create memory pixel cache.
              */
              cache_info->colorspace=image->colorspace;
              cache_info->type=MemoryCache;
              /* Indexes live immediately after the pixels in one block. */
              cache_info->indexes=(IndexPacket *) NULL;
              if (cache_info->active_index_channel != MagickFalse)
                cache_info->indexes=(IndexPacket *) (cache_info->pixels+
                  number_pixels);
              if ((source_info.storage_class != UndefinedClass) &&
                  (mode != ReadMode))
                {
                  status&=ClonePixelCacheRepository(cache_info,&source_info,
                    exception);
                  RelinquishPixelCachePixels(&source_info);
                }
              if (image->debug != MagickFalse)
                {
                  (void) FormatMagickSize(cache_info->length,MagickTrue,
                    format);
                  type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
                    cache_info->type);
                  (void) FormatLocaleString(message,MaxTextExtent,
                    "open %s (%s %s, %.20gx%.20g %s)",cache_info->filename,
                    cache_info->mapped != MagickFalse ? "Anonymous" : "Heap",
                    type,(double) cache_info->columns,(double)
                    cache_info->rows,format);
                  (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
                    message);
                }
              cache_info->storage_class=image->storage_class;
              return(status == 0 ? MagickFalse : MagickTrue);
            }
        }
    }
  status=AcquireMagickResource(DiskResource,cache_info->length);
  hosts=(const char *) GetImageRegistry(StringRegistryType,"cache:hosts",
    exception);
  if ((status == MagickFalse) && (hosts != (const char *) NULL))
    {
      DistributeCacheInfo
        *server_info;

      /*
        Distribute the pixel cache to a remote server.
      */
      server_info=AcquireDistributeCacheInfo(exception);
      if (server_info != (DistributeCacheInfo *) NULL)
        {
          status=OpenDistributePixelCache(server_info,image);
          if (status == MagickFalse)
            {
              ThrowFileException(exception,CacheError,
                "UnableToOpenPixelCache",
                GetDistributeCacheHostname(server_info));
              server_info=DestroyDistributeCacheInfo(server_info);
            }
          else
            {
              /*
                Create a distributed pixel cache.
              */
              status=MagickTrue;
              cache_info->type=DistributedCache;
              cache_info->storage_class=image->storage_class;
              cache_info->colorspace=image->colorspace;
              cache_info->server_info=server_info;
              /* Record host:port as the "filename" of this cache. */
              (void) FormatLocaleString(cache_info->cache_filename,
                MaxTextExtent,"%s:%d",GetDistributeCacheHostname(
                (DistributeCacheInfo *) cache_info->server_info),
                GetDistributeCachePort((DistributeCacheInfo *)
                cache_info->server_info));
              if ((source_info.storage_class != UndefinedClass) &&
                  (mode != ReadMode))
                {
                  status=ClonePixelCacheRepository(cache_info,&source_info,
                    exception);
                  RelinquishPixelCachePixels(&source_info);
                }
              if (image->debug != MagickFalse)
                {
                  (void) FormatMagickSize(cache_info->length,MagickFalse,
                    format);
                  type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
                    cache_info->type);
                  (void) FormatLocaleString(message,MaxTextExtent,
                    "open %s (%s[%d], %s, %.20gx%.20g %s)",
                    cache_info->filename,cache_info->cache_filename,
                    GetDistributeCacheFile((DistributeCacheInfo *)
                    cache_info->server_info),type,(double)
                    cache_info->columns,(double) cache_info->rows,format);
                  (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
                    message);
                }
              return(status == 0 ? MagickFalse : MagickTrue);
            }
        }
      /* No distributed server available either: give up. */
      cache_info->type=UndefinedCache;
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "CacheResourcesExhausted","`%s'",image->filename);
      return(MagickFalse);
    }
  /*
    Create pixel cache on disk.
  */
  if (status == MagickFalse)
    {
      cache_info->type=UndefinedCache;
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "CacheResourcesExhausted","`%s'",image->filename);
      return(MagickFalse);
    }
  if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode) &&
      (cache_info->mode != PersistMode))
    {
      /* Discard the stale backing file so a fresh one is created below. */
      (void) ClosePixelCacheOnDisk(cache_info);
      *cache_info->cache_filename='\0';
    }
  if (OpenPixelCacheOnDisk(cache_info,mode) == MagickFalse)
    {
      ThrowFileException(exception,CacheError,"UnableToOpenPixelCache",
        image->filename);
      return(MagickFalse);
    }
  status=SetPixelCacheExtent(image,(MagickSizeType) cache_info->offset+
    cache_info->length);
  if (status == MagickFalse)
    {
      ThrowFileException(exception,CacheError,"UnableToExtendCache",
        image->filename);
      return(MagickFalse);
    }
  cache_info->storage_class=image->storage_class;
  cache_info->colorspace=image->colorspace;
  length=number_pixels*(sizeof(PixelPacket)+sizeof(IndexPacket));
  /* Memory-map the disk cache when it fits in the address space. */
  if (length != (MagickSizeType) ((size_t) length))
    cache_info->type=DiskCache;
  else
    {
      status=AcquireMagickResource(MapResource,cache_info->length);
      if (status == MagickFalse)
        cache_info->type=DiskCache;
      else
        if ((cache_info->type != MapCache) &&
            (cache_info->type != MemoryCache))
          {
            cache_info->type=DiskCache;
            RelinquishMagickResource(MapResource,cache_info->length);
          }
        else
          {
            cache_info->pixels=(PixelPacket *) MapBlob(cache_info->file,mode,
              cache_info->offset,(size_t) cache_info->length);
            if (cache_info->pixels == (PixelPacket *) NULL)
              {
                /* Mapping failed: fall back to plain disk I/O. */
                cache_info->type=DiskCache;
                cache_info->mapped=source_info.mapped;
                cache_info->pixels=source_info.pixels;
                RelinquishMagickResource(MapResource,cache_info->length);
              }
            else
              {
                /*
                  Create file-backed memory-mapped pixel cache.
                */
                (void) ClosePixelCacheOnDisk(cache_info);
                cache_info->type=MapCache;
                cache_info->mapped=MagickTrue;
                cache_info->indexes=(IndexPacket *) NULL;
                if (cache_info->active_index_channel != MagickFalse)
                  cache_info->indexes=(IndexPacket *) (cache_info->pixels+
                    number_pixels);
                if ((source_info.storage_class != UndefinedClass) &&
                    (mode != ReadMode))
                  {
                    status=ClonePixelCacheRepository(cache_info,&source_info,
                      exception);
                    RelinquishPixelCachePixels(&source_info);
                  }
                if (image->debug != MagickFalse)
                  {
                    (void) FormatMagickSize(cache_info->length,MagickTrue,
                      format);
                    type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
                      cache_info->type);
                    (void) FormatLocaleString(message,MaxTextExtent,
                      "open %s (%s[%d], %s, %.20gx%.20g %s)",
                      cache_info->filename,cache_info->cache_filename,
                      cache_info->file,type,(double) cache_info->columns,
                      (double) cache_info->rows,format);
                    (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
                      message);
                  }
                return(status == 0 ? MagickFalse : MagickTrue);
              }
          }
    }
  status=MagickTrue;
  if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode))
    {
      status=ClonePixelCacheRepository(cache_info,&source_info,exception);
      RelinquishPixelCachePixels(&source_info);
    }
  if (image->debug != MagickFalse)
    {
      (void) FormatMagickSize(cache_info->length,MagickFalse,format);
      type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
        cache_info->type);
      (void) FormatLocaleString(message,MaxTextExtent,
        "open %s (%s[%d], %s, %.20gx%.20g %s)",cache_info->filename,
        cache_info->cache_filename,cache_info->file,type,(double)
        cache_info->columns,(double) cache_info->rows,format);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  return(status == 0 ? MagickFalse : MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   P e r s i s t P i x e l C a c h e                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PersistPixelCache() attaches to or initializes a persistent pixel cache.  A
%  persistent pixel cache is one that resides on disk and is not destroyed
%  when the program exits.
%
%  The format of the PersistPixelCache() method is:
%
%      MagickBooleanType PersistPixelCache(Image *image,const char *filename,
%        const MagickBooleanType attach,MagickOffsetType *offset,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o filename: the persistent pixel cache filename.
%
%    o attach: A value other than zero initializes the persistent pixel cache.
%
%    o initialize: A value other than zero initializes the persistent pixel
%      cache.
%
%    o offset: the offset in the persistent cache to store pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType PersistPixelCache(Image *image,
  const char *filename,const MagickBooleanType attach,MagickOffsetType *offset,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info,
    *magick_restrict clone_info;

  MagickBooleanType
    status;

  ssize_t
    page_size;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (void *) NULL);
  assert(filename != (const char *) NULL);
  assert(offset != (MagickOffsetType *) NULL);
  page_size=GetMagickPageSize();
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  CopyOpenCLBuffer(cache_info);
#endif
  if (attach != MagickFalse)
    {
      /*
        Attach existing persistent pixel cache.
      */
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CacheEvent,GetMagickModule(),
          "attach persistent cache");
      (void) CopyMagickString(cache_info->cache_filename,filename,
        MaxTextExtent);
      cache_info->type=DiskCache;
      cache_info->offset=(*offset);
      if (OpenPixelCache(image,ReadMode,exception) == MagickFalse)
        return(MagickFalse);
      /* Advance the caller's offset to the next page-aligned slot. */
      *offset+=cache_info->length+page_size-(cache_info->length % page_size);
      return(SyncImagePixelCache(image,exception));
    }
  /*
    Clone persistent pixel cache.
  */
  status=AcquireMagickResource(DiskResource,cache_info->length);
  if (status == MagickFalse)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "CacheResourcesExhausted","`%s'",image->filename);
      return(MagickFalse);
    }
  clone_info=(CacheInfo *) ClonePixelCache(cache_info);
  clone_info->type=DiskCache;
  (void) CopyMagickString(clone_info->cache_filename,filename,MaxTextExtent);
  clone_info->file=(-1);
  clone_info->storage_class=cache_info->storage_class;
  clone_info->colorspace=cache_info->colorspace;
  clone_info->columns=cache_info->columns;
  clone_info->rows=cache_info->rows;
  clone_info->active_index_channel=cache_info->active_index_channel;
  clone_info->mode=PersistMode;
  clone_info->length=cache_info->length;
  clone_info->channels=cache_info->channels;
  clone_info->offset=(*offset);
  status=ClonePixelCacheRepository(clone_info,cache_info,exception);
  *offset+=cache_info->length+page_size-(cache_info->length % page_size);
  clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   Q u e u e   A u t h e n t i c   P i x e l   C a c h e   N e x u s         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  QueueAuthenticPixelCacheNexus() allocates an region to store image pixels as
%  defined by the region rectangle and returns a pointer to the region.
This
%  region is subsequently transferred from the pixel cache with
%  SyncAuthenticPixelsCache().  A pointer to the pixels is returned if the
%  pixels are transferred, otherwise a NULL is returned.
%
%  The format of the QueueAuthenticPixelCacheNexus() method is:
%
%      PixelPacket *QueueAuthenticPixelCacheNexus(Image *image,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        const MagickBooleanType clone,NexusInfo *nexus_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y,columns,rows: These values define the perimeter of a region of
%      pixels.
%
%    o nexus_info: the cache nexus to set.
%
%    o clone: clone the pixel cache.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/* Deprecated-style wrapper that forwards directly to the nexus variant. */
MagickExport PixelPacket *QueueAuthenticPixel(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  const MagickBooleanType clone,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  return(QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,clone,nexus_info,
    exception));
}

MagickExport PixelPacket *QueueAuthenticPixelCacheNexus(Image *image,
  const ssize_t x,const ssize_t y,const size_t columns,const size_t rows,
  const MagickBooleanType clone,NexusInfo *nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickOffsetType
    offset;

  MagickSizeType
    number_pixels;

  PixelPacket
    *magick_restrict pixels;

  RectangleInfo
    region;

  /*
    Validate pixel cache geometry.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) GetImagePixelCache(image,clone,exception);
  if (cache_info == (Cache) NULL)
    return((PixelPacket *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  /* Reject empty caches and origins outside the image. */
  if ((cache_info->columns == 0) || (cache_info->rows == 0) || (x < 0) ||
      (y < 0) || (x >= (ssize_t) cache_info->columns) ||
      (y >= (ssize_t) cache_info->rows))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "PixelsAreNotAuthentic","`%s'",image->filename);
      return((PixelPacket *) NULL);
    }
  offset=(MagickOffsetType) y*cache_info->columns+x;
  if (offset < 0)
    return((PixelPacket *) NULL);
  /* The last pixel of the requested region must also lie inside the cache. */
  number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
  offset+=(MagickOffsetType) (rows-1)*cache_info->columns+columns-1;
  if ((MagickSizeType) offset >= number_pixels)
    return((PixelPacket *) NULL);
  /*
    Return pixel cache.
  */
  region.x=x;
  region.y=y;
  region.width=columns;
  region.height=rows;
  pixels=SetPixelCacheNexusPixels(cache_info,WriteMode,&region,
    (image->clip_mask != (Image *) NULL) || (image->mask != (Image *) NULL) ?
    MagickTrue : MagickFalse,nexus_info,exception);
  return(pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   Q u e u e   A u t h e n t i c   P i x e l s   C a c h e                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  QueueAuthenticPixelsCache() allocates an region to store image pixels as
%  defined by the region rectangle and returns a pointer to the region.  This
%  region is subsequently transferred from the pixel cache with
%  SyncAuthenticPixelsCache().  A pointer to the pixels is returned if the
%  pixels are transferred, otherwise a NULL is returned.
%
%  The format of the QueueAuthenticPixelsCache() method is:
%
%      PixelPacket *QueueAuthenticPixelsCache(Image *image,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y,columns,rows: These values define the perimeter of a region of
%      pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static PixelPacket *QueueAuthenticPixelsCache(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  NexusInfo
    *magick_restrict nexus_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Queue a writable region through the calling thread's private cache nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  nexus_info=cache_info->nexus_info[id];
  return(QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickFalse,
    nexus_info,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   Q u e u e   A u t h e n t i c   P i x e l s                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  QueueAuthenticPixels() queues a mutable pixel region.  If the region is
%  successfully initialized a pointer to a PixelPacket array representing the
%  region is returned, otherwise NULL is returned.  The returned pointer may
%  point to a temporary working buffer for the pixels or it may point to the
%  final location of the pixels in memory.
%
%  Write-only access means that any existing pixel values corresponding to
%  the region are ignored.  This is useful if the initial image is being
%  created from scratch, or if the existing pixel values are to be
%  completely replaced without need to refer to their pre-existing values.
%  The application is free to read and write the pixel buffer returned by
%  QueueAuthenticPixels() any way it pleases.  QueueAuthenticPixels() does not
%  initialize the pixel array values.  Initializing pixel array values is the
%  application's responsibility.
%
%  Performance is maximized if the selected region is part of one row, or
%  one or more full rows, since then there is opportunity to access the
%  pixels in-place (without a copy) if the image is in memory, or in a
%  memory-mapped file.  The returned pointer must *never* be deallocated
%  by the user.
%
%  Pixels accessed via the returned pointer represent a simple array of type
%  PixelPacket.  If the image type is CMYK or the storage class is PseudoClass,
%  call GetAuthenticIndexQueue() after invoking GetAuthenticPixels() to obtain
%  the black color component or the colormap indexes (of type IndexPacket)
%  corresponding to the region.  Once the PixelPacket (and/or IndexPacket)
%  array has been updated, the changes must be saved back to the underlying
%  image using SyncAuthenticPixels() or they may be lost.
%
%  The format of the QueueAuthenticPixels() method is:
%
%      PixelPacket *QueueAuthenticPixels(Image *image,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y,columns,rows: These values define the perimeter of a region of
%      pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport PixelPacket *QueueAuthenticPixels(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  QueueAuthenticPixelsHandler
    queue_handler;

  const int
    id = GetOpenMPThreadId();

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /*
    Delegate to a registered queue handler when one has been installed.
  */
  queue_handler=cache_info->methods.queue_authentic_pixels_handler;
  if (queue_handler != (QueueAuthenticPixelsHandler) NULL)
    return(queue_handler(image,x,y,columns,rows,exception));
  assert(id < (int) cache_info->number_threads);
  return(QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickFalse,
    cache_info->nexus_info[id],exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   R e a d   P i x e l   C a c h e   I n d e x e s                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ReadPixelCacheIndexes() reads colormap indexes from the specified region of
%  the pixel cache.
%
%  The format of the ReadPixelCacheIndexes() method is:
%
%      MagickBooleanType ReadPixelCacheIndexes(CacheInfo *cache_info,
%        NexusInfo *nexus_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o cache_info: the pixel cache.
%
%    o nexus_info: the cache nexus to read the colormap indexes.
%
%    o exception: return any errors or warnings in this structure.
% */ static inline MagickOffsetType ReadPixelCacheRegion( const CacheInfo *magick_restrict cache_info,const MagickOffsetType offset, const MagickSizeType length,unsigned char *magick_restrict buffer) { register MagickOffsetType i; ssize_t count; #if !defined(MAGICKCORE_HAVE_PREAD) if (lseek(cache_info->file,offset,SEEK_SET) < 0) return((MagickOffsetType) -1); #endif count=0; for (i=0; i < (MagickOffsetType) length; i+=count) { #if !defined(MAGICKCORE_HAVE_PREAD) count=read(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t) SSIZE_MAX)); #else count=pread(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t) SSIZE_MAX),(off_t) (offset+i)); #endif if (count <= 0) { count=0; if (errno != EINTR) break; } } return(i); } static MagickBooleanType ReadPixelCacheIndexes( CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info, ExceptionInfo *exception) { MagickOffsetType count, offset; MagickSizeType extent, length; register IndexPacket *magick_restrict q; register ssize_t y; size_t rows; if (cache_info->active_index_channel == MagickFalse) return(MagickFalse); if (nexus_info->authentic_pixel_cache != MagickFalse) return(MagickTrue); offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+ nexus_info->region.x; length=(MagickSizeType) nexus_info->region.width*sizeof(IndexPacket); rows=nexus_info->region.height; extent=length*rows; q=nexus_info->indexes; y=0; switch (cache_info->type) { case MemoryCache: case MapCache: { register IndexPacket *magick_restrict p; /* Read indexes from memory. */ if ((cache_info->columns == nexus_info->region.width) && (extent == (MagickSizeType) ((size_t) extent))) { length=extent; rows=1UL; } p=cache_info->indexes+offset; for (y=0; y < (ssize_t) rows; y++) { (void) memcpy(q,p,(size_t) length); p+=cache_info->columns; q+=nexus_info->region.width; } break; } case DiskCache: { /* Read indexes from disk. 
*/ LockSemaphoreInfo(cache_info->file_semaphore); if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse) { ThrowFileException(exception,FileOpenError,"UnableToOpenFile", cache_info->cache_filename); UnlockSemaphoreInfo(cache_info->file_semaphore); return(MagickFalse); } if ((cache_info->columns == nexus_info->region.width) && (extent <= MagickMaxBufferExtent)) { length=extent; rows=1UL; } extent=(MagickSizeType) cache_info->columns*cache_info->rows; for (y=0; y < (ssize_t) rows; y++) { count=ReadPixelCacheRegion(cache_info,cache_info->offset+extent* sizeof(PixelPacket)+offset*sizeof(*q),length,(unsigned char *) q); if (count < (MagickOffsetType) length) break; offset+=cache_info->columns; q+=nexus_info->region.width; } if (IsFileDescriptorLimitExceeded() != MagickFalse) (void) ClosePixelCacheOnDisk(cache_info); UnlockSemaphoreInfo(cache_info->file_semaphore); break; } case DistributedCache: { RectangleInfo region; /* Read indexes from distributed cache. */ LockSemaphoreInfo(cache_info->file_semaphore); region=nexus_info->region; if ((cache_info->columns != nexus_info->region.width) || (extent > MagickMaxBufferExtent)) region.height=1UL; else { length=extent; rows=1UL; } for (y=0; y < (ssize_t) rows; y++) { count=ReadDistributePixelCacheIndexes((DistributeCacheInfo *) cache_info->server_info,&region,length,(unsigned char *) q); if (count != (MagickOffsetType) length) break; q+=nexus_info->region.width; region.y++; } UnlockSemaphoreInfo(cache_info->file_semaphore); break; } default: break; } if (y < (ssize_t) rows) { ThrowFileException(exception,CacheError,"UnableToReadPixelCache", cache_info->cache_filename); return(MagickFalse); } if ((cache_info->debug != MagickFalse) && (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse)) (void) LogMagickEvent(CacheEvent,GetMagickModule(), "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double) nexus_info->region.width,(double) nexus_info->region.height,(double) nexus_info->region.x,(double) 
nexus_info->region.y);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   R e a d   P i x e l   C a c h e   P i x e l s                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ReadPixelCachePixels() reads pixels from the specified region of the pixel
%  cache into the nexus staging buffer.
%
%  The format of the ReadPixelCachePixels() method is:
%
%      MagickBooleanType ReadPixelCachePixels(CacheInfo *cache_info,
%        NexusInfo *nexus_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o cache_info: the pixel cache.
%
%    o nexus_info: the cache nexus to read the pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType ReadPixelCachePixels(
  CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  register PixelPacket
    *magick_restrict q;

  register ssize_t
    y;

  size_t
    rows;

  /* Nexus aliases the in-core cache directly: nothing to copy. */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);
  /* Pixel offset of the nexus origin; the division re-check detects
     overflow of y*columns. */
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns;
  if ((ssize_t) (offset/cache_info->columns) != nexus_info->region.y)
    return(MagickFalse);
  offset+=nexus_info->region.x;
  /* Bytes per region row; again guard the multiplication against overflow. */
  length=(MagickSizeType) nexus_info->region.width*sizeof(PixelPacket);
  if ((length/sizeof(PixelPacket)) != nexus_info->region.width)
    return(MagickFalse);
  rows=nexus_info->region.height;
  extent=length*rows;
  if ((extent == 0) || ((extent/length) != rows))
    return(MagickFalse);
  q=nexus_info->pixels;
  y=0;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      register PixelPacket
        *magick_restrict p;

      /*
        Read pixels from memory.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          /* Full-width rows are contiguous: collapse to one memcpy. */
          length=extent;
          rows=1UL;
        }
      p=cache_info->pixels+offset;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=cache_info->columns;
        q+=nexus_info->region.width;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Read pixels from disk.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          /* Contiguous and small enough: issue a single read. */
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadPixelCacheRegion(cache_info,cache_info->offset+offset*
          sizeof(*q),length,(unsigned char *) q);
        if (count < (MagickOffsetType) length)
          break;
        offset+=cache_info->columns;
        q+=nexus_info->region.width;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Read pixels from distributed cache.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;  /* request one row per round trip */
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadDistributePixelCachePixels((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        q+=nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  /* The row loops break early on a short read; y < rows signals failure. */
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToReadPixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   R e f e r e n c e   P i x e l   C a c h e                                 %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ReferencePixelCache() increments the reference count associated with the
%  pixel cache returning a pointer to the cache.
%
%  The format of the ReferencePixelCache method is:
%
%      Cache ReferencePixelCache(Cache cache_info)
%
%  A description of each parameter follows:
%
%    o cache_info: the pixel cache.
% */ MagickExport Cache ReferencePixelCache(Cache cache) { CacheInfo *magick_restrict cache_info; assert(cache != (Cache *) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); LockSemaphoreInfo(cache_info->semaphore); cache_info->reference_count++; UnlockSemaphoreInfo(cache_info->semaphore); return(cache_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R e s e t P i x e l C a c h e E p o c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ResetPixelCacheEpoch() resets the pixel cache epoch. % % The format of the ResetPixelCacheEpoch method is: % % void ResetPixelCacheEpoch(void) % */ MagickPrivate void ResetPixelCacheEpoch(void) { cache_epoch=0; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + S e t P i x e l C a c h e M e t h o d s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetPixelCacheMethods() sets the image pixel methods to the specified ones. % % The format of the SetPixelCacheMethods() method is: % % SetPixelCacheMethods(Cache *,CacheMethods *cache_methods) % % A description of each parameter follows: % % o cache: the pixel cache. % % o cache_methods: Specifies a pointer to a CacheMethods structure. % */ MagickExport void SetPixelCacheMethods(Cache cache,CacheMethods *cache_methods) { CacheInfo *magick_restrict cache_info; GetOneAuthenticPixelFromHandler get_one_authentic_pixel_from_handler; GetOneVirtualPixelFromHandler get_one_virtual_pixel_from_handler; /* Set cache pixel methods. 
*/ assert(cache != (Cache) NULL); assert(cache_methods != (CacheMethods *) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", cache_info->filename); if (cache_methods->get_virtual_pixel_handler != (GetVirtualPixelHandler) NULL) cache_info->methods.get_virtual_pixel_handler= cache_methods->get_virtual_pixel_handler; if (cache_methods->destroy_pixel_handler != (DestroyPixelHandler) NULL) cache_info->methods.destroy_pixel_handler= cache_methods->destroy_pixel_handler; if (cache_methods->get_virtual_indexes_from_handler != (GetVirtualIndexesFromHandler) NULL) cache_info->methods.get_virtual_indexes_from_handler= cache_methods->get_virtual_indexes_from_handler; if (cache_methods->get_authentic_pixels_handler != (GetAuthenticPixelsHandler) NULL) cache_info->methods.get_authentic_pixels_handler= cache_methods->get_authentic_pixels_handler; if (cache_methods->queue_authentic_pixels_handler != (QueueAuthenticPixelsHandler) NULL) cache_info->methods.queue_authentic_pixels_handler= cache_methods->queue_authentic_pixels_handler; if (cache_methods->sync_authentic_pixels_handler != (SyncAuthenticPixelsHandler) NULL) cache_info->methods.sync_authentic_pixels_handler= cache_methods->sync_authentic_pixels_handler; if (cache_methods->get_authentic_pixels_from_handler != (GetAuthenticPixelsFromHandler) NULL) cache_info->methods.get_authentic_pixels_from_handler= cache_methods->get_authentic_pixels_from_handler; if (cache_methods->get_authentic_indexes_from_handler != (GetAuthenticIndexesFromHandler) NULL) cache_info->methods.get_authentic_indexes_from_handler= cache_methods->get_authentic_indexes_from_handler; get_one_virtual_pixel_from_handler= cache_info->methods.get_one_virtual_pixel_from_handler; if (get_one_virtual_pixel_from_handler != (GetOneVirtualPixelFromHandler) NULL) cache_info->methods.get_one_virtual_pixel_from_handler= 
cache_methods->get_one_virtual_pixel_from_handler; get_one_authentic_pixel_from_handler= cache_methods->get_one_authentic_pixel_from_handler; if (get_one_authentic_pixel_from_handler != (GetOneAuthenticPixelFromHandler) NULL) cache_info->methods.get_one_authentic_pixel_from_handler= cache_methods->get_one_authentic_pixel_from_handler; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + S e t P i x e l C a c h e N e x u s P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetPixelCacheNexusPixels() defines the region of the cache for the % specified cache nexus. % % The format of the SetPixelCacheNexusPixels() method is: % % PixelPacket SetPixelCacheNexusPixels(const CacheInfo *cache_info, % const MapMode mode,const RectangleInfo *region, % const MagickBooleanType buffered,NexusInfo *nexus_info, % ExceptionInfo *exception) % % A description of each parameter follows: % % o cache_info: the pixel cache. % % o mode: ReadMode, WriteMode, or IOMode. % % o region: A pointer to the RectangleInfo structure that defines the % region of this particular cache nexus. % % o buffered: pixels are buffered. % % o nexus_info: the cache nexus to set. % % o exception: return any errors or warnings in this structure. 
%
*/
static inline MagickBooleanType AcquireCacheNexusPixels(
  const CacheInfo *magick_restrict cache_info,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  /* Reject lengths that cannot be represented in a size_t. */
  if (nexus_info->length != (MagickSizeType) ((size_t) nexus_info->length))
    return(MagickFalse);
  if (cache_anonymous_memory <= 0)
    {
      /* Heap-allocate the staging buffer and zero it. */
      nexus_info->mapped=MagickFalse;
      nexus_info->cache=(PixelPacket *) MagickAssumeAligned(
        AcquireAlignedMemory(1,(size_t) nexus_info->length));
      if (nexus_info->cache != (PixelPacket *) NULL)
        (void) memset(nexus_info->cache,0,(size_t) nexus_info->length);
    }
  else
    {
      /* Back the staging buffer with anonymous mapped memory instead. */
      nexus_info->mapped=MagickTrue;
      nexus_info->cache=(PixelPacket *) MapBlob(-1,IOMode,0,(size_t)
        nexus_info->length);
    }
  if (nexus_info->cache == (PixelPacket *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",
        cache_info->filename);
      return(MagickFalse);
    }
  return(MagickTrue);
}

static inline MagickBooleanType IsAuthenticPixelCache(
  const CacheInfo *magick_restrict cache_info,
  const NexusInfo *magick_restrict nexus_info)
{
  MagickBooleanType
    status;

  MagickOffsetType
    offset;

  /*
    Does nexus pixels point directly to in-core cache pixels or is it
    buffered?
  */
  if (cache_info->type == PingCache)
    return(MagickTrue);
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  status=nexus_info->pixels == (cache_info->pixels+offset) ?
    MagickTrue : MagickFalse;
  return(status);
}

static inline void PrefetchPixelCacheNexusPixels(const NexusInfo *nexus_info,
  const MapMode mode)
{
  /* no-ops when MagickCachePrefetch compiles away on this platform */
  magick_unreferenced(nexus_info);
  magick_unreferenced(mode);
  if (mode == ReadMode)
    {
      MagickCachePrefetch((unsigned char *) nexus_info->pixels,0,1);
      return;
    }
  MagickCachePrefetch((unsigned char *) nexus_info->pixels,1,1);
}

static PixelPacket *SetPixelCacheNexusPixels(const CacheInfo *cache_info,
  const MapMode mode,const RectangleInfo *region,
  const MagickBooleanType buffered,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  MagickSizeType
    length,
    number_pixels;

  assert(cache_info != (const CacheInfo *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->type == UndefinedCache)
    return((PixelPacket *) NULL);
  if ((region->width == 0) || (region->height == 0))
    return((PixelPacket *) NULL);
  nexus_info->region=(*region);
  if (((cache_info->type == MemoryCache) || (cache_info->type == MapCache)) &&
      (buffered == MagickFalse))
    {
      ssize_t
        x,
        y;

      x=nexus_info->region.x+(ssize_t) nexus_info->region.width-1;
      y=nexus_info->region.y+(ssize_t) nexus_info->region.height-1;
      /* Direct access requires the region to lie inside the cache and be
         row-contiguous: either full-width rows, or a single in-bounds row. */
      if (((nexus_info->region.x >= 0) && (nexus_info->region.y >= 0) &&
           (y < (ssize_t) cache_info->rows)) &&
          (((nexus_info->region.x == 0) &&
            (nexus_info->region.width == cache_info->columns)) ||
           ((nexus_info->region.height == 1) &&
            (x < (ssize_t) cache_info->columns))))
        {
          MagickOffsetType
            offset;

          /*
            Pixels are accessed directly from memory.
          */
          offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
            nexus_info->region.x;
          nexus_info->pixels=cache_info->pixels+offset;
          nexus_info->indexes=(IndexPacket *) NULL;
          if (cache_info->active_index_channel != MagickFalse)
            nexus_info->indexes=cache_info->indexes+offset;
          PrefetchPixelCacheNexusPixels(nexus_info,mode);
          nexus_info->authentic_pixel_cache=IsAuthenticPixelCache(cache_info,
            nexus_info);
          return(nexus_info->pixels);
        }
    }
  /*
    Pixels are stored in a staging region until they are synced to the cache.
  */
  number_pixels=(MagickSizeType) nexus_info->region.width*
    nexus_info->region.height;
  length=number_pixels*sizeof(PixelPacket);
  if (cache_info->active_index_channel != MagickFalse)
    length+=number_pixels*sizeof(IndexPacket);  /* indexes follow pixels */
  if (nexus_info->cache == (PixelPacket *) NULL)
    {
      nexus_info->length=length;
      status=AcquireCacheNexusPixels(cache_info,nexus_info,exception);
      if (status == MagickFalse)
        {
          nexus_info->length=0;
          return((PixelPacket *) NULL);
        }
    }
  else
    if (nexus_info->length < length)
      {
        /* Existing staging buffer is too small; grow it. */
        RelinquishCacheNexusPixels(nexus_info);
        nexus_info->length=length;
        status=AcquireCacheNexusPixels(cache_info,nexus_info,exception);
        if (status == MagickFalse)
          {
            nexus_info->length=0;
            return((PixelPacket *) NULL);
          }
      }
  nexus_info->pixels=nexus_info->cache;
  nexus_info->indexes=(IndexPacket *) NULL;
  if (cache_info->active_index_channel != MagickFalse)
    nexus_info->indexes=(IndexPacket *) (nexus_info->pixels+number_pixels);
  PrefetchPixelCacheNexusPixels(nexus_info,mode);
  nexus_info->authentic_pixel_cache=IsAuthenticPixelCache(cache_info,
    nexus_info);
  return(nexus_info->pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   S e t   P i x e l   C a c h e   V i r t u a l   M e t h o d               %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetPixelCacheVirtualMethod() sets the "virtual pixels" method for the
%  pixel cache and returns the previous setting.
A virtual pixel is any pixel
%  access that is outside the boundaries of the image cache.
%
%  The format of the SetPixelCacheVirtualMethod() method is:
%
%      VirtualPixelMethod SetPixelCacheVirtualMethod(const Image *image,
%        const VirtualPixelMethod virtual_pixel_method)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o virtual_pixel_method: choose the type of virtual pixel.
%
*/
static MagickBooleanType SetCacheAlphaChannel(Image *image,
  const Quantum opacity)
{
  CacheInfo
    *magick_restrict cache_info;

  CacheView
    *magick_restrict image_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  image->matte=MagickTrue;
  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,&image->exception);
    /* must be virtual */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;  /* another row already failed; skip remaining work */
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
      &image->exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /* Assign the requested opacity to every pixel on this row. */
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      q->opacity=opacity;
      q++;
    }
    status=SyncCacheViewAuthenticPixels(image_view,&image->exception);
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

MagickExport VirtualPixelMethod SetPixelCacheVirtualMethod(const Image *image,
  const VirtualPixelMethod virtual_pixel_method)
{
  CacheInfo
    *magick_restrict cache_info;

  VirtualPixelMethod
    method;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  method=cache_info->virtual_pixel_method;
  cache_info->virtual_pixel_method=virtual_pixel_method;
  if ((image->columns != 0) && (image->rows != 0))
    switch (virtual_pixel_method)
    {
      case BackgroundVirtualPixelMethod:
      {
        /* Enable the matte channel if the background is not fully opaque. */
        if ((image->background_color.opacity != OpaqueOpacity) &&
            (image->matte == MagickFalse))
          (void) SetCacheAlphaChannel((Image *) image,OpaqueOpacity);
        if ((IsPixelGray(&image->background_color) == MagickFalse) &&
            (IsGrayColorspace(image->colorspace) != MagickFalse))
          (void) SetImageColorspace((Image *) image,sRGBColorspace);
        break;
      }
      case TransparentVirtualPixelMethod:
      {
        /* Existing pixels stay opaque; only virtual pixels are transparent. */
        if (image->matte == MagickFalse)
          (void) SetCacheAlphaChannel((Image *) image,OpaqueOpacity);
        break;
      }
      default:
        break;
    }
  return(method);
}

#if defined(MAGICKCORE_OPENCL_SUPPORT)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   S y n c   A u t h e n t i c   O p e n C L   B u f f e r                   %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SyncAuthenticOpenCLBuffer() ensures all the OpenCL operations have been
%  completed and updates the host memory.
%
%  The format of the SyncAuthenticOpenCLBuffer() method is:
%
%      void SyncAuthenticOpenCLBuffer(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
static void CopyOpenCLBuffer(CacheInfo *magick_restrict cache_info)
{
  MagickCLEnv
    clEnv;

  assert(cache_info != (CacheInfo *)NULL);
  if ((cache_info->type != MemoryCache) ||
      (cache_info->opencl == (OpenCLCacheInfo *)NULL))
    return;
  /*
    Ensure single threaded access to OpenCL environment.
  */
  LockSemaphoreInfo(cache_info->semaphore);
  if (cache_info->opencl != (OpenCLCacheInfo *)NULL)
    {
      cl_event
        *events;

      cl_uint
        event_count;

      clEnv=GetDefaultOpenCLEnv();
      events=CopyOpenCLEvents(cache_info->opencl,&event_count);
      if (events != (cl_event *) NULL)
        {
          cl_command_queue
            queue;

          cl_context
            context;

          cl_int
            status;

          PixelPacket
            *pixels;

          /* Map the device buffer back into host memory; the blocking map
             waits on all outstanding events for this buffer. */
          context=GetOpenCLContext(clEnv);
          queue=AcquireOpenCLCommandQueue(clEnv);
          pixels=(PixelPacket *) clEnv->library->clEnqueueMapBuffer(queue,
            cache_info->opencl->buffer,CL_TRUE,CL_MAP_READ | CL_MAP_WRITE,0,
            cache_info->length,event_count,events,NULL,&status);
          assert(pixels == cache_info->pixels);
          events=(cl_event *) RelinquishMagickMemory(events);
          RelinquishOpenCLCommandQueue(clEnv,queue);
        }
      cache_info->opencl=RelinquishOpenCLCacheInfo(clEnv,cache_info->opencl);
    }
  UnlockSemaphoreInfo(cache_info->semaphore);
}

MagickPrivate void SyncAuthenticOpenCLBuffer(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (Image *)NULL);
  cache_info = (CacheInfo *)image->cache;
  CopyOpenCLBuffer(cache_info);
}
#endif

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   S y n c   A u t h e n t i c   P i x e l   C a c h e   N e x u s           %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SyncAuthenticPixelCacheNexus() saves the authentic image pixels to the
%  in-memory or disk cache.  The method returns MagickTrue if the pixel region
%  is synced, otherwise MagickFalse.
%
%  The format of the SyncAuthenticPixelCacheNexus() method is:
%
%      MagickBooleanType SyncAuthenticPixelCacheNexus(Image *image,
%        NexusInfo *nexus_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o nexus_info: the cache nexus to sync.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SyncAuthenticPixelCacheNexus(Image *image,
  NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickBooleanType
    status;

  /*
    Transfer pixels to the cache.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->cache == (Cache) NULL)
    ThrowBinaryException(CacheError,"PixelCacheIsNotOpen",image->filename);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->type == UndefinedCache)
    return(MagickFalse);
  /* Apply clip and composite masks before writing pixels back. */
  if ((image->storage_class == DirectClass) &&
      (image->clip_mask != (Image *) NULL) &&
      (ClipPixelCacheNexus(image,nexus_info,exception) == MagickFalse))
    return(MagickFalse);
  if ((image->storage_class == DirectClass) &&
      (image->mask != (Image *) NULL) &&
      (MaskPixelCacheNexus(image,nexus_info,exception) == MagickFalse))
    return(MagickFalse);
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    {
      /* Nexus aliases the cache directly; nothing to copy back. */
      image->taint=MagickTrue;
      return(MagickTrue);
    }
  assert(cache_info->signature == MagickCoreSignature);
  status=WritePixelCachePixels(cache_info,nexus_info,exception);
  if ((cache_info->active_index_channel != MagickFalse) &&
      (WritePixelCacheIndexes(cache_info,nexus_info,exception) == MagickFalse))
    return(MagickFalse);
  if (status != MagickFalse)
    image->taint=MagickTrue;
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   S y n c   A u t h e n t i c   P i x e l   C a c h e                       %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SyncAuthenticPixelsCache() saves the authentic image pixels to the
%  in-memory or disk cache.  The method returns MagickTrue if the pixel region
%  is synced, otherwise MagickFalse.
%
%  The format of the SyncAuthenticPixelsCache() method is:
%
%      MagickBooleanType SyncAuthenticPixelsCache(Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType SyncAuthenticPixelsCache(Image *image,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* Each OpenMP thread owns its own nexus. */
  assert(id < (int) cache_info->number_threads);
  status=SyncAuthenticPixelCacheNexus(image,cache_info->nexus_info[id],
    exception);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   S y n c   A u t h e n t i c   P i x e l s                                 %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SyncAuthenticPixels() saves the image pixels to the in-memory or disk cache.
%  The method returns MagickTrue if the pixel region is flushed, otherwise
%  MagickFalse.
%
%  The format of the SyncAuthenticPixels() method is:
%
%      MagickBooleanType SyncAuthenticPixels(Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SyncAuthenticPixels(Image *image,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* Delegate to a registered sync handler when one is installed. */
  if (cache_info->methods.sync_authentic_pixels_handler !=
      (SyncAuthenticPixelsHandler) NULL)
    return(cache_info->methods.sync_authentic_pixels_handler(image,exception));
  assert(id < (int) cache_info->number_threads);
  status=SyncAuthenticPixelCacheNexus(image,cache_info->nexus_info[id],
    exception);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   S y n c   I m a g e   P i x e l   C a c h e                               %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SyncImagePixelCache() saves the image pixels to the in-memory or disk cache.
%  The method returns MagickTrue if the pixel region is flushed, otherwise
%  MagickFalse.
%
%  The format of the SyncImagePixelCache() method is:
%
%      MagickBooleanType SyncImagePixelCache(Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate MagickBooleanType SyncImagePixelCache(Image *image,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (Image *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  cache_info=(CacheInfo *) GetImagePixelCache(image,MagickTrue,exception);
  return(cache_info == (CacheInfo *) NULL ?
MagickFalse : MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   W r i t e   P i x e l   C a c h e   I n d e x e s                         %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  WritePixelCacheIndexes() writes the colormap indexes to the specified
%  region of the pixel cache.
%
%  The format of the WritePixelCacheIndexes() method is:
%
%      MagickBooleanType WritePixelCacheIndexes(CacheInfo *cache_info,
%        NexusInfo *nexus_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o cache_info: the pixel cache.
%
%    o nexus_info: the cache nexus to write the colormap indexes.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType WritePixelCacheIndexes(CacheInfo *cache_info,
  NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  register const IndexPacket
    *magick_restrict p;

  register ssize_t
    y;

  size_t
    rows;

  if (cache_info->active_index_channel == MagickFalse)
    return(MagickFalse);
  /* Nexus aliases the cache directly: indexes are already in place. */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) nexus_info->region.width*sizeof(IndexPacket);
  rows=nexus_info->region.height;
  extent=(MagickSizeType) length*rows;
  p=nexus_info->indexes;
  y=0;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      register IndexPacket
        *magick_restrict q;

      /*
        Write indexes to memory.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          /* Full-width rows are contiguous: collapse to one memcpy. */
          length=extent;
          rows=1UL;
        }
      q=cache_info->indexes+offset;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=nexus_info->region.width;
        q+=cache_info->columns;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Write indexes to disk.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      /* On disk the index plane follows all pixel data; extent becomes the
         size of the pixel plane in pixels. */
      extent=(MagickSizeType) cache_info->columns*cache_info->rows;
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WritePixelCacheRegion(cache_info,cache_info->offset+extent*
          sizeof(PixelPacket)+offset*sizeof(*p),length,(const unsigned char *)
          p);
        if (count < (MagickOffsetType) length)
          break;
        p+=nexus_info->region.width;
        offset+=cache_info->columns;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Write indexes to distributed cache.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;  /* send one row per round trip */
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WriteDistributePixelCacheIndexes((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(const unsigned char *) p);
        if (count != (MagickOffsetType) length)
          break;
        p+=nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  /* Early loop exit (short write) leaves y < rows. */
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToWritePixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double)
nexus_info->region.y);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   W r i t e   P i x e l   C a c h e   P i x e l s                           %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  WritePixelCachePixels() writes image pixels to the specified region of the
%  pixel cache.
%
%  The format of the WritePixelCachePixels() method is:
%
%      MagickBooleanType WritePixelCachePixels(CacheInfo *cache_info,
%        NexusInfo *nexus_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o cache_info: the pixel cache.
%
%    o nexus_info: the cache nexus to write the pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType WritePixelCachePixels(CacheInfo *cache_info,
  NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  register const PixelPacket
    *magick_restrict p;

  register ssize_t
    y;

  size_t
    rows;

  /* Nexus aliases the cache directly: pixels are already in place. */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) nexus_info->region.width*sizeof(PixelPacket);
  rows=nexus_info->region.height;
  extent=length*rows;
  p=nexus_info->pixels;
  y=0;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      register PixelPacket
        *magick_restrict q;

      /*
        Write pixels to memory.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          /* Full-width rows are contiguous: collapse to one memcpy. */
          length=extent;
          rows=1UL;
        }
      q=cache_info->pixels+offset;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=nexus_info->region.width;
        q+=cache_info->columns;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Write pixels to disk.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WritePixelCacheRegion(cache_info,cache_info->offset+offset*
          sizeof(*p),length,(const unsigned char *) p);
        if (count < (MagickOffsetType) length)
          break;
        p+=nexus_info->region.width;
        offset+=cache_info->columns;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Write pixels to distributed cache.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;  /* send one row per round trip */
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WriteDistributePixelCachePixels((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(const unsigned char *) p);
        if (count != (MagickOffsetType) length)
          break;
        p+=nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  /* Early loop exit (short write) leaves y < rows. */
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToWritePixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
/* transfer.c */
#include <math.h> #include <stdlib.h> #include <fastpm/libfastpm.h> #include <fastpm/logging.h> #include "pmpfft.h" void fastpm_apply_smoothing_transfer(PM * pm, FastPMFloat * from, FastPMFloat * to, double sml) { #pragma omp parallel { PMKIter kiter; pm_kiter_init(pm, &kiter); int d; int i; double *kernel[3]; for(d = 0; d < 3; d ++) { kernel[d] = malloc(sizeof(double) * pm->Nmesh[d]); for(i = 0; i < pm->Nmesh[d]; i ++) { double kk = kiter.kk[d][i]; kernel[d][i] = exp(- 0.5 * kk * sml * sml); } } for(; !pm_kiter_stop(&kiter); pm_kiter_next(&kiter)) { int dir; double smth = 1.0; for(dir = 0; dir < 3; dir++) smth *= kernel[dir][kiter.iabs[dir]]; to[kiter.ind + 0] = from[kiter.ind + 0] * smth; to[kiter.ind + 1] = from[kiter.ind + 1] * smth; } for(d = 0; d < 3; d ++) { free(kernel[d]); } } } void fastpm_apply_lowpass_transfer(PM * pm, FastPMFloat * from, FastPMFloat * to, double kth) { double kth2 = kth * kth; #pragma omp parallel { PMKIter kiter; pm_kiter_init(pm, &kiter); for(; !pm_kiter_stop(&kiter); pm_kiter_next(&kiter)) { int dir; double smth = 1.0; double kk = 0; for(dir = 0; dir < 3; dir++) { kk += kiter.kk[dir][kiter.iabs[dir]]; } if(kk < kth2) smth = 1; else smth = 0; to[kiter.ind + 0] = from[kiter.ind + 0] * smth; to[kiter.ind + 1] = from[kiter.ind + 1] * smth; } } } static double sinc_unnormed(double x) { if(x < 1e-5 && x > -1e-5) { double x2 = x * x; return 1.0 - x2 / 6. 
+ x2 * x2 / 120.; } else { return sin(x) / x; } } void fastpm_apply_decic_transfer(PM * pm, FastPMFloat * from, FastPMFloat * to) { #pragma omp parallel { PMKIter kiter; pm_kiter_init(pm, &kiter); int d; int i; double *kernel[3]; for(d = 0; d < 3; d ++) { kernel[d] = malloc(sizeof(double) * pm->Nmesh[d]); for(i = 0; i < pm->Nmesh[d]; i ++) { double w = kiter.k[d][i] * pm->BoxSize[d] / pm->Nmesh[d]; double cic = sinc_unnormed(0.5 * w); /* Watchout: this does divide by sinc, not sinc 2, */ kernel[d][i] = 1.0 / pow(cic, 2); } } for(; !pm_kiter_stop(&kiter); pm_kiter_next(&kiter)) { int dir; double smth = 1.0; for(dir = 0; dir < 3; dir++) smth *= kernel[dir][kiter.iabs[dir]]; /* - i k[d] */ to[kiter.ind + 0] = from[kiter.ind + 0] * smth; to[kiter.ind + 1] = from[kiter.ind + 1] * smth; } for(d = 0; d < 3; d ++) { free(kernel[d]); } } } void fastpm_apply_diff_transfer(PM * pm, FastPMFloat * from, FastPMFloat * to, int dir) { ptrdiff_t * Nmesh = pm_nmesh(pm); #pragma omp parallel { PMKIter kiter; for(pm_kiter_init(pm, &kiter); !pm_kiter_stop(&kiter); pm_kiter_next(&kiter)) { double k_finite = kiter.k_finite[dir][kiter.iabs[dir]]; /* i k[d] */ if( kiter.iabs[0] == (Nmesh[0] - kiter.iabs[0]) % Nmesh[0] && kiter.iabs[1] == (Nmesh[1] - kiter.iabs[1]) % Nmesh[1] && kiter.iabs[2] == (Nmesh[2] - kiter.iabs[2]) % Nmesh[2] ) { /* We are at the nyquist and the diff operator shall be zero; * otherwise the force is not real! */ to[kiter.ind + 0] = 0; to[kiter.ind + 1] = 0; } { FastPMFloat tmp[2]; tmp[0] = - from[kiter.ind + 1] * (k_finite); tmp[1] = from[kiter.ind + 0] * (k_finite); to[kiter.ind + 0] = tmp[0]; to[kiter.ind + 1] = tmp[1]; } } } } void fastpm_apply_laplace_transfer(PM * pm, FastPMFloat * from, FastPMFloat * to, int order) { /* order = 0: 1 / kk gives an IC that agrees with linear theory better * at intermidiate scales (desi-cosmosim email archives). 
* than * order = 1: 1 / sinc(kk) used in original FastPM; * */ #pragma omp parallel { PMKIter kiter; pm_kiter_init(pm, &kiter); float ** kklist [3] = {kiter.kk, kiter.kk_finite, kiter.kk_finite2}; for(; !pm_kiter_stop(&kiter); pm_kiter_next(&kiter)) { int d; double kk_finite = 0; for(d = 0; d < 3; d++) { kk_finite += kklist[order][d][kiter.iabs[d]]; } ptrdiff_t ind = kiter.ind; /* 1 / k2 */ if(LIKELY(kk_finite != 0)) { to[ind + 0] = from[ind + 0] * (1 / kk_finite); to[ind + 1] = from[ind + 1] * (1 / kk_finite); } else { to[ind + 0] = 0; to[ind + 1] = 0; } } } } void fastpm_apply_any_transfer(PM * pm, FastPMFloat * from, FastPMFloat * to, fastpm_fkfunc func, void * data) { #pragma omp parallel { PMKIter kiter; pm_kiter_init(pm, &kiter); for(; !pm_kiter_stop(&kiter); pm_kiter_next(&kiter)) { int dir; double smth = 1.0; double kk = 0; for(dir = 0; dir < 3; dir++) { kk += kiter.kk[dir][kiter.iabs[dir]]; } double k = sqrt(kk); smth = func(k, data); to[kiter.ind + 0] = from[kiter.ind + 0] * smth; to[kiter.ind + 1] = from[kiter.ind + 1] * smth; } } } void fastpm_apply_multiply_transfer(PM * pm, FastPMFloat * from, FastPMFloat * to, double value) { ptrdiff_t i; #pragma omp parallel for for(i = 0; i < pm_allocsize(pm); i ++) { to[i] = from[i] * value; } } void fastpm_apply_normalize_transfer(PM * pm, FastPMFloat * from, FastPMFloat * to) { double Norm = 0; #pragma omp parallel reduction(+: Norm) { PMKIter kiter; pm_kiter_init(pm, &kiter); for(; !pm_kiter_stop(&kiter); pm_kiter_next(&kiter)) { int dir; double kk = 0; for(dir = 0; dir < 3; dir++) { kk += kiter.kk[dir][kiter.iabs[dir]]; } if(kk == 0) { Norm += from[kiter.ind + 0]; } } } MPI_Allreduce(MPI_IN_PLACE, &Norm, 1, MPI_DOUBLE, MPI_SUM, pm_comm(pm)); if(Norm == 0) { fastpm_raise(-1, "It makes no sense to normalize a field with a mean of zero."); } fastpm_apply_multiply_transfer(pm, from, to, 1 / Norm); } void fastpm_apply_c2r_weight_transfer(PM * pm, FastPMFloat * from, FastPMFloat * to) { ptrdiff_t * Nmesh = 
pm_nmesh(pm); #pragma omp parallel { PMKIter kiter; pm_kiter_init(pm, &kiter); for(; !pm_kiter_stop(&kiter); pm_kiter_next(&kiter)) { /* usually each mode in the complex array moves its dual mode too, */ double weight = 2.0; /* but if my dual is myself, then I am used once */ if( kiter.iabs[0] == (Nmesh[0] - kiter.iabs[0]) % Nmesh[0] && kiter.iabs[1] == (Nmesh[1] - kiter.iabs[1]) % Nmesh[1] && kiter.iabs[2] == (Nmesh[2] - kiter.iabs[2]) % Nmesh[2] ) { weight = 1.0; /* fastpm_ilog(0, "weight == 1 mode found at %td %td %td\n", kiter.iabs[0], kiter.iabs[1], kiter.iabs[2]); */ } to[kiter.ind + 0] = weight * from[kiter.ind + 0]; to[kiter.ind + 1] = weight * from[kiter.ind + 1]; } } } void fastpm_apply_modify_mode_transfer(PM * pm, FastPMFloat * from, FastPMFloat * to, ptrdiff_t * mode, double value) { fastpm_apply_set_mode_transfer(pm, from, to, mode, value, 0); } /* method == 0 for override, method == 1 for add; * always use get_mode_transfer to check the result -- this doesn't guarentee setting the mode * to the value (because not all modes are free. 
* */ void fastpm_apply_set_mode_transfer(PM * pm, FastPMFloat * from, FastPMFloat * to, ptrdiff_t * mode, double value, int method) { ptrdiff_t * Nmesh = pm_nmesh(pm); if( mode[0] == (Nmesh[0] - mode[0]) % Nmesh[0] && mode[1] == (Nmesh[1] - mode[1]) % Nmesh[1] && mode[2] == (Nmesh[2] - mode[2]) % Nmesh[2] ) { if(mode[3] == 1) { /* These modes are purely real, thus we can not set the imag part to non-zero */ method = 0; value = 0; } } #pragma omp parallel { PMKIter kiter; pm_kiter_init(pm, &kiter); for(; !pm_kiter_stop(&kiter); pm_kiter_next(&kiter)) { to[kiter.ind + 0] = from[kiter.ind + 0]; to[kiter.ind + 1] = from[kiter.ind + 1]; if(( kiter.iabs[0] == mode[0] && kiter.iabs[1] == mode[1] && kiter.iabs[2] == mode[2] )) { if(method == 0) to[kiter.ind + mode[3]] = value; else to[kiter.ind + mode[3]] += value; } if(( kiter.iabs[0] == (Nmesh[0] - mode[0]) % Nmesh[0] && kiter.iabs[1] == (Nmesh[1] - mode[1]) % Nmesh[1] && kiter.iabs[2] == (Nmesh[2] - mode[2]) % Nmesh[2] )) { /* conjugate plane */ if(method == 0) to[kiter.ind + mode[3]] = value * ((mode[3] == 0)?1:-1); else to[kiter.ind + mode[3]] += value * ((mode[3] == 0)?1:-1); } } } } double fastpm_apply_get_mode_transfer(PM * pm, FastPMFloat * from, ptrdiff_t * mode) { double result = 0.0; #pragma omp parallel { PMKIter kiter; pm_kiter_init(pm, &kiter); for(; !pm_kiter_stop(&kiter); pm_kiter_next(&kiter)) { if(( kiter.iabs[0] == mode[0] && kiter.iabs[1] == mode[1] && kiter.iabs[2] == mode[2] )) { result = from[kiter.ind + mode[3]]; } } } MPI_Allreduce(MPI_IN_PLACE, &result, 1, MPI_DOUBLE, MPI_SUM, pm_comm(pm)); return result; }
par_mgr.c
/******************************************************************************
 * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
 * HYPRE Project Developers. See the top-level COPYRIGHT file for details.
 *
 * SPDX-License-Identifier: (Apache-2.0 OR MIT)
 ******************************************************************************/

/******************************************************************************
 *
 * Two-grid system solver
 *
 *****************************************************************************/

#include "_hypre_parcsr_ls.h"
#include "par_amg.h"
#include "par_mgr.h"
#include <assert.h>

/* Create: allocate an MGR (multigrid reduction) solver object and fill in
 * every field with its default.  Release with hypre_MGRDestroy(). */
void *
hypre_MGRCreate()
{
   hypre_ParMGRData *mgr_data;

   mgr_data = hypre_CTAlloc(hypre_ParMGRData, 1, HYPRE_MEMORY_HOST);

   /* block data */
   (mgr_data -> block_size) = 1;
   (mgr_data -> num_coarse_indexes) = 1;
   (mgr_data -> block_num_coarse_indexes) = NULL;
   (mgr_data -> block_cf_marker) = NULL;

   /* general data */
   (mgr_data -> max_num_coarse_levels) = 10;
   (mgr_data -> A_array) = NULL;
   (mgr_data -> P_array) = NULL;
   (mgr_data -> RT_array) = NULL;
   (mgr_data -> RAP) = NULL;
   (mgr_data -> CF_marker_array) = NULL;
   (mgr_data -> coarse_indices_lvls) = NULL;
   (mgr_data -> F_array) = NULL;
   (mgr_data -> U_array) = NULL;
   (mgr_data -> residual) = NULL;
   (mgr_data -> rel_res_norms) = NULL;
   (mgr_data -> Vtemp) = NULL;
   (mgr_data -> Ztemp) = NULL;
   (mgr_data -> Utemp) = NULL;
   (mgr_data -> Ftemp) = NULL;

   (mgr_data -> num_iterations) = 0;
   (mgr_data -> num_interp_sweeps) = 1;
   (mgr_data -> num_restrict_sweeps) = 1;
   (mgr_data -> trunc_factor) = 0.0;
   (mgr_data -> max_row_sum) = 0.9;
   (mgr_data -> strong_threshold) = 0.25;
   (mgr_data -> S_commpkg_switch) = 1.0;
   (mgr_data -> P_max_elmts) = 0;

   /* coarse-grid solver defaults to an internally created BoomerAMG
    * (use_default_cgrid_solver == 1); see hypre_MGRDestroy */
   (mgr_data -> coarse_grid_solver) = NULL;
   (mgr_data -> coarse_grid_solver_setup) = NULL;
   (mgr_data -> coarse_grid_solver_solve) = NULL;

   (mgr_data -> global_smoother) = NULL;

   (mgr_data -> use_default_cgrid_solver) = 1;
   (mgr_data -> omega) = 1.;
   (mgr_data -> max_iter) = 20;
   (mgr_data -> tol) = 1.0e-7;
   (mgr_data -> relax_type) = 0;
   (mgr_data -> relax_order) = 1;
   (mgr_data -> interp_type) = 2;
   (mgr_data -> restrict_type) = 0;
   (mgr_data -> num_relax_sweeps) = 1;
   (mgr_data -> relax_weight) = 1.0;

   (mgr_data -> logging) = 0;
   (mgr_data -> print_level) = 0;

   (mgr_data -> l1_norms) = NULL;

   (mgr_data -> reserved_coarse_size) = 0;
   (mgr_data -> reserved_coarse_indexes) = NULL;
   (mgr_data -> reserved_Cpoint_local_indexes) = NULL;

   (mgr_data -> diaginv) = NULL;
   (mgr_data -> global_smooth_iters) = 1;
   (mgr_data -> global_smooth_type) = 0;

   (mgr_data -> set_non_Cpoints_to_F) = 0;

   (mgr_data -> Frelax_method) = 0;
   (mgr_data -> FrelaxVcycleData) = NULL;
   (mgr_data -> max_local_lvls) = 10;

   (mgr_data -> print_coarse_system) = 0;

   return (void *) mgr_data;
}

/*--------------------------------------------------------------------------
 *--------------------------------------------------------------------------*/

/* Destroy: free everything the MGR object owns, in the same grouping as it
 * was created.  Only level data up to num_coarse_levels is visited. */
HYPRE_Int
hypre_MGRDestroy( void *data )
{
   hypre_ParMGRData * mgr_data = (hypre_ParMGRData*) data;

   HYPRE_Int i;
   HYPRE_Int num_coarse_levels = (mgr_data -> num_coarse_levels);

   /* block info data */
   if ((mgr_data -> block_cf_marker))
   {
      for (i=0; i < (mgr_data -> max_num_coarse_levels); i++)
      {
         if ((mgr_data -> block_cf_marker)[i])
         {
            hypre_TFree((mgr_data -> block_cf_marker)[i], HYPRE_MEMORY_HOST);
         }
      }
      hypre_TFree((mgr_data -> block_cf_marker), HYPRE_MEMORY_HOST);
      (mgr_data -> block_cf_marker) = NULL;
   }

   if(mgr_data -> block_num_coarse_indexes)
   {
      hypre_TFree(mgr_data -> block_num_coarse_indexes, HYPRE_MEMORY_HOST);
      (mgr_data -> block_num_coarse_indexes) = NULL;
   }

   /* final residual vector */
   if((mgr_data -> residual))
   {
      hypre_ParVectorDestroy( (mgr_data -> residual) );
      (mgr_data -> residual) = NULL;
   }
   if((mgr_data -> rel_res_norms))
   {
      hypre_TFree( (mgr_data -> rel_res_norms) , HYPRE_MEMORY_HOST);
      (mgr_data -> rel_res_norms) = NULL;
   }

   /* temp vectors for solve phase */
   if((mgr_data -> Vtemp))
   {
      hypre_ParVectorDestroy( (mgr_data -> Vtemp) );
      (mgr_data -> Vtemp) = NULL;
   }
   if((mgr_data -> Ztemp))
   {
      hypre_ParVectorDestroy( (mgr_data -> Ztemp) );
      (mgr_data -> Ztemp) = NULL;
   }
   if((mgr_data -> Utemp))
   {
      hypre_ParVectorDestroy( (mgr_data -> Utemp) );
      (mgr_data -> Utemp) = NULL;
   }
   if((mgr_data -> Ftemp))
   {
      hypre_ParVectorDestroy( (mgr_data -> Ftemp) );
      (mgr_data -> Ftemp) = NULL;
   }

   /* coarse grid solver: destroyed only when MGR created it itself;
    * a user-supplied solver is the caller's responsibility */
   if((mgr_data -> use_default_cgrid_solver))
   {
      if((mgr_data -> coarse_grid_solver))
         hypre_BoomerAMGDestroy( (mgr_data -> coarse_grid_solver) );
      (mgr_data -> coarse_grid_solver) = NULL;
   }

   /* l1_norms */
   if ((mgr_data -> l1_norms))
   {
      for (i=0; i < (num_coarse_levels); i++)
         if ((mgr_data -> l1_norms)[i])
            hypre_TFree((mgr_data -> l1_norms)[i], HYPRE_MEMORY_HOST);
      hypre_TFree((mgr_data -> l1_norms), HYPRE_MEMORY_HOST);
   }

   /* coarse_indices_lvls */
   if ((mgr_data -> coarse_indices_lvls))
   {
      for (i=0; i < (num_coarse_levels); i++)
         if ((mgr_data -> coarse_indices_lvls)[i])
            hypre_TFree((mgr_data -> coarse_indices_lvls)[i], HYPRE_MEMORY_HOST);
      hypre_TFree((mgr_data -> coarse_indices_lvls), HYPRE_MEMORY_HOST);
   }

   /* linear system and cf marker array
    * NOTE: F_array/U_array/P_array/RT_array/CF_marker_array are indexed per
    * level; A_array[0] is the user's matrix and is deliberately not freed. */
   if(mgr_data -> A_array || mgr_data -> P_array || mgr_data -> RT_array || mgr_data -> CF_marker_array)
   {
      for (i=1; i < num_coarse_levels+1; i++)
      {
         hypre_ParVectorDestroy((mgr_data -> F_array)[i]);
         hypre_ParVectorDestroy((mgr_data -> U_array)[i]);

         if ((mgr_data -> P_array)[i-1])
            hypre_ParCSRMatrixDestroy((mgr_data -> P_array)[i-1]);

         if ((mgr_data -> RT_array)[i-1])
            hypre_ParCSRMatrixDestroy((mgr_data -> RT_array)[i-1]);

         hypre_TFree((mgr_data -> CF_marker_array)[i-1], HYPRE_MEMORY_HOST);
      }
      for (i=1; i < (num_coarse_levels); i++)
      {
         if ((mgr_data -> A_array)[i])
            hypre_ParCSRMatrixDestroy((mgr_data -> A_array)[i]);
      }
   }

   if((mgr_data -> F_array))
   {
      hypre_TFree((mgr_data -> F_array), HYPRE_MEMORY_HOST);
      (mgr_data -> F_array) = NULL;
   }
   if((mgr_data -> U_array))
   {
      hypre_TFree((mgr_data -> U_array), HYPRE_MEMORY_HOST);
      (mgr_data -> U_array) = NULL;
   }
   if((mgr_data -> A_array))
   {
      hypre_TFree((mgr_data -> A_array), HYPRE_MEMORY_HOST);
      (mgr_data -> A_array) = NULL;
   }
   if((mgr_data -> P_array))
   {
      hypre_TFree((mgr_data -> P_array), HYPRE_MEMORY_HOST);
      (mgr_data -> P_array) = NULL;
   }
   if((mgr_data -> RT_array))
   {
      hypre_TFree((mgr_data -> RT_array), HYPRE_MEMORY_HOST);
      (mgr_data -> RT_array) = NULL;
   }
   if((mgr_data -> CF_marker_array))
   {
      hypre_TFree((mgr_data -> CF_marker_array), HYPRE_MEMORY_HOST);
      (mgr_data -> CF_marker_array) = NULL;
   }
   if((mgr_data -> reserved_Cpoint_local_indexes))
   {
      hypre_TFree((mgr_data -> reserved_Cpoint_local_indexes), HYPRE_MEMORY_HOST);
      (mgr_data -> reserved_Cpoint_local_indexes) = NULL;
   }

   /* data for V-cycle F-relaxation */
   if (mgr_data -> FrelaxVcycleData)
   {
      for (i = 0; i < num_coarse_levels; i++)
      {
         if ((mgr_data -> FrelaxVcycleData)[i])
         {
            hypre_MGRDestroyFrelaxVcycleData((mgr_data -> FrelaxVcycleData)[i]);
            (mgr_data -> FrelaxVcycleData)[i] = NULL;
         }
      }
      hypre_TFree(mgr_data -> FrelaxVcycleData, HYPRE_MEMORY_HOST);
      mgr_data -> FrelaxVcycleData = NULL;
   }

   /* data for reserved coarse nodes */
   if(mgr_data -> reserved_coarse_indexes)
   {
      hypre_TFree(mgr_data -> reserved_coarse_indexes, HYPRE_MEMORY_HOST);
      (mgr_data -> reserved_coarse_indexes) = NULL;
   }

   /* coarse level matrix - RAP */
   if ((mgr_data -> RAP))
      hypre_ParCSRMatrixDestroy((mgr_data -> RAP));
   if ((mgr_data -> diaginv))
      hypre_TFree((mgr_data -> diaginv), HYPRE_MEMORY_HOST);

   /* mgr data */
   hypre_TFree(mgr_data, HYPRE_MEMORY_HOST);

   return hypre_error_flag;
}

/* Create data for V-cycle F-relaxtion: a minimal hypre_ParAMGData used as
 * the AMG hierarchy for the F-relaxation V-cycle.  Release with
 * hypre_MGRDestroyFrelaxVcycleData(). */
void *
hypre_MGRCreateFrelaxVcycleData()
{
   hypre_ParAMGData *vdata = hypre_CTAlloc(hypre_ParAMGData, 1, HYPRE_MEMORY_HOST);

   hypre_ParAMGDataAArray(vdata) = NULL;
   hypre_ParAMGDataPArray(vdata) = NULL;
   hypre_ParAMGDataFArray(vdata) = NULL;
   hypre_ParAMGDataCFMarkerArray(vdata) = NULL;
   hypre_ParAMGDataVtemp(vdata)  = NULL;
   hypre_ParAMGDataAMat(vdata)  = NULL;
   hypre_ParAMGDataBVec(vdata)  = NULL;
   hypre_ParAMGDataZtemp(vdata)  = NULL;
   hypre_ParAMGDataCommInfo(vdata) = NULL;
   hypre_ParAMGDataUArray(vdata) = NULL;
   hypre_ParAMGDataNewComm(vdata) = hypre_MPI_COMM_NULL;
   hypre_ParAMGDataNumLevels(vdata) = 0;
   hypre_ParAMGDataMaxLevels(vdata) = 10;

   return (void *) vdata;
}

/* Destroy data for V-cycle F-relaxation.  Vtemp/Ztemp are NOT destroyed
 * here: they alias vectors owned by the enclosing MGR object (see the
 * commented-out calls below). */
HYPRE_Int
hypre_MGRDestroyFrelaxVcycleData( void *data )
{
   hypre_ParAMGData * vdata = (hypre_ParAMGData*) data;
   HYPRE_Int i;
   HYPRE_Int num_levels = hypre_ParAMGDataNumLevels(vdata);
   MPI_Comm new_comm = hypre_ParAMGDataNewComm(vdata);

   for (i=1; i < num_levels; i++)
   {
      hypre_ParVectorDestroy(hypre_ParAMGDataFArray(vdata)[i]);
      hypre_ParVectorDestroy(hypre_ParAMGDataUArray(vdata)[i]);

      if (hypre_ParAMGDataAArray(vdata)[i])
         hypre_ParCSRMatrixDestroy(hypre_ParAMGDataAArray(vdata)[i]);

      if (hypre_ParAMGDataPArray(vdata)[i-1])
         hypre_ParCSRMatrixDestroy(hypre_ParAMGDataPArray(vdata)[i-1]);

      hypre_TFree(hypre_ParAMGDataCFMarkerArray(vdata)[i-1], HYPRE_MEMORY_HOST);
   }

   /* see comments in par_coarsen.c regarding special case for CF_marker */
   if (num_levels == 1)
   {
      hypre_TFree(hypre_ParAMGDataCFMarkerArray(vdata)[0], HYPRE_MEMORY_HOST);
   }

   /* Points to vtemp of mgr_data, which is already destroyed */
   // hypre_ParVectorDestroy(hypre_ParAMGDataVtemp(vdata));
   hypre_TFree(hypre_ParAMGDataFArray(vdata), HYPRE_MEMORY_HOST);
   hypre_TFree(hypre_ParAMGDataUArray(vdata), HYPRE_MEMORY_HOST);
   hypre_TFree(hypre_ParAMGDataAArray(vdata), HYPRE_MEMORY_HOST);
   hypre_TFree(hypre_ParAMGDataPArray(vdata), HYPRE_MEMORY_HOST);
   hypre_TFree(hypre_ParAMGDataCFMarkerArray(vdata), HYPRE_MEMORY_HOST);

   /* Points to ztemp of mgr_data, which is already destroyed */
   /*
   if (hypre_ParAMGDataZtemp(vdata))
      hypre_ParVectorDestroy(hypre_ParAMGDataZtemp(vdata));
   */

   if (hypre_ParAMGDataAMat(vdata)) hypre_TFree(hypre_ParAMGDataAMat(vdata), HYPRE_MEMORY_HOST);
   if (hypre_ParAMGDataBVec(vdata)) hypre_TFree(hypre_ParAMGDataBVec(vdata), HYPRE_MEMORY_HOST);
   if (hypre_ParAMGDataCommInfo(vdata)) hypre_TFree(hypre_ParAMGDataCommInfo(vdata), HYPRE_MEMORY_HOST);

   if (new_comm != hypre_MPI_COMM_NULL)
   {
      hypre_MPI_Comm_free (&new_comm);
   }
   hypre_TFree(vdata, HYPRE_MEMORY_HOST);

   return hypre_error_flag;
}

/* Set C-point variables for each reduction level */
/* Currently not implemented */
/* NOTE(review): stores the caller's arrays directly (no copy) -- the caller
 * must keep them alive for the lifetime of the solver. */
HYPRE_Int
hypre_MGRSetReductionLevelCpoints( void      *mgr_vdata,
                                   HYPRE_Int  nlevels,
                                   HYPRE_Int *num_coarse_points,
                                   HYPRE_Int  **level_coarse_indexes)
{
   hypre_ParMGRData   *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   (mgr_data -> num_coarse_levels) = nlevels;
   (mgr_data -> num_coarse_per_level) = num_coarse_points;
   (mgr_data -> level_coarse_indexes) = level_coarse_indexes;
   return hypre_error_flag;
}

/* Initialize some data */
/* Set whether non-coarse points on each level should be explicitly tagged as
 * F-points */
HYPRE_Int
hypre_MGRSetNonCpointsToFpoints( void *mgr_vdata, HYPRE_Int nonCptToFptFlag)
{
   hypre_ParMGRData   *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   (mgr_data -> set_non_Cpoints_to_F) = nonCptToFptFlag;
   return hypre_error_flag;
}

/* Initialize/ set block data information: deep-copies the per-level coarse
 * index description into freshly allocated block_cf_marker /
 * block_num_coarse_indexes arrays (freeing any previous ones). */
HYPRE_Int
hypre_MGRSetCpointsByBlock( void      *mgr_vdata,
                            HYPRE_Int  block_size,
                            HYPRE_Int  max_num_levels,
                            HYPRE_Int *block_num_coarse_points,
                            HYPRE_Int  **block_coarse_indexes)
{
   HYPRE_Int  i,j;
   HYPRE_Int **block_cf_marker = NULL;
   HYPRE_Int *block_num_coarse_indexes = NULL;

   hypre_ParMGRData   *mgr_data = (hypre_ParMGRData*) mgr_vdata;

   /* free block cf_marker data if not previously destroyed */
   if((mgr_data -> block_cf_marker) != NULL)
   {
      for (i=0; i < (mgr_data -> max_num_coarse_levels); i++)
      {
         if ((mgr_data -> block_cf_marker)[i])
         {
            hypre_TFree((mgr_data -> block_cf_marker)[i], HYPRE_MEMORY_HOST);
            (mgr_data -> block_cf_marker)[i] = NULL;
         }
      }
      hypre_TFree(mgr_data -> block_cf_marker, HYPRE_MEMORY_HOST);
      (mgr_data -> block_cf_marker) = NULL;
   }
   if((mgr_data -> block_num_coarse_indexes))
   {
      hypre_TFree((mgr_data -> block_num_coarse_indexes), HYPRE_MEMORY_HOST);
      (mgr_data -> block_num_coarse_indexes) = NULL;
   }

   /* store block cf_marker */
   block_cf_marker = hypre_CTAlloc(HYPRE_Int *, max_num_levels, HYPRE_MEMORY_HOST);
   for (i = 0; i < max_num_levels; i++)
   {
      block_cf_marker[i] = hypre_CTAlloc(HYPRE_Int, block_size, HYPRE_MEMORY_HOST);
      /* NOTE(review): memset writes FMRK into every byte of each HYPRE_Int;
       * this is only correct when FMRK is 0 or -1 -- confirm the value in
       * par_mgr.h. */
      memset(block_cf_marker[i], FMRK, block_size*sizeof(HYPRE_Int));
   }
   for (i = 0; i < max_num_levels; i++)
   {
      for(j=0; j<block_num_coarse_points[i]; j++)
      {
         (block_cf_marker[i])[block_coarse_indexes[i][j]] = CMRK;
      }
   }

   /* store block_num_coarse_points */
   if(max_num_levels > 0)
   {
      block_num_coarse_indexes = hypre_CTAlloc(HYPRE_Int, max_num_levels, HYPRE_MEMORY_HOST);
      for(i=0; i<max_num_levels; i++)
         block_num_coarse_indexes[i] = block_num_coarse_points[i];
   }
   /* set block data */
   (mgr_data -> max_num_coarse_levels) = max_num_levels;
   (mgr_data -> block_size) = block_size;
   (mgr_data -> block_num_coarse_indexes) = block_num_coarse_indexes;
   (mgr_data -> block_cf_marker) = block_cf_marker;

   return hypre_error_flag;
}

/* Set number of points that remain part of the coarse grid throughout the
 * hierarchy.  The indexes are deep-copied (widened to HYPRE_BigInt). */
HYPRE_Int
hypre_MGRSetReservedCoarseNodes(void *mgr_vdata, HYPRE_Int reserved_coarse_size, HYPRE_Int *reserved_cpt_index)
{
   hypre_ParMGRData   *mgr_data = (hypre_ParMGRData*) mgr_vdata;
   HYPRE_BigInt *reserved_coarse_indexes = NULL;
   HYPRE_Int i;

   if (!mgr_data)
   {
      hypre_error_w_msg(HYPRE_ERROR_GENERIC,"Warning! MGR object empty!\n");
      return hypre_error_flag;
   }

   if(reserved_coarse_size < 0)
   {
      hypre_error_in_arg(2);
      return hypre_error_flag;
   }
   /* free data not previously destroyed */
   if((mgr_data -> reserved_coarse_indexes))
   {
      hypre_TFree((mgr_data -> reserved_coarse_indexes), HYPRE_MEMORY_HOST);
      (mgr_data -> reserved_coarse_indexes) = NULL;
   }

   /* set reserved coarse nodes */
   if(reserved_coarse_size > 0)
   {
      reserved_coarse_indexes = hypre_CTAlloc(HYPRE_BigInt, reserved_coarse_size, HYPRE_MEMORY_HOST);
      for(i=0; i<reserved_coarse_size; i++)
         reserved_coarse_indexes[i] = reserved_cpt_index[i];
   }
   (mgr_data -> reserved_coarse_size) = reserved_coarse_size;
   (mgr_data -> reserved_coarse_indexes) = reserved_coarse_indexes;

   return hypre_error_flag;
}

/* Set CF marker array: compute the C/F splitting for one MGR level.
 * When cflag is set (last level) the splitting is exactly the fixed coarse
 * set; otherwise BoomerAMG coarsening produces an initial splitting that is
 * then corrected so every fixed coarse index is a C-point and all remaining
 * points are plain F-points.  On return *CF_marker is a freshly allocated
 * array of size nloc owned by the caller. */
HYPRE_Int
hypre_MGRCoarsen(hypre_ParCSRMatrix *S,
                 hypre_ParCSRMatrix *A,
                 HYPRE_Int fixed_coarse_size,
                 HYPRE_Int *fixed_coarse_indexes,
                 HYPRE_Int debug_flag,
                 HYPRE_Int **CF_marker,
                 HYPRE_Int cflag)
{
   HYPRE_Int *cf_marker, i, row, nc;
   HYPRE_Int *cindexes = fixed_coarse_indexes;

   HYPRE_Int nloc =  hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A));

   /* If this is the last level, coarsen onto fixed coarse set */
   if(cflag)
   {
      if(*CF_marker != NULL)
      {
         hypre_TFree(*CF_marker, HYPRE_MEMORY_HOST);
      }
      cf_marker = hypre_CTAlloc(HYPRE_Int, nloc, HYPRE_MEMORY_HOST);
      /* NOTE(review): byte-wise memset of FMRK -- see note in
       * hypre_MGRSetCpointsByBlock. */
      memset(cf_marker, FMRK, nloc*sizeof(HYPRE_Int));

      /* first mark fixed coarse set */
      nc = fixed_coarse_size;
      for(i = 0; i < nc; i++)
      {
         cf_marker[cindexes[i]] = CMRK;
      }
   }
   else
   {
      /* First coarsen to get initial CF splitting.
       * This is then followed by updating the CF marker to pass
       * coarse information to the next levels. NOTE: It may be
       * convenient to implement this way (allows the use of multiple
       * coarsening strategies without changing too much code),
       * but not necessarily the best option, compared to initializing
       * CF_marker first and then coarsening on subgraph which excludes
       * the initialized coarse nodes.
       *
       * NOTE(review): unlike the cflag branch, a non-NULL incoming
       * *CF_marker is not freed here before being overwritten below --
       * confirm callers always pass NULL on this path (possible leak).
       */
      hypre_BoomerAMGCoarsen(S, A, 0, debug_flag, &cf_marker);

      /* Update CF_marker to correct Cpoints marked as Fpoints. */
      nc = fixed_coarse_size;
      for(i = 0; i < nc; i++)
      {
         cf_marker[cindexes[i]] = CMRK;
      }

      /* set F-points to FMRK. This is necessary since the different coarsening schemes differentiate
       * between type of F-points (example Ruge coarsening). We do not need that distinction here.
       */
      for (row = 0; row <nloc; row++)
      {
         if(cf_marker[row] == CMRK) continue;
         cf_marker[row] = FMRK;
      }
#if 0
      /* Disabled code: references index_i, S_CMRK and last_level, which are
       * not declared in this scope -- must be revised before enabling. */
      /* IMPORTANT: Update coarse_indexes array to define the positions of the fixed coarse points
       * in the next level.
       */
      nc = 0;
      index_i = 0;
      for (row = 0; row <nloc; row++)
      {
         /* loop through new c-points */
         if(cf_marker[row] == CMRK) nc++;
         else if(cf_marker[row] == S_CMRK)
         {
            /* previously marked c-point is part of fixed coarse set. Track its current local index */
            cindexes[index_i++] = nc;
            /* reset c-point from S_CMRK to CMRK */
            cf_marker[row] = CMRK;
            nc++;
         }
         /* set F-points to FMRK. This is necessary since the different coarsening schemes differentiate
          * between type of F-points (example Ruge coarsening). We do not need that distinction here.
          */
         else
         {
            cf_marker[row] = FMRK;
         }
      }
      /* check if this should be last level */
      if( nc == fixed_coarse_size)
         last_level = 1;
      //printf(" nc = %d and fixed coarse size = %d \n", nc, fixed_coarse_size);
#endif
   }

   /* set CF_marker */
   *CF_marker = cf_marker;

   return hypre_error_flag;
}

/* Interpolation for MGR - Adapted from BoomerAMGBuildInterp */
/* (continues past this chunk) */
HYPRE_Int
hypre_MGRBuildP( hypre_ParCSRMatrix   *A,
                 HYPRE_Int            *CF_marker,
                 HYPRE_BigInt         *num_cpts_global,
                 HYPRE_Int             method,
                 HYPRE_Int             debug_flag,
                 hypre_ParCSRMatrix  **P_ptr)
{
   MPI_Comm                comm = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRCommPkg     *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_ParCSRCommHandle  *comm_handle;

   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real      *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int       *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int       *A_diag_j = hypre_CSRMatrixJ(A_diag);

   hypre_CSRMatrix *A_offd         = hypre_ParCSRMatrixOffd(A);
   HYPRE_Real      *A_offd_data    = hypre_CSRMatrixData(A_offd);
   HYPRE_Int       *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int       *A_offd_j = hypre_CSRMatrixJ(A_offd);
   HYPRE_Int        num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
   HYPRE_Real      *a_diag;

   hypre_ParCSRMatrix    *P;
   HYPRE_BigInt          *col_map_offd_P;
   HYPRE_Int             *tmp_map_offd = NULL;

   HYPRE_Int             *CF_marker_offd = NULL;

   hypre_CSRMatrix *P_diag;
   hypre_CSRMatrix *P_offd;

   HYPRE_Real      *P_diag_data;
   HYPRE_Int       *P_diag_i;
   HYPRE_Int       *P_diag_j;
   HYPRE_Real      *P_offd_data;
   HYPRE_Int       *P_offd_i;
   HYPRE_Int       *P_offd_j;

   HYPRE_Int        P_diag_size, P_offd_size;

   HYPRE_Int       *P_marker, *P_marker_offd;

   HYPRE_Int        jj_counter,jj_counter_offd;
   HYPRE_Int       *jj_count, *jj_count_offd;
   // HYPRE_Int              jj_begin_row,jj_begin_row_offd;
   // HYPRE_Int              jj_end_row,jj_end_row_offd;

   HYPRE_Int        start_indexing = 0; /* start indexing for P_data at 0 */

   HYPRE_Int        n_fine = hypre_CSRMatrixNumRows(A_diag);

   HYPRE_Int       *fine_to_coarse;
   //HYPRE_BigInt    *fine_to_coarse_offd;
   HYPRE_Int       *coarse_counter;
   HYPRE_Int        coarse_shift;
   HYPRE_BigInt     total_global_cpts;
   //HYPRE_BigInt     my_first_cpt;
   HYPRE_Int
num_cols_P_offd; HYPRE_Int i,i1; HYPRE_Int j,jl,jj; HYPRE_Int start; HYPRE_Real one = 1.0; HYPRE_Int my_id; HYPRE_Int num_procs; HYPRE_Int num_threads; HYPRE_Int num_sends; HYPRE_Int index; HYPRE_Int ns, ne, size, rest; HYPRE_Int *int_buf_data; HYPRE_Real wall_time; /* for debugging instrumentation */ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm,&my_id); num_threads = hypre_NumThreads(); #ifdef HYPRE_NO_GLOBAL_PARTITION //my_first_cpt = num_cpts_global[0]; if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1]; hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm); #else //my_first_cpt = num_cpts_global[my_id]; total_global_cpts = num_cpts_global[num_procs]; #endif /*------------------------------------------------------------------- * Get the CF_marker data for the off-processor columns *-------------------------------------------------------------------*/ if (debug_flag < 0) { debug_flag = -debug_flag; } if (debug_flag==4) wall_time = time_getWallclockSeconds(); if (num_cols_A_offd) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST); if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, CF_marker_offd); hypre_ParCSRCommHandleDestroy(comm_handle); if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Comm 1 CF_marker = %f\n", my_id, wall_time); fflush(NULL); } 
/*----------------------------------------------------------------------- * First Pass: Determine size of P and fill in fine_to_coarse mapping. *-----------------------------------------------------------------------*/ /*----------------------------------------------------------------------- * Intialize counters and allocate mapping vector. *-----------------------------------------------------------------------*/ coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1; jj_counter = start_indexing; jj_counter_offd = start_indexing; /*----------------------------------------------------------------------- * Loop over fine grid. *-----------------------------------------------------------------------*/ /* RDF: this looks a little tricky, but doable */ #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE #endif #endif for (j = 0; j < num_threads; j++) { size = n_fine/num_threads; rest = n_fine - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) { /*-------------------------------------------------------------------- * If i is a C-point, interpolation is the identity. Also set up * mapping vector. 
*--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { jj_count[j]++; fine_to_coarse[i] = coarse_counter[j]; coarse_counter[j]++; } /*-------------------------------------------------------------------- * If i is an F-point, interpolation is the approximation of A_{ff}^{-1}A_{fc} *--------------------------------------------------------------------*/ else { for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++) { i1 = A_diag_j[jj]; if (CF_marker[i1] >= 0) { jj_count[j]++; } } if (num_procs > 1) { for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { i1 = A_offd_j[jj]; if (CF_marker_offd[i1] >= 0) { jj_count_offd[j]++; } } } } } } /*----------------------------------------------------------------------- * Allocate arrays. *-----------------------------------------------------------------------*/ for (i=0; i < num_threads-1; i++) { coarse_counter[i+1] += coarse_counter[i]; jj_count[i+1] += jj_count[i]; jj_count_offd[i+1] += jj_count_offd[i]; } i = num_threads-1; jj_counter = jj_count[i]; jj_counter_offd = jj_count_offd[i]; P_diag_size = jj_counter; P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_SHARED); P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_SHARED); P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_SHARED); P_diag_i[n_fine] = jj_counter; P_offd_size = jj_counter_offd; P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_SHARED); P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_SHARED); P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_SHARED); /*----------------------------------------------------------------------- * Intialize some stuff. 
*-----------------------------------------------------------------------*/ jj_counter = start_indexing; jj_counter_offd = start_indexing; if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Internal work 1 = %f\n", my_id, wall_time); fflush(NULL); } /*----------------------------------------------------------------------- * Send and receive fine_to_coarse info. *-----------------------------------------------------------------------*/ if (debug_flag==4) wall_time = time_getWallclockSeconds(); //fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, num_cols_A_offd, HYPRE_MEMORY_HOST); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE #endif #endif for (j = 0; j < num_threads; j++) { coarse_shift = 0; if (j > 0) coarse_shift = coarse_counter[j-1]; size = n_fine/num_threads; rest = n_fine - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) { fine_to_coarse[i] += coarse_shift; } } /* index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) big_buf_data[index++] = fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]+ my_first_cpt; } comm_handle = hypre_ParCSRCommHandleCreate( 21, comm_pkg, big_buf_data, fine_to_coarse_offd); hypre_ParCSRCommHandleDestroy(comm_handle); if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Comm 4 FineToCoarse = %f\n", my_id, wall_time); fflush(NULL); } */ if (debug_flag==4) wall_time = time_getWallclockSeconds(); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif //for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt; /*----------------------------------------------------------------------- * Loop 
    * over fine grid points (second pass: fill P row by row).
    *-----------------------------------------------------------------------*/

   /* Cache 1/diag(A); used as the F-point scaling for method == 2. */
   a_diag = hypre_CTAlloc(HYPRE_Real, n_fine, HYPRE_MEMORY_HOST);
   for (i = 0; i < n_fine; i++)
   {
      for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
      {
         i1 = A_diag_j[jj];
         if ( i==i1 ) /* diagonal of A only */
         {
            a_diag[i] = 1.0/A_diag_data[jj];
         }
      }
   }

#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,jl,i1,jj,ns,ne,size,rest,P_marker,P_marker_offd,jj_counter,jj_counter_offd,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE
#endif
#endif
   for (jl = 0; jl < num_threads; jl++)
   {
      /* Same row partitioning as the counting pass. */
      size = n_fine/num_threads;
      rest = n_fine - size*num_threads;
      if (jl < rest)
      {
         ns = jl*size+jl;
         ne = (jl+1)*size+jl+1;
      }
      else
      {
         ns = jl*size+rest;
         ne = (jl+1)*size+rest;
      }
      /* Each thread starts writing at the cumulative offset computed in pass 1. */
      jj_counter = 0;
      if (jl > 0) jj_counter = jj_count[jl-1];
      jj_counter_offd = 0;
      if (jl > 0) jj_counter_offd = jj_count_offd[jl-1];

      P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
      if (num_cols_A_offd)
         P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
      else
         P_marker_offd = NULL;

      for (i = 0; i < n_fine; i++)
      {
         P_marker[i] = -1;
      }
      for (i = 0; i < num_cols_A_offd; i++)
      {
         P_marker_offd[i] = -1;
      }

      for (i = ns; i < ne; i++)
      {
         /*--------------------------------------------------------------------
          * If i is a c-point, interpolation is the identity.
          *--------------------------------------------------------------------*/
         if (CF_marker[i] >= 0)
         {
            P_diag_i[i] = jj_counter;
            P_diag_j[jj_counter] = fine_to_coarse[i];
            P_diag_data[jj_counter] = one;
            jj_counter++;
         }

         /*--------------------------------------------------------------------
          * If i is an F-point, build interpolation.
          *--------------------------------------------------------------------*/
         else
         {
            /* Diagonal part of P */
            P_diag_i[i] = jj_counter;
            for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
            {
               i1 = A_diag_j[jj];

               /*--------------------------------------------------------------
                * If neighbor i1 is a C-point, set column number in P_diag_j
                * and initialize interpolation weight to zero.
                *--------------------------------------------------------------*/
               if (CF_marker[i1] >= 0)
               {
                  P_marker[i1] = jj_counter;
                  P_diag_j[jj_counter] = fine_to_coarse[i1];
                  /* method 0: injection-style zero weight;
                   * method 1: -A_fc;
                   * method 2: -diag(A_ff)^{-1} A_fc (diagonal approximation). */
                  if(method == 0)
                  {
                     P_diag_data[jj_counter] = 0.0;
                  }
                  else if (method == 1)
                  {
                     P_diag_data[jj_counter] = - A_diag_data[jj];
                  }
                  else if (method == 2)
                  {
                     P_diag_data[jj_counter] = - A_diag_data[jj]*a_diag[i];
                  }
                  jj_counter++;
               }
            }

            /* Off-Diagonal part of P */
            P_offd_i[i] = jj_counter_offd;
            if (num_procs > 1)
            {
               for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
               {
                  i1 = A_offd_j[jj];

                  /*-----------------------------------------------------------
                   * If neighbor i1 is a C-point, set column number in P_offd_j
                   * and initialize interpolation weight to zero.
                   *-----------------------------------------------------------*/
                  if (CF_marker_offd[i1] >= 0)
                  {
                     P_marker_offd[i1] = jj_counter_offd;
                     /*P_offd_j[jj_counter_offd] = fine_to_coarse_offd[i1];*/
                     /* Keep local offd column index; compressed to P's offd
                      * numbering after assembly. */
                     P_offd_j[jj_counter_offd] = i1;
                     if(method == 0)
                     {
                        P_offd_data[jj_counter_offd] = 0.0;
                     }
                     else if (method == 1)
                     {
                        P_offd_data[jj_counter_offd] = - A_offd_data[jj];
                     }
                     else if (method == 2)
                     {
                        P_offd_data[jj_counter_offd] = - A_offd_data[jj]*a_diag[i];
                     }
                     jj_counter_offd++;
                  }
               }
            }
         }
         /* Row i+1 of offd starts where row i ended. */
         P_offd_i[i+1] = jj_counter_offd;
      }
      hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
      hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
   }
   hypre_TFree(a_diag, HYPRE_MEMORY_HOST);

   /* Assemble P from the filled CSR arrays. */
   P = hypre_ParCSRMatrixCreate(comm,
                                hypre_ParCSRMatrixGlobalNumRows(A),
                                total_global_cpts,
                                hypre_ParCSRMatrixColStarts(A),
                                num_cpts_global,
                                0,
                                P_diag_i[n_fine],
                                P_offd_i[n_fine]);

   P_diag = hypre_ParCSRMatrixDiag(P);
   hypre_CSRMatrixData(P_diag) = P_diag_data;
   hypre_CSRMatrixI(P_diag) = P_diag_i;
   hypre_CSRMatrixJ(P_diag) = P_diag_j;
   P_offd = hypre_ParCSRMatrixOffd(P);
   hypre_CSRMatrixData(P_offd) = P_offd_data;
   hypre_CSRMatrixI(P_offd) = P_offd_i;
   hypre_CSRMatrixJ(P_offd) = P_offd_j;
   hypre_ParCSRMatrixOwnsRowStarts(P) = 0;

   /* Compress P_offd: find which A-offd columns actually appear in P. */
   num_cols_P_offd = 0;
   if (P_offd_size)
   {
      P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);

#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
      for (i=0; i < num_cols_A_offd; i++)
         P_marker[i] = 0;

      num_cols_P_offd = 0;
      for (i=0; i < P_offd_size; i++)
      {
         index = P_offd_j[i];
         if (!P_marker[index])
         {
            num_cols_P_offd++;
            P_marker[index] = 1;
         }
      }

      col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST);
      tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST);

      /* tmp_map_offd[i] = i-th used A-offd column (ascending). */
      index = 0;
      for (i=0; i < num_cols_P_offd; i++)
      {
         while (P_marker[index]==0) index++;
         tmp_map_offd[i] = index++;
      }

#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
      /* Renumber P_offd_j into the compressed 0..num_cols_P_offd-1 range. */
      for (i=0; i < P_offd_size; i++)
         P_offd_j[i] =
            hypre_BinarySearch(tmp_map_offd, P_offd_j[i], num_cols_P_offd);
      hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
   }

   /* Restore special CF marker value used during coarsening. */
   for (i=0; i < n_fine; i++)
      if (CF_marker[i] == -3) CF_marker[i] = -1;

   if (num_cols_P_offd)
   {
      hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
      hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
   }
   /* Build the communication package of P (also fills its offd column map). */
   hypre_GetCommPkgRTFromCommPkgA(P,A, fine_to_coarse, tmp_map_offd);

   *P_ptr = P;

   hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
   hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
   //hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST);
   hypre_TFree(jj_count, HYPRE_MEMORY_HOST);
   hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST);

   return(0);
}

/* Interpolation for MGR - Dynamic Row Sum method
 *
 * Builds an interpolation operator P for MGR where each F-point row
 * approximates A_{ff}^{-1}A_{fc} using the diagonal of A.
 * NOTE(review): blk_size and reserved_coarse_size are currently unused in
 * the visible body (the "DRS indexes" section below is empty) — confirm.
 */
HYPRE_Int
hypre_MGRBuildPDRS( hypre_ParCSRMatrix   *A,
                    HYPRE_Int            *CF_marker,
                    HYPRE_BigInt         *num_cpts_global,
                    HYPRE_Int             blk_size,
                    HYPRE_Int             reserved_coarse_size,
                    HYPRE_Int             debug_flag,
                    hypre_ParCSRMatrix  **P_ptr)
{
   MPI_Comm                 comm = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRCommPkg     *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_ParCSRCommHandle  *comm_handle;

   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real      *A_diag_data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int       *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int       *A_diag_j = hypre_CSRMatrixJ(A_diag);

   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Real      *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int       *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int       *A_offd_j = hypre_CSRMatrixJ(A_offd);
   HYPRE_Int        num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
   HYPRE_Real      *a_diag;

   hypre_ParCSRMatrix *P;
   HYPRE_BigInt       *col_map_offd_P;
   HYPRE_Int          *tmp_map_offd;

   HYPRE_Int       *CF_marker_offd = NULL;

   hypre_CSRMatrix *P_diag;
   hypre_CSRMatrix *P_offd;

   HYPRE_Real      *P_diag_data;
   HYPRE_Int       *P_diag_i;
   HYPRE_Int       *P_diag_j;
   HYPRE_Real      *P_offd_data;
   HYPRE_Int       *P_offd_i;
   HYPRE_Int       *P_offd_j;

   HYPRE_Int        P_diag_size, P_offd_size;
   HYPRE_Int       *P_marker, *P_marker_offd;

   HYPRE_Int        jj_counter,jj_counter_offd;
   HYPRE_Int       *jj_count, *jj_count_offd;
   //   HYPRE_Int              jj_begin_row,jj_begin_row_offd;
   //   HYPRE_Int              jj_end_row,jj_end_row_offd;

   HYPRE_Int        start_indexing = 0; /* start indexing for P_data at 0 */

   HYPRE_Int        n_fine = hypre_CSRMatrixNumRows(A_diag);

   HYPRE_Int       *fine_to_coarse;
   //HYPRE_Int             *fine_to_coarse_offd;
   HYPRE_Int       *coarse_counter;
   HYPRE_Int        coarse_shift;
   HYPRE_BigInt     total_global_cpts;
   //HYPRE_BigInt     my_first_cpt;
   HYPRE_Int        num_cols_P_offd;

   HYPRE_Int        i,i1;
   HYPRE_Int        j,jl,jj;
   HYPRE_Int        start;

   HYPRE_Real       one  = 1.0;

   HYPRE_Int        my_id;
   HYPRE_Int        num_procs;
   HYPRE_Int        num_threads;
   HYPRE_Int        num_sends;
   HYPRE_Int        index;
   HYPRE_Int        ns, ne, size, rest;
   HYPRE_Int       *int_buf_data;

   HYPRE_Real       wall_time;  /* for debugging instrumentation  */

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm,&my_id);
   num_threads = hypre_NumThreads();

#ifdef HYPRE_NO_GLOBAL_PARTITION
   //my_first_cpt = num_cpts_global[0];
   /* Last rank holds the global number of coarse points; broadcast it. */
   if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
   hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
#else
   //my_first_cpt = num_cpts_global[my_id];
   total_global_cpts = num_cpts_global[num_procs];
#endif

   /*-------------------------------------------------------------------
    * Get the CF_marker data for the off-processor columns
    *-------------------------------------------------------------------*/
   if (debug_flag < 0)
   {
      debug_flag = -debug_flag;
   }

   if (debug_flag==4) wall_time = time_getWallclockSeconds();

   if (num_cols_A_offd) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);

   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(A);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }

   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST);

   /* Pack CF markers of the rows other ranks need, then exchange. */
   index = 0;
   for (i = 0; i < num_sends; i++)
   {
      start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
      for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
         int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
   }

   comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, CF_marker_offd);
   hypre_ParCSRCommHandleDestroy(comm_handle);

   if (debug_flag==4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d Interp: Comm 1 CF_marker = %f\n", my_id, wall_time);
      fflush(NULL);
   }

   /*-----------------------------------------------------------------------
    * First Pass: Determine size of P and fill in fine_to_coarse mapping.
    *-----------------------------------------------------------------------*/

   /*-----------------------------------------------------------------------
    * Initialize counters and allocate mapping vector.
    *-----------------------------------------------------------------------*/

   coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
   jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
   jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);

   fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
   for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1;

   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;

   /*-----------------------------------------------------------------------
    * Loop over fine grid.
    *-----------------------------------------------------------------------*/

   /* RDF: this looks a little tricky, but doable */
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE
#endif
#endif
   /* Counting pass: each thread takes a contiguous chunk of rows and counts
    * the nonzeros its rows contribute to P_diag / P_offd. */
   for (j = 0; j < num_threads; j++)
   {
      size = n_fine/num_threads;
      rest = n_fine - size*num_threads;
      if (j < rest)
      {
         ns = j*size+j;
         ne = (j+1)*size+j+1;
      }
      else
      {
         ns = j*size+rest;
         ne = (j+1)*size+rest;
      }

      for (i = ns; i < ne; i++)
      {
         /*--------------------------------------------------------------------
          * If i is a C-point, interpolation is the identity. Also set up
          * mapping vector.
          *--------------------------------------------------------------------*/
         if (CF_marker[i] >= 0)
         {
            jj_count[j]++;
            fine_to_coarse[i] = coarse_counter[j];
            coarse_counter[j]++;
         }
         /*--------------------------------------------------------------------
          * If i is an F-point, interpolation is the approximation of
          * A_{ff}^{-1}A_{fc}
          *--------------------------------------------------------------------*/
         else
         {
            for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
            {
               i1 = A_diag_j[jj];
               if (CF_marker[i1] >= 0)
               {
                  jj_count[j]++;
               }
            }

            if (num_procs > 1)
            {
               for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
               {
                  i1 = A_offd_j[jj];
                  if (CF_marker_offd[i1] >= 0)
                  {
                     jj_count_offd[j]++;
                  }
               }
            }
         }
         /*--------------------------------------------------------------------
          * Set up the indexes for the DRS method
          * (NOTE(review): intentionally empty in this version — confirm)
          *--------------------------------------------------------------------*/
      }
   }

   /*-----------------------------------------------------------------------
    * Allocate arrays.
    *-----------------------------------------------------------------------*/
   /* Cumulative-sum the per-thread counts to get write offsets and totals. */
   for (i=0; i < num_threads-1; i++)
   {
      coarse_counter[i+1] += coarse_counter[i];
      jj_count[i+1] += jj_count[i];
      jj_count_offd[i+1] += jj_count_offd[i];
   }
   i = num_threads-1;
   jj_counter = jj_count[i];
   jj_counter_offd = jj_count_offd[i];

   P_diag_size = jj_counter;

   P_diag_i    = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST);
   P_diag_j    = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_HOST);
   P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_HOST);

   P_diag_i[n_fine] = jj_counter;

   P_offd_size = jj_counter_offd;

   P_offd_i    = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST);
   P_offd_j    = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST);
   P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_HOST);

   /*-----------------------------------------------------------------------
    * Initialize some stuff.
    *-----------------------------------------------------------------------*/

   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;

   if (debug_flag==4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d Interp: Internal work 1 = %f\n", my_id, wall_time);
      fflush(NULL);
   }

   /*-----------------------------------------------------------------------
    * Send and receive fine_to_coarse info.
    *-----------------------------------------------------------------------*/

   if (debug_flag==4) wall_time = time_getWallclockSeconds();

   //fine_to_coarse_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);

#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE
#endif
#endif
   /* Shift each thread's coarse numbering by preceding threads' counts. */
   for (j = 0; j < num_threads; j++)
   {
      coarse_shift = 0;
      if (j > 0) coarse_shift = coarse_counter[j-1];
      size = n_fine/num_threads;
      rest = n_fine - size*num_threads;
      if (j < rest)
      {
         ns = j*size+j;
         ne = (j+1)*size+j+1;
      }
      else
      {
         ns = j*size+rest;
         ne = (j+1)*size+rest;
      }
      for (i = ns; i < ne; i++)
         fine_to_coarse[i] += coarse_shift;
   }

   /* Exchange of global coarse indices currently disabled (offd columns of P
    * keep local numbering; see P_offd_j assignment below). */
   /*index = 0;
   for (i = 0; i < num_sends; i++)
   {
      start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
      for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
         int_buf_data[index++] = fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
   }

   comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, fine_to_coarse_offd);
   hypre_ParCSRCommHandleDestroy(comm_handle);

   if (debug_flag==4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d Interp: Comm 4 FineToCoarse = %f\n", my_id, wall_time);
      fflush(NULL);
   }*/

   if (debug_flag==4) wall_time = time_getWallclockSeconds();

#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
   //for (i = 0; i < n_fine; i++) fine_to_coarse[i] -= my_first_cpt;

   /*-----------------------------------------------------------------------
    * Loop over fine grid points.
    *-----------------------------------------------------------------------*/

   /* Cache 1/diag(A): the F-point weights below are -A_ij / A_ii. */
   a_diag = hypre_CTAlloc(HYPRE_Real, n_fine, HYPRE_MEMORY_HOST);
   for (i = 0; i < n_fine; i++)
   {
      for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
      {
         i1 = A_diag_j[jj];
         if ( i==i1 ) /* diagonal of A only */
         {
            a_diag[i] = 1.0/A_diag_data[jj];
         }
      }
   }

#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,jl,i1,jj,ns,ne,size,rest,P_marker,P_marker_offd,jj_counter,jj_counter_offd,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE
#endif
#endif
   /* Fill pass: same row partitioning as the counting pass. */
   for (jl = 0; jl < num_threads; jl++)
   {
      size = n_fine/num_threads;
      rest = n_fine - size*num_threads;
      if (jl < rest)
      {
         ns = jl*size+jl;
         ne = (jl+1)*size+jl+1;
      }
      else
      {
         ns = jl*size+rest;
         ne = (jl+1)*size+rest;
      }
      /* Each thread writes from the cumulative offset of pass 1. */
      jj_counter = 0;
      if (jl > 0) jj_counter = jj_count[jl-1];
      jj_counter_offd = 0;
      if (jl > 0) jj_counter_offd = jj_count_offd[jl-1];

      P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
      if (num_cols_A_offd)
         P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
      else
         P_marker_offd = NULL;

      for (i = 0; i < n_fine; i++)
      {
         P_marker[i] = -1;
      }
      for (i = 0; i < num_cols_A_offd; i++)
      {
         P_marker_offd[i] = -1;
      }

      for (i = ns; i < ne; i++)
      {
         /*--------------------------------------------------------------------
          * If i is a c-point, interpolation is the identity.
          *--------------------------------------------------------------------*/
         if (CF_marker[i] >= 0)
         {
            P_diag_i[i] = jj_counter;
            P_diag_j[jj_counter] = fine_to_coarse[i];
            P_diag_data[jj_counter] = one;
            jj_counter++;
         }

         /*--------------------------------------------------------------------
          * If i is an F-point, build interpolation.
          *--------------------------------------------------------------------*/
         else
         {
            /* Diagonal part of P */
            P_diag_i[i] = jj_counter;
            for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
            {
               i1 = A_diag_j[jj];

               /*--------------------------------------------------------------
                * If neighbor i1 is a C-point, set column number in P_diag_j
                * and initialize interpolation weight to zero.
                *--------------------------------------------------------------*/
               if (CF_marker[i1] >= 0)
               {
                  P_marker[i1] = jj_counter;
                  P_diag_j[jj_counter] = fine_to_coarse[i1];
                  /* Weight: -A_ij / A_ii (diagonal approximation of A_ff^{-1}). */
                  P_diag_data[jj_counter] = - A_diag_data[jj]*a_diag[i];
                  jj_counter++;
               }
            }

            /* Off-Diagonal part of P */
            P_offd_i[i] = jj_counter_offd;
            if (num_procs > 1)
            {
               for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
               {
                  i1 = A_offd_j[jj];

                  /*-----------------------------------------------------------
                   * If neighbor i1 is a C-point, set column number in P_offd_j
                   * and initialize interpolation weight to zero.
                   *-----------------------------------------------------------*/
                  if (CF_marker_offd[i1] >= 0)
                  {
                     P_marker_offd[i1] = jj_counter_offd;
                     /*P_offd_j[jj_counter_offd] = fine_to_coarse_offd[i1];*/
                     /* Local offd column index; compressed after assembly. */
                     P_offd_j[jj_counter_offd] = i1;
                     P_offd_data[jj_counter_offd] = - A_offd_data[jj]*a_diag[i];
                     jj_counter_offd++;
                  }
               }
            }
         }
         P_offd_i[i+1] = jj_counter_offd;
      }
      hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
      hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
   }
   hypre_TFree(a_diag, HYPRE_MEMORY_HOST);

   /* Assemble P from the filled CSR arrays. */
   P = hypre_ParCSRMatrixCreate(comm,
                                hypre_ParCSRMatrixGlobalNumRows(A),
                                total_global_cpts,
                                hypre_ParCSRMatrixColStarts(A),
                                num_cpts_global,
                                0,
                                P_diag_i[n_fine],
                                P_offd_i[n_fine]);

   P_diag = hypre_ParCSRMatrixDiag(P);
   hypre_CSRMatrixData(P_diag) = P_diag_data;
   hypre_CSRMatrixI(P_diag) = P_diag_i;
   hypre_CSRMatrixJ(P_diag) = P_diag_j;
   P_offd = hypre_ParCSRMatrixOffd(P);
   hypre_CSRMatrixData(P_offd) = P_offd_data;
   hypre_CSRMatrixI(P_offd) = P_offd_i;
   hypre_CSRMatrixJ(P_offd) = P_offd_j;
   hypre_ParCSRMatrixOwnsRowStarts(P) = 0;

   /* Compress P_offd: keep only the A-offd columns that P actually uses. */
   num_cols_P_offd = 0;
   if (P_offd_size)
   {
      P_marker =
         hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);

#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
      for (i=0; i < num_cols_A_offd; i++)
         P_marker[i] = 0;

      /* Mark which offd columns appear in P and count them. */
      num_cols_P_offd = 0;
      for (i=0; i < P_offd_size; i++)
      {
         index = P_offd_j[i];
         if (!P_marker[index])
         {
            num_cols_P_offd++;
            P_marker[index] = 1;
         }
      }

      tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST);
      col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST);

      /* tmp_map_offd[i] = i-th used A-offd column (ascending order). */
      index = 0;
      for (i=0; i < num_cols_P_offd; i++)
      {
         while (P_marker[index]==0) index++;
         tmp_map_offd[i] = index++;
      }

#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
#endif
      /* Renumber P_offd_j into compressed 0..num_cols_P_offd-1 indices. */
      for (i=0; i < P_offd_size; i++)
         P_offd_j[i] = hypre_BinarySearch(tmp_map_offd, P_offd_j[i], num_cols_P_offd);
      hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
   }

   /* Restore special CF marker value used during coarsening. */
   for (i=0; i < n_fine; i++)
      if (CF_marker[i] == -3) CF_marker[i] = -1;

   if (num_cols_P_offd)
   {
      hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
      hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
   }
   /* Build the communication package of P (fills its offd column map). */
   hypre_GetCommPkgRTFromCommPkgA(P,A, fine_to_coarse, tmp_map_offd);

   *P_ptr = P;

   hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
   hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
   //  hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST);
   hypre_TFree(jj_count, HYPRE_MEMORY_HOST);
   hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST);

   return(0);
}

/* Setup interpolation operator.
 *
 * Dispatches between the MGR-specific builders (hypre_MGRBuildP, methods
 * 0-2) and classical modified interpolation (method >= 3, last level only).
 * The result is returned through P.
 */
HYPRE_Int
hypre_MGRBuildInterp(hypre_ParCSRMatrix   *A,
                     HYPRE_Int            *CF_marker,
                     hypre_ParCSRMatrix   *S,
                     HYPRE_BigInt         *num_cpts_global,
                     HYPRE_Int             num_functions,
                     HYPRE_Int            *dof_func,
                     HYPRE_Int             debug_flag,
                     HYPRE_Real            trunc_factor,
                     HYPRE_Int             max_elmts,
                     HYPRE_Int            *col_offd_S_to_A,
                     hypre_ParCSRMatrix  **P,
                     HYPRE_Int             last_level,
                     HYPRE_Int             method,
                     HYPRE_Int             numsweeps)
{
   //   HYPRE_Int i;
   hypre_ParCSRMatrix *P_ptr = NULL;
   //   HYPRE_Real jac_trunc_threshold = trunc_factor;
   //   HYPRE_Real jac_trunc_threshold_minus = 0.5*jac_trunc_threshold;

   /* Build interpolation operator using (hypre default) */
   if(!last_level)
   {
      /* Intermediate levels always use the diagonal approximation (method 2). */
      hypre_MGRBuildP( A,CF_marker,num_cpts_global,2,debug_flag,&P_ptr);
   }
   /* Do Jacobi interpolation for last level */
   else
   {
      if (method <3)
      {
         /* methods 0-2 are handled directly by hypre_MGRBuildP */
         hypre_MGRBuildP( A,CF_marker,num_cpts_global,method,debug_flag,&P_ptr);
         /* Could do a few sweeps of Jacobi to further improve P */
         //for(i=0; i<numsweeps; i++)
         //   hypre_BoomerAMGJacobiInterp(A, &P_ptr, S,1, NULL, CF_marker, 0, jac_trunc_threshold, jac_trunc_threshold_minus );
      }
      else
      {
         /* Classical modified interpolation */
         hypre_BoomerAMGBuildInterp(A, CF_marker, S, num_cpts_global,1, NULL,debug_flag, trunc_factor, max_elmts, col_offd_S_to_A, &P_ptr);

         /* Do k steps of Jacobi build W for P = [-W I].
          * Note that BoomerAMGJacobiInterp assumes you have some initial P,
          * hence we need to initialize P as above, before calling this routine.
          * If numsweeps = 0, the following step is skipped and P is returned as is.
          * Looping here is equivalent to improving P by Jacobi interpolation
          */
         //   for(i=0; i<numsweeps; i++)
         //      hypre_BoomerAMGJacobiInterp(A, &P_ptr, S,1, NULL, CF_marker,
         //                                  0, jac_trunc_threshold,
         //                                  jac_trunc_threshold_minus );
      }
   }
   /* set pointer to P */
   *P = P_ptr;

   return hypre_error_flag;
}

/* In-place inverse of a dense 4x4 row-major matrix via cofactor expansion.
 * a holds 16 entries on input; on output it holds the inverse.
 * No singularity check is performed (see commented-out warning below).
 */
void hypre_blas_smat_inv_n4 (HYPRE_Real *a)
{
   const HYPRE_Real a11 = a[0],  a12 = a[1],  a13 = a[2],  a14 = a[3];
   const HYPRE_Real a21 = a[4],  a22 = a[5],  a23 = a[6],  a24 = a[7];
   const HYPRE_Real a31 = a[8],  a32 = a[9],  a33 = a[10], a34 = a[11];
   const HYPRE_Real a41 = a[12], a42 = a[13], a43 = a[14], a44 = a[15];

   /* M_ij are the cofactors (signed 3x3 minors) of the transposed positions,
    * so inv(A) = [M_ij] / det(A). */
   const HYPRE_Real M11 = a22*a33*a44 + a23*a34*a42 + a24*a32*a43 - a22*a34*a43 - a23*a32*a44 - a24*a33*a42;
   const HYPRE_Real M12 = a12*a34*a43 + a13*a32*a44 + a14*a33*a42 - a12*a33*a44 - a13*a34*a42 - a14*a32*a43;
   const HYPRE_Real M13 = a12*a23*a44 + a13*a24*a42 + a14*a22*a43 - a12*a24*a43 - a13*a22*a44 - a14*a23*a42;
   const HYPRE_Real M14 = a12*a24*a33 + a13*a22*a34 + a14*a23*a32 - a12*a23*a34 - a13*a24*a32 - a14*a22*a33;
   const HYPRE_Real M21 = a21*a34*a43 + a23*a31*a44 + a24*a33*a41 - a21*a33*a44 - a23*a34*a41 - a24*a31*a43;
   const HYPRE_Real M22 = a11*a33*a44 + a13*a34*a41 + a14*a31*a43 - a11*a34*a43 - a13*a31*a44 - a14*a33*a41;
   const HYPRE_Real M23 = a11*a24*a43 + a13*a21*a44 + a14*a23*a41 - a11*a23*a44 - a13*a24*a41 - a14*a21*a43;
   const HYPRE_Real M24 = a11*a23*a34 + a13*a24*a31 + a14*a21*a33 - a11*a24*a33 - a13*a21*a34 - a14*a23*a31;
   const HYPRE_Real M31 = a21*a32*a44 + a22*a34*a41 + a24*a31*a42 - a21*a34*a42 - a22*a31*a44 - a24*a32*a41;
   const HYPRE_Real M32 = a11*a34*a42 + a12*a31*a44 + a14*a32*a41 - a11*a32*a44 - a12*a34*a41 - a14*a31*a42;
   const HYPRE_Real M33 = a11*a22*a44 + a12*a24*a41 + a14*a21*a42 - a11*a24*a42 - a12*a21*a44 - a14*a22*a41;
   const HYPRE_Real M34 = a11*a24*a32 + a12*a21*a34 + a14*a22*a31 - a11*a22*a34 - a12*a24*a31 - a14*a21*a32;
   const HYPRE_Real M41 = a21*a33*a42 + a22*a31*a43 + a23*a32*a41 - a21*a32*a43 - a22*a33*a41 - a23*a31*a42;
   const HYPRE_Real
M42 = a11*a32*a43 + a12*a33*a41 + a13*a31*a42 - a11*a33*a42 - a12*a31*a43 - a13*a32*a41; const HYPRE_Real M43 = a11*a23*a42 + a12*a21*a43 + a13*a22*a41 - a11*a22*a43 - a12*a23*a41 - a13*a21*a42; const HYPRE_Real M44 = a11*a22*a33 + a12*a23*a31 + a13*a21*a32 - a11*a23*a32 - a12*a21*a33 - a13*a22*a31; const HYPRE_Real det = a11*M11 + a12*M21 + a13*M31 + a14*M41; HYPRE_Real det_inv; //if ( fabs(det) < 1e-22 ) { /* there should be no print statements that can't be turned off. Is this an error? */ //hypre_fprintf(stderr, "### WARNING: Matrix is nearly singular! det = %e\n", det); /* printf("##----------------------------------------------\n"); printf("## %12.5e %12.5e %12.5e \n", a0, a1, a2); printf("## %12.5e %12.5e %12.5e \n", a3, a4, a5); printf("## %12.5e %12.5e %12.5e \n", a5, a6, a7); printf("##----------------------------------------------\n"); getchar(); */ //} det_inv = 1.0/det; a[0] = M11*det_inv; a[1] = M12*det_inv; a[2] = M13*det_inv; a[3] = M14*det_inv; a[4] = M21*det_inv; a[5] = M22*det_inv; a[6] = M23*det_inv; a[7] = M24*det_inv; a[8] = M31*det_inv; a[9] = M32*det_inv; a[10] = M33*det_inv; a[11] = M34*det_inv; a[12] = M41*det_inv; a[13] = M42*det_inv; a[14] = M43*det_inv; a[15] = M44*det_inv; } void hypre_blas_mat_inv(HYPRE_Real *a, HYPRE_Int n) { HYPRE_Int i,j,k,l,u,kn,in; HYPRE_Real alinv; if (n == 4) { hypre_blas_smat_inv_n4(a); } else { for (k=0; k<n; ++k) { kn = k*n; l = kn+k; //if (fabs(a[l]) < SMALLREAL) { // printf("### WARNING: Diagonal entry is close to zero!"); // printf("### WARNING: diag_%d=%e\n", k, a[l]); // a[l] = SMALLREAL; //} alinv = 1.0/a[l]; a[l] = alinv; for (j=0; j<k; ++j) { u = kn+j; a[u] *= alinv; } for (j=k+1; j<n; ++j) { u = kn+j; a[u] *= alinv; } for (i=0; i<k; ++i) { in = i*n; for (j=0; j<n; ++j) if (j!=k) { u = in+j; a[u] -= a[in+k]*a[kn+j]; } // end if (j!=k) } for (i=k+1; i<n; ++i) { in = i*n; for (j=0; j<n; ++j) if (j!=k) { u = in+j; a[u] -= a[in+k]*a[kn+j]; } // end if (j!=k) } for (i=0; i<k; ++i) { u=i*n+k; a[u] *= 
-alinv; } for (i=k+1; i<n; ++i) { u=i*n+k; a[u] *= -alinv; } } // end for (k=0; k<n; ++k) }// end if } HYPRE_Int hypre_block_jacobi_scaling(hypre_ParCSRMatrix *A, hypre_ParCSRMatrix **B_ptr, void *mgr_vdata, HYPRE_Int debug_flag) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; HYPRE_Int num_procs, my_id; HYPRE_Int blk_size = (mgr_data -> block_size); HYPRE_Int reserved_coarse_size = (mgr_data -> reserved_coarse_size); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_ParCSRMatrix *B; hypre_CSRMatrix *B_diag; HYPRE_Real *B_diag_data; HYPRE_Int *B_diag_i; HYPRE_Int *B_diag_j; hypre_CSRMatrix *B_offd; HYPRE_Int i,ii; HYPRE_Int j,jj; HYPRE_Int k; HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int n_block, left_size,inv_size; // HYPRE_Real wall_time; /* for debugging instrumentation */ HYPRE_Int bidx,bidxm1,bidxp1; HYPRE_Real * diaginv; const HYPRE_Int nb2 = blk_size*blk_size; HYPRE_Int block_scaling_error = 0; hypre_MPI_Comm_size(comm,&num_procs); hypre_MPI_Comm_rank(comm,&my_id); // HYPRE_Int num_threads = hypre_NumThreads(); //printf("n = %d\n",n); if (my_id == num_procs) { n_block = (n - reserved_coarse_size) / blk_size; left_size = n - blk_size*n_block; } else { n_block = n / blk_size; left_size = n - blk_size*n_block; } inv_size = nb2*n_block + left_size*left_size; //printf("inv_size = %d\n",inv_size); hypre_blockRelax_setup(A,blk_size,reserved_coarse_size,&(mgr_data -> diaginv)); // if (debug_flag==4) wall_time = time_getWallclockSeconds(); /*----------------------------------------------------------------------- * First Pass: Determine size of B and fill in *-----------------------------------------------------------------------*/ B_diag_i = hypre_CTAlloc(HYPRE_Int, n+1, HYPRE_MEMORY_HOST); B_diag_j = hypre_CTAlloc(HYPRE_Int, inv_size, 
HYPRE_MEMORY_HOST); B_diag_data = hypre_CTAlloc(HYPRE_Real, inv_size, HYPRE_MEMORY_HOST); B_diag_i[n] = inv_size; //B_offd_i = hypre_CTAlloc(HYPRE_Int, n+1, HYPRE_MEMORY_HOST); //B_offd_j = hypre_CTAlloc(HYPRE_Int, 1, HYPRE_MEMORY_HOST); //B_offd_data = hypre_CTAlloc(HYPRE_Real, 1, HYPRE_MEMORY_HOST); //B_offd_i[n] = 1; /*----------------------------------------------------------------- * Get all the diagonal sub-blocks *-----------------------------------------------------------------*/ diaginv = hypre_CTAlloc(HYPRE_Real, nb2, HYPRE_MEMORY_HOST); //printf("n_block = %d\n",n_block); for (i = 0;i < n_block; i++) { bidxm1 = i*blk_size; bidxp1 = (i+1)*blk_size; for (k = 0;k < blk_size; k++) { for (j = 0;j < blk_size; j++) { bidx = k*blk_size + j; diaginv[bidx] = 0.0; } for (ii = A_diag_i[bidxm1+k]; ii < A_diag_i[bidxm1+k+1]; ii++) { jj = A_diag_j[ii]; if (jj >= bidxm1 && jj < bidxp1 && fabs(A_diag_data[ii]) > SMALLREAL) { bidx = k*blk_size + jj - bidxm1; //printf("jj = %d,val = %e, bidx = %d\n",jj,A_diag_data[ii],bidx); diaginv[bidx] = A_diag_data[ii]; } } } /* for (k = 0;k < blk_size; k++) */ /* { */ /* for (j = 0;j < blk_size; j++) */ /* { */ /* bidx = k*blk_size + j; */ /* printf("diaginv[%d] = %e\n",bidx,diaginv[bidx]); */ /* } */ /* } */ hypre_blas_mat_inv(diaginv, blk_size); for (k = 0;k < blk_size; k++) { B_diag_i[i*blk_size+k] = i*nb2 + k*blk_size; //B_offd_i[i*nb2+k] = 0; for (j = 0;j < blk_size; j++) { bidx = i*nb2 + k*blk_size + j; B_diag_j[bidx] = i*blk_size + j; B_diag_data[bidx] = diaginv[k*blk_size + j]; } } } //printf("Before create\n"); B = hypre_ParCSRMatrixCreate(comm, hypre_ParCSRMatrixGlobalNumRows(A), hypre_ParCSRMatrixGlobalNumCols(A), hypre_ParCSRMatrixRowStarts(A), hypre_ParCSRMatrixColStarts(A), 0, inv_size, 0); //printf("After create\n"); B_diag = hypre_ParCSRMatrixDiag(B); hypre_CSRMatrixData(B_diag) = B_diag_data; hypre_CSRMatrixI(B_diag) = B_diag_i; hypre_CSRMatrixJ(B_diag) = B_diag_j; B_offd = hypre_ParCSRMatrixOffd(B); 
hypre_CSRMatrixData(B_offd) = NULL; hypre_CSRMatrixI(B_offd) = NULL; hypre_CSRMatrixJ(B_offd) = NULL; /* hypre_ParCSRMatrixOwnsRowStarts(B) = 0; */ *B_ptr = B; return(block_scaling_error); } HYPRE_Int hypre_block_jacobi (hypre_ParCSRMatrix *A, hypre_ParVector *f, hypre_ParVector *u, HYPRE_Real blk_size, HYPRE_Int n_block, HYPRE_Int left_size, HYPRE_Real *diaginv, hypre_ParVector *Vtemp) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); hypre_ParCSRCommHandle *comm_handle; HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd); hypre_Vector *u_local = hypre_ParVectorLocalVector(u); HYPRE_Real *u_data = hypre_VectorData(u_local); hypre_Vector *f_local = hypre_ParVectorLocalVector(f); HYPRE_Real *f_data = hypre_VectorData(f_local); hypre_Vector *Vtemp_local = hypre_ParVectorLocalVector(Vtemp); HYPRE_Real *Vtemp_data = hypre_VectorData(Vtemp_local); HYPRE_Real *Vext_data = NULL; HYPRE_Real *v_buf_data; HYPRE_Int i, j, k; HYPRE_Int ii, jj; HYPRE_Int bidx,bidx1; HYPRE_Int relax_error = 0; HYPRE_Int num_sends; HYPRE_Int index, start; HYPRE_Int num_procs, my_id; HYPRE_Real *res; const HYPRE_Int nb2 = blk_size*blk_size; hypre_MPI_Comm_size(comm,&num_procs); hypre_MPI_Comm_rank(comm,&my_id); // HYPRE_Int num_threads = hypre_NumThreads(); res = hypre_CTAlloc(HYPRE_Real, blk_size, HYPRE_MEMORY_HOST); if (num_procs > 1) { num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); v_buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), 
HYPRE_MEMORY_HOST); Vext_data = hypre_CTAlloc(HYPRE_Real, num_cols_offd, HYPRE_MEMORY_HOST); if (num_cols_offd) { A_offd_j = hypre_CSRMatrixJ(A_offd); A_offd_data = hypre_CSRMatrixData(A_offd); } index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) v_buf_data[index++] = u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, v_buf_data, Vext_data); } /*----------------------------------------------------------------- * Copy current approximation into temporary vector. *-----------------------------------------------------------------*/ #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif #endif for (i = 0; i < n; i++) { Vtemp_data[i] = u_data[i]; //printf("u_old[%d] = %e\n",i,Vtemp_data[i]); } if (num_procs > 1) { hypre_ParCSRCommHandleDestroy(comm_handle); comm_handle = NULL; } /*----------------------------------------------------------------- * Relax points block by block *-----------------------------------------------------------------*/ for (i = 0;i < n_block; i++) { for (j = 0;j < blk_size; j++) { bidx = i*blk_size +j; res[j] = f_data[bidx]; for (jj = A_diag_i[bidx]; jj < A_diag_i[bidx+1]; jj++) { ii = A_diag_j[jj]; res[j] -= A_diag_data[jj] * Vtemp_data[ii]; //printf("%d: Au= %e * %e =%e\n",ii,A_diag_data[jj],Vtemp_data[ii], res[j]); } for (jj = A_offd_i[bidx]; jj < A_offd_i[bidx+1]; jj++) { ii = A_offd_j[jj]; res[j] -= A_offd_data[jj] * Vext_data[ii]; } //printf("%d: res = %e\n",bidx,res[j]); } for (j = 0;j < blk_size; j++) { bidx1 = i*blk_size +j; for (k = 0;k < blk_size; k++) { bidx = i*nb2 +j*blk_size+k; u_data[bidx1] += res[k]*diaginv[bidx]; //printf("u[%d] = %e, diaginv[%d] = %e\n",bidx1,u_data[bidx1],bidx,diaginv[bidx]); } //printf("u[%d] = %e\n",bidx1,u_data[bidx1]); } } if (num_procs > 1) { hypre_TFree(Vext_data, HYPRE_MEMORY_HOST); 
hypre_TFree(v_buf_data, HYPRE_MEMORY_HOST); } hypre_TFree(res, HYPRE_MEMORY_HOST); return(relax_error); } /*Block smoother*/ HYPRE_Int hypre_blockRelax_setup(hypre_ParCSRMatrix *A, HYPRE_Int blk_size, HYPRE_Int reserved_coarse_size, HYPRE_Real **diaginvptr) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int i, j,k; HYPRE_Int ii, jj; HYPRE_Int bidx,bidxm1,bidxp1; HYPRE_Int num_procs, my_id; const HYPRE_Int nb2 = blk_size*blk_size; HYPRE_Int n_block; HYPRE_Int left_size,inv_size; HYPRE_Real *diaginv = *diaginvptr; hypre_MPI_Comm_size(comm,&num_procs); hypre_MPI_Comm_rank(comm,&my_id); // HYPRE_Int num_threads = hypre_NumThreads(); if (my_id == num_procs) { n_block = (n - reserved_coarse_size) / blk_size; left_size = n - blk_size*n_block; } else { n_block = n / blk_size; left_size = n - blk_size*n_block; } inv_size = nb2*n_block + left_size*left_size; if (diaginv !=NULL) { hypre_TFree(diaginv, HYPRE_MEMORY_HOST); diaginv = hypre_CTAlloc(HYPRE_Real, inv_size, HYPRE_MEMORY_HOST); } else { diaginv = hypre_CTAlloc(HYPRE_Real, inv_size, HYPRE_MEMORY_HOST); } /*----------------------------------------------------------------- * Get all the diagonal sub-blocks *-----------------------------------------------------------------*/ for (i = 0;i < n_block; i++) { bidxm1 = i*blk_size; bidxp1 = (i+1)*blk_size; //printf("bidxm1 = %d,bidxp1 = %d\n",bidxm1,bidxp1); for (k = 0;k < blk_size; k++) { for (j = 0;j < blk_size; j++) { bidx = i*nb2 + k*blk_size + j; diaginv[bidx] = 0.0; } for (ii = A_diag_i[bidxm1+k]; ii < A_diag_i[bidxm1+k+1]; ii++) { jj = A_diag_j[ii]; if (jj >= bidxm1 && jj < bidxp1 && fabs(A_diag_data[ii]) > SMALLREAL) { bidx = i*nb2 + k*blk_size + jj - bidxm1; //printf("jj = %d,val = %e, bidx = 
%d\n",jj,A_diag_data[ii],bidx); diaginv[bidx] = A_diag_data[ii]; } } } } for (i = 0;i < left_size; i++) { bidxm1 =n_block*nb2 + i*blk_size; bidxp1 =n_block*nb2 + (i+1)*blk_size; for (j = 0;j < left_size; j++) { bidx = n_block*nb2 + i*blk_size +j; diaginv[bidx] = 0.0; } for (ii = A_diag_i[n_block*blk_size + i]; ii < A_diag_i[n_block*blk_size+i+1]; ii++) { jj = A_diag_j[ii]; if (jj > n_block*blk_size) { bidx = n_block*nb2 + i*blk_size + jj - n_block*blk_size; diaginv[bidx] = A_diag_data[ii]; } } } /*----------------------------------------------------------------- * compute the inverses of all the diagonal sub-blocks *-----------------------------------------------------------------*/ if (blk_size > 1) { for (i = 0;i < n_block; i++) { hypre_blas_mat_inv(diaginv+i*nb2, blk_size); } hypre_blas_mat_inv(diaginv+(HYPRE_Int)(blk_size*nb2),left_size); } else { for (i = 0;i < n; i++) { // FIX-ME: zero-diagonal should be tested previously if (fabs(diaginv[i]) < SMALLREAL) diaginv[i] = 0.0; else diaginv[i] = 1.0 / diaginv[i]; } } *diaginvptr = diaginv; return 1; } HYPRE_Int hypre_blockRelax(hypre_ParCSRMatrix *A, hypre_ParVector *f, hypre_ParVector *u, HYPRE_Int blk_size, HYPRE_Int reserved_coarse_size, hypre_ParVector *Vtemp, hypre_ParVector *Ztemp) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int i, j,k; HYPRE_Int ii, jj; HYPRE_Int bidx,bidxm1,bidxp1; HYPRE_Int relax_error = 0; HYPRE_Int num_procs, my_id; const HYPRE_Int nb2 = blk_size*blk_size; HYPRE_Int n_block; HYPRE_Int left_size,inv_size; HYPRE_Real *diaginv; hypre_MPI_Comm_size(comm,&num_procs); hypre_MPI_Comm_rank(comm,&my_id); // HYPRE_Int num_threads = hypre_NumThreads(); if (my_id == num_procs) { n_block = (n - reserved_coarse_size) / blk_size; left_size = n - 
blk_size*n_block; } else { n_block = n / blk_size; left_size = n - blk_size*n_block; } inv_size = nb2*n_block + left_size*left_size; diaginv = hypre_CTAlloc(HYPRE_Real, inv_size, HYPRE_MEMORY_HOST); /*----------------------------------------------------------------- * Get all the diagonal sub-blocks *-----------------------------------------------------------------*/ for (i = 0;i < n_block; i++) { bidxm1 = i*blk_size; bidxp1 = (i+1)*blk_size; //printf("bidxm1 = %d,bidxp1 = %d\n",bidxm1,bidxp1); for (k = 0;k < blk_size; k++) { for (j = 0;j < blk_size; j++) { bidx = i*nb2 + k*blk_size + j; diaginv[bidx] = 0.0; } for (ii = A_diag_i[bidxm1+k]; ii < A_diag_i[bidxm1+k+1]; ii++) { jj = A_diag_j[ii]; if (jj >= bidxm1 && jj < bidxp1 && fabs(A_diag_data[ii]) > SMALLREAL) { bidx = i*nb2 + k*blk_size + jj - bidxm1; //printf("jj = %d,val = %e, bidx = %d\n",jj,A_diag_data[ii],bidx); diaginv[bidx] = A_diag_data[ii]; } } } } for (i = 0;i < left_size; i++) { bidxm1 =n_block*nb2 + i*blk_size; bidxp1 =n_block*nb2 + (i+1)*blk_size; for (j = 0;j < left_size; j++) { bidx = n_block*nb2 + i*blk_size +j; diaginv[bidx] = 0.0; } for (ii = A_diag_i[n_block*blk_size + i]; ii < A_diag_i[n_block*blk_size+i+1]; ii++) { jj = A_diag_j[ii]; if (jj > n_block*blk_size) { bidx = n_block*nb2 + i*blk_size + jj - n_block*blk_size; diaginv[bidx] = A_diag_data[ii]; } } } /* for (i = 0;i < n_block; i++) { for (j = 0;j < blk_size; j++) { for (k = 0;k < blk_size; k ++) { bidx = i*nb2 + j*blk_size + k; printf("%e\t",diaginv[bidx]); } printf("\n"); } printf("\n"); } */ /*----------------------------------------------------------------- * compute the inverses of all the diagonal sub-blocks *-----------------------------------------------------------------*/ if (blk_size > 1) { for (i = 0;i < n_block; i++) { hypre_blas_mat_inv(diaginv+i*nb2, blk_size); } hypre_blas_mat_inv(diaginv+(HYPRE_Int)(blk_size*nb2),left_size); /* for (i = 0;i < n_block; i++) { for (j = 0;j < blk_size; j++) { for (k = 0;k < blk_size; k ++) 
{ bidx = i*nb2 + j*blk_size + k; printf("%e\t",diaginv[bidx]); } printf("\n"); } printf("\n"); } */ } else { for (i = 0;i < n; i++) { // FIX-ME: zero-diagonal should be tested previously if (fabs(diaginv[i]) < SMALLREAL) diaginv[i] = 0.0; else diaginv[i] = 1.0 / diaginv[i]; } } hypre_block_jacobi(A,f,u,blk_size,n_block,left_size,diaginv,Vtemp); /*----------------------------------------------------------------- * Free temperary memeory *-----------------------------------------------------------------*/ hypre_TFree(diaginv, HYPRE_MEMORY_HOST); return(relax_error); } /* set coarse grid solver */ HYPRE_Int hypre_MGRSetCoarseSolver( void *mgr_vdata, HYPRE_Int (*coarse_grid_solver_solve)(void*,void*,void*,void*), HYPRE_Int (*coarse_grid_solver_setup)(void*,void*,void*,void*), void *coarse_grid_solver ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; if (!mgr_data) { hypre_error_in_arg(1); return hypre_error_flag; } (mgr_data -> coarse_grid_solver_solve) = coarse_grid_solver_solve; (mgr_data -> coarse_grid_solver_setup) = coarse_grid_solver_setup; (mgr_data -> coarse_grid_solver) = (HYPRE_Solver) coarse_grid_solver; (mgr_data -> use_default_cgrid_solver) = 0; return hypre_error_flag; } /* Set the maximum number of coarse levels. * maxcoarselevs = 1 yields the default 2-grid scheme. */ HYPRE_Int hypre_MGRSetMaxCoarseLevels( void *mgr_vdata, HYPRE_Int maxcoarselevs ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> max_num_coarse_levels) = maxcoarselevs; return hypre_error_flag; } /* Set the system block size */ HYPRE_Int hypre_MGRSetBlockSize( void *mgr_vdata, HYPRE_Int bsize ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> block_size) = bsize; return hypre_error_flag; } /* Set the relaxation type for the fine levels of the reduction. * Currently supports the following flavors of relaxation types * as described in the documentation: * relax_types 0 - 8, 13, 14, 18, 19, 98. 
* See par_relax.c and par_relax_more.c for more details. * */ HYPRE_Int hypre_MGRSetRelaxType( void *mgr_vdata, HYPRE_Int relax_type ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> relax_type) = relax_type; return hypre_error_flag; } /* Set the number of relaxation sweeps */ HYPRE_Int hypre_MGRSetNumRelaxSweeps( void *mgr_vdata, HYPRE_Int nsweeps ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> num_relax_sweeps) = nsweeps; return hypre_error_flag; } /* Set the F-relaxation strategy: 0=single level, 1=multi level */ HYPRE_Int hypre_MGRSetFRelaxMethod( void *mgr_vdata, HYPRE_Int relax_method ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> Frelax_method) = relax_method; return hypre_error_flag; } /* Set the type of the restriction type * for computing restriction operator */ HYPRE_Int hypre_MGRSetRestrictType( void *mgr_vdata, HYPRE_Int restrict_type) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> restrict_type) = restrict_type; return hypre_error_flag; } /* Set the number of Jacobi interpolation iterations * for computing interpolation operator */ HYPRE_Int hypre_MGRSetNumRestrictSweeps( void *mgr_vdata, HYPRE_Int nsweeps ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> num_restrict_sweeps) = nsweeps; return hypre_error_flag; } /* Set the type of the interpolation * for computing interpolation operator */ HYPRE_Int hypre_MGRSetInterpType( void *mgr_vdata, HYPRE_Int interpType) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> interp_type) = interpType; return hypre_error_flag; } /* Set the number of Jacobi interpolation iterations * for computing interpolation operator */ HYPRE_Int hypre_MGRSetNumInterpSweeps( void *mgr_vdata, HYPRE_Int nsweeps ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> num_interp_sweeps) = nsweeps; return hypre_error_flag; } /* Set print level for mgr 
solver */ HYPRE_Int hypre_MGRSetPrintLevel( void *mgr_vdata, HYPRE_Int print_level ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> print_level) = print_level; return hypre_error_flag; } /* Set print level for mgr solver */ HYPRE_Int hypre_MGRSetLogging( void *mgr_vdata, HYPRE_Int logging ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> logging) = logging; return hypre_error_flag; } /* Set max number of iterations for mgr solver */ HYPRE_Int hypre_MGRSetMaxIter( void *mgr_vdata, HYPRE_Int max_iter ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> max_iter) = max_iter; return hypre_error_flag; } /* Set convergence tolerance for mgr solver */ HYPRE_Int hypre_MGRSetTol( void *mgr_vdata, HYPRE_Real tol ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> tol) = tol; return hypre_error_flag; } /* Set max number of iterations for mgr solver */ HYPRE_Int hypre_MGRSetMaxGlobalsmoothIters( void *mgr_vdata, HYPRE_Int max_iter ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> global_smooth_iters) = max_iter; return hypre_error_flag; } /* Set max number of iterations for mgr solver */ HYPRE_Int hypre_MGRSetGlobalsmoothType( void *mgr_vdata, HYPRE_Int iter_type ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; (mgr_data -> global_smooth_type) = iter_type; return hypre_error_flag; } /* Get number of iterations for MGR solver */ HYPRE_Int hypre_MGRGetNumIterations( void *mgr_vdata, HYPRE_Int *num_iterations ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; if (!mgr_data) { hypre_error_in_arg(1); return hypre_error_flag; } *num_iterations = mgr_data->num_iterations; return hypre_error_flag; } /* Get residual norms for MGR solver */ HYPRE_Int hypre_MGRGetFinalRelativeResidualNorm( void *mgr_vdata, HYPRE_Real *res_norm ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; if (!mgr_data) { hypre_error_in_arg(1); 
return hypre_error_flag; } *res_norm = mgr_data->final_rel_residual_norm; return hypre_error_flag; } HYPRE_Int hypre_MGRBuildAff( MPI_Comm comm, HYPRE_Int local_num_variables, HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int *CF_marker, HYPRE_Int **coarse_dof_func_ptr, HYPRE_BigInt **coarse_pnts_global_ptr, hypre_ParCSRMatrix *A, HYPRE_Int debug_flag, hypre_ParCSRMatrix **P_f_ptr, hypre_ParCSRMatrix **A_ff_ptr ) { HYPRE_Int *CF_marker_copy = hypre_CTAlloc(HYPRE_Int, local_num_variables, HYPRE_MEMORY_HOST); HYPRE_Int i; for (i = 0; i < local_num_variables; i++) { CF_marker_copy[i] = -CF_marker[i]; } hypre_BoomerAMGCoarseParms(comm, local_num_variables, 1, NULL, CF_marker_copy, coarse_dof_func_ptr, coarse_pnts_global_ptr); hypre_MGRBuildP(A, CF_marker_copy, (*coarse_pnts_global_ptr), 0, debug_flag, P_f_ptr); hypre_BoomerAMGBuildCoarseOperator(*P_f_ptr, A, *P_f_ptr, A_ff_ptr); hypre_TFree(CF_marker_copy, HYPRE_MEMORY_HOST); return 0; } /* Get pointer to coarse grid matrix for MGR solver */ HYPRE_Int hypre_MGRGetCoarseGridMatrix( void *mgr_vdata, hypre_ParCSRMatrix **RAP ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; if (!mgr_data) { hypre_error_in_arg(1); return hypre_error_flag; } if (mgr_data -> RAP == NULL) { hypre_error_w_msg(HYPRE_ERROR_GENERIC," Coarse grid matrix is NULL. Please make sure MGRSetup() is called \n"); return hypre_error_flag; } *RAP = mgr_data->RAP; return hypre_error_flag; } /* Get pointer to coarse grid solution for MGR solver */ HYPRE_Int hypre_MGRGetCoarseGridSolution( void *mgr_vdata, hypre_ParVector **sol ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; if (!mgr_data) { hypre_error_in_arg(1); return hypre_error_flag; } if (mgr_data -> U_array == NULL) { hypre_error_w_msg(HYPRE_ERROR_GENERIC," MGR solution array is NULL. 
Please make sure MGRSetup() and MGRSolve() are called \n"); return hypre_error_flag; } *sol = mgr_data->U_array[mgr_data->num_coarse_levels]; return hypre_error_flag; } /* Get pointer to coarse grid solution for MGR solver */ HYPRE_Int hypre_MGRGetCoarseGridRHS( void *mgr_vdata, hypre_ParVector **rhs ) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; if (!mgr_data) { hypre_error_in_arg(1); return hypre_error_flag; } if (mgr_data -> F_array == NULL) { hypre_error_w_msg(HYPRE_ERROR_GENERIC," MGR RHS array is NULL. Please make sure MGRSetup() and MGRSolve() are called \n"); return hypre_error_flag; } *rhs = mgr_data->F_array[mgr_data->num_coarse_levels]; return hypre_error_flag; } /* Print coarse grid linear system (for debugging)*/ HYPRE_Int hypre_MGRPrintCoarseSystem( void *mgr_vdata, HYPRE_Int print_flag) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; mgr_data->print_coarse_system = print_flag; return hypre_error_flag; } /* Print solver params */ HYPRE_Int hypre_MGRWriteSolverParams(void *mgr_vdata) { hypre_ParMGRData *mgr_data = (hypre_ParMGRData*) mgr_vdata; hypre_printf("MGR Setup parameters: \n"); hypre_printf("Max number of coarse levels: %d\n", (mgr_data -> max_num_coarse_levels)); hypre_printf("Block size: %d\n", (mgr_data -> block_size)); hypre_printf("Number of coarse indexes: %d\n", (mgr_data -> num_coarse_indexes)); hypre_printf("reserved coarse nodes size: %d\n", (mgr_data -> reserved_coarse_size)); hypre_printf("\n MGR Solver Parameters: \n"); hypre_printf("F-relaxation Method: %d\n", (mgr_data -> Frelax_method)); hypre_printf("Relax type: %d\n", (mgr_data -> relax_type)); hypre_printf("Number of relax sweeps: %d\n", (mgr_data -> num_relax_sweeps)); hypre_printf("Interpolation type: %d\n", (mgr_data -> interp_type)); hypre_printf("Number of interpolation sweeps: %d\n", (mgr_data -> num_interp_sweeps)); hypre_printf("Restriction type: %d\n", (mgr_data -> restrict_type)); hypre_printf("Number of restriction sweeps: %d\n", 
(mgr_data -> num_restrict_sweeps)); hypre_printf("Global smoother type: %d\n", (mgr_data ->global_smooth_type)); hypre_printf("Number of global smoother sweeps: %d\n", (mgr_data ->global_smooth_iters)); hypre_printf("Max number of iterations: %d\n", (mgr_data -> max_iter)); hypre_printf("Stopping tolerance: %e\n", (mgr_data -> tol)); return hypre_error_flag; }
openmp_utils.h
#ifndef OPENMP_UTILS_H #define OPENMP_UTILS_H #include <math.h> template <typename T> static inline void myAtomicVecAdd(typename vec4<T>::Type& a, typename vec4<T>::Type& b) { //#pragma omp critical a += b; /* #pragma omp atomic a.x += b.x; #pragma omp atomic a.y += b.y; #pragma omp atomic a.z += b.z; */ } #endif
GB_binop__iseq_fc64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__iseq_fc64) // A.*B function (eWiseMult): GB (_AemultB_08__iseq_fc64) // A.*B function (eWiseMult): GB (_AemultB_02__iseq_fc64) // A.*B function (eWiseMult): GB (_AemultB_04__iseq_fc64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__iseq_fc64) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((none)) // C+=B function (dense accum): GB (_Cdense_accumB__iseq_fc64) // C+=b function (dense accum): GB (_Cdense_accumb__iseq_fc64) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__iseq_fc64) // C=scalar+B GB (_bind1st__iseq_fc64) // C=scalar+B' GB (_bind1st_tran__iseq_fc64) // C=A+scalar GB (_bind2nd__iseq_fc64) // C=A'+scalar GB (_bind2nd_tran__iseq_fc64) // C type: GxB_FC64_t // A type: GxB_FC64_t // A pattern? 0 // B type: GxB_FC64_t // B pattern? 
0 // BinaryOp: cij = GB_FC64_iseq (aij, bij) #define GB_ATYPE \ GxB_FC64_t #define GB_BTYPE \ GxB_FC64_t #define GB_CTYPE \ GxB_FC64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ GxB_FC64_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ GxB_FC64_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ GxB_FC64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_FC64_iseq (x, y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISEQ || GxB_NO_FC64 || GxB_NO_ISEQ_FC64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__iseq_fc64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__iseq_fc64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__iseq_fc64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type GxB_FC64_t GxB_FC64_t bwork = (*((GxB_FC64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t 
*restrict Cx = (GxB_FC64_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__iseq_fc64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; GxB_FC64_t alpha_scalar ; GxB_FC64_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((GxB_FC64_t *) alpha_scalar_in)) ; beta_scalar = (*((GxB_FC64_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__iseq_fc64) ( GrB_Matrix C, 
const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__iseq_fc64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__iseq_fc64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__iseq_fc64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__iseq_fc64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ; GxB_FC64_t x = (*((GxB_FC64_t *) x_input)) ; GxB_FC64_t *Bx = (GxB_FC64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) 
schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; GxB_FC64_t bij = GBX (Bx, p, false) ; Cx [p] = GB_FC64_iseq (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__iseq_fc64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ; GxB_FC64_t *Ax = (GxB_FC64_t *) Ax_input ; GxB_FC64_t y = (*((GxB_FC64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; GxB_FC64_t aij = GBX (Ax, p, false) ; Cx [p] = GB_FC64_iseq (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ GxB_FC64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_FC64_iseq (x, aij) ; \ } GrB_Info GB (_bind1st_tran__iseq_fc64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ GxB_FC64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t x = (*((const GxB_FC64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ GxB_FC64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ GxB_FC64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_FC64_iseq (aij, y) ; \ } GrB_Info GB (_bind2nd_tran__iseq_fc64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t y = (*((const GxB_FC64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
shear.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % SSSSS H H EEEEE AAA RRRR % % SS H H E A A R R % % SSS HHHHH EEE AAAAA RRRR % % SS H H E A A R R % % SSSSS H H EEEEE A A R R % % % % % % MagickCore Methods to Shear or Rotate an Image by an Arbitrary Angle % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % The XShearImage() and YShearImage() methods are based on the paper "A Fast % Algorithm for General Raster Rotation" by Alan W. Paeth, Graphics % Interface '86 (Vancouver). ShearRotateImage() is adapted from a similar % method based on the Paeth paper written by Michael Halle of the Spatial % Imaging Group, MIT Media Lab. % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache-private.h" #include "MagickCore/channel.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/composite-private.h" #include "MagickCore/decorate.h" #include "MagickCore/distort.h" #include "MagickCore/draw.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/gem.h" #include "MagickCore/geometry.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/matrix.h" #include "MagickCore/memory_.h" #include "MagickCore/list.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/nt-base-private.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/quantum.h" #include "MagickCore/resource_.h" #include "MagickCore/shear.h" #include "MagickCore/statistic.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/threshold.h" #include "MagickCore/transform.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C r o p T o F i t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CropToFitImage() crops the sheared image as determined by the bounding box % as defined by width and height and shearing angles. % % The format of the CropToFitImage method is: % % MagickBooleanType CropToFitImage(Image **image, % const double x_shear,const double x_shear, % const double width,const double height, % const MagickBooleanType rotate,ExceptionInfo *exception) % % A description of each parameter follows. % % o image: the image. % % o x_shear, y_shear, width, height: Defines a region of the image to crop. 
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType CropToFitImage(Image **image,
  const double x_shear,const double y_shear,
  const double width,const double height,
  const MagickBooleanType rotate,ExceptionInfo *exception)
{
  Image
    *crop_image;

  PointInfo
    corners[4],
    lower,
    upper;

  RectangleInfo
    crop_geometry,
    original_page;

  ssize_t
    n;

  /*
    Project the four corners of the width x height region (centered on the
    origin) through the shear transform to obtain the bounding box of the
    sheared result.
  */
  corners[0].x=(-width/2.0);
  corners[0].y=(-height/2.0);
  corners[1].x=width/2.0;
  corners[1].y=(-height/2.0);
  corners[2].x=(-width/2.0);
  corners[2].y=height/2.0;
  corners[3].x=width/2.0;
  corners[3].y=height/2.0;
  for (n=0; n < 4; n++)
  {
    /*
      Order matters: the y update reads the already-sheared x, and the
      rotate case applies a second x shear (Paeth shear-shear-shear rotate).
    */
    corners[n].x+=x_shear*corners[n].y;
    corners[n].y+=y_shear*corners[n].x;
    if (rotate != MagickFalse)
      corners[n].x+=x_shear*corners[n].y;
    corners[n].x+=(double) (*image)->columns/2.0;
    corners[n].y+=(double) (*image)->rows/2.0;
  }
  /*
    Reduce the projected corners to a lower-left / upper-right pair.
  */
  lower=corners[0];
  upper=corners[0];
  for (n=1; n < 4; n++)
  {
    lower.x=corners[n].x < lower.x ? corners[n].x : lower.x;
    lower.y=corners[n].y < lower.y ? corners[n].y : lower.y;
    upper.x=corners[n].x > upper.x ? corners[n].x : upper.x;
    upper.y=corners[n].y > upper.y ? corners[n].y : upper.y;
  }
  crop_geometry.x=(ssize_t) ceil(lower.x-0.5);
  crop_geometry.y=(ssize_t) ceil(lower.y-0.5);
  crop_geometry.width=(size_t) floor(upper.x-lower.x+0.5);
  crop_geometry.height=(size_t) floor(upper.y-lower.y+0.5);
  /*
    Temporarily clear the page geometry so CropImage() works in image
    coordinates, then restore it on the cropped result.
  */
  original_page=(*image)->page;
  (void) ParseAbsoluteGeometry("0x0+0+0",&(*image)->page);
  crop_image=CropImage(*image,&crop_geometry,exception);
  if (crop_image == (Image *) NULL)
    return(MagickFalse);
  crop_image->page=original_page;
  *image=DestroyImage(*image);
  *image=crop_image;
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     D e s k e w I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DeskewImage() removes skew from the image.
Skew is an artifact that
%  occurs in scanned images because of the camera being misaligned,
%  imperfections in the scanning or surface, or simply because the paper was
%  not placed completely flat when scanned.
%
%  The result will be auto-cropped if the artifact "deskew:auto-crop" is
%  defined, while the amount the image is to be deskewed, in degrees is also
%  saved as the artifact "deskew:angle".
%
%  The format of the DeskewImage method is:
%
%      Image *DeskewImage(const Image *image,const double threshold,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o threshold: separate background from foreground.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  RadonProjection() folds the per-byte bit-count matrix into a projection
  profile used for skew detection.  The two matrices are used as a ping-pong
  pair: each pass doubles `step', accumulating shifted columns, and the final
  pass writes the sum of squared differences of adjacent rows into
  projection[] (indexed forward or backward depending on `sign').
*/
static void RadonProjection(const Image *image,MatrixInfo *source_matrixs,
  MatrixInfo *destination_matrixs,const ssize_t sign,size_t *projection)
{
  MatrixInfo
    *swap;

  register MatrixInfo
    *p,
    *q;

  register ssize_t
    x;

  size_t
    step;

  p=source_matrixs;
  q=destination_matrixs;
  for (step=1; step < GetMatrixColumns(p); step*=2)
  {
    for (x=0; x < (ssize_t) GetMatrixColumns(p); x+=2*(ssize_t) step)
    {
      register ssize_t
        i;

      ssize_t
        y;

      unsigned short
        element,
        neighbor;

      for (i=0; i < (ssize_t) step; i++)
      {
        /*
          Interior rows: combine element with the column shifted by i and
          i+1 rows; failed matrix accesses are simply skipped.
        */
        for (y=0; y < (ssize_t) (GetMatrixRows(p)-i-1); y++)
        {
          if (GetMatrixElement(p,x+i,y,&element) == MagickFalse)
            continue;
          if (GetMatrixElement(p,x+i+step,y+i,&neighbor) == MagickFalse)
            continue;
          neighbor+=element;
          if (SetMatrixElement(q,x+2*i,y,&neighbor) == MagickFalse)
            continue;
          if (GetMatrixElement(p,x+i+step,y+i+1,&neighbor) == MagickFalse)
            continue;
          neighbor+=element;
          if (SetMatrixElement(q,x+2*i+1,y,&neighbor) == MagickFalse)
            continue;
        }
        /*
          Rows near the bottom edge: the i+1 shifted row is out of range.
        */
        for ( ; y < (ssize_t) (GetMatrixRows(p)-i); y++)
        {
          if (GetMatrixElement(p,x+i,y,&element) == MagickFalse)
            continue;
          if (GetMatrixElement(p,x+i+step,y+i,&neighbor) == MagickFalse)
            continue;
          neighbor+=element;
          if (SetMatrixElement(q,x+2*i,y,&neighbor) == MagickFalse)
            continue;
          if (SetMatrixElement(q,x+2*i+1,y,&element) == MagickFalse)
            continue;
        }
        /*
          Remaining rows: no shifted partner at all; copy element through.
        */
        for ( ; y < (ssize_t) GetMatrixRows(p); y++)
        {
          if (GetMatrixElement(p,x+i,y,&element) == MagickFalse)
            continue;
          if (SetMatrixElement(q,x+2*i,y,&element) == MagickFalse)
            continue;
          if (SetMatrixElement(q,x+2*i+1,y,&element) == MagickFalse)
            continue;
        }
      }
    }
    /*
      Ping-pong the source and destination matrices for the next pass.
    */
    swap=p;
    p=q;
    q=swap;
  }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) \
    magick_number_threads(image,image,GetMatrixColumns(p),1)
#endif
  for (x=0; x < (ssize_t) GetMatrixColumns(p); x++)
  {
    register ssize_t
      y;

    size_t
      sum;

    /*
      Projection strength of this column: sum of squared differences of
      vertically adjacent elements.
    */
    sum=0;
    for (y=0; y < (ssize_t) (GetMatrixRows(p)-1); y++)
    {
      ssize_t
        delta;

      unsigned short
        element,
        neighbor;

      if (GetMatrixElement(p,x,y,&element) == MagickFalse)
        continue;
      if (GetMatrixElement(p,x,y+1,&neighbor) == MagickFalse)
        continue;
      delta=(ssize_t) element-(ssize_t) neighbor;
      sum+=delta*delta;
    }
    projection[GetMatrixColumns(p)+sign*x-1]=sum;
  }
}

/*
  RadonTransform() thresholds the image into a packed bitmap (one bit per
  pixel, 8 pixels per matrix element as a popcount) and runs RadonProjection()
  twice -- once with the bytes packed right-to-left (sign -1) and once packed
  left-to-right (sign +1) -- filling the 2*width-1 entry projection[] array.
  Returns MagickFalse only when the work matrices cannot be allocated/reset.
*/
static MagickBooleanType RadonTransform(const Image *image,
  const double threshold,size_t *projection,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MatrixInfo
    *destination_matrixs,
    *source_matrixs;

  MagickBooleanType
    status;

  size_t
    count,
    width;

  ssize_t
    j,
    y;

  unsigned char
    c;

  unsigned short
    bits[256];

  /*
    Round the byte-width of a row up to the next power of two.
  */
  for (width=1; width < ((image->columns+7)/8); width<<=1) ;
  source_matrixs=AcquireMatrixInfo(width,image->rows,sizeof(unsigned short),
    exception);
  destination_matrixs=AcquireMatrixInfo(width,image->rows,
    sizeof(unsigned short),exception);
  if ((source_matrixs == (MatrixInfo *) NULL) ||
      (destination_matrixs == (MatrixInfo *) NULL))
    {
      if (destination_matrixs != (MatrixInfo *) NULL)
        destination_matrixs=DestroyMatrixInfo(destination_matrixs);
      if (source_matrixs != (MatrixInfo *) NULL)
        source_matrixs=DestroyMatrixInfo(source_matrixs);
      return(MagickFalse);
    }
  if (NullMatrix(source_matrixs) == MagickFalse)
    {
      destination_matrixs=DestroyMatrixInfo(destination_matrixs);
      source_matrixs=DestroyMatrixInfo(source_matrixs);
      return(MagickFalse);
    }
  /*
    Build a per-byte population-count (number of set bits) lookup table.
  */
  for (j=0; j < 256; j++)
  {
    c=(unsigned char) j;
    for (count=0; c != 0; c>>=1)
      count+=c & 0x01;
    bits[j]=(unsigned short) count;
  }
  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      i,
      x;

    size_t
      bit,
      byte;

    unsigned short
      value;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /*
      First pass: pack thresholded pixels into bytes stored right-to-left
      (--i); a pixel is "set" when any of R, G, B falls below threshold.
    */
    bit=0;
    byte=0;
    i=(ssize_t) (image->columns+7)/8;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      byte<<=1;
      if (((MagickRealType) GetPixelRed(image,p) < threshold) ||
          ((MagickRealType) GetPixelGreen(image,p) < threshold) ||
          ((MagickRealType) GetPixelBlue(image,p) < threshold))
        byte|=0x01;
      bit++;
      if (bit == 8)
        {
          value=bits[byte];
          (void) SetMatrixElement(source_matrixs,--i,y,&value);
          bit=0;
          byte=0;
        }
      p+=GetPixelChannels(image);
    }
    if (bit != 0)
      {
        /*
          Flush the final partial byte, left-aligned.
        */
        byte<<=(8-bit);
        value=bits[byte];
        (void) SetMatrixElement(source_matrixs,--i,y,&value);
      }
  }
  RadonProjection(image,source_matrixs,destination_matrixs,-1,projection);
  (void) NullMatrix(source_matrixs);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      i,
      x;

    size_t
      bit,
      byte;

    unsigned short
      value;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /*
      Second pass: same packing, but bytes stored left-to-right (i++).
    */
    bit=0;
    byte=0;
    i=0;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      byte<<=1;
      if (((MagickRealType) GetPixelRed(image,p) < threshold) ||
          ((MagickRealType) GetPixelGreen(image,p) < threshold) ||
          ((MagickRealType) GetPixelBlue(image,p) < threshold))
        byte|=0x01;
      bit++;
      if (bit == 8)
        {
          value=bits[byte];
          (void) SetMatrixElement(source_matrixs,i++,y,&value);
          bit=0;
          byte=0;
        }
      p+=GetPixelChannels(image);
    }
    if (bit != 0)
      {
        byte<<=(8-bit);
        value=bits[byte];
        (void) SetMatrixElement(source_matrixs,i++,y,&value);
      }
  }
  RadonProjection(image,source_matrixs,destination_matrixs,1,projection);
  image_view=DestroyCacheView(image_view);
  destination_matrixs=DestroyMatrixInfo(destination_matrixs);
  source_matrixs=DestroyMatrixInfo(source_matrixs);
  /*
    NOTE(review): returns MagickTrue even if a pixel-cache read failed above
    (status is only used to short-circuit the loops) -- confirm intended.
  */
  return(MagickTrue);
}

/*
  GetImageBackgroundColor() averages the pixels in an `offset'-wide frame
  around the image edges and stores the result as image->background_color.
  A non-positive offset is a no-op.
*/
static void GetImageBackgroundColor(Image *image,const ssize_t offset,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  PixelInfo
    background;

  double
    count;

  ssize_t
    y;

  /*
    Compute average background color.
  */
  if (offset <= 0)
    return;
  GetPixelInfo(image,&background);
  count=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    /*
      Skip rows entirely inside the frame; edge rows are scanned but only
      their border columns contribute (inner columns skipped below).
    */
    if ((y >= offset) && (y < ((ssize_t) image->rows-offset)))
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      continue;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((x >= offset) && (x < ((ssize_t) image->columns-offset)))
        continue;
      background.red+=QuantumScale*GetPixelRed(image,p);
      background.green+=QuantumScale*GetPixelGreen(image,p);
      background.blue+=QuantumScale*GetPixelBlue(image,p);
      if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
        background.alpha+=QuantumScale*GetPixelAlpha(image,p);
      count++;
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  image->background_color.red=(double) ClampToQuantum(QuantumRange*
    background.red/count);
  image->background_color.green=(double) ClampToQuantum(QuantumRange*
    background.green/count);
  image->background_color.blue=(double) ClampToQuantum(QuantumRange*
    background.blue/count);
  if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
    image->background_color.alpha=(double) ClampToQuantum(QuantumRange*
      background.alpha/count);
}

MagickExport Image *DeskewImage(const Image *image,const double threshold,
  ExceptionInfo *exception)
{
  AffineMatrix
    affine_matrix;

  const char
    *artifact;

  double
    degrees;

  Image
    *clone_image,
    *crop_image,
    *deskew_image,
    *median_image;

  MagickBooleanType
    status;

  RectangleInfo
    geometry;

  register ssize_t
    i;

  size_t
    max_projection,
    *projection,
    width;

  ssize_t
    skew;

  /*
    Compute deskew angle.
  */
  for (width=1; width < ((image->columns+7)/8); width<<=1) ;
  projection=(size_t *) AcquireQuantumMemory((size_t) (2*width-1),
    sizeof(*projection));
  if (projection == (size_t *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  status=RadonTransform(image,threshold,projection,exception);
  if (status == MagickFalse)
    {
      projection=(size_t *) RelinquishMagickMemory(projection);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    The strongest projection response determines the skew (in eighth-byte
    units); the corresponding angle is recovered with atan below.
  */
  max_projection=0;
  skew=0;
  for (i=0; i < (ssize_t) (2*width-1); i++)
  {
    if (projection[i] > max_projection)
      {
        skew=i-(ssize_t) width+1;
        max_projection=projection[i];
      }
  }
  projection=(size_t *) RelinquishMagickMemory(projection);
  degrees=RadiansToDegrees(-atan((double) skew/width/8));
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TransformEvent,GetMagickModule(),
      " Deskew angle: %g",degrees);
  /*
    Deskew image.
  */
  clone_image=CloneImage(image,0,0,MagickTrue,exception);
  if (clone_image == (Image *) NULL)
    return((Image *) NULL);
  {
    char
      angle[MagickPathExtent];

    /*
      Record the detected angle on the result as "deskew:angle".
    */
    (void) FormatLocaleString(angle,MagickPathExtent,"%.20g",degrees);
    (void) SetImageArtifact(clone_image,"deskew:angle",angle);
  }
  (void) SetImageVirtualPixelMethod(clone_image,BackgroundVirtualPixelMethod,
    exception);
  /*
    Build a pure-rotation affine matrix for the correction angle.
  */
  affine_matrix.sx=cos(DegreesToRadians(fmod((double) degrees,360.0)));
  affine_matrix.rx=sin(DegreesToRadians(fmod((double) degrees,360.0)));
  affine_matrix.ry=(-sin(DegreesToRadians(fmod((double) degrees,360.0))));
  affine_matrix.sy=cos(DegreesToRadians(fmod((double) degrees,360.0)));
  affine_matrix.tx=0.0;
  affine_matrix.ty=0.0;
  artifact=GetImageArtifact(image,"deskew:auto-crop");
  if (IsStringTrue(artifact) == MagickFalse)
    {
      deskew_image=AffineTransformImage(clone_image,&affine_matrix,exception);
      clone_image=DestroyImage(clone_image);
      return(deskew_image);
    }
  /*
    Auto-crop image.
  */
  GetImageBackgroundColor(clone_image,(ssize_t) StringToLong(artifact),
    exception);
  deskew_image=AffineTransformImage(clone_image,&affine_matrix,exception);
  clone_image=DestroyImage(clone_image);
  if (deskew_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Median-filter before measuring the bounding box so stray noise pixels do
    not inflate the crop.
  */
  median_image=StatisticImage(deskew_image,MedianStatistic,3,3,exception);
  if (median_image == (Image *) NULL)
    {
      deskew_image=DestroyImage(deskew_image);
      return((Image *) NULL);
    }
  geometry=GetImageBoundingBox(median_image,exception);
  median_image=DestroyImage(median_image);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TransformEvent,GetMagickModule()," Deskew geometry: "
      "%.20gx%.20g%+.20g%+.20g",(double) geometry.width,(double)
      geometry.height,(double) geometry.x,(double) geometry.y);
  crop_image=CropImage(deskew_image,&geometry,exception);
  deskew_image=DestroyImage(deskew_image);
  return(crop_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     I n t e g r a l R o t a t e I m a g e                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IntegralRotateImage() rotates the image an integral of 90 degrees.  It
%  allocates the memory necessary for the new Image structure and returns a
%  pointer to the rotated image.
%
%  The format of the IntegralRotateImage method is:
%
%      Image *IntegralRotateImage(const Image *image,size_t rotations,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o rotations: Specifies the number of 90 degree rotations.
%
*/
MagickExport Image *IntegralRotateImage(const Image *image,size_t rotations,
  ExceptionInfo *exception)
{
#define RotateImageTag "Rotate/Image"

  CacheView
    *image_view,
    *rotate_view;

  Image
    *rotate_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    page;

  /*
    Initialize rotated image attributes.
  */
  assert(image != (Image *) NULL);
  page=image->page;
  rotations%=4;
  if (rotations == 0)
    return(CloneImage(image,0,0,MagickTrue,exception));
  /*
    90/270 degree rotations swap the canvas dimensions.
  */
  if ((rotations == 1) || (rotations == 3))
    rotate_image=CloneImage(image,image->rows,image->columns,MagickTrue,
      exception);
  else
    rotate_image=CloneImage(image,0,0,MagickTrue,
      exception);
  if (rotate_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Integral rotate the image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  rotate_view=AcquireAuthenticCacheView(rotate_image,exception);
  switch (rotations)
  {
    case 1:
    {
      size_t
        tile_height,
        tile_width;

      ssize_t
        tile_y;

      /*
        Rotate 90 degrees.
      */
      GetPixelCacheTileSize(image,&tile_width,&tile_height);
      tile_width=image->columns;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,image,image->rows/tile_height,1)
#endif
      for (tile_y=0; tile_y < (ssize_t) image->rows; tile_y+=(ssize_t) tile_height)
      {
        register ssize_t
          tile_x;

        if (status == MagickFalse)
          continue;
        tile_x=0;
        for ( ; tile_x < (ssize_t) image->columns; tile_x+=(ssize_t) tile_width)
        {
          MagickBooleanType
            sync;

          register const Quantum
            *magick_restrict p;

          register Quantum
            *magick_restrict q;

          register ssize_t
            y;

          size_t
            height,
            width;

          /*
            Clamp the tile to the image boundary.
          */
          width=tile_width;
          if ((tile_x+(ssize_t) tile_width) > (ssize_t) image->columns)
            width=(size_t) (tile_width-(tile_x+tile_width-image->columns));
          height=tile_height;
          if ((tile_y+(ssize_t) tile_height) > (ssize_t) image->rows)
            height=(size_t) (tile_height-(tile_y+tile_height-image->rows));
          p=GetCacheViewVirtualPixels(image_view,tile_x,tile_y,width,height,
            exception);
          if (p == (const Quantum *) NULL)
            {
              status=MagickFalse;
              break;
            }
          /*
            Each source tile column becomes a destination row; walk the
            source tile bottom-up (tile_pixels decrements by one row).
          */
          for (y=0; y < (ssize_t) width; y++)
          {
            register const Quantum
              *magick_restrict tile_pixels;

            register ssize_t
              x;

            if (status == MagickFalse)
              continue;
            q=QueueCacheViewAuthenticPixels(rotate_view,(ssize_t)
              (rotate_image->columns-(tile_y+height)),y+tile_x,height,1,
              exception);
            if (q == (Quantum *) NULL)
              {
                status=MagickFalse;
                continue;
              }
            tile_pixels=p+((height-1)*width+y)*GetPixelChannels(image);
            for (x=0; x < (ssize_t) height; x++)
            {
              register ssize_t
                i;

              for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
              {
                PixelChannel channel = GetPixelChannelChannel(image,i);
                PixelTrait traits = GetPixelChannelTraits(image,channel);
                PixelTrait rotate_traits=GetPixelChannelTraits(rotate_image,
                  channel);
                if ((traits == UndefinedPixelTrait) ||
                    (rotate_traits == UndefinedPixelTrait))
                  continue;
                SetPixelChannel(rotate_image,channel,tile_pixels[i],q);
              }
              tile_pixels-=width*GetPixelChannels(image);
              q+=GetPixelChannels(rotate_image);
            }
            sync=SyncCacheViewAuthenticPixels(rotate_view,exception);
            if (sync == MagickFalse)
              status=MagickFalse;
          }
        }
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp critical (MagickCore_IntegralRotateImage)
#endif
            proceed=SetImageProgress(image,RotateImageTag,progress+=tile_height,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      (void) SetImageProgress(image,RotateImageTag,(MagickOffsetType)
        image->rows-1,image->rows);
      /*
        Rotate the page (canvas) geometry along with the pixels.
      */
      Swap(page.width,page.height);
      Swap(page.x,page.y);
      if (page.width != 0)
        page.x=(ssize_t) (page.width-rotate_image->columns-page.x);
      break;
    }
    case 2:
    {
      register ssize_t
        y;

      /*
        Rotate 180 degrees.
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        MagickBooleanType
          sync;

        register const Quantum
          *magick_restrict p;

        register Quantum
          *magick_restrict q;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        /*
          Row y maps to row rows-y-1, with the pixel order reversed (q is
          walked backwards from the end of the destination row).
        */
        p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
        q=QueueCacheViewAuthenticPixels(rotate_view,0,(ssize_t) (image->rows-y-
          1),image->columns,1,exception);
        if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
          {
            status=MagickFalse;
            continue;
          }
        q+=GetPixelChannels(rotate_image)*image->columns;
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          register ssize_t
            i;

          q-=GetPixelChannels(rotate_image);
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            PixelTrait rotate_traits=GetPixelChannelTraits(rotate_image,
              channel);
            if ((traits == UndefinedPixelTrait) ||
                (rotate_traits == UndefinedPixelTrait))
              continue;
            SetPixelChannel(rotate_image,channel,p[i],q);
          }
          p+=GetPixelChannels(image);
        }
        sync=SyncCacheViewAuthenticPixels(rotate_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp critical (MagickCore_IntegralRotateImage)
#endif
            proceed=SetImageProgress(image,RotateImageTag,progress++,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      (void) SetImageProgress(image,RotateImageTag,(MagickOffsetType)
        image->rows-1,image->rows);
      if (page.width != 0)
        page.x=(ssize_t) (page.width-rotate_image->columns-page.x);
      if (page.height != 0)
        page.y=(ssize_t) (page.height-rotate_image->rows-page.y);
      break;
    }
    case 3:
    {
      size_t
        tile_height,
        tile_width;

      ssize_t
        tile_y;

      /*
        Rotate 270 degrees.
      */
      GetPixelCacheTileSize(image,&tile_width,&tile_height);
      tile_width=image->columns;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,image,image->rows/tile_height,1)
#endif
      for (tile_y=0; tile_y < (ssize_t) image->rows; tile_y+=(ssize_t) tile_height)
      {
        register ssize_t
          tile_x;

        if (status == MagickFalse)
          continue;
        tile_x=0;
        for ( ; tile_x < (ssize_t) image->columns; tile_x+=(ssize_t) tile_width)
        {
          MagickBooleanType
            sync;

          register const Quantum
            *magick_restrict p;

          register Quantum
            *magick_restrict q;

          register ssize_t
            y;

          size_t
            height,
            width;

          /*
            Clamp the tile to the image boundary.
          */
          width=tile_width;
          if ((tile_x+(ssize_t) tile_width) > (ssize_t) image->columns)
            width=(size_t) (tile_width-(tile_x+tile_width-image->columns));
          height=tile_height;
          if ((tile_y+(ssize_t) tile_height) > (ssize_t) image->rows)
            height=(size_t) (tile_height-(tile_y+tile_height-image->rows));
          p=GetCacheViewVirtualPixels(image_view,tile_x,tile_y,width,height,
            exception);
          if (p == (const Quantum *) NULL)
            {
              status=MagickFalse;
              break;
            }
          /*
            Mirror of the 90-degree case: columns are read right-to-left and
            tile_pixels advances forward one row per destination pixel.
          */
          for (y=0; y < (ssize_t) width; y++)
          {
            register const Quantum
              *magick_restrict tile_pixels;

            register ssize_t
              x;

            if (status == MagickFalse)
              continue;
            q=QueueCacheViewAuthenticPixels(rotate_view,tile_y,(ssize_t) (y+
              rotate_image->rows-(tile_x+width)),height,1,exception);
            if (q == (Quantum *) NULL)
              {
                status=MagickFalse;
                continue;
              }
            tile_pixels=p+((width-1)-y)*GetPixelChannels(image);
            for (x=0; x < (ssize_t) height; x++)
            {
              register ssize_t
                i;

              for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
              {
                PixelChannel channel = GetPixelChannelChannel(image,i);
                PixelTrait traits = GetPixelChannelTraits(image,channel);
                PixelTrait rotate_traits=GetPixelChannelTraits(rotate_image,
                  channel);
                if ((traits == UndefinedPixelTrait) ||
                    (rotate_traits == UndefinedPixelTrait))
                  continue;
                SetPixelChannel(rotate_image,channel,tile_pixels[i],q);
              }
              tile_pixels+=width*GetPixelChannels(image);
              q+=GetPixelChannels(rotate_image);
            }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp critical (MagickCore_IntegralRotateImage)
#endif
            sync=SyncCacheViewAuthenticPixels(rotate_view,exception);
            if (sync == MagickFalse)
              status=MagickFalse;
          }
        }
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

            proceed=SetImageProgress(image,RotateImageTag,progress+=tile_height,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      (void) SetImageProgress(image,RotateImageTag,(MagickOffsetType)
        image->rows-1,image->rows);
      Swap(page.width,page.height);
      Swap(page.x,page.y);
      if (page.height != 0)
        page.y=(ssize_t) (page.height-rotate_image->rows-page.y);
      break;
    }
    default:
      break;
  }
  rotate_view=DestroyCacheView(rotate_view);
  image_view=DestroyCacheView(image_view);
  rotate_image->type=image->type;
  rotate_image->page=page;
  if (status == MagickFalse)
    rotate_image=DestroyImage(rotate_image);
  return(rotate_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   X S h e a r I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  XShearImage() shears the image in the X direction with a shear angle of
%  'degrees'.
Positive angles shear counter-clockwise (right-hand rule), and
%  negative angles shear clockwise.  Angles are measured relative to a vertical
%  Y-axis.  X shears will widen an image creating 'empty' triangles on the left
%  and right sides of the source image.
%
%  The format of the XShearImage method is:
%
%      MagickBooleanType XShearImage(Image *image,const double degrees,
%        const size_t width,const size_t height,
%        const ssize_t x_offset,const ssize_t y_offset,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o degrees: A double representing the shearing angle along the X
%      axis.
%
%    o width, height, x_offset, y_offset: Defines a region of the image
%      to shear.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType XShearImage(Image *image,const double degrees,
  const size_t width,const size_t height,const ssize_t x_offset,
  const ssize_t y_offset,ExceptionInfo *exception)
{
#define XShearImageTag "XShear/Image"

  typedef enum
  {
    LEFT,
    RIGHT
  } ShearDirection;

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    background;

  ssize_t
    y;

  /*
    X shear image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  status=MagickTrue;
  background=image->background_color;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,height,1)
#endif
  for (y=0; y < (ssize_t) height; y++)
  {
    PixelInfo
      pixel,
      source,
      destination;

    double
      area,
      displacement;

    register Quantum
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      i;

    ShearDirection
      direction;

    ssize_t
      step;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewAuthenticPixels(image_view,0,y_offset+y,image->columns,1,
      exception);
    if (p == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    p+=x_offset*GetPixelChannels(image);
    /*
      Row displacement grows linearly with distance from the vertical center;
      its sign selects the shift direction, split into an integral `step'
      and fractional `area' used for edge blending.
    */
    displacement=degrees*(double) (y-height/2.0);
    if (displacement == 0.0)
      continue;
    if (displacement > 0.0)
      direction=RIGHT;
    else
      {
        displacement*=(-1.0);
        direction=LEFT;
      }
    step=(ssize_t) floor((double) displacement);
    area=(double) (displacement-step);
    step++;
    pixel=background;
    GetPixelInfo(image,&source);
    GetPixelInfo(image,&destination);
    switch (direction)
    {
      case LEFT:
      {
        /*
          Transfer pixels left-to-right.
        */
        if (step > x_offset)
          break;
        q=p-step*GetPixelChannels(image);
        for (i=0; i < (ssize_t) width; i++)
        {
          if ((x_offset+i) < step)
            {
              p+=GetPixelChannels(image);
              GetPixelInfoPixel(image,p,&pixel);
              q+=GetPixelChannels(image);
              continue;
            }
          /*
            Blend the previous pixel with the current one by `area' to
            anti-alias the fractional part of the shift.
          */
          GetPixelInfoPixel(image,p,&source);
          CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha,
            &source,(double) GetPixelAlpha(image,p),area,&destination);
          SetPixelViaPixelInfo(image,&destination,q);
          GetPixelInfoPixel(image,p,&pixel);
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(image);
        }
        /*
          Flush the final carried pixel, then fill the vacated span with the
          background color.
        */
        CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha,
          &background,(double) background.alpha,area,&destination);
        SetPixelViaPixelInfo(image,&destination,q);
        q+=GetPixelChannels(image);
        for (i=0; i < (step-1); i++)
        {
          SetPixelViaPixelInfo(image,&background,q);
          q+=GetPixelChannels(image);
        }
        break;
      }
      case RIGHT:
      {
        /*
          Transfer pixels right-to-left.
        */
        p+=width*GetPixelChannels(image);
        q=p+step*GetPixelChannels(image);
        for (i=0; i < (ssize_t) width; i++)
        {
          p-=GetPixelChannels(image);
          q-=GetPixelChannels(image);
          if ((size_t) (x_offset+width+step-i) > image->columns)
            continue;
          GetPixelInfoPixel(image,p,&source);
          CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha,
            &source,(double) GetPixelAlpha(image,p),area,&destination);
          SetPixelViaPixelInfo(image,&destination,q);
          GetPixelInfoPixel(image,p,&pixel);
        }
        CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha,
          &background,(double) background.alpha,area,&destination);
        q-=GetPixelChannels(image);
        SetPixelViaPixelInfo(image,&destination,q);
        for (i=0; i < (step-1); i++)
        {
          q-=GetPixelChannels(image);
          SetPixelViaPixelInfo(image,&background,q);
        }
        break;
      }
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_XShearImage)
#endif
        proceed=SetImageProgress(image,XShearImageTag,progress++,height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   Y S h e a r I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  YShearImage shears the image in the Y direction with a shear angle of
%  'degrees'.  Positive angles shear counter-clockwise (right-hand rule), and
%  negative angles shear clockwise.  Angles are measured relative to a
%  horizontal X-axis.  Y shears will increase the height of an image creating
%  'empty' triangles on the top and bottom of the source image.
%
%  The format of the YShearImage method is:
%
%      MagickBooleanType YShearImage(Image *image,const double degrees,
%        const size_t width,const size_t height,
%        const ssize_t x_offset,const ssize_t y_offset,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o degrees: A double representing the shearing angle along the Y
%      axis.
%
%    o width, height, x_offset, y_offset: Defines a region of the image
%      to shear.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType YShearImage(Image *image,const double degrees,
  const size_t width,const size_t height,const ssize_t x_offset,
  const ssize_t y_offset,ExceptionInfo *exception)
{
#define YShearImageTag "YShear/Image"

  typedef enum
  {
    UP,
    DOWN
  } ShearDirection;

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    background;

  ssize_t
    x;

  /*
    Y Shear image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  status=MagickTrue;
  progress=0;
  background=image->background_color;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,width,1)
#endif
  for (x=0; x < (ssize_t) width; x++)
  {
    ssize_t
      step;

    double
      area,
      displacement;

    PixelInfo
      pixel,
      source,
      destination;

    register Quantum
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      i;

    ShearDirection
      direction;

    if (status == MagickFalse)
      continue;
    /*
      Column-wise analogue of XShearImage: fetch one full column of pixels.
    */
    p=GetCacheViewAuthenticPixels(image_view,x_offset+x,0,1,image->rows,
      exception);
    if (p == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    p+=y_offset*GetPixelChannels(image);
    displacement=degrees*(double) (x-width/2.0);
    if (displacement == 0.0)
      continue;
    if (displacement > 0.0)
      direction=DOWN;
    else
      {
        displacement*=(-1.0);
        direction=UP;
      }
    step=(ssize_t) floor((double) displacement);
    area=(double) (displacement-step);
    step++;
    pixel=background;
    GetPixelInfo(image,&source);
    GetPixelInfo(image,&destination);
    switch (direction)
    {
      case UP:
      {
        /*
          Transfer pixels top-to-bottom.
        */
        if (step > y_offset)
          break;
        q=p-step*GetPixelChannels(image);
        for (i=0; i < (ssize_t) height; i++)
        {
          if ((y_offset+i) < step)
            {
              p+=GetPixelChannels(image);
              GetPixelInfoPixel(image,p,&pixel);
              q+=GetPixelChannels(image);
              continue;
            }
          GetPixelInfoPixel(image,p,&source);
          CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha,
            &source,(double) GetPixelAlpha(image,p),area,
            &destination);
          SetPixelViaPixelInfo(image,&destination,q);
          GetPixelInfoPixel(image,p,&pixel);
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(image);
        }
        /*
          Flush the carried pixel, then background-fill the vacated span.
        */
        CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha,
          &background,(double) background.alpha,area,&destination);
        SetPixelViaPixelInfo(image,&destination,q);
        q+=GetPixelChannels(image);
        for (i=0; i < (step-1); i++)
        {
          SetPixelViaPixelInfo(image,&background,q);
          q+=GetPixelChannels(image);
        }
        break;
      }
      case DOWN:
      {
        /*
          Transfer pixels bottom-to-top.
        */
        p+=height*GetPixelChannels(image);
        q=p+step*GetPixelChannels(image);
        for (i=0; i < (ssize_t) height; i++)
        {
          p-=GetPixelChannels(image);
          q-=GetPixelChannels(image);
          if ((size_t) (y_offset+height+step-i) > image->rows)
            continue;
          GetPixelInfoPixel(image,p,&source);
          CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha,
            &source,(double) GetPixelAlpha(image,p),area,
            &destination);
          SetPixelViaPixelInfo(image,&destination,q);
          GetPixelInfoPixel(image,p,&pixel);
        }
        CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha,
          &background,(double) background.alpha,area,&destination);
        q-=GetPixelChannels(image);
        SetPixelViaPixelInfo(image,&destination,q);
        for (i=0; i < (step-1); i++)
        {
          q-=GetPixelChannels(image);
          SetPixelViaPixelInfo(image,&background,q);
        }
        break;
      }
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_YShearImage)
#endif
        proceed=SetImageProgress(image,YShearImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S h e a r I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ShearImage() creates a new image that is a shear_image copy of an existing
%  one.  Shearing slides one edge of an image along the X or Y axis, creating
%  a parallelogram.  An X direction shear slides an edge along the X axis,
%  while a Y direction shear slides an edge along the Y axis.  The amount of
%  the shear is controlled by a shear angle.  For X direction shears, x_shear
%  is measured relative to the Y axis, and similarly, for Y direction shears
%  y_shear is measured relative to the X axis.  Empty triangles left over from
%  shearing the image are filled with the background color defined by member
%  'background_color' of the image.  ShearImage() allocates the memory
%  necessary for the new Image structure and returns a pointer to the new image.
%
%  ShearImage() is based on the paper "A Fast Algorithm for General Raster
%  Rotatation" by Alan W. Paeth.
%
%  The format of the ShearImage method is:
%
%      Image *ShearImage(const Image *image,const double x_shear,
%        const double y_shear,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o x_shear, y_shear: Specifies the number of degrees to shear the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ShearImage(const Image *image,const double x_shear,
  const double y_shear,ExceptionInfo *exception)
{
  Image
    *integral_image,
    *shear_image;

  MagickBooleanType
    status;

  PointInfo
    shear;

  RectangleInfo
    border_info,
    bounds;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    An exact multiple of 90 degrees has an infinite tangent; reject it.
  */
  if ((x_shear != 0.0) && (fmod(x_shear,90.0) == 0.0))
    ThrowImageException(ImageError,"AngleIsDiscontinuous");
  if ((y_shear != 0.0) && (fmod(y_shear,90.0) == 0.0))
    ThrowImageException(ImageError,"AngleIsDiscontinuous");
  /*
    Initialize shear angle.
  */
  integral_image=CloneImage(image,0,0,MagickTrue,exception);
  if (integral_image == (Image *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  shear.x=(-tan(DegreesToRadians(fmod(x_shear,360.0))));
  shear.y=tan(DegreesToRadians(fmod(y_shear,360.0)));
  if ((shear.x == 0.0) && (shear.y == 0.0))
    return(integral_image);
  if (SetImageStorageClass(integral_image,DirectClass,exception) == MagickFalse)
    {
      integral_image=DestroyImage(integral_image);
      return(integral_image);
    }
  if (integral_image->alpha_trait == UndefinedPixelTrait)
    (void) SetImageAlphaChannel(integral_image,OpaqueAlphaChannel,exception);
  /*
    Compute image size.
  */
  bounds.width=image->columns+(ssize_t) floor(fabs(shear.x)*image->rows+0.5);
  bounds.x=(ssize_t) ceil((double) image->columns+((fabs(shear.x)*image->rows)-
    image->columns)/2.0-0.5);
  bounds.y=(ssize_t) ceil((double) image->rows+((fabs(shear.y)*bounds.width)-
    image->rows)/2.0-0.5);
  /*
    Surround image with border.
  */
  integral_image->border_color=integral_image->background_color;
  integral_image->compose=CopyCompositeOp;
  border_info.width=(size_t) bounds.x;
  border_info.height=(size_t) bounds.y;
  shear_image=BorderImage(integral_image,&border_info,image->compose,exception);
  integral_image=DestroyImage(integral_image);
  if (shear_image == (Image *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  /*
    Shear the image.
  */
  if (shear_image->alpha_trait == UndefinedPixelTrait)
    (void) SetImageAlphaChannel(shear_image,OpaqueAlphaChannel,exception);
  status=XShearImage(shear_image,shear.x,image->columns,image->rows,bounds.x,
    (ssize_t) (shear_image->rows-image->rows)/2,exception);
  if (status == MagickFalse)
    {
      shear_image=DestroyImage(shear_image);
      return((Image *) NULL);
    }
  status=YShearImage(shear_image,shear.y,bounds.width,image->rows,(ssize_t)
    (shear_image->columns-bounds.width)/2,bounds.y,exception);
  if (status == MagickFalse)
    {
      shear_image=DestroyImage(shear_image);
      return((Image *) NULL);
    }
  status=CropToFitImage(&shear_image,shear.x,shear.y,(MagickRealType)
    image->columns,(MagickRealType) image->rows,MagickFalse,exception);
  if (status == MagickFalse)
    {
      /*
        Bug fix: test the crop status BEFORE dereferencing shear_image.
        The original set alpha_trait/compose/page on an image it was about
        to destroy, and would dereference an invalid pointer if the crop
        helper released the image on failure.  On failure this function has
        always returned NULL (DestroyImage() returns NULL), so callers are
        unaffected.
      */
      if (shear_image != (Image *) NULL)
        shear_image=DestroyImage(shear_image);
      return((Image *) NULL);
    }
  shear_image->alpha_trait=image->alpha_trait;
  shear_image->compose=image->compose;
  shear_image->page.width=0;
  shear_image->page.height=0;
  return(shear_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S h e a r R o t a t e I m a g e                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ShearRotateImage() creates a new image that is a rotated copy of an existing
%  one.  Positive angles rotate counter-clockwise (right-hand rule), while
%  negative angles rotate clockwise.  Rotated images are usually larger than
%  the originals and have 'empty' triangular corners.  X axis.
Empty % triangles left over from shearing the image are filled with the background % color defined by member 'background_color' of the image. ShearRotateImage % allocates the memory necessary for the new Image structure and returns a % pointer to the new image. % % ShearRotateImage() is based on the paper "A Fast Algorithm for General % Raster Rotatation" by Alan W. Paeth. ShearRotateImage is adapted from a % similar method based on the Paeth paper written by Michael Halle of the % Spatial Imaging Group, MIT Media Lab. % % The format of the ShearRotateImage method is: % % Image *ShearRotateImage(const Image *image,const double degrees, % ExceptionInfo *exception) % % A description of each parameter follows. % % o image: the image. % % o degrees: Specifies the number of degrees to rotate the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *ShearRotateImage(const Image *image,const double degrees, ExceptionInfo *exception) { Image *integral_image, *rotate_image; MagickBooleanType status; MagickRealType angle; PointInfo shear; RectangleInfo border_info, bounds; size_t height, rotations, shear_width, width; /* Adjust rotation angle. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); angle=fmod(degrees,360.0); if (angle < -45.0) angle+=360.0; for (rotations=0; angle > 45.0; rotations++) angle-=90.0; rotations%=4; /* Calculate shear equations. 
*/ integral_image=IntegralRotateImage(image,rotations,exception); if (integral_image == (Image *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); shear.x=(-tan((double) DegreesToRadians(angle)/2.0)); shear.y=sin((double) DegreesToRadians(angle)); if ((shear.x == 0.0) && (shear.y == 0.0)) return(integral_image); if (SetImageStorageClass(integral_image,DirectClass,exception) == MagickFalse) { integral_image=DestroyImage(integral_image); return(integral_image); } if (integral_image->alpha_trait == UndefinedPixelTrait) (void) SetImageAlphaChannel(integral_image,OpaqueAlphaChannel,exception); /* Compute maximum bounds for 3 shear operations. */ width=integral_image->columns; height=integral_image->rows; bounds.width=(size_t) floor(fabs((double) height*shear.x)+width+0.5); bounds.height=(size_t) floor(fabs((double) bounds.width*shear.y)+height+0.5); shear_width=(size_t) floor(fabs((double) bounds.height*shear.x)+ bounds.width+0.5); bounds.x=(ssize_t) floor((double) ((shear_width > bounds.width) ? width : bounds.width-shear_width+2)/2.0+0.5); bounds.y=(ssize_t) floor(((double) bounds.height-height+2)/2.0+0.5); /* Surround image with a border. */ integral_image->border_color=integral_image->background_color; integral_image->compose=CopyCompositeOp; border_info.width=(size_t) bounds.x; border_info.height=(size_t) bounds.y; rotate_image=BorderImage(integral_image,&border_info,image->compose, exception); integral_image=DestroyImage(integral_image); if (rotate_image == (Image *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); /* Rotate the image. 
*/ status=XShearImage(rotate_image,shear.x,width,height,bounds.x,(ssize_t) (rotate_image->rows-height)/2,exception); if (status == MagickFalse) { rotate_image=DestroyImage(rotate_image); return((Image *) NULL); } status=YShearImage(rotate_image,shear.y,bounds.width,height,(ssize_t) (rotate_image->columns-bounds.width)/2,bounds.y,exception); if (status == MagickFalse) { rotate_image=DestroyImage(rotate_image); return((Image *) NULL); } status=XShearImage(rotate_image,shear.x,bounds.width,bounds.height,(ssize_t) (rotate_image->columns-bounds.width)/2,(ssize_t) (rotate_image->rows- bounds.height)/2,exception); if (status == MagickFalse) { rotate_image=DestroyImage(rotate_image); return((Image *) NULL); } status=CropToFitImage(&rotate_image,shear.x,shear.y,(MagickRealType) width, (MagickRealType) height,MagickTrue,exception); rotate_image->alpha_trait=image->alpha_trait; rotate_image->compose=image->compose; rotate_image->page.width=0; rotate_image->page.height=0; if (status == MagickFalse) rotate_image=DestroyImage(rotate_image); return(rotate_image); }
lulesh.c
/* Copyright (c) 2010. Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory. LLNL-CODE-461231 All rights reserved. This file is part of LULESH, Version 1.0. Please also read this link -- http://www.opensource.org/licenses/index.php Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. Additional BSD Notice 1. This notice is required to be provided under our contract with the U.S. Department of Energy (DOE). This work was produced at Lawrence Livermore National Laboratory under Contract No. DE-AC52-07NA27344 with the DOE. 2. 
Neither the United States Government nor Lawrence Livermore National Security, LLC nor any of their employees, makes any warranty, express or implied, or assumes any liability or responsibility for the accuracy, completeness, or usefulness of any information, apparatus, product, or process disclosed, or represents that its use would not infringe privately-owned rights. 3. Also, reference herein to any specific commercial products, process, or services by trade name, trademark, manufacturer or otherwise does not necessarily constitute or imply its endorsement, recommendation, or favoring by the United States Government or Lawrence Livermore National Security, LLC. The views and opinions of authors expressed herein do not necessarily state or reflect those of the United States Government or Lawrence Livermore National Security, LLC, and shall not be used for advertising or product endorsement purposes. */ #include <math.h> #include <stdio.h> #include <stdlib.h> #include <sys/time.h> //#define _OPENACCM #ifdef _OPENACCM #include <openacc.h> #endif #define LULESH_SHOW_PROGRESS 0 #define LULESH_MEASURE_TIME 1 #define LULESH_STORE_OUTPUT 1 #define LULESH_PRINT_SIZE 0 #define LULESH_CHECK_SIZE 0 #define ALLOW_ASPENIFSTMT 0 #ifndef T_EDGEELEM #define T_EDGEELEM 45 #endif #define T_EDGENODES (T_EDGEELEM+1) #if T_EDGEELEM == 10 #pragma openarc #define T_EDGEELEM 10 #elif T_EDGEELEM == 30 #pragma openarc #define T_EDGEELEM 30 #elif T_EDGEELEM == 45 #pragma openarc #define T_EDGEELEM 45 #elif T_EDGEELEM == 60 #pragma openarc #define T_EDGEELEM 60 #endif #pragma openarc #define T_EDGENODES (T_EDGEELEM+1) #define T_NUMELEM (T_EDGEELEM*T_EDGEELEM*T_EDGEELEM) #pragma openarc #define T_NUMELEM (T_EDGEELEM*T_EDGEELEM*T_EDGEELEM) #define T_LENGTH (T_EDGEELEM*T_EDGEELEM*T_EDGEELEM) #define T_NUMELEM8 (T_NUMELEM*8) #define T_NUMNODE (T_EDGENODES*T_EDGENODES*T_EDGENODES) #pragma openarc #define T_NUMNODE (T_EDGENODES*T_EDGENODES*T_EDGENODES) #define T_NUMNODESETS 
(T_EDGENODES*T_EDGENODES)
#define T_NODEELEMCORNERLIST T_NUMELEM8

#if LULESH_MEASURE_TIME
/* Wall-clock time in seconds (microsecond resolution via gettimeofday). */
double my_timer()
{
   struct timeval time;
   gettimeofday (&time, 0);
   return time.tv_sec + time.tv_usec / 1000000.0;
}
#endif

#define std_max(a,b) (((a) > (b)) ? (a) : (b))

/* Fixed arithmetic/index representations used throughout the kernel. */
#define Real_t double
#define Index_t int
#define Int_t int

/* Abort codes returned through exit() by the solver. */
enum { VolumeError = -1, QStopError = -2 } ;

/****************************************************/
/* Allow flexibility for arithmetic representations */
/****************************************************/

/* Could also support fixed point and interval arithmetic types */
//typedef float        real4 ;
//typedef double       real8 ;
//typedef long double  real10 ;  /* 10 bytes on x86 */

//typedef int    Index_t ; /* array subscript and loop index */
//typedef real8  Real_t ;  /* floating point representation */
//typedef int    Int_t ;   /* integer representation */

/* Precision-suffixed wrappers over the C math library (4/8/10 bytes). */
inline float       SQRT4(float       arg) { return sqrtf(arg) ; }
inline double      SQRT8(double      arg) { return sqrt(arg) ; }
inline long double SQRT10(long double arg) { return sqrtl(arg) ; }

inline float       CBRT4(float       arg) { return cbrtf(arg) ; }
inline double      CBRT8(double      arg) { return cbrt(arg) ; }
inline long double CBRT10(long double arg) { return cbrtl(arg) ; }

inline float       FABS4(float       arg) { return fabsf(arg) ; }
inline double      FABS8(double      arg) { return fabs(arg) ; }
inline long double FABS10(long double arg) { return fabsl(arg) ; }

/************************************************************/
/* Allow for flexible data layout experiments by separating */
/* array interface from underlying implementation.          */
/************************************************************/

//struct Domain {
/* This first implementation allows for runnable code */
/* and is not meant to be optimal. Final implementation */
/* should separate declaration and allocation phases */
/* so that allocation can be scheduled in a cache conscious */
/* manner.
*/
//private:

   /******************/
   /* Implementation */
   /******************/

   /* Node-centered */
   Real_t* m_x ;   /* coordinates */
   Real_t* m_y ;
   Real_t* m_z ;

   Real_t* m_xd ;  /* velocities */
   Real_t* m_yd ;
   Real_t* m_zd ;

   Real_t* m_xdd ; /* accelerations */
   Real_t* m_ydd ;
   Real_t* m_zdd ;

   Real_t* m_fx ;  /* forces */
   Real_t* m_fy ;
   Real_t* m_fz ;

   Real_t* m_nodalMass ;  /* mass */

   Index_t* m_symmX ;  /* symmetry plane nodesets */
   Index_t* m_symmY ;
   Index_t* m_symmZ ;

   /* Node-to-element "corner" mapping built by AllocateNodeElemIndexes(). */
   Index_t* m_nodeElemCount ;
   Index_t* m_nodeElemStart ;
//   Index_t* m_nodeElemList ;
   Index_t* m_nodeElemCornerList ;

   /* Element-centered */
   Index_t* m_matElemlist ;  /* material indexset */
   Index_t* m_nodelist ;     /* elemToNode connectivity */

   Index_t* m_lxim ;  /* element connectivity across each face */
   Index_t* m_lxip ;
   Index_t* m_letam ;
   Index_t* m_letap ;
   Index_t* m_lzetam ;
   Index_t* m_lzetap ;

   Int_t* m_elemBC ;  /* symmetry/free-surface flags for each elem face */

   Real_t* m_dxx ;  /* principal strains -- temporary */
   Real_t* m_dyy ;
   Real_t* m_dzz ;

   Real_t* m_delv_xi ;    /* velocity gradient -- temporary */
   Real_t* m_delv_eta ;
   Real_t* m_delv_zeta ;

   Real_t* m_delx_xi ;    /* coordinate gradient -- temporary */
   Real_t* m_delx_eta ;
   Real_t* m_delx_zeta ;

   Real_t* m_e ;   /* energy */
   Real_t* m_p ;   /* pressure */
   Real_t* m_q ;   /* q */
   Real_t* m_ql ;  /* linear term for q */
   Real_t* m_qq ;  /* quadratic term for q */

   Real_t* m_v ;     /* relative volume */
   Real_t* m_volo ;  /* reference volume */
   Real_t* m_vnew ;  /* new relative volume -- temporary */
   Real_t* m_delv ;  /* m_vnew - m_v */
   Real_t* m_vdov ;  /* volume derivative over volume */

   Real_t* m_arealg ;  /* characteristic length of an element */

   Real_t* m_ss ;  /* "sound speed" */

   Real_t* m_elemMass ;  /* mass */

   /* Temporary variables from IntegrateStressForElems() and
      CalcFBHourglassForceForElems() */
   Real_t *fx_elem;
   Real_t *fy_elem;
   Real_t *fz_elem;

   /* Temporary variables from CalcHourglassControlForElems() */
   Real_t *dvdx;
   Real_t *dvdy;
   Real_t *dvdz;
   Real_t *x8n;
   Real_t *y8n;
   Real_t *z8n;

   /* Temporary variables from EvalEOSForElems() */
   Real_t *e_old;
   Real_t *delvc;
   Real_t *p_old;
   Real_t *q_old;
   Real_t *compression;
   Real_t *compHalfStep;
   Real_t *qq;
   Real_t *ql;
   Real_t *work;
   Real_t *p_new;
   Real_t *e_new;
   Real_t *q_new;
   Real_t *bvc;
   Real_t *pbvc;

   /* Temporary variables from ApplyMaterialPropertiesForElems() */
   Real_t *vnewc;

   /* Temporary variables from CalcVolumeForceForElems() */
   Real_t *sigxx;
   Real_t *sigyy;
   Real_t *sigzz;
   Real_t *determ;

   /* Parameters */
   Real_t m_dtfixed ;           /* fixed time increment */
   Real_t m_time ;              /* current time */
   Real_t m_deltatime ;         /* variable time increment */
   Real_t m_deltatimemultlb ;   /* lower bound on dt growth ratio */
   Real_t m_deltatimemultub ;   /* upper bound on dt growth ratio */
   Real_t m_stoptime ;          /* end time for simulation */

   Real_t m_u_cut ;             /* velocity tolerance */
   Real_t m_hgcoef ;            /* hourglass control */
   Real_t m_qstop ;             /* excessive q indicator */
   Real_t m_monoq_max_slope ;
   Real_t m_monoq_limiter_mult ;
   Real_t m_e_cut ;             /* energy tolerance */
   Real_t m_p_cut ;             /* pressure tolerance */
   Real_t m_ss4o3 ;
   Real_t m_q_cut ;             /* q tolerance */
   Real_t m_v_cut ;             /* relative volume tolerance */
   Real_t m_qlc_monoq ;         /* linear term coef for q */
   Real_t m_qqc_monoq ;         /* quadratic term coef for q */
   Real_t m_qqc ;
   Real_t m_eosvmax ;
   Real_t m_eosvmin ;
   Real_t m_pmin ;              /* pressure floor */
   Real_t m_emin ;              /* energy floor */
   Real_t m_dvovmax ;           /* maximum allowable volume change */
   Real_t m_refdens ;           /* reference density */

   Real_t m_dtcourant ;         /* courant constraint */
   Real_t m_dthydro ;           /* volume change constraint */
   Real_t m_dtmax ;             /* maximum allowable time increment */

   Int_t m_cycle ;              /* iteration count for simulation */

   //[DEBUG] Moved from main() for easy model generation.
   Index_t edgeElems ;   /* elements along one edge of the cubic mesh */
   Index_t edgeNodes ;   /* nodes along one edge */
//#pragma aspen declare param(m_sizeX:edgeElems)
   Index_t m_sizeX ;     /* X,Y,Z extent of this block */
   Index_t m_sizeY ;
   Index_t m_sizeZ ;
//#pragma aspen declare param(m_numElem:T_NUMELEM)
   Index_t m_numElem ;   /* Elements/Nodes in this domain */
//#pragma aspen declare param(m_numElem8:m_numElem*8)
   Index_t m_numElem8;
//#pragma aspen declare param(m_numNode:T_NUMNODE)
   Index_t m_numNode ;
#pragma aspen declare param(m_nCorner:m_numElem8)
   Index_t m_nCorner ;   /* total element-corner entries; set in AllocateNodeElemIndexes() */

//public:

   /**************/
   /* Allocation */
   /**************/

   /* Allocate node-centered persistent arrays for `size` nodes.
      Velocities, accelerations and nodal mass start at zero (calloc);
      coordinates and forces are left uninitialized (malloc). */
   void AllocateNodalPersistent(size_t size)
   {
      m_x = (Real_t*)malloc(size*sizeof(Real_t)) ;
      m_y = (Real_t*)malloc(size*sizeof(Real_t)) ;
      m_z = (Real_t*)malloc(size*sizeof(Real_t)) ;

      m_xd = (Real_t*)calloc(size, sizeof(Real_t)) ;
      m_yd = (Real_t*)calloc(size, sizeof(Real_t)) ;
      m_zd = (Real_t*)calloc(size, sizeof(Real_t)) ;

      m_xdd = (Real_t*)calloc(size, sizeof(Real_t)) ;
      m_ydd = (Real_t*)calloc(size, sizeof(Real_t)) ;
      m_zdd = (Real_t*)calloc(size, sizeof(Real_t)) ;

      m_fx = (Real_t*)malloc(size*sizeof(Real_t)) ;
      m_fy = (Real_t*)malloc(size*sizeof(Real_t)) ;
      m_fz = (Real_t*)malloc(size*sizeof(Real_t)) ;

      m_nodalMass = (Real_t*)calloc(size, sizeof(Real_t)) ;
   }

   /* Allocate element-centered persistent arrays for `size` elements.
      Energy and pressure start at zero; relative volume starts at 1.0;
      m_nodelist holds 8 node ids per hexahedral element. */
   void AllocateElemPersistent(size_t size)
   {
      Index_t i;
      m_matElemlist = (Index_t*)malloc(size*sizeof(Index_t)) ;
      m_nodelist = (Index_t*)malloc(8*size*sizeof(Index_t)) ;

      m_lxim = (Index_t*)malloc(size*sizeof(Index_t)) ;
      m_lxip = (Index_t*)malloc(size*sizeof(Index_t)) ;
      m_letam = (Index_t*)malloc(size*sizeof(Index_t)) ;
      m_letap = (Index_t*)malloc(size*sizeof(Index_t)) ;
      m_lzetam = (Index_t*)malloc(size*sizeof(Index_t)) ;
      m_lzetap = (Index_t*)malloc(size*sizeof(Index_t)) ;

      m_elemBC = (Int_t*)malloc(size*sizeof(Int_t)) ;

      m_e = (Real_t*)calloc(size, sizeof(Real_t)) ;
      m_p = (Real_t*)calloc(size, sizeof(Real_t)) ;
      m_q = (Real_t*)malloc(size*sizeof(Real_t)) ;
      m_ql = (Real_t*)malloc(size*sizeof(Real_t)) ;
      m_qq = (Real_t*)malloc(size*sizeof(Real_t)) ;

      m_v = (Real_t*)malloc(size*sizeof(Real_t)) ;
      for (i=0;i<size;++i) {
         m_v[i] = 1.0;
      }
      m_volo = (Real_t*)malloc(size*sizeof(Real_t)) ;
      m_delv = (Real_t*)malloc(size*sizeof(Real_t)) ;
      m_vdov = (Real_t*)malloc(size*sizeof(Real_t)) ;

      m_arealg = (Real_t*)malloc(size*sizeof(Real_t)) ;

      m_ss = (Real_t*)malloc(size*sizeof(Real_t)) ;

      m_elemMass = (Real_t*)malloc(size*sizeof(Real_t)) ;
   }

   /* Temporaries should not be initialized in bulk but */
   /* this is a runnable placeholder for now */
   void AllocateElemTemporary(size_t size)
   {
      m_dxx = (Real_t*)malloc(size*sizeof(Real_t)) ;
      m_dyy = (Real_t*)malloc(size*sizeof(Real_t)) ;
      m_dzz = (Real_t*)malloc(size*sizeof(Real_t)) ;

      m_delv_xi = (Real_t*)malloc(size*sizeof(Real_t)) ;
      m_delv_eta = (Real_t*)malloc(size*sizeof(Real_t)) ;
      m_delv_zeta = (Real_t*)malloc(size*sizeof(Real_t)) ;

      m_delx_xi = (Real_t*)malloc(size*sizeof(Real_t)) ;
      m_delx_eta = (Real_t*)malloc(size*sizeof(Real_t)) ;
      m_delx_zeta = (Real_t*)malloc(size*sizeof(Real_t)) ;

      m_vnew = (Real_t*)malloc(size*sizeof(Real_t)) ;
   }

   /* Allocate the three symmetry-plane nodesets (`size` nodes each). */
   void AllocateNodesets(size_t size)
   {
#pragma aspen declare param(size:(edgeElems+1)*(edgeElems+1))
      m_symmX = (Index_t*)malloc(size*sizeof(Index_t)) ;
      m_symmY = (Index_t*)malloc(size*sizeof(Index_t)) ;
      m_symmZ = (Index_t*)malloc(size*sizeof(Index_t)) ;
   }

   /* Build the node-to-element-corner indexing (CSR-style): per-node
      counts, per-node start offsets, and the flat corner list.  Also sets
      m_nCorner and sanity-checks every corner entry. */
   void AllocateNodeElemIndexes()
   {
      Index_t m, i, j;
      Index_t clSize;
#if LULESH_PRINT_SIZE
      printf("T_NUMNODE\t%d\n", m_numNode);
#endif
#if LULESH_CHECK_SIZE
      if (m_numNode != T_NUMNODE) {
         printf("T_NUMNODE should be %d\n", m_numNode);
         exit(1);
      }
#endif
      /* set up node-centered indexing of elements */
      m_nodeElemCount = (Index_t*)malloc(m_numNode*sizeof(Index_t)) ;
      for (i=0;i<m_numNode;++i) {
         m_nodeElemCount[i]=0;
      }
      /* first pass: count how many elements touch each node */
      for (i=0; i<m_numElem; ++i) {
         Index_t *nl = &m_nodelist[8*i] ;
         for (j=0; j < 8; ++j) {
            ++m_nodeElemCount[nl[j]];
         }
      }
      /* prefix sum gives each node's slice into the corner list */
      m_nodeElemStart = (Index_t*)malloc(m_numNode*sizeof(Index_t)) ;
      m_nodeElemStart[0]=0;
      for (i=1; i < m_numNode; ++i) {
         m_nodeElemStart[i] = m_nodeElemStart[i-1] + m_nodeElemCount[i-1] ;
      }
      m_nCorner = m_nodeElemStart[m_numNode-1] + m_nodeElemCount[m_numNode-1];
//      m_nodeElemList.resize(nodeElemStart(m_numNode-1) +
//                            nodeElemCount(m_numNode-1));
#if LULESH_PRINT_SIZE
      printf("T_NODEELEMCORNERLIST\t%d\n",
             (m_nodeElemStart[m_numNode-1]+m_nodeElemCount[m_numNode-1]));
#endif
#if LULESH_CHECK_SIZE
      if ((m_nodeElemStart[m_numNode-1]+m_nodeElemCount[m_numNode-1])!= T_NODEELEMCORNERLIST) {
         printf("T_NODEELEMCORNERLIST should be %d\n",
                (m_nodeElemStart[m_numNode-1]+m_nodeElemCount[m_numNode-1]));
         exit(1);
      }
#endif
#pragma aspen declare data(m_nodeElemCornerList:traits(Array(m_nCorner,aspen_param_int)))
      m_nodeElemCornerList =
         (Index_t*)malloc((m_nodeElemStart[m_numNode-1]+m_nodeElemCount[m_numNode-1])*sizeof(Index_t)) ;
      /* second pass: reuse the counts as fill cursors and record, for each
         node, the flat corner index (element*8 + local node) */
      for (i=0; i < m_numNode; ++i) {
         m_nodeElemCount[i]=0;
      }
      for (i=0; i < m_numElem; ++i) {
         Index_t *nl = &m_nodelist[8*i] ;
         for (j=0; j < 8; ++j) {
            Index_t m = nl[j];
            Index_t k = i*8 + j ;
            Index_t offset = m_nodeElemStart[m]+m_nodeElemCount[m] ;
//            nodeElemList(offset) = i;
            m_nodeElemCornerList[offset] = k;
            ++m_nodeElemCount[m];
         }
      }
      clSize = (m_nodeElemStart[m_numNode-1]+m_nodeElemCount[m_numNode-1]);
#pragma aspen control ignore
      for (i=0; i < clSize; ++i) {
         Index_t clv = m_nodeElemCornerList[i] ;
         if ((clv < 0) || (clv > m_numElem*8)) {
            fprintf(stderr,
                    "AllocateNodeElemIndexes(): nodeElemCornerList entry out of range!\n");
            exit(1);
         }
      }
   }

//} domain ;

/* Heap-allocate an uninitialized Real_t array of `size` entries. */
Real_t *Allocate(size_t size)
{
   return (Real_t *)(malloc(sizeof(Real_t)*size)) ;
}

/* Free *ptr if non-NULL and null it out so repeated releases are safe. */
void Release(Real_t **ptr)
{
#pragma aspen control probability(1)
   if (*ptr != NULL) {
      free(*ptr) ;
      *ptr = NULL ;
   }
}

/* Allocate the per-element-corner scratch arrays (8 corners/element) used
   by IntegrateStressForElems() and the hourglass-control routines. */
void AllocateTemporary(size_t numElem8)
{
#if LULESH_PRINT_SIZE
   printf("T_NUMELEM8\t%d\n", numElem8);
#endif
#if LULESH_CHECK_SIZE
   if (numElem8 != T_NUMELEM8) {
      printf("T_NUMELEM8 should be %d\n", numElem8);
      exit(1);
   }
#endif
   fx_elem = Allocate(numElem8) ;
   fy_elem = Allocate(numElem8) ;
   fz_elem = Allocate(numElem8) ;
   dvdx = Allocate(numElem8) ;
   dvdy = Allocate(numElem8) ;
   dvdz = Allocate(numElem8) ;
   x8n =
Allocate(numElem8) ;
   y8n = Allocate(numElem8) ;
   z8n = Allocate(numElem8) ;
}

/* Allocate the per-element scratch arrays (all of length `length`) shared
   by EvalEOSForElems(), ApplyMaterialPropertiesForElems() and
   CalcVolumeForceForElems(). */
void AllocateTemporary2(size_t length)
{
   e_old = Allocate(length) ;
   delvc = Allocate(length) ;
   p_old = Allocate(length) ;
   q_old = Allocate(length) ;
   compression = Allocate(length) ;
   compHalfStep = Allocate(length) ;
   qq = Allocate(length) ;
   ql = Allocate(length) ;
   work = Allocate(length) ;
   p_new = Allocate(length) ;
   e_new = Allocate(length) ;
   q_new = Allocate(length) ;
   bvc = Allocate(length) ;
   pbvc = Allocate(length) ;
   vnewc = Allocate(length) ;
   sigxx = Allocate(length) ;
   sigyy = Allocate(length) ;
   sigzz = Allocate(length) ;
   determ = Allocate(length) ;
}

/* Stuff needed for boundary conditions */
/* 2 BCs on each of 6 hexahedral faces (12 bits) */
#define XI_M        0x003
#define XI_M_SYMM   0x001
#define XI_M_FREE   0x002

#define XI_P        0x00c
#define XI_P_SYMM   0x004
#define XI_P_FREE   0x008

#define ETA_M       0x030
#define ETA_M_SYMM  0x010
#define ETA_M_FREE  0x020

#define ETA_P       0x0c0
#define ETA_P_SYMM  0x040
#define ETA_P_FREE  0x080

#define ZETA_M      0x300
#define ZETA_M_SYMM 0x100
#define ZETA_M_FREE 0x200

#define ZETA_P      0xc00
#define ZETA_P_SYMM 0x400
#define ZETA_P_FREE 0x800

/* Advance m_time by one cycle.  With a variable time step (m_dtfixed <= 0)
   the new dt is limited by the courant and hydro constraints, its growth
   ratio is clamped to [m_deltatimemultlb, m_deltatimemultub], and it never
   exceeds m_dtmax; the step is also nudged so the run does not land just
   short of m_stoptime. */
static inline
void TimeIncrement()
{
   Real_t targetdt = m_stoptime - m_time;

   //[DEBUG] Current ASPEN does not accept "<=" operator.
#pragma aspen control probability(1)
   if ((m_dtfixed <= 0.0) && (m_cycle != 0)) {
      Real_t ratio ;
      Real_t olddt = m_deltatime;

      /* This will require a reduction in parallel */
      Real_t newdt = 1.0e+20 ;
#pragma aspen control probability(1)
      if (m_dtcourant < newdt) {
         newdt = m_dtcourant / 2.0 ;
      }
#pragma aspen control probability(1)
      if (m_dthydro < newdt) {
         newdt = m_dthydro * 2.0 / 3.0 ;
      }

      ratio = newdt / olddt ;
#pragma aspen control execute flops(3:traits(dp))
      if (ratio >= 1.0) {
         if (ratio < m_deltatimemultlb) {
            newdt = olddt ;
         }
         else if (ratio > m_deltatimemultub) {
            newdt = olddt*m_deltatimemultub;
         }
      }

      if (newdt > m_dtmax) {
         newdt = m_dtmax;
      }
      m_deltatime = newdt ;
   }

   /* TRY TO PREVENT VERY SMALL SCALING ON THE NEXT CYCLE */
#pragma aspen control execute flops(6:traits(dp))
   if ((targetdt > m_deltatime) &&
       (targetdt < (4.0 * m_deltatime / 3.0)) ) {
      targetdt = 2.0 * m_deltatime / 3.0 ;
   }

#pragma aspen control ignore
   if (targetdt < m_deltatime) {
      m_deltatime = targetdt ;
   }

   m_time += m_deltatime;

   ++m_cycle;
}

/* Seed the diagonal stress terms for the hydro integration:
   sigxx = sigyy = sigzz = -(p + q) per element. */
static inline
void InitStressTermsForElems(Index_t numElem,
                             Real_t sigxx[T_NUMELEM], Real_t sigyy[T_NUMELEM],
                             Real_t sigzz[T_NUMELEM], Real_t p_p[T_NUMELEM],
                             Real_t p_q[T_NUMELEM])
{
   //
   // pull in the stresses appropriate to the hydro integration
   //
   Index_t i;
#ifdef _OPENACC
#pragma acc parallel loop present(p_p, p_q, sigxx, sigyy, sigzz)
#else
#pragma omp parallel for private(i) firstprivate(numElem)
#endif
   for (i = 0 ; i < numElem ; ++i){
      sigxx[i] = sigyy[i] = sigzz[i] =  - p_p[i] - p_q[i];
   }
}

/* Compute the shape-function derivative matrix b[3][8] and the element
   volume for one hexahedron given its 8 nodal coordinates. */
static inline
void CalcElemShapeFunctionDerivatives( const Real_t* const x,
                                       const Real_t* const y,
                                       const Real_t* const z,
                                       Real_t b[][8],
                                       Real_t* const volume )
{
  const Real_t x0 = x[0] ;   const Real_t x1 = x[1] ;
  const Real_t x2 = x[2] ;   const Real_t x3 = x[3] ;
  const Real_t x4 = x[4] ;   const Real_t x5 = x[5] ;
  const Real_t x6 = x[6] ;   const Real_t x7 = x[7] ;

  const Real_t y0 = y[0] ;   const Real_t y1 = y[1] ;
  const Real_t y2 = y[2] ;   const Real_t y3 = y[3] ;
  const Real_t y4 =
y[4] ;
  const Real_t y5 = y[5] ;
  const Real_t y6 = y[6] ;   const Real_t y7 = y[7] ;

  const Real_t z0 = z[0] ;   const Real_t z1 = z[1] ;
  const Real_t z2 = z[2] ;   const Real_t z3 = z[3] ;
  const Real_t z4 = z[4] ;   const Real_t z5 = z[5] ;
  const Real_t z6 = z[6] ;   const Real_t z7 = z[7] ;

  Real_t fjxxi, fjxet, fjxze;
  Real_t fjyxi, fjyet, fjyze;
  Real_t fjzxi, fjzet, fjzze;
  Real_t cjxxi, cjxet, cjxze;
  Real_t cjyxi, cjyet, cjyze;
  Real_t cjzxi, cjzet, cjzze;

  /* Jacobian entries: signed averages of nodal coordinate differences
     along each natural coordinate (xi, eta, zeta). */
  fjxxi = .125 * ( (x6-x0) + (x5-x3) - (x7-x1) - (x4-x2) );
  fjxet = .125 * ( (x6-x0) - (x5-x3) + (x7-x1) - (x4-x2) );
  fjxze = .125 * ( (x6-x0) + (x5-x3) + (x7-x1) + (x4-x2) );

  fjyxi = .125 * ( (y6-y0) + (y5-y3) - (y7-y1) - (y4-y2) );
  fjyet = .125 * ( (y6-y0) - (y5-y3) + (y7-y1) - (y4-y2) );
  fjyze = .125 * ( (y6-y0) + (y5-y3) + (y7-y1) + (y4-y2) );

  fjzxi = .125 * ( (z6-z0) + (z5-z3) - (z7-z1) - (z4-z2) );
  fjzet = .125 * ( (z6-z0) - (z5-z3) + (z7-z1) - (z4-z2) );
  fjzze = .125 * ( (z6-z0) + (z5-z3) + (z7-z1) + (z4-z2) );

  /* compute cofactors */
  cjxxi =    (fjyet * fjzze) - (fjzet * fjyze);
  cjxet =  - (fjyxi * fjzze) + (fjzxi * fjyze);
  cjxze =    (fjyxi * fjzet) - (fjzxi * fjyet);

  cjyxi =  - (fjxet * fjzze) + (fjzet * fjxze);
  cjyet =    (fjxxi * fjzze) - (fjzxi * fjxze);
  cjyze =  - (fjxxi * fjzet) + (fjzxi * fjxet);

  cjzxi =    (fjxet * fjyze) - (fjyet * fjxze);
  cjzet =  - (fjxxi * fjyze) + (fjyxi * fjxze);
  cjzze =    (fjxxi * fjyet) - (fjyxi * fjxet);

  /* calculate partials :
     this need only be done for l = 0,1,2,3   since , by symmetry ,
     (6,7,4,5) = - (0,1,2,3) .
pfy[8],
                         Real_t pfz[8],
                         const Real_t x[8],
                         const Real_t y[8],
                         const Real_t z[8])
{
   Index_t i;
   /* Start from zero, then accumulate each of the six face normals into
      the four nodes that bound that face. */
   for (i = 0 ; i < 8 ; ++i) {
      pfx[i] = 0.0;
      pfy[i] = 0.0;
      pfz[i] = 0.0;
   }
   /* evaluate face one: nodes 0, 1, 2, 3 */
   SumElemFaceNormal(&pfx[0], &pfy[0], &pfz[0],
                     &pfx[1], &pfy[1], &pfz[1],
                     &pfx[2], &pfy[2], &pfz[2],
                     &pfx[3], &pfy[3], &pfz[3],
                     x[0], y[0], z[0], x[1], y[1], z[1],
                     x[2], y[2], z[2], x[3], y[3], z[3]);
   /* evaluate face two: nodes 0, 4, 5, 1 */
   SumElemFaceNormal(&pfx[0], &pfy[0], &pfz[0],
                     &pfx[4], &pfy[4], &pfz[4],
                     &pfx[5], &pfy[5], &pfz[5],
                     &pfx[1], &pfy[1], &pfz[1],
                     x[0], y[0], z[0], x[4], y[4], z[4],
                     x[5], y[5], z[5], x[1], y[1], z[1]);
   /* evaluate face three: nodes 1, 5, 6, 2 */
   SumElemFaceNormal(&pfx[1], &pfy[1], &pfz[1],
                     &pfx[5], &pfy[5], &pfz[5],
                     &pfx[6], &pfy[6], &pfz[6],
                     &pfx[2], &pfy[2], &pfz[2],
                     x[1], y[1], z[1], x[5], y[5], z[5],
                     x[6], y[6], z[6], x[2], y[2], z[2]);
   /* evaluate face four: nodes 2, 6, 7, 3 */
   SumElemFaceNormal(&pfx[2], &pfy[2], &pfz[2],
                     &pfx[6], &pfy[6], &pfz[6],
                     &pfx[7], &pfy[7], &pfz[7],
                     &pfx[3], &pfy[3], &pfz[3],
                     x[2], y[2], z[2], x[6], y[6], z[6],
                     x[7], y[7], z[7], x[3], y[3], z[3]);
   /* evaluate face five: nodes 3, 7, 4, 0 */
   SumElemFaceNormal(&pfx[3], &pfy[3], &pfz[3],
                     &pfx[7], &pfy[7], &pfz[7],
                     &pfx[4], &pfy[4], &pfz[4],
                     &pfx[0], &pfy[0], &pfz[0],
                     x[3], y[3], z[3], x[7], y[7], z[7],
                     x[4], y[4], z[4], x[0], y[0], z[0]);
   /* evaluate face six: nodes 4, 7, 6, 5 */
   SumElemFaceNormal(&pfx[4], &pfy[4], &pfz[4],
                     &pfx[7], &pfy[7], &pfz[7],
                     &pfx[6], &pfy[6], &pfz[6],
                     &pfx[5], &pfy[5], &pfz[5],
                     x[4], y[4], z[4], x[7], y[7], z[7],
                     x[6], y[6], z[6], x[5], y[5], z[5]);
}

/* Convert one element's diagonal stresses into the 8 nodal force
   contributions: f = -stress * B (shape-function partials). */
static inline
void SumElemStressesToNodeForces( const Real_t B[][8],
                                  const Real_t stress_xx,
                                  const Real_t stress_yy,
                                  const Real_t stress_zz,
                                  Real_t* const fx,
                                  Real_t* const fy,
                                  Real_t* const fz )
{
  Real_t pfx0 = B[0][0] ;
  Real_t pfx1 = B[0][1] ;
  Real_t pfx2 = B[0][2] ;
  Real_t pfx3 = B[0][3] ;
  Real_t pfx4 = B[0][4] ;
  Real_t pfx5 = B[0][5] ;
  Real_t pfx6 = B[0][6] ;
  Real_t pfx7 =
B[0][7] ;

  Real_t pfy0 = B[1][0] ;
  Real_t pfy1 = B[1][1] ;
  Real_t pfy2 = B[1][2] ;
  Real_t pfy3 = B[1][3] ;
  Real_t pfy4 = B[1][4] ;
  Real_t pfy5 = B[1][5] ;
  Real_t pfy6 = B[1][6] ;
  Real_t pfy7 = B[1][7] ;

  Real_t pfz0 = B[2][0] ;
  Real_t pfz1 = B[2][1] ;
  Real_t pfz2 = B[2][2] ;
  Real_t pfz3 = B[2][3] ;
  Real_t pfz4 = B[2][4] ;
  Real_t pfz5 = B[2][5] ;
  Real_t pfz6 = B[2][6] ;
  Real_t pfz7 = B[2][7] ;

  fx[0] = -( stress_xx * pfx0 );
  fx[1] = -( stress_xx * pfx1 );
  fx[2] = -( stress_xx * pfx2 );
  fx[3] = -( stress_xx * pfx3 );
  fx[4] = -( stress_xx * pfx4 );
  fx[5] = -( stress_xx * pfx5 );
  fx[6] = -( stress_xx * pfx6 );
  fx[7] = -( stress_xx * pfx7 );

  fy[0] = -( stress_yy * pfy0 );
  fy[1] = -( stress_yy * pfy1 );
  fy[2] = -( stress_yy * pfy2 );
  fy[3] = -( stress_yy * pfy3 );
  fy[4] = -( stress_yy * pfy4 );
  fy[5] = -( stress_yy * pfy5 );
  fy[6] = -( stress_yy * pfy6 );
  fy[7] = -( stress_yy * pfy7 );

  fz[0] = -( stress_zz * pfz0 );
  fz[1] = -( stress_zz * pfz1 );
  fz[2] = -( stress_zz * pfz2 );
  fz[3] = -( stress_zz * pfz3 );
  fz[4] = -( stress_zz * pfz4 );
  fz[5] = -( stress_zz * pfz5 );
  fz[6] = -( stress_zz * pfz6 );
  fz[7] = -( stress_zz * pfz7 );
}

/* Integrate the element stresses into nodal forces using the
   element-corner scratch arrays fx_elem/fy_elem/fz_elem. */
static inline
void IntegrateStressForElems( Index_t numElem,
                              Real_t sigxx[T_NUMELEM], Real_t sigyy[T_NUMELEM],
                              Real_t sigzz[T_NUMELEM], Real_t determ[T_NUMELEM],
                              Index_t p_nodelist[T_NUMELEM8],
                              Real_t p_x[T_NUMNODE], Real_t p_y[T_NUMNODE],
                              Real_t p_z[T_NUMNODE],
                              Index_t p_nodeElemCount[T_NUMNODE],
                              Index_t p_nodeElemStart[T_NUMNODE],
                              Index_t p_nodeElemCornerList[T_NODEELEMCORNERLIST],
                              Real_t p_fx[T_NUMNODE], Real_t p_fy[T_NUMNODE],
                              Real_t p_fz[T_NUMNODE])
{
  Index_t k, lnode, gnode, i;

/*
   Real_t *fx_elem = Allocate(numElem8) ;
   Real_t *fy_elem = Allocate(numElem8) ;
   Real_t *fz_elem = Allocate(numElem8) ;
*/

  // loop over all elements
#ifdef _OPENACC
#pragma acc parallel loop independent present(p_x, p_y, p_z, determ, p_nodelist, sigxx,\
sigyy, sigzz, fx_elem, fy_elem, fz_elem)
#else
#pragma omp parallel for private(k) firstprivate(numElem)
#endif
  for(
k=0 ; k<numElem ; ++k ) { Real_t B[3][8] ;// shape function derivatives Real_t x_local[8] ; Real_t y_local[8] ; Real_t z_local[8] ; #pragma aspen declare data(elemNodes:traits(Array(8,aspen_param_int))) const Index_t* const elemNodes = &p_nodelist[8*k]; // get nodal coordinates from global arrays and copy into local arrays. for( lnode=0 ; lnode<8 ; ++lnode ) { Index_t gnode = elemNodes[lnode]; x_local[lnode] = p_x[gnode]; y_local[lnode] = p_y[gnode]; z_local[lnode] = p_z[gnode]; } /* Volume calculation involves extra work for numerical consistency. */ CalcElemShapeFunctionDerivatives(x_local, y_local, z_local, B, &determ[k]); CalcElemNodeNormals( B[0] , B[1], B[2], x_local, y_local, z_local ); SumElemStressesToNodeForces( B, sigxx[k], sigyy[k], sigzz[k], &fx_elem[k*8], &fy_elem[k*8], &fz_elem[k*8] ) ; #if 0 // copy nodal force contributions to global force arrray. for( lnode=0 ; lnode<8 ; ++lnode ) { Index_t gnode = elemNodes[lnode]; p_fx(gnode) += fx_local[lnode]; p_fy(gnode) += fy_local[lnode]; p_fz(gnode) += fz_local[lnode]; } #endif } { Index_t numNode = m_numNode; #ifdef _OPENACC #pragma acc kernels loop independent present(fx_elem, fy_elem, fz_elem, \ p_fx, p_fy, p_fz, p_nodeElemCount, p_nodeElemCornerList, \ p_nodeElemStart) #else #pragma omp parallel for private(gnode) firstprivate(numNode) #endif for( gnode=0 ; gnode<numNode ; ++gnode ) { Index_t count = p_nodeElemCount[gnode]; Index_t start = p_nodeElemStart[gnode]; Real_t fx = 0.0 ; Real_t fy = 0.0 ; Real_t fz = 0.0 ; #pragma aspen declare param(aspen_param_elemCount:1) #pragma aspen control loop(aspen_param_elemCount) for (i=0 ; i < count ; ++i) { Index_t elem = p_nodeElemCornerList[start+i]; fx += fx_elem[elem] ; fy += fy_elem[elem] ; fz += fz_elem[elem] ; } p_fx[gnode] = fx ; p_fy[gnode] = fy ; p_fz[gnode] = fz ; } } /* Release(&fz_elem) ; Release(&fy_elem) ; Release(&fx_elem) ; */ } static inline void CollectDomainNodesToElemNodes(const Index_t* elemToNode, Real_t elemX[8], Real_t elemY[8], Real_t 
elemZ[8])
{
   Index_t nd0i = elemToNode[0] ;
   Index_t nd1i = elemToNode[1] ;
   Index_t nd2i = elemToNode[2] ;
   Index_t nd3i = elemToNode[3] ;
   Index_t nd4i = elemToNode[4] ;
   Index_t nd5i = elemToNode[5] ;
   Index_t nd6i = elemToNode[6] ;
   Index_t nd7i = elemToNode[7] ;

   elemX[0] = m_x[nd0i];
   elemX[1] = m_x[nd1i];
   elemX[2] = m_x[nd2i];
   elemX[3] = m_x[nd3i];
   elemX[4] = m_x[nd4i];
   elemX[5] = m_x[nd5i];
   elemX[6] = m_x[nd6i];
   elemX[7] = m_x[nd7i];

   elemY[0] = m_y[nd0i];
   elemY[1] = m_y[nd1i];
   elemY[2] = m_y[nd2i];
   elemY[3] = m_y[nd3i];
   elemY[4] = m_y[nd4i];
   elemY[5] = m_y[nd5i];
   elemY[6] = m_y[nd6i];
   elemY[7] = m_y[nd7i];

   elemZ[0] = m_z[nd0i];
   elemZ[1] = m_z[nd1i];
   elemZ[2] = m_z[nd2i];
   elemZ[3] = m_z[nd3i];
   elemZ[4] = m_z[nd4i];
   elemZ[5] = m_z[nd5i];
   elemZ[6] = m_z[nd6i];
   elemZ[7] = m_z[nd7i];
}

/* Partial derivative of the hexahedron volume with respect to the position
   of one node, given the six neighboring node coordinates that appear in
   that derivative.  The 1/12 factor matches the triple-product volume
   formula used in CalcElemVolumeI. */
static inline
void VoluDer(const Real_t x0, const Real_t x1, const Real_t x2,
             const Real_t x3, const Real_t x4, const Real_t x5,
             const Real_t y0, const Real_t y1, const Real_t y2,
             const Real_t y3, const Real_t y4, const Real_t y5,
             const Real_t z0, const Real_t z1, const Real_t z2,
             const Real_t z3, const Real_t z4, const Real_t z5,
             Real_t* dvdx, Real_t* dvdy, Real_t* dvdz)
{
   const Real_t twelfth = 1.0 / 12.0 ;

   *dvdx =
      (y1 + y2) * (z0 + z1) - (y0 + y1) * (z1 + z2) +
      (y0 + y4) * (z3 + z4) - (y3 + y4) * (z0 + z4) -
      (y2 + y5) * (z3 + z5) + (y3 + y5) * (z2 + z5);
   *dvdy =
      - (x1 + x2) * (z0 + z1) + (x0 + x1) * (z1 + z2) -
      (x0 + x4) * (z3 + z4) + (x3 + x4) * (z0 + z4) +
      (x2 + x5) * (z3 + z5) - (x3 + x5) * (z2 + z5);
   *dvdz =
      - (y1 + y2) * (x0 + x1) + (y0 + y1) * (x1 + x2) -
      (y0 + y4) * (x3 + x4) + (y3 + y4) * (x0 + x4) +
      (y2 + y5) * (x3 + x5) - (y3 + y5) * (x2 + x5);

   *dvdx *= twelfth;
   *dvdy *= twelfth;
   *dvdz *= twelfth;
}

/* Volume derivatives for all eight nodes of one element: one VoluDer call
   per node, each with that node's six relevant neighbors (note the output
   order 0,3,2,1,4,5,6,7 -- continues on the next source line). */
static inline
void CalcElemVolumeDerivative(Real_t dvdx[8],
                              Real_t dvdy[8],
                              Real_t dvdz[8],
                              const Real_t x[8],
                              const Real_t y[8],
                              const Real_t z[8])
{
   VoluDer(x[1], x[2], x[3], x[4], x[5], x[7],
           y[1], y[2], y[3], y[4], y[5], y[7],
           z[1], z[2], z[3], z[4], z[5], z[7],
           &dvdx[0],
&dvdy[0], &dvdz[0]);
   VoluDer(x[0], x[1], x[2], x[7], x[4], x[6],
           y[0], y[1], y[2], y[7], y[4], y[6],
           z[0], z[1], z[2], z[7], z[4], z[6],
           &dvdx[3], &dvdy[3], &dvdz[3]);
   VoluDer(x[3], x[0], x[1], x[6], x[7], x[5],
           y[3], y[0], y[1], y[6], y[7], y[5],
           z[3], z[0], z[1], z[6], z[7], z[5],
           &dvdx[2], &dvdy[2], &dvdz[2]);
   VoluDer(x[2], x[3], x[0], x[5], x[6], x[4],
           y[2], y[3], y[0], y[5], y[6], y[4],
           z[2], z[3], z[0], z[5], z[6], z[4],
           &dvdx[1], &dvdy[1], &dvdz[1]);
   VoluDer(x[7], x[6], x[5], x[0], x[3], x[1],
           y[7], y[6], y[5], y[0], y[3], y[1],
           z[7], z[6], z[5], z[0], z[3], z[1],
           &dvdx[4], &dvdy[4], &dvdz[4]);
   VoluDer(x[4], x[7], x[6], x[1], x[0], x[2],
           y[4], y[7], y[6], y[1], y[0], y[2],
           z[4], z[7], z[6], z[1], z[0], z[2],
           &dvdx[5], &dvdy[5], &dvdz[5]);
   VoluDer(x[5], x[4], x[7], x[2], x[1], x[3],
           y[5], y[4], y[7], y[2], y[1], y[3],
           z[5], z[4], z[7], z[2], z[1], z[3],
           &dvdx[6], &dvdy[6], &dvdz[6]);
   VoluDer(x[6], x[5], x[4], x[3], x[2], x[0],
           y[6], y[5], y[4], y[3], y[2], y[0],
           z[6], z[5], z[4], z[3], z[2], z[0],
           &dvdx[7], &dvdy[7], &dvdz[7]);
}

/* Hourglass force for one element: project the nodal velocities xd/yd/zd
   onto the four hourglass base vectors hourgam0..7 (h00..h03 below), then
   spread the projections back to the eight nodes scaled by `coefficient`.
   The x, y and z passes are identical; h00..h03 are reused between them. */
static inline
void CalcElemFBHourglassForce(Real_t *xd, Real_t *yd, Real_t *zd,
                              Real_t *hourgam0, Real_t *hourgam1,
                              Real_t *hourgam2, Real_t *hourgam3,
                              Real_t *hourgam4, Real_t *hourgam5,
                              Real_t *hourgam6, Real_t *hourgam7,
                              Real_t coefficient,
                              Real_t *hgfx, Real_t *hgfy, Real_t *hgfz )
{
   Index_t i00=0;
   Index_t i01=1;
   Index_t i02=2;
   Index_t i03=3;

   /* projections of the x-velocity onto the four hourglass modes */
   Real_t h00 =
      hourgam0[i00] * xd[0] + hourgam1[i00] * xd[1] +
      hourgam2[i00] * xd[2] + hourgam3[i00] * xd[3] +
      hourgam4[i00] * xd[4] + hourgam5[i00] * xd[5] +
      hourgam6[i00] * xd[6] + hourgam7[i00] * xd[7];

   Real_t h01 =
      hourgam0[i01] * xd[0] + hourgam1[i01] * xd[1] +
      hourgam2[i01] * xd[2] + hourgam3[i01] * xd[3] +
      hourgam4[i01] * xd[4] + hourgam5[i01] * xd[5] +
      hourgam6[i01] * xd[6] + hourgam7[i01] * xd[7];

   Real_t h02 =
      hourgam0[i02] * xd[0] + hourgam1[i02] * xd[1] +
      hourgam2[i02] * xd[2] + hourgam3[i02] * xd[3] +
      hourgam4[i02] * xd[4] + hourgam5[i02] * xd[5] +
      hourgam6[i02] * xd[6] + hourgam7[i02] * xd[7];

   Real_t h03 =
      hourgam0[i03] * xd[0] + hourgam1[i03] * xd[1] +
      hourgam2[i03] * xd[2] + hourgam3[i03] * xd[3] +
      hourgam4[i03] * xd[4] + hourgam5[i03] * xd[5] +
      hourgam6[i03] * xd[6] + hourgam7[i03] * xd[7];

   hgfx[0] = coefficient *
             (hourgam0[i00] * h00 + hourgam0[i01] * h01 +
              hourgam0[i02] * h02 + hourgam0[i03] * h03);
   hgfx[1] = coefficient *
             (hourgam1[i00] * h00 + hourgam1[i01] * h01 +
              hourgam1[i02] * h02 + hourgam1[i03] * h03);
   hgfx[2] = coefficient *
             (hourgam2[i00] * h00 + hourgam2[i01] * h01 +
              hourgam2[i02] * h02 + hourgam2[i03] * h03);
   hgfx[3] = coefficient *
             (hourgam3[i00] * h00 + hourgam3[i01] * h01 +
              hourgam3[i02] * h02 + hourgam3[i03] * h03);
   hgfx[4] = coefficient *
             (hourgam4[i00] * h00 + hourgam4[i01] * h01 +
              hourgam4[i02] * h02 + hourgam4[i03] * h03);
   hgfx[5] = coefficient *
             (hourgam5[i00] * h00 + hourgam5[i01] * h01 +
              hourgam5[i02] * h02 + hourgam5[i03] * h03);
   hgfx[6] = coefficient *
             (hourgam6[i00] * h00 + hourgam6[i01] * h01 +
              hourgam6[i02] * h02 + hourgam6[i03] * h03);
   hgfx[7] = coefficient *
             (hourgam7[i00] * h00 + hourgam7[i01] * h01 +
              hourgam7[i02] * h02 + hourgam7[i03] * h03);

   /* same projections for the y-velocity */
   h00 =
      hourgam0[i00] * yd[0] + hourgam1[i00] * yd[1] +
      hourgam2[i00] * yd[2] + hourgam3[i00] * yd[3] +
      hourgam4[i00] * yd[4] + hourgam5[i00] * yd[5] +
      hourgam6[i00] * yd[6] + hourgam7[i00] * yd[7];

   h01 =
      hourgam0[i01] * yd[0] + hourgam1[i01] * yd[1] +
      hourgam2[i01] * yd[2] + hourgam3[i01] * yd[3] +
      hourgam4[i01] * yd[4] + hourgam5[i01] * yd[5] +
      hourgam6[i01] * yd[6] + hourgam7[i01] * yd[7];

   h02 =
      hourgam0[i02] * yd[0] + hourgam1[i02] * yd[1] +
      hourgam2[i02] * yd[2] + hourgam3[i02] * yd[3] +
      hourgam4[i02] * yd[4] + hourgam5[i02] * yd[5] +
      hourgam6[i02] * yd[6] + hourgam7[i02] * yd[7];

   h03 =
      hourgam0[i03] * yd[0] + hourgam1[i03] * yd[1] +
      hourgam2[i03] * yd[2] + hourgam3[i03] * yd[3] +
      hourgam4[i03] * yd[4] + hourgam5[i03] * yd[5] +
      hourgam6[i03] * yd[6] + hourgam7[i03] * yd[7];

   hgfy[0] = coefficient *
             (hourgam0[i00] * h00 + hourgam0[i01] * h01 +
              hourgam0[i02] * h02 + hourgam0[i03] * h03);
   hgfy[1] = coefficient *
             (hourgam1[i00] * h00 + hourgam1[i01] * h01 +
              hourgam1[i02] * h02 + hourgam1[i03] * h03);
   hgfy[2] = coefficient *
             (hourgam2[i00] * h00 + hourgam2[i01] * h01 +
              hourgam2[i02] * h02 + hourgam2[i03] * h03);
   hgfy[3] = coefficient *
             (hourgam3[i00] * h00 + hourgam3[i01] * h01 +
              hourgam3[i02] * h02 + hourgam3[i03] * h03);
   hgfy[4] = coefficient *
             (hourgam4[i00] * h00 + hourgam4[i01] * h01 +
              hourgam4[i02] * h02 + hourgam4[i03] * h03);
   hgfy[5] = coefficient *
             (hourgam5[i00] * h00 + hourgam5[i01] * h01 +
              hourgam5[i02] * h02 + hourgam5[i03] * h03);
   hgfy[6] = coefficient *
             (hourgam6[i00] * h00 + hourgam6[i01] * h01 +
              hourgam6[i02] * h02 + hourgam6[i03] * h03);
   hgfy[7] = coefficient *
             (hourgam7[i00] * h00 + hourgam7[i01] * h01 +
              hourgam7[i02] * h02 + hourgam7[i03] * h03);

   /* same projections for the z-velocity */
   h00 =
      hourgam0[i00] * zd[0] + hourgam1[i00] * zd[1] +
      hourgam2[i00] * zd[2] + hourgam3[i00] * zd[3] +
      hourgam4[i00] * zd[4] + hourgam5[i00] * zd[5] +
      hourgam6[i00] * zd[6] + hourgam7[i00] * zd[7];

   h01 =
      hourgam0[i01] * zd[0] + hourgam1[i01] * zd[1] +
      hourgam2[i01] * zd[2] + hourgam3[i01] * zd[3] +
      hourgam4[i01] * zd[4] + hourgam5[i01] * zd[5] +
      hourgam6[i01] * zd[6] + hourgam7[i01] * zd[7];

   h02 =
      hourgam0[i02] * zd[0] + hourgam1[i02] * zd[1] +
      hourgam2[i02] * zd[2] + hourgam3[i02] * zd[3] +
      hourgam4[i02] * zd[4] + hourgam5[i02] * zd[5] +
      hourgam6[i02] * zd[6] + hourgam7[i02] * zd[7];

   h03 =
      hourgam0[i03] * zd[0] + hourgam1[i03] * zd[1] +
      hourgam2[i03] * zd[2] + hourgam3[i03] * zd[3] +
      hourgam4[i03] * zd[4] + hourgam5[i03] * zd[5] +
      hourgam6[i03] * zd[6] + hourgam7[i03] * zd[7];

   hgfz[0] = coefficient *
             (hourgam0[i00] * h00 + hourgam0[i01] * h01 +
              hourgam0[i02] * h02 + hourgam0[i03] * h03);
   hgfz[1] = coefficient *
             (hourgam1[i00] * h00 + hourgam1[i01] * h01 +
              hourgam1[i02] * h02 + hourgam1[i03] * h03);
   hgfz[2] = coefficient *
             (hourgam2[i00] * h00 + hourgam2[i01] * h01 +
              hourgam2[i02] * h02 + hourgam2[i03] * h03);
   hgfz[3] = coefficient *
             (hourgam3[i00] * h00 +
              hourgam3[i01] * h01 +
              hourgam3[i02] * h02 +
              hourgam3[i03] * h03);
   hgfz[4] = coefficient *
             (hourgam4[i00] * h00 + hourgam4[i01] * h01 +
              hourgam4[i02] * h02 + hourgam4[i03] * h03);
   hgfz[5] = coefficient *
             (hourgam5[i00] * h00 + hourgam5[i01] * h01 +
              hourgam5[i02] * h02 + hourgam5[i03] * h03);
   hgfz[6] = coefficient *
             (hourgam6[i00] * h00 + hourgam6[i01] * h01 +
              hourgam6[i02] * h02 + hourgam6[i03] * h03);
   hgfz[7] = coefficient *
             (hourgam7[i00] * h00 + hourgam7[i01] * h01 +
              hourgam7[i02] * h02 + hourgam7[i03] * h03);
}

static inline
void CalcFBHourglassForceForElems(Real_t determ[T_NUMELEM],
                                  Real_t x8n[T_NUMELEM8],
                                  Real_t y8n[T_NUMELEM8],
                                  Real_t z8n[T_NUMELEM8],
                                  Real_t dvdx[T_NUMELEM8],
                                  Real_t dvdy[T_NUMELEM8],
                                  Real_t dvdz[T_NUMELEM8],
                                  Real_t hourg,
                                  Real_t p_ss[T_NUMELEM],
                                  Index_t p_nodelist[T_NUMELEM8],
                                  Real_t p_elemMass[T_NUMELEM],
                                  Real_t p_xd[T_NUMNODE],
                                  Real_t p_yd[T_NUMNODE],
                                  Real_t p_zd[T_NUMNODE],
                                  Index_t p_nodeElemCount[T_NUMNODE],
                                  Index_t p_nodeElemStart[T_NUMNODE],
                                  Index_t p_nodeElemCornerList[T_NODEELEMCORNERLIST],
                                  Real_t p_fx[T_NUMNODE],
                                  Real_t p_fy[T_NUMNODE],
                                  Real_t p_fz[T_NUMNODE])
{
   /*************************************************
    *
    *     FUNCTION: Calculates the Flanagan-Belytschko anti-hourglass
    *               force.
    *
    *************************************************/

   Index_t i1, i2, gnode, i;
   Index_t numElem = m_numElem;
#pragma aspen declare param(numElem8:m_numElem*8)
   Index_t numElem8 = numElem * 8 ;
#if LULESH_PRINT_SIZE
   printf("T_NUMELEM8\t%d\n", numElem8);
#endif
#if LULESH_CHECK_SIZE
   /* compile-time sizing sanity check against the T_NUMELEM8 macro */
   if (numElem8 != T_NUMELEM8) {
      printf("T_NUMELEM8 should be %d\n", numElem8);
      exit(1);
   }
#endif
   /*
   Real_t *fx_elem = Allocate(numElem8) ;
   Real_t *fy_elem = Allocate(numElem8) ;
   Real_t *fz_elem = Allocate(numElem8) ;
   */

   /* the four Flanagan-Belytschko hourglass base vectors (+-1 patterns) */
   Real_t gamma[4][8];

   gamma[0][0] = 1.;
   gamma[0][1] = 1.;
   gamma[0][2] = -1.;
   gamma[0][3] = -1.;
   gamma[0][4] = -1.;
   gamma[0][5] = -1.;
   gamma[0][6] = 1.;
   gamma[0][7] = 1.;
   gamma[1][0] = 1.;
   gamma[1][1] = -1.;
   gamma[1][2] = -1.;
   gamma[1][3] = 1.;
   gamma[1][4] = -1.;
   gamma[1][5] = 1.;
   gamma[1][6] = 1.;
   gamma[1][7] = -1.;
   gamma[2][0] = 1.;
   gamma[2][1] = -1.;
   gamma[2][2] = 1.;
   gamma[2][3] = -1.;
   gamma[2][4] = 1.;
   gamma[2][5] = -1.;
   gamma[2][6] = 1.;
   gamma[2][7] = -1.;
   gamma[3][0] = -1.;
   gamma[3][1] = 1.;
   gamma[3][2] = -1.;
   gamma[3][3] = 1.;
   gamma[3][4] = 1.;
   gamma[3][5] = -1.;
   gamma[3][6] = 1.;
   gamma[3][7] = -1.;

   /*************************************************/
   /*    compute the hourglass modes */

#ifdef _OPENACC
#pragma acc kernels loop independent copyin(gamma[0:4][0:8]) present(fx_elem, fy_elem, fz_elem, \
    p_xd, p_yd, p_zd, dvdx, dvdy, dvdz, x8n, y8n, z8n, p_nodelist, determ, \
    p_ss, p_elemMass)
#else
#pragma omp parallel for private(i2, i1) firstprivate(numElem, hourg)
#endif
   for(i2=0; i2<numElem; ++i2){
      Real_t *fx_local, *fy_local, *fz_local ;
      Real_t hgfx[8], hgfy[8], hgfz[8] ;

      Real_t coefficient;

      Real_t hourgam0[4], hourgam1[4], hourgam2[4], hourgam3[4] ;
      Real_t hourgam4[4], hourgam5[4], hourgam6[4], hourgam7[4];
      Real_t xd1[8], yd1[8], zd1[8] ;

#pragma aspen declare data(elemToNode:traits(Array(8,aspen_param_int)))
      const Index_t *elemToNode = &p_nodelist[8*i2];
      Index_t i3=8*i2;
      Real_t volinv=1.0/determ[i2];
      Real_t ss1, mass1, volume13 ;
      Index_t n0si2;
      Index_t n1si2;
      Index_t n2si2;
      Index_t n3si2;
      Index_t n4si2;
      Index_t n5si2;
      Index_t n6si2;
      Index_t n7si2;

      /* orthogonalize each gamma mode against the element geometry:
         hourgamN[i1] = gamma[i1][N] - volinv * (dV/dxN . hourmod) */
      for(i1=0;i1<4;++i1){

         Real_t hourmodx =
            x8n[i3] * gamma[i1][0] + x8n[i3+1] * gamma[i1][1] +
            x8n[i3+2] * gamma[i1][2] + x8n[i3+3] * gamma[i1][3] +
            x8n[i3+4] * gamma[i1][4] + x8n[i3+5] * gamma[i1][5] +
            x8n[i3+6] * gamma[i1][6] + x8n[i3+7] * gamma[i1][7];

         Real_t hourmody =
            y8n[i3] * gamma[i1][0] + y8n[i3+1] * gamma[i1][1] +
            y8n[i3+2] * gamma[i1][2] + y8n[i3+3] * gamma[i1][3] +
            y8n[i3+4] * gamma[i1][4] + y8n[i3+5] * gamma[i1][5] +
            y8n[i3+6] * gamma[i1][6] + y8n[i3+7] * gamma[i1][7];

         Real_t hourmodz =
            z8n[i3] * gamma[i1][0] + z8n[i3+1] * gamma[i1][1] +
            z8n[i3+2] * gamma[i1][2] + z8n[i3+3] * gamma[i1][3] +
            z8n[i3+4] * gamma[i1][4] + z8n[i3+5] * gamma[i1][5] +
            z8n[i3+6] * gamma[i1][6] + z8n[i3+7] * gamma[i1][7];

         hourgam0[i1] = gamma[i1][0] - volinv*(dvdx[i3 ] * hourmodx +
                                               dvdy[i3 ] * hourmody +
                                               dvdz[i3 ] * hourmodz );

         hourgam1[i1] = gamma[i1][1] - volinv*(dvdx[i3+1] * hourmodx +
                                               dvdy[i3+1] * hourmody +
                                               dvdz[i3+1] * hourmodz );

         hourgam2[i1] = gamma[i1][2] - volinv*(dvdx[i3+2] * hourmodx +
                                               dvdy[i3+2] * hourmody +
                                               dvdz[i3+2] * hourmodz );

         hourgam3[i1] = gamma[i1][3] - volinv*(dvdx[i3+3] * hourmodx +
                                               dvdy[i3+3] * hourmody +
                                               dvdz[i3+3] * hourmodz );

         hourgam4[i1] = gamma[i1][4] - volinv*(dvdx[i3+4] * hourmodx +
                                               dvdy[i3+4] * hourmody +
                                               dvdz[i3+4] * hourmodz );

         hourgam5[i1] = gamma[i1][5] - volinv*(dvdx[i3+5] * hourmodx +
                                               dvdy[i3+5] * hourmody +
                                               dvdz[i3+5] * hourmodz );

         hourgam6[i1] = gamma[i1][6] - volinv*(dvdx[i3+6] * hourmodx +
                                               dvdy[i3+6] * hourmody +
                                               dvdz[i3+6] * hourmodz );

         hourgam7[i1] = gamma[i1][7] - volinv*(dvdx[i3+7] * hourmodx +
                                               dvdy[i3+7] * hourmody +
                                               dvdz[i3+7] * hourmodz );

      }

      /* compute forces */
      /* store forces into h arrays (force arrays) */

      ss1=p_ss[i2];
      mass1=p_elemMass[i2];
      volume13=CBRT8(determ[i2]);  /* cube root of element volume --
                                      presumably CBRT8 is a cbrt macro for
                                      Real_t; TODO confirm */

      n0si2 = elemToNode[0];
      n1si2 = elemToNode[1];
      n2si2 = elemToNode[2];
      n3si2 = elemToNode[3];
      n4si2 = elemToNode[4];
      n5si2 = elemToNode[5];
      n6si2 = elemToNode[6];
      n7si2 = elemToNode[7];

      xd1[0] = p_xd[n0si2];
      xd1[1] = p_xd[n1si2];
      xd1[2] = p_xd[n2si2];
      xd1[3] = p_xd[n3si2];
      xd1[4] = p_xd[n4si2];
      xd1[5] = p_xd[n5si2];
      xd1[6] = p_xd[n6si2];
      xd1[7] = p_xd[n7si2];

      yd1[0] = p_yd[n0si2];
      yd1[1] = p_yd[n1si2];
      yd1[2] = p_yd[n2si2];
      yd1[3] = p_yd[n3si2];
      yd1[4] = p_yd[n4si2];
      yd1[5] = p_yd[n5si2];
      yd1[6] = p_yd[n6si2];
      yd1[7] = p_yd[n7si2];

      zd1[0] = p_zd[n0si2];
      zd1[1] = p_zd[n1si2];
      zd1[2] = p_zd[n2si2];
      zd1[3] = p_zd[n3si2];
      zd1[4] = p_zd[n4si2];
      zd1[5] = p_zd[n5si2];
      zd1[6] = p_zd[n6si2];
      zd1[7] = p_zd[n7si2];

      /* damping-style coefficient: negative, scaled by sound speed, mass
         and 1/volume^(1/3) */
      coefficient = - hourg * 0.01 * ss1 * mass1 / volume13;

      CalcElemFBHourglassForce(xd1,yd1,zd1,
                               hourgam0,hourgam1,hourgam2,hourgam3,
                               hourgam4,hourgam5,hourgam6,hourgam7,
                               coefficient, hgfx, hgfy, hgfz);

      /* stage per-element forces; gathered per node in the loop below */
#pragma aspen declare data(fx_local:traits(Array(8, aspen_param_double)))
      fx_local = &fx_elem[i3] ;
      fx_local[0] = hgfx[0];
      fx_local[1] = hgfx[1];
      fx_local[2] = hgfx[2];
      fx_local[3] = hgfx[3];
      fx_local[4] = hgfx[4];
      fx_local[5] = hgfx[5];
      fx_local[6] = hgfx[6];
      fx_local[7] = hgfx[7];

#pragma aspen declare data(fy_local:traits(Array(8, aspen_param_double)))
      fy_local = &fy_elem[i3] ;
      fy_local[0] = hgfy[0];
      fy_local[1] = hgfy[1];
      fy_local[2] = hgfy[2];
      fy_local[3] = hgfy[3];
      fy_local[4] = hgfy[4];
      fy_local[5] = hgfy[5];
      fy_local[6] = hgfy[6];
      fy_local[7] = hgfy[7];

#pragma aspen declare data(fz_local:traits(Array(8, aspen_param_double)))
      fz_local = &fz_elem[i3] ;
      fz_local[0] = hgfz[0];
      fz_local[1] = hgfz[1];
      fz_local[2] = hgfz[2];
      fz_local[3] = hgfz[3];
      fz_local[4] = hgfz[4];
      fz_local[5] = hgfz[5];
      fz_local[6] = hgfz[6];
      fz_local[7] = hgfz[7];

#if 0
      p_fx[n0si2] += hgfx[0];
      p_fy[n0si2] += hgfy[0];
      p_fz[n0si2] += hgfz[0];

      p_fx[n1si2] += hgfx[1];
      p_fy[n1si2] += hgfy[1];
      p_fz[n1si2] += hgfz[1];

      p_fx[n2si2] += hgfx[2];
      p_fy[n2si2] += hgfy[2];
      p_fz[n2si2] += hgfz[2];

      p_fx[n3si2] += hgfx[3];
      p_fy[n3si2] += hgfy[3];
      p_fz[n3si2] += hgfz[3];

      p_fx[n4si2] += hgfx[4];
      p_fy[n4si2] += hgfy[4];
      p_fz[n4si2] += hgfz[4];

      p_fx[n5si2] += hgfx[5];
      p_fy[n5si2] += hgfy[5];
      p_fz[n5si2] += hgfz[5];

      p_fx[n6si2] += hgfx[6];
      p_fy[n6si2] += hgfy[6];
      p_fz[n6si2] += hgfz[6];

      p_fx[n7si2] += hgfx[7];
      p_fy[n7si2] += hgfy[7];
      p_fz[n7si2] += hgfz[7];
#endif
   }

   /* gather pass: accumulate (+=) each node's corner contributions on top
      of the stress forces already stored in p_fx/p_fy/p_fz */
   {
      Index_t numNode = m_numNode;
#ifdef _OPENACC
#pragma acc kernels loop independent present(p_nodeElemCount, p_nodeElemStart, \
    p_nodeElemCornerList, fx_elem, fy_elem, fz_elem, p_fx, p_fy, p_fz)
#else
#pragma omp parallel for private(gnode, i) firstprivate(numNode)
#endif
      for( gnode=0 ; gnode<numNode ; ++gnode )
      {
         Index_t count = p_nodeElemCount[gnode];
         Index_t start = p_nodeElemStart[gnode];
         Real_t fx = 0.0 ;
         Real_t fy = 0.0 ;
         Real_t fz = 0.0 ;
#pragma aspen declare param(aspen_param_elemCount:1)
#pragma aspen control loop(aspen_param_elemCount)
         for (i=0 ; i < count ; ++i) {
            Index_t elem = p_nodeElemCornerList[start+i];
            fx += fx_elem[elem] ;
            fy += fy_elem[elem] ;
            fz += fz_elem[elem] ;
         }
         p_fx[gnode] += fx ;
         p_fy[gnode] += fy ;
         p_fz[gnode] += fz ;
      }
   }

   /*
   Release(&fz_elem) ;
   Release(&fy_elem) ;
   Release(&fx_elem) ;
   */
}

/* Hourglass control driver: per element, compute volume derivatives and
   stash geometry into the 8-per-element scratch arrays dvdx/dvdy/dvdz and
   x8n/y8n/z8n (presumably file-scope scratch -- local Allocates are
   commented out; TODO confirm), check for negative relative volume, then
   delegate to CalcFBHourglassForceForElems when hgcoef > 0. */
static inline
void CalcHourglassControlForElems(Real_t determ[T_NUMELEM], Real_t hgcoef,
                                  Index_t p_nodelist[T_NUMELEM8],
                                  Real_t p_volo[T_NUMELEM],
                                  Real_t p_v[T_NUMELEM])
{
   Index_t i, ii;
   Index_t numElem = m_numElem;
#pragma aspen declare param(numElem8:m_numElem*8)
   Index_t numElem8 = numElem * 8 ;
#if LULESH_PRINT_SIZE
   printf("T_NUMELEM8\t%d\n", numElem8);
#endif
#if LULESH_CHECK_SIZE
   if (numElem8 != T_NUMELEM8) {
      printf("T_NUMELEM8 should be %d\n", numElem8);
      exit(1);
   }
#endif
   /*
   Real_t *dvdx = Allocate(numElem8) ;
   Real_t *dvdy = Allocate(numElem8) ;
   Real_t *dvdz = Allocate(numElem8) ;
   Real_t *x8n  = Allocate(numElem8) ;
   Real_t *y8n  = Allocate(numElem8) ;
   Real_t *z8n  = Allocate(numElem8) ;
   */

   /* start loop over elements */
   int abort = 0;  /* reduction flag: any element with non-positive volume */
#ifdef _OPENACC
#pragma acc parallel loop present(dvdx, dvdy, dvdz, x8n, y8n, z8n, \
    m_x, m_y, m_z, p_volo, p_v, determ, p_nodelist) reduction(||:abort)
#else
#pragma omp parallel for private(i, ii) firstprivate(numElem) reduction(||:abort)
#endif
   for (i=0 ; i<numElem ; ++i){
      Real_t x1[8], y1[8], z1[8] ;
      Real_t pfx[8], pfy[8], pfz[8] ;

#pragma aspen declare data(elemToNode:traits(Array(8,aspen_param_int)))
      Index_t* elemToNode = &p_nodelist[8*i];
      CollectDomainNodesToElemNodes(elemToNode, x1, y1, z1);

      CalcElemVolumeDerivative(pfx, pfy, pfz, x1, y1, z1);

      /* load into temporary storage for FB Hour Glass control */
      for(ii=0;ii<8;++ii){
         Index_t jj=8*i+ii;

         dvdx[jj] = pfx[ii];
         dvdy[jj] = pfy[ii];
         dvdz[jj] = pfz[ii];
         x8n[jj]  = x1[ii];
         y8n[jj]  = y1[ii];
         z8n[jj]  = z1[ii];
      }

      /* current element volume = reference volume * relative volume */
      determ[i] = p_volo[i] * p_v[i];

      /* Do a check for negative volumes */
#pragma aspen control ignore
      if ( p_v[i] <= 0.0 ) {
         abort = 1;
      }
   }
   if ( abort ) {
      fprintf(stderr, "VolumeError in CalcHourglassControlForElems(); exit\n");
      exit(VolumeError) ;
   }

#pragma aspen control probability(1)
   if ( hgcoef > 0. ) {
      CalcFBHourglassForceForElems(determ,x8n,y8n,z8n,dvdx,dvdy,dvdz,hgcoef,m_ss,m_nodelist,
         m_elemMass,m_xd,m_yd,m_zd,m_nodeElemCount,m_nodeElemStart,m_nodeElemCornerList,
         m_fx,m_fy,m_fz) ;
   }

   /*
   Release(&z8n) ;
   Release(&y8n) ;
   Release(&x8n) ;
   Release(&dvdz) ;
   Release(&dvdy) ;
   Release(&dvdx) ;
   */

   return ;
}

/* Top-level volume-force driver: stress integration followed by hourglass
   control, with a negative-volume abort check in between.  Operates on the
   file-scope mesh arrays (m_*) and scratch arrays sigxx/sigyy/sigzz/determ
   (their local Allocates are commented out). */
static inline
void CalcVolumeForceForElems()
{
   Index_t k;
   Index_t numElem = m_numElem;
   int abort = 0;
#pragma aspen control probability(1)
   if (numElem != 0) {
      Real_t hgcoef = m_hgcoef;
#if LULESH_PRINT_SIZE
      printf("T_NUMELEM\t%d\n", numElem);
#endif
#if LULESH_CHECK_SIZE
      if (numElem != T_NUMELEM) {
         printf("T_NUMELEM should be %d\n", numElem);
         exit(1);
      }
#endif
      /*
      Real_t *sigxx  = Allocate(numElem) ;
      Real_t *sigyy  = Allocate(numElem) ;
      Real_t *sigzz  = Allocate(numElem) ;
      Real_t *determ = Allocate(numElem) ;
      */

      /* Sum contributions to total stress tensor */
      InitStressTermsForElems(numElem, sigxx, sigyy, sigzz, m_p, m_q);

      // call elemlib stress integration loop to produce nodal forces from
      // material stresses.
IntegrateStressForElems( numElem, sigxx, sigyy, sigzz, determ, m_nodelist, m_x, m_y, m_z, m_nodeElemCount, m_nodeElemStart, m_nodeElemCornerList, m_fx, m_fy, m_fz) ; // check for negative element volume #pragma aspen control ignore #ifdef _OPENACC #pragma acc parallel loop independent present(determ) reduction(||: abort) #else #pragma omp parallel for private(k) firstprivate(numElem) reduction(||: abort) #endif for ( k=0 ; k<numElem ; ++k ) { if (determ[k] <= 0.0) { abort = 1; } } if (abort == 1) { fprintf(stderr, "VolumeError in CalcVolumeForceForElems(); exit\n"); exit(VolumeError) ; } CalcHourglassControlForElems(determ,hgcoef,m_nodelist,m_volo,m_v) ; /* Release(&determ) ; Release(&sigzz) ; Release(&sigyy) ; Release(&sigxx) ; */ } } static inline void CalcForceForNodes(Real_t p_fx[T_NUMNODE], Real_t p_fy[T_NUMNODE], Real_t p_fz[T_NUMNODE]) { Index_t i; Index_t numNode = m_numNode; #ifdef _OPENACC #pragma acc parallel loop independent present(p_fx, p_fy, p_fz) #else #pragma omp parallel for private(i) firstprivate(numNode) #endif for (i=0; i<numNode; ++i) { p_fx[i] = 0.0 ; p_fy[i] = 0.0 ; p_fz[i] = 0.0 ; } /* Calcforce calls partial, force, hourq */ CalcVolumeForceForElems() ; /* Calculate Nodal Forces at domain boundaries */ /* problem->commSBN->Transfer(CommSBN::forces); */ } static inline void CalcAccelerationForNodes(Real_t p_fx[T_NUMNODE], Real_t p_fy[T_NUMNODE], Real_t p_fz[T_NUMNODE], Real_t p_xdd[T_NUMNODE], Real_t p_ydd[T_NUMNODE], Real_t p_zdd[T_NUMNODE], Real_t p_nodalMass[T_NUMNODE]) { Index_t i; Index_t numNode = m_numNode; #ifdef _OPENACC #pragma acc parallel loop present(p_fx, p_fy, p_fz, p_xdd, p_ydd, p_zdd, \ p_nodalMass) #else #pragma omp parallel for private(i) firstprivate(numNode) #endif for (i = 0; i < numNode; ++i) { p_xdd[i] = p_fx[i] / p_nodalMass[i]; p_ydd[i] = p_fy[i] / p_nodalMass[i]; p_zdd[i] = p_fz[i] / p_nodalMass[i]; } } static inline void ApplyAccelerationBoundaryConditionsForNodes(Real_t p_xdd[T_NUMNODE], Real_t p_ydd[T_NUMNODE], 
Real_t p_zdd[T_NUMNODE],
                                                 Index_t p_symmX[T_NUMNODESETS],
                                                 Index_t p_symmY[T_NUMNODESETS],
                                                 Index_t p_symmZ[T_NUMNODESETS])
{
  Index_t i;
  /* one boundary face of the cubic domain has (sizeX+1)^2 nodes */
  Index_t numNodeBC = (m_sizeX+1)*(m_sizeX+1) ;
#ifdef _OPENACC
#pragma acc parallel firstprivate(numNodeBC) present(p_xdd, p_ydd, p_zdd, \
    p_symmX, p_symmY, p_symmZ)
#else
#pragma omp parallel firstprivate(numNodeBC)
#endif
  {
#ifdef _OPENACC
#pragma acc loop independent
#else
#pragma omp for nowait private(i)
#endif
    for(i=0 ; i<numNodeBC ; ++i)
      p_xdd[p_symmX[i]] = 0.0 ;

#ifdef _OPENACC
#pragma acc loop independent
#else
#pragma omp for nowait private(i)
#endif
    for(i=0 ; i<numNodeBC ; ++i)
      p_ydd[p_symmY[i]] = 0.0 ;

#ifdef _OPENACC
#pragma acc loop independent
#else
#pragma omp for nowait private(i)
#endif
    for(i=0 ; i<numNodeBC ; ++i)
      p_zdd[p_symmZ[i]] = 0.0 ;
  }
}

/* Advance nodal velocities by one explicit Euler step (v += a*dt), snapping
   each component to zero when its magnitude falls below the cutoff u_cut
   (body continues on the next source line). */
static inline
void CalcVelocityForNodes(const Real_t dt, const Real_t u_cut,
                          Real_t p_xd[T_NUMNODE], Real_t p_yd[T_NUMNODE],
                          Real_t p_zd[T_NUMNODE],
                          Real_t p_xdd[T_NUMNODE], Real_t p_ydd[T_NUMNODE],
                          Real_t p_zdd[T_NUMNODE])
{
   Index_t i;
   Index_t numNode = m_numNode;
#ifdef _OPENACC
#pragma acc parallel loop independent present(p_xd, p_yd, p_zd, p_xdd, p_ydd, p_zdd)
#else
#pragma omp parallel for private(i) firstprivate(numNode)
#endif
   for ( i = 0 ; i < numNode ; ++i )
   {
     Real_t xdtmp, ydtmp, zdtmp ;

     xdtmp = p_xd[i] + p_xdd[i] * dt ;
#if ALLOW_ASPENIFSTMT == 1
#pragma aspen declare param(aspen_param_xdtmp:0.1)
#pragma aspen control probability(aspen_param_xdtmp)
#else
#pragma aspen control probability(1)
#endif
     if( FABS8(xdtmp) < u_cut ) xdtmp = 0.0;
     p_xd[i] = xdtmp ;

     ydtmp = p_yd[i] + p_ydd[i] * dt ;
#if ALLOW_ASPENIFSTMT == 1
#pragma aspen declare param(aspen_param_ydtmp:0.1)
#pragma aspen control probability(aspen_param_ydtmp)
#else
#pragma aspen control probability(1)
#endif
     if( FABS8(ydtmp) < u_cut ) ydtmp = 0.0;
     p_yd[i] = ydtmp ;

     zdtmp = p_zd[i] + p_zdd[i] * dt ;
#if ALLOW_ASPENIFSTMT == 1
#pragma aspen declare param(aspen_param_zdtmp:0.1)
#pragma aspen control
probability(aspen_param_zdtmp)
#else
#pragma aspen control probability(1)
#endif
     if( FABS8(zdtmp) < u_cut ) zdtmp = 0.0;
     p_zd[i] = zdtmp ;
   }
}

/* Advance nodal positions by one explicit Euler step: x += v*dt */
static inline
void CalcPositionForNodes(const Real_t dt,
                          Real_t p_x[T_NUMNODE], Real_t p_y[T_NUMNODE],
                          Real_t p_z[T_NUMNODE],
                          Real_t p_xd[T_NUMNODE], Real_t p_yd[T_NUMNODE],
                          Real_t p_zd[T_NUMNODE])
{
   Index_t i;
   Index_t numNode = m_numNode;
#ifdef _OPENACC
#pragma acc parallel loop independent present(p_x, p_y, p_z, p_xd, p_yd, p_zd)
#else
#pragma omp parallel for private(i) firstprivate(numNode)
#endif
   for ( i = 0 ; i < numNode ; ++i )
   {
     p_x[i] += p_xd[i] * dt ;
     p_y[i] += p_yd[i] * dt ;
     p_z[i] += p_zd[i] * dt ;
   }
}

/* Nodal phase of one Lagrange timestep: forces -> accelerations -> boundary
   conditions -> velocities -> positions. */
static inline
void LagrangeNodal()
{
  const Real_t delt = m_deltatime;
  Real_t u_cut = m_u_cut;

  /* time of boundary condition evaluation is beginning of step for force and
   * acceleration boundary conditions. */
  CalcForceForNodes(m_fx,m_fy,m_fz);

  CalcAccelerationForNodes(m_fx,m_fy,m_fz,m_xdd,m_ydd,m_zdd,m_nodalMass);

  ApplyAccelerationBoundaryConditionsForNodes(m_xdd,m_ydd,m_zdd,m_symmX,m_symmY,m_symmZ);

  CalcVelocityForNodes(delt,u_cut,m_xd,m_yd,m_zd,m_xdd,m_ydd,m_zdd);

  CalcPositionForNodes(delt,m_x,m_y,m_z,m_xd,m_yd,m_zd);

  return;
}

/* Exact volume of a (possibly warped) hexahedron from its 24 coordinates,
   via three triple products of edge-difference vectors, scaled by 1/12
   (body continues on the next source line). */
static inline
Real_t CalcElemVolumeI( const Real_t x0, const Real_t x1,
                        const Real_t x2, const Real_t x3,
                        const Real_t x4, const Real_t x5,
                        const Real_t x6, const Real_t x7,
                        const Real_t y0, const Real_t y1,
                        const Real_t y2, const Real_t y3,
                        const Real_t y4, const Real_t y5,
                        const Real_t y6, const Real_t y7,
                        const Real_t z0, const Real_t z1,
                        const Real_t z2, const Real_t z3,
                        const Real_t z4, const Real_t z5,
                        const Real_t z6, const Real_t z7 )
{
  Real_t twelveth = 1.0/12.0;

  /* pairwise coordinate differences dPQ = (coord P) - (coord Q) */
  Real_t dx61 = x6 - x1;
  Real_t dy61 = y6 - y1;
  Real_t dz61 = z6 - z1;

  Real_t dx70 = x7 - x0;
  Real_t dy70 = y7 - y0;
  Real_t dz70 = z7 - z0;

  Real_t dx63 = x6 - x3;
  Real_t dy63 = y6 - y3;
  Real_t dz63 = z6 - z3;

  Real_t dx20 = x2 - x0;
  Real_t dy20 = y2 - y0;
  Real_t dz20 = z2 - z0;

  Real_t dx50 = x5 - x0;
  Real_t dy50 = y5 - y0;
Real_t dz50 = z5 - z0;

  Real_t dx64 = x6 - x4;
  Real_t dy64 = y6 - y4;
  Real_t dz64 = z6 - z4;

  Real_t dx31 = x3 - x1;
  Real_t dy31 = y3 - y1;
  Real_t dz31 = z3 - z1;

  Real_t dx72 = x7 - x2;
  Real_t dy72 = y7 - y2;
  Real_t dz72 = z7 - z2;

  Real_t dx43 = x4 - x3;
  Real_t dy43 = y4 - y3;
  Real_t dz43 = z4 - z3;

  Real_t dx57 = x5 - x7;
  Real_t dy57 = y5 - y7;
  Real_t dz57 = z5 - z7;

  Real_t dx14 = x1 - x4;
  Real_t dy14 = y1 - y4;
  Real_t dz14 = z1 - z4;

  Real_t dx25 = x2 - x5;
  Real_t dy25 = y2 - y5;
  Real_t dz25 = z2 - z5;

/* scalar triple product (a x b) . c, expanded componentwise */
#define TRIPLE_PRODUCT(x1, y1, z1, x2, y2, z2, x3, y3, z3) \
   ((x1)*((y2)*(z3) - (z2)*(y3)) + (x2)*((z1)*(y3) - (y1)*(z3)) + (x3)*((y1)*(z2) - (z1)*(y2)))

  Real_t volume =
    TRIPLE_PRODUCT(dx31 + dx72, dx63, dx20,
       dy31 + dy72, dy63, dy20,
       dz31 + dz72, dz63, dz20) +
    TRIPLE_PRODUCT(dx43 + dx57, dx64, dx70,
       dy43 + dy57, dy64, dy70,
       dz43 + dz57, dz64, dz70) +
    TRIPLE_PRODUCT(dx14 + dx25, dx61, dx50,
       dy14 + dy25, dy61, dy50,
       dz14 + dz25, dz61, dz50);

#undef TRIPLE_PRODUCT

  volume *= twelveth;

  return volume ;
}

/* Array-argument convenience wrapper around CalcElemVolumeI */
static inline
Real_t CalcElemVolume( const Real_t x[8], const Real_t y[8], const Real_t z[8] )
{
return CalcElemVolumeI( x[0], x[1], x[2], x[3], x[4], x[5], x[6], x[7],
                       y[0], y[1], y[2], y[3], y[4], y[5], y[6], y[7],
                       z[0], z[1], z[2], z[3], z[4], z[5], z[6], z[7]);
}

/* Squared-area-like metric of a quad face from its four corners:
   |f|^2 |g|^2 - (f.g)^2 where f and g are the face diagonersal difference
   vectors.  NOTE(review): this is the *square* of (twice) the area, not the
   area itself -- callers compensate (CalcElemCharacteristicLength takes
   SQRT8 of the running max). */
static inline
Real_t AreaFace( const Real_t x0, const Real_t x1,
                 const Real_t x2, const Real_t x3,
                 const Real_t y0, const Real_t y1,
                 const Real_t y2, const Real_t y3,
                 const Real_t z0, const Real_t z1,
                 const Real_t z2, const Real_t z3)
{
   Real_t fx = (x2 - x0) - (x3 - x1);
   Real_t fy = (y2 - y0) - (y3 - y1);
   Real_t fz = (z2 - z0) - (z3 - z1);
   Real_t gx = (x2 - x0) + (x3 - x1);
   Real_t gy = (y2 - y0) + (y3 - y1);
   Real_t gz = (z2 - z0) + (z3 - z1);
   Real_t area =
      (fx * fx + fy * fy + fz * fz) *
      (gx * gx + gy * gy + gz * gz) -
      (fx * gx + fy * gy + fz * gz) *
      (fx * gx + fy * gy + fz * gz);
   return area ;
}

/* Characteristic length of an element: 4 * volume / sqrt(max face metric)
   over the six faces (signature continues on the next source line). */
static inline
Real_t CalcElemCharacteristicLength( const Real_t x[8],
                                     const Real_t y[8],
                                     const Real_t z[8],
                                     const
Real_t volume)
{
   Real_t a, charLength = 0.0;

   /* take the maximum of the six face area metrics */
   a = AreaFace(x[0],x[1],x[2],x[3],
                y[0],y[1],y[2],y[3],
                z[0],z[1],z[2],z[3]) ;
   charLength = std_max(a,charLength) ;

   a = AreaFace(x[4],x[5],x[6],x[7],
                y[4],y[5],y[6],y[7],
                z[4],z[5],z[6],z[7]) ;
   charLength = std_max(a,charLength) ;

   a = AreaFace(x[0],x[1],x[5],x[4],
                y[0],y[1],y[5],y[4],
                z[0],z[1],z[5],z[4]) ;
   charLength = std_max(a,charLength) ;

   a = AreaFace(x[1],x[2],x[6],x[5],
                y[1],y[2],y[6],y[5],
                z[1],z[2],z[6],z[5]) ;
   charLength = std_max(a,charLength) ;

   a = AreaFace(x[2],x[3],x[7],x[6],
                y[2],y[3],y[7],y[6],
                z[2],z[3],z[7],z[6]) ;
   charLength = std_max(a,charLength) ;

   a = AreaFace(x[3],x[0],x[4],x[7],
                y[3],y[0],y[4],y[7],
                z[3],z[0],z[4],z[7]) ;
   charLength = std_max(a,charLength) ;

   /* SQRT8 undoes the squared metric returned by AreaFace */
   charLength = 4.0 * volume / SQRT8(charLength);

   return charLength;
}

/* Symmetric velocity-gradient tensor D (6 unique entries) for one element,
   from nodal velocities and shape-function derivatives b, scaled by 1/detJ.
   (Function name keeps the original "Grandient" spelling -- callers use it.)
   Only node pairs (0,6),(1,7),(2,4),(3,5) appear: opposite-corner symmetry
   of the derivative matrix.  (Body continues on the next source line.) */
static inline
void CalcElemVelocityGrandient( const Real_t* const xvel,
                                const Real_t* const yvel,
                                const Real_t* const zvel,
                                const Real_t b[][8],
                                const Real_t detJ,
                                Real_t* const d )
{
  const Real_t inv_detJ = 1.0 / detJ ;
  Real_t dyddx, dxddy, dzddx, dxddz, dzddy, dyddz;
#pragma aspen declare data(pfx:traits(Array(8, aspen_param_double)))
  const Real_t* const pfx = b[0];
#pragma aspen declare data(pfy:traits(Array(8, aspen_param_double)))
  const Real_t* const pfy = b[1];
#pragma aspen declare data(pfz:traits(Array(8, aspen_param_double)))
  const Real_t* const pfz = b[2];

  /* diagonal entries: d[0]=dvx/dx, d[1]=dvy/dy, d[2]=dvz/dz */
  d[0] = inv_detJ * ( pfx[0] * (xvel[0]-xvel[6])
                     + pfx[1] * (xvel[1]-xvel[7])
                     + pfx[2] * (xvel[2]-xvel[4])
                     + pfx[3] * (xvel[3]-xvel[5]) );

  d[1] = inv_detJ * ( pfy[0] * (yvel[0]-yvel[6])
                     + pfy[1] * (yvel[1]-yvel[7])
                     + pfy[2] * (yvel[2]-yvel[4])
                     + pfy[3] * (yvel[3]-yvel[5]) );

  d[2] = inv_detJ * ( pfz[0] * (zvel[0]-zvel[6])
                     + pfz[1] * (zvel[1]-zvel[7])
                     + pfz[2] * (zvel[2]-zvel[4])
                     + pfz[3] * (zvel[3]-zvel[5]) );

  /* off-diagonal partials, symmetrized below */
  dyddx  = inv_detJ * ( pfx[0] * (yvel[0]-yvel[6])
                      + pfx[1] * (yvel[1]-yvel[7])
                      + pfx[2] * (yvel[2]-yvel[4])
                      + pfx[3] * (yvel[3]-yvel[5]) );

  dxddy  = inv_detJ * ( pfy[0] * (xvel[0]-xvel[6])
                      + pfy[1] *
(xvel[1]-xvel[7]) + pfy[2] * (xvel[2]-xvel[4]) + pfy[3] * (xvel[3]-xvel[5]) );

  dzddx = inv_detJ * ( pfx[0] * (zvel[0]-zvel[6])
                     + pfx[1] * (zvel[1]-zvel[7])
                     + pfx[2] * (zvel[2]-zvel[4])
                     + pfx[3] * (zvel[3]-zvel[5]) );

  dxddz = inv_detJ * ( pfz[0] * (xvel[0]-xvel[6])
                     + pfz[1] * (xvel[1]-xvel[7])
                     + pfz[2] * (xvel[2]-xvel[4])
                     + pfz[3] * (xvel[3]-xvel[5]) );

  dzddy = inv_detJ * ( pfy[0] * (zvel[0]-zvel[6])
                     + pfy[1] * (zvel[1]-zvel[7])
                     + pfy[2] * (zvel[2]-zvel[4])
                     + pfy[3] * (zvel[3]-zvel[5]) );

  dyddz = inv_detJ * ( pfz[0] * (yvel[0]-yvel[6])
                     + pfz[1] * (yvel[1]-yvel[7])
                     + pfz[2] * (yvel[2]-yvel[4])
                     + pfz[3] * (yvel[3]-yvel[5]) );

  /* Symmetrized off-diagonal components. */
  d[5] = .5 * ( dxddy + dyddx );
  d[4] = .5 * ( dxddz + dzddx );
  d[3] = .5 * ( dzddy + dyddz );
}

/* Per-element kinematics for one Lagrange step: new relative volume (vnew),
   volume delta (delv), characteristic length (arealg), and the velocity
   gradient diagonal (dxx/dyy/dzz), computed at the time-centered geometry. */
static inline void CalcKinematicsForElems( Index_t numElem,Real_t dt,
    Index_t p_nodelist[T_NUMELEM8],
    Real_t p_x[T_NUMNODE], Real_t p_y[T_NUMNODE], Real_t p_z[T_NUMNODE],
    Real_t p_volo[T_NUMELEM], Real_t p_v[T_NUMELEM], Real_t p_vnew[T_NUMELEM],
    Real_t p_delv[T_NUMELEM], Real_t p_arealg[T_NUMELEM],
    Real_t p_xd[T_NUMNODE], Real_t p_yd[T_NUMNODE], Real_t p_zd[T_NUMNODE],
    Real_t p_dxx[T_NUMELEM], Real_t p_dyy[T_NUMELEM], Real_t p_dzz[T_NUMELEM])
{
  Index_t k, lnode, j;

  // loop over all elements
#ifdef _OPENACC
#pragma acc parallel loop independent present(p_dxx, p_dyy, p_dzz, p_x, p_y, p_z, p_xd, \
 p_yd, p_zd, p_v, p_volo, p_vnew, p_delv, p_arealg, p_nodelist)
#else
#pragma omp parallel for private(k, lnode, j) firstprivate(numElem, dt)
#endif
  for( k=0 ; k<numElem ; ++k )
  {
    Real_t B[3][8] ; /** shape function derivatives */
    Real_t D[6] ;
    Real_t x_local[8] ;
    Real_t y_local[8] ;
    Real_t z_local[8] ;
    Real_t xd_local[8] ;
    Real_t yd_local[8] ;
    Real_t zd_local[8] ;
    Real_t detJ = 0.0 ;

    Real_t volume ;
    Real_t relativeVolume ;
#pragma aspen declare data(elemToNode:traits(Array(8,aspen_param_int)))
    const Index_t* const elemToNode = &p_nodelist[8*k] ;
    Real_t dt2;

    // get nodal coordinates from global arrays and copy into local arrays.
    for( lnode=0 ; lnode<8 ; ++lnode )
    {
      Index_t gnode = elemToNode[lnode];
      x_local[lnode] = p_x[gnode];
      y_local[lnode] = p_y[gnode];
      z_local[lnode] = p_z[gnode];
    }

    // volume calculations
    volume = CalcElemVolume(x_local, y_local, z_local );
    relativeVolume = volume / p_volo[k] ;
    p_vnew[k] = relativeVolume ;            /* new relative volume V/V0 */
    p_delv[k] = relativeVolume - p_v[k] ;   /* change from previous step */

    // set characteristic length
    p_arealg[k] = CalcElemCharacteristicLength(x_local, y_local, z_local, volume);

    // get nodal velocities from global array and copy into local arrays.
    for( lnode=0 ; lnode<8 ; ++lnode )
    {
      Index_t gnode = elemToNode[lnode];
      xd_local[lnode] = p_xd[gnode];
      yd_local[lnode] = p_yd[gnode];
      zd_local[lnode] = p_zd[gnode];
    }

    /* Rewind nodal positions by dt/2 so the velocity gradient below is
       evaluated at the time-centered (half-step) geometry. */
    dt2 = 0.5 * dt;
    for ( j=0 ; j<8 ; ++j )
    {
      x_local[j] -= dt2 * xd_local[j];
      y_local[j] -= dt2 * yd_local[j];
      z_local[j] -= dt2 * zd_local[j];
    }

    CalcElemShapeFunctionDerivatives( x_local, y_local, z_local, B, &detJ );

    CalcElemVelocityGrandient( xd_local, yd_local, zd_local, B, detJ, D );

    // put velocity gradient quantities into their global arrays.
    p_dxx[k] = D[0];
    p_dyy[k] = D[1];
    p_dzz[k] = D[2];
  }
}

/* Driver for the element kinematics phase: runs CalcKinematicsForElems, then
   makes the strain-rate tensor deviatoric (storing its trace in vdov) and
   aborts on any non-positive new volume. */
static inline void CalcLagrangeElements(Real_t deltatime,
    Real_t p_vnew[T_NUMELEM], Real_t p_vdov[T_NUMELEM],
    Real_t p_dxx[T_NUMELEM], Real_t p_dyy[T_NUMELEM], Real_t p_dzz[T_NUMELEM])
{
  Index_t k;
  Index_t numElem = m_numElem;
  int abort = 0;

#pragma aspen control probability(1)
  if (numElem > 0) {
    CalcKinematicsForElems(numElem,deltatime,m_nodelist,m_x,m_y,m_z,m_volo,m_v,p_vnew,
        m_delv,m_arealg,m_xd,m_yd,m_zd,p_dxx,p_dyy,p_dzz);

    // element loop to do some stuff not included in the elemlib function.
#ifdef _OPENACC
#pragma acc parallel loop independent present(p_vdov, p_dxx, p_dyy, p_dzz, p_vnew) \
 reduction(||: abort)
#else
#pragma omp parallel for private(k) firstprivate(numElem) reduction(||:abort)
#endif
    for ( k=0 ; k<numElem ; ++k )
    {
      // calc strain rate and apply as constraint (only done in FB element)
      Real_t vdov = p_dxx[k] + p_dyy[k] + p_dzz[k] ;
      Real_t vdovthird = vdov/3.0 ;

      // make the rate of deformation tensor deviatoric
      p_vdov[k] = vdov ;
      p_dxx[k] -= vdovthird ;
      p_dyy[k] -= vdovthird ;
      p_dzz[k] -= vdovthird ;

      // See if any volumes are negative, and take appropriate action.
#pragma aspen control ignore
      if (p_vnew[k] <= 0.0)
      {
        /* flag only; the error exit happens after the parallel loop */
        abort = 1;
      }
    }
    if (abort == 1) {
      fprintf(stderr, "VolumeError in CalcLagrangeElements(); exit\n");
      exit(VolumeError) ;
    }
  }
}

/* Computes, per element, the position gradients (delx_*) and the velocity
   gradients (delv_*) along the three reference directions xi/eta/zeta, used
   by the monotonic artificial-viscosity (Q) limiter. */
static inline void CalcMonotonicQGradientsForElems(Index_t p_nodelist[T_NUMELEM8],
    Real_t p_x[T_NUMNODE], Real_t p_y[T_NUMNODE], Real_t p_z[T_NUMNODE],
    Real_t p_xd[T_NUMNODE], Real_t p_yd[T_NUMNODE],Real_t p_zd[T_NUMNODE],
    Real_t p_volo[T_NUMELEM], Real_t p_vnew[T_NUMELEM],
    Real_t p_delx_zeta[T_NUMELEM], Real_t p_delv_zeta[T_NUMELEM],
    Real_t p_delx_xi[T_NUMELEM], Real_t p_delv_xi[T_NUMELEM],
    Real_t p_delx_eta[T_NUMELEM], Real_t p_delv_eta[T_NUMELEM])
{
  Index_t i;
#define SUM4(a,b,c,d) (a + b + c + d)
  Index_t numElem = m_numElem;

#ifdef _OPENACC
#pragma acc parallel loop independent present(p_vnew, p_nodelist, p_x, p_y, p_z, p_xd, \
 p_yd, p_zd, p_volo, p_delx_xi, p_delx_eta, p_delx_zeta, p_delv_xi, p_delv_eta,\
 p_delv_zeta)
#else
#pragma omp parallel for private(i) firstprivate(numElem)
#endif
  for (i = 0 ; i < numElem ; ++i ) {
    const Real_t ptiny = 1.e-36 ;   /* guards divisions against zero volume */
    Real_t ax,ay,az ;
    Real_t dxv,dyv,dzv ;
#pragma aspen declare data(elemToNode:traits(Array(8,aspen_param_int)))
    const Index_t *elemToNode = &p_nodelist[8*i];
    Index_t n0 = elemToNode[0] ;
    Index_t n1 = elemToNode[1] ;
    Index_t n2 = elemToNode[2] ;
    Index_t n3 = elemToNode[3] ;
    Index_t n4 = elemToNode[4] ;
    Index_t n5 = elemToNode[5] ;
    Index_t n6 =
elemToNode[6] ;
    Index_t n7 = elemToNode[7] ;

    /* Gather the element's nodal coordinates and velocities. */
    Real_t x0 = p_x[n0] ; Real_t x1 = p_x[n1] ;
    Real_t x2 = p_x[n2] ; Real_t x3 = p_x[n3] ;
    Real_t x4 = p_x[n4] ; Real_t x5 = p_x[n5] ;
    Real_t x6 = p_x[n6] ; Real_t x7 = p_x[n7] ;

    Real_t y0 = p_y[n0] ; Real_t y1 = p_y[n1] ;
    Real_t y2 = p_y[n2] ; Real_t y3 = p_y[n3] ;
    Real_t y4 = p_y[n4] ; Real_t y5 = p_y[n5] ;
    Real_t y6 = p_y[n6] ; Real_t y7 = p_y[n7] ;

    Real_t z0 = p_z[n0] ; Real_t z1 = p_z[n1] ;
    Real_t z2 = p_z[n2] ; Real_t z3 = p_z[n3] ;
    Real_t z4 = p_z[n4] ; Real_t z5 = p_z[n5] ;
    Real_t z6 = p_z[n6] ; Real_t z7 = p_z[n7] ;

    Real_t xv0 = p_xd[n0] ; Real_t xv1 = p_xd[n1] ;
    Real_t xv2 = p_xd[n2] ; Real_t xv3 = p_xd[n3] ;
    Real_t xv4 = p_xd[n4] ; Real_t xv5 = p_xd[n5] ;
    Real_t xv6 = p_xd[n6] ; Real_t xv7 = p_xd[n7] ;

    Real_t yv0 = p_yd[n0] ; Real_t yv1 = p_yd[n1] ;
    Real_t yv2 = p_yd[n2] ; Real_t yv3 = p_yd[n3] ;
    Real_t yv4 = p_yd[n4] ; Real_t yv5 = p_yd[n5] ;
    Real_t yv6 = p_yd[n6] ; Real_t yv7 = p_yd[n7] ;

    Real_t zv0 = p_zd[n0] ; Real_t zv1 = p_zd[n1] ;
    Real_t zv2 = p_zd[n2] ; Real_t zv3 = p_zd[n3] ;
    Real_t zv4 = p_zd[n4] ; Real_t zv5 = p_zd[n5] ;
    Real_t zv6 = p_zd[n6] ; Real_t zv7 = p_zd[n7] ;

    Real_t vol = p_volo[i]*p_vnew[i] ;       /* current element volume */
    Real_t norm = 1.0 / ( vol + ptiny ) ;

    /* Face-centered difference vectors along the j, i, k reference axes
       (quarter of the difference of opposite face node sums). */
    Real_t dxj = -0.25*(SUM4(x0,x1,x5,x4) - SUM4(x3,x2,x6,x7)) ;
    Real_t dyj = -0.25*(SUM4(y0,y1,y5,y4) - SUM4(y3,y2,y6,y7)) ;
    Real_t dzj = -0.25*(SUM4(z0,z1,z5,z4) - SUM4(z3,z2,z6,z7)) ;

    Real_t dxi = 0.25*(SUM4(x1,x2,x6,x5) - SUM4(x0,x3,x7,x4)) ;
    Real_t dyi = 0.25*(SUM4(y1,y2,y6,y5) - SUM4(y0,y3,y7,y4)) ;
    Real_t dzi = 0.25*(SUM4(z1,z2,z6,z5) - SUM4(z0,z3,z7,z4)) ;

    Real_t dxk = 0.25*(SUM4(x4,x5,x6,x7) - SUM4(x0,x1,x2,x3)) ;
    Real_t dyk = 0.25*(SUM4(y4,y5,y6,y7) - SUM4(y0,y1,y2,y3)) ;
    Real_t dzk = 0.25*(SUM4(z4,z5,z6,z7) - SUM4(z0,z1,z2,z3)) ;

    /* find delvk and delxk ( i cross j ) */
    ax = dyi*dzj - dzi*dyj ;
    ay = dzi*dxj - dxi*dzj ;
    az = dxi*dyj - dyi*dxj ;

    p_delx_zeta[i] = vol / SQRT8(ax*ax + ay*ay + az*az + ptiny) ;

    ax *= norm ;
    ay *= norm ;
    az *= norm ;

    dxv =
0.25*(SUM4(xv4,xv5,xv6,xv7) - SUM4(xv0,xv1,xv2,xv3)) ;
    dyv = 0.25*(SUM4(yv4,yv5,yv6,yv7) - SUM4(yv0,yv1,yv2,yv3)) ;
    dzv = 0.25*(SUM4(zv4,zv5,zv6,zv7) - SUM4(zv0,zv1,zv2,zv3)) ;

    /* velocity gradient = normalized face normal dotted with face velocity */
    p_delv_zeta[i] = ax*dxv + ay*dyv + az*dzv ;

    /* find delxi and delvi ( j cross k ) */
    ax = dyj*dzk - dzj*dyk ;
    ay = dzj*dxk - dxj*dzk ;
    az = dxj*dyk - dyj*dxk ;

    p_delx_xi[i] = vol / SQRT8(ax*ax + ay*ay + az*az + ptiny) ;

    ax *= norm ;
    ay *= norm ;
    az *= norm ;

    dxv = 0.25*(SUM4(xv1,xv2,xv6,xv5) - SUM4(xv0,xv3,xv7,xv4)) ;
    dyv = 0.25*(SUM4(yv1,yv2,yv6,yv5) - SUM4(yv0,yv3,yv7,yv4)) ;
    dzv = 0.25*(SUM4(zv1,zv2,zv6,zv5) - SUM4(zv0,zv3,zv7,zv4)) ;

    p_delv_xi[i] = ax*dxv + ay*dyv + az*dzv ;

    /* find delxj and delvj ( k cross i ) */
    ax = dyk*dzi - dzk*dyi ;
    ay = dzk*dxi - dxk*dzi ;
    az = dxk*dyi - dyk*dxi ;

    p_delx_eta[i] = vol / SQRT8(ax*ax + ay*ay + az*az + ptiny) ;

    ax *= norm ;
    ay *= norm ;
    az *= norm ;

    dxv = -0.25*(SUM4(xv0,xv1,xv5,xv4) - SUM4(xv3,xv2,xv6,xv7)) ;
    dyv = -0.25*(SUM4(yv0,yv1,yv5,yv4) - SUM4(yv3,yv2,yv6,yv7)) ;
    dzv = -0.25*(SUM4(zv0,zv1,zv5,zv4) - SUM4(zv3,zv2,zv6,zv7)) ;

    p_delv_eta[i] = ax*dxv + ay*dyv + az*dzv ;
  }
#undef SUM4
}

/* Applies the monotonic Q slope limiter per material element and stores the
   resulting linear (ql) and quadratic (qq) artificial-viscosity terms.
   Boundary-condition masks select neighbour/self/zero gradients per side. */
static inline void CalcMonotonicQRegionForElems(// parameters
    Real_t qlc_monoq, Real_t qqc_monoq,
    Real_t monoq_limiter_mult, Real_t monoq_max_slope,
    Real_t ptiny,
    // the elementset length
    Index_t elength,
    Index_t p_matElemlist[T_NUMELEM], Int_t p_elemBC[T_NUMELEM],
    Real_t p_delx_xi[T_NUMELEM], Real_t p_delx_eta[T_NUMELEM], Real_t p_delx_zeta[T_NUMELEM],
    Real_t p_delv_xi[T_NUMELEM], Real_t p_delv_eta[T_NUMELEM], Real_t p_delv_zeta[T_NUMELEM],
    Index_t p_lxim[T_NUMELEM],Index_t p_lxip[T_NUMELEM],
    Index_t p_letam[T_NUMELEM],Index_t p_letap[T_NUMELEM],
    Index_t p_lzetam[T_NUMELEM],Index_t p_lzetap[T_NUMELEM],
    Real_t p_vnew[T_NUMELEM], Real_t p_vdov[T_NUMELEM],
    Real_t p_volo[T_NUMELEM], Real_t p_elemMass[T_NUMELEM],
    Real_t p_qq[T_NUMELEM], Real_t p_ql[T_NUMELEM])
{
  Index_t ielem;
#ifdef _OPENACC
#pragma acc parallel loop independent firstprivate(qlc_monoq, qqc_monoq, \
monoq_limiter_mult,\
 monoq_max_slope, ptiny) present(p_matElemlist) present(p_vnew, p_vdov, p_delx_xi,\
 p_delx_eta, p_delx_zeta, p_delv_xi, p_delv_eta, p_delv_zeta, p_elemMass, \
 p_volo, p_lxip, p_lxim, p_letam, p_letap, p_lzetam, p_lzetap, p_ql, p_qq, \
 p_elemBC)
#else
#pragma omp parallel for private(ielem) firstprivate(elength, qlc_monoq, qqc_monoq, monoq_limiter_mult, monoq_max_slope, ptiny)
#endif
  for ( ielem = 0 ; ielem < elength; ++ielem ) {
    Real_t qlin, qquad ;
    Real_t phixi, phieta, phizeta ;
    Index_t i = p_matElemlist[ielem];
    Int_t bcMask = p_elemBC[i] ;
    Real_t delvm, delvp ;

    /* phixi: limiter in the xi direction.  The BC mask picks the neighbour's
       gradient (interior), the element's own (symmetry), or zero (free). */
    Real_t norm = 1. / ( p_delv_xi[i] + ptiny ) ;

    switch (bcMask & XI_M) {
      case 0:         delvm = p_delv_xi[p_lxim[i]] ; break ;
      case XI_M_SYMM: delvm = p_delv_xi[i] ; break ;
      case XI_M_FREE: delvm = 0.0 ; break ;
      default:        /* ERROR */ ; break ;
    }
    switch (bcMask & XI_P) {
      case 0:         delvp = p_delv_xi[p_lxip[i]] ; break ;
      case XI_P_SYMM: delvp = p_delv_xi[i] ; break ;
      case XI_P_FREE: delvp = 0.0 ; break ;
      default:        /* ERROR */ ; break ;
    }

    delvm = delvm * norm ;
    delvp = delvp * norm ;

    phixi = .5 * ( delvm + delvp ) ;

    delvm *= monoq_limiter_mult ;
    delvp *= monoq_limiter_mult ;

    /* clamp phixi into [0, monoq_max_slope] bounded by both side slopes */
    if ( delvm < phixi ) phixi = delvm ;
    if ( delvp < phixi ) phixi = delvp ;
    if ( phixi < 0.) phixi = 0. ;
    if ( phixi > monoq_max_slope) phixi = monoq_max_slope;

    /* phieta */
    norm = 1.
/ ( p_delv_eta[i] + ptiny ) ;

    switch (bcMask & ETA_M) {
      case 0:          delvm = p_delv_eta[p_letam[i]] ; break ;
      case ETA_M_SYMM: delvm = p_delv_eta[i] ; break ;
      case ETA_M_FREE: delvm = 0.0 ; break ;
      default:         /* ERROR */ ; break ;
    }
    switch (bcMask & ETA_P) {
      case 0:          delvp = p_delv_eta[p_letap[i]] ; break ;
      case ETA_P_SYMM: delvp = p_delv_eta[i] ; break ;
      case ETA_P_FREE: delvp = 0.0 ; break ;
      default:         /* ERROR */ ; break ;
    }

    delvm = delvm * norm ;
    delvp = delvp * norm ;

    phieta = .5 * ( delvm + delvp ) ;

    delvm *= monoq_limiter_mult ;
    delvp *= monoq_limiter_mult ;

    if ( delvm < phieta ) phieta = delvm ;
    if ( delvp < phieta ) phieta = delvp ;
    if ( phieta < 0.) phieta = 0. ;
    if ( phieta > monoq_max_slope) phieta = monoq_max_slope;

    /* phizeta */
    norm = 1. / ( p_delv_zeta[i] + ptiny ) ;

    switch (bcMask & ZETA_M) {
      case 0:           delvm = p_delv_zeta[p_lzetam[i]] ; break ;
      case ZETA_M_SYMM: delvm = p_delv_zeta[i] ; break ;
      case ZETA_M_FREE: delvm = 0.0 ; break ;
      default:          /* ERROR */ ; break ;
    }
    switch (bcMask & ZETA_P) {
      case 0:           delvp = p_delv_zeta[p_lzetap[i]] ; break ;
      case ZETA_P_SYMM: delvp = p_delv_zeta[i] ; break ;
      case ZETA_P_FREE: delvp = 0.0 ; break ;
      default:          /* ERROR */ ; break ;
    }

    delvm = delvm * norm ;
    delvp = delvp * norm ;

    phizeta = .5 * ( delvm + delvp ) ;

    delvm *= monoq_limiter_mult ;
    delvp *= monoq_limiter_mult ;

    if ( delvm < phizeta ) phizeta = delvm ;
    if ( delvp < phizeta ) phizeta = delvp ;
    if ( phizeta < 0.) phizeta = 0.;
    if ( phizeta > monoq_max_slope ) phizeta = monoq_max_slope;

    /* Remove length scale */
#if ALLOW_ASPENIFSTMT == 1
#pragma aspen declare param(aspen_param_vdov2:0.1)
#pragma aspen control probability(aspen_param_vdov2)
//#pragma aspen control probability(aspen_param_vdov2) flops(1:traits(dp)) loads(1*aspen_param_double:from(p_vdov):traits(stride(1)))
#else
#pragma aspen control probability(1)
#endif
    /* Expanding elements (positive vdov) get no artificial viscosity. */
    if ( p_vdov[i] > 0. ) {
      qlin = 0. ;
      qquad = 0.
;
    }
    else {
      /* Gradient terms; only compressive (negative) contributions kept. */
      Real_t delvxxi = p_delv_xi[i] * p_delx_xi[i] ;
      Real_t delvxeta = p_delv_eta[i] * p_delx_eta[i] ;
      Real_t delvxzeta = p_delv_zeta[i] * p_delx_zeta[i] ;
      Real_t rho;

#pragma aspen control ignore
      if ( delvxxi > 0. ) delvxxi = 0. ;
#pragma aspen control ignore
      if ( delvxeta > 0. ) delvxeta = 0. ;
#pragma aspen control ignore
      if ( delvxzeta > 0. ) delvxzeta = 0. ;

      rho = p_elemMass[i] / (p_volo[i] * p_vnew[i]) ;

      qlin = -qlc_monoq * rho *
        (  delvxxi * (1. - phixi)
         + delvxeta * (1. - phieta)
         + delvxzeta * (1. - phizeta) ) ;

      qquad = qqc_monoq * rho *
        (  delvxxi*delvxxi * (1. - phixi*phixi)
         + delvxeta*delvxeta * (1. - phieta*phieta)
         + delvxzeta*delvxzeta * (1. - phizeta*phizeta) ) ;
    }

    p_qq[i] = qquad ;
    p_ql[i] = qlin ;
  }
}

/* Wrapper: evaluates the monotonic artificial viscosity for the (single)
   material region, reading the limiter parameters from globals. */
static inline void CalcMonotonicQForElems()
{
  //
  // initialize parameters
  //
  const Real_t ptiny = 1.e-36 ;
  Real_t monoq_max_slope = m_monoq_max_slope;
  Real_t monoq_limiter_mult = m_monoq_limiter_mult;

  //
  // calculate the monotonic q for pure regions
  //
  Index_t elength = m_numElem;
#pragma aspen control probability(1)
  if (elength > 0) {
    Real_t qlc_monoq = m_qlc_monoq;
    Real_t qqc_monoq = m_qqc_monoq;
    CalcMonotonicQRegionForElems(// parameters
        qlc_monoq, qqc_monoq, monoq_limiter_mult, monoq_max_slope, ptiny,
        // the elemset length
        elength,
        m_matElemlist,m_elemBC,m_delx_xi,m_delx_eta,m_delx_zeta,
        m_delv_xi,m_delv_eta,m_delv_zeta,m_lxim,m_lxip,m_letam,m_letap,
        m_lzetam,m_lzetap,m_vnew,m_vdov,m_volo,m_elemMass,m_qq,m_ql);
  }
}

/* Q phase driver: computes the velocity/position gradients, runs the
   monotonic Q evaluation, then checks Q against the qstop error limit. */
static inline void CalcQForElems()
{
  Index_t i;
  Real_t qstop = m_qstop;
  Index_t numElem = m_numElem;

  //
  // MONOTONIC Q option
  //

  /* Calculate velocity gradients */
  CalcMonotonicQGradientsForElems(m_nodelist,m_x,m_y,m_z,m_xd,m_yd,m_zd,m_volo,m_vnew,
      m_delx_zeta,m_delv_zeta,m_delx_xi,m_delv_xi,m_delx_eta,m_delv_eta) ;

  /* Transfer velocity gradients in the first order elements */
  /* problem->commElements->Transfer(CommElements::monoQ) ; */
  CalcMonotonicQForElems() ;

  /* Don't allow excessive artificial viscosity */
#pragma aspen control \
ignore
  if (numElem != 0) {
    //Index_t idx = -1;
    Index_t idx = 0;   /* used as an OR-reducible flag, not an index */

#ifdef _OPENACC
#pragma acc parallel loop independent present(m_q) reduction(||:idx)
#else
#pragma omp parallel for reduction(||:idx)
#endif
    for (i=0; i<numElem; ++i) {
      if ( m_q[i] > qstop ) {
        idx = 1 ;
        //break ;
      }
    }

    if(idx == 1) {
      fprintf(stderr, "QStopError in CalcQForElems(); exit\n");
      exit(QStopError) ;
    }
  }
}

/* EOS pressure update: fills bvc/pbvc (bulk-viscosity coefficients, here the
   constant 2/3 gamma-law form) and computes p_new = bvc * e_old, applying the
   p_cut floor-to-zero, the eosvmax zeroing, and the pmin floor. */
static inline void CalcPressureForElems(Real_t p_new[T_NUMELEM], Real_t bvc[T_NUMELEM],
    Real_t pbvc[T_NUMELEM], Real_t e_old[T_NUMELEM],
    Real_t compression[T_NUMELEM], Real_t vnewc[T_NUMELEM],
    Real_t pmin, Real_t p_cut, Real_t eosvmax,
    Index_t length)
{
  Index_t i;
#ifdef _OPENACC
#pragma acc parallel loop independent present(bvc, pbvc, compression)
#else
#pragma omp parallel for private(i) firstprivate(length)
#endif
  for (i = 0; i < length ; ++i) {
    Real_t c1s = 2.0/3.0 ;
    bvc[i] = c1s * (compression[i] + 1.);
    pbvc[i] = c1s;
  }

#ifdef _OPENACC
#pragma acc parallel loop independent present(bvc, p_new, e_old, vnewc)
#else
#pragma omp parallel for private(i) firstprivate(length, pmin, p_cut, eosvmax)
#endif
  for (i = 0 ; i < length ; ++i){
    p_new[i] = bvc[i] * e_old[i] ;

#if ALLOW_ASPENIFSTMT == 1
#pragma aspen declare param(aspen_param_pnew:0.1)
#pragma aspen control probability(aspen_param_pnew)
#else
#pragma aspen control probability(1)
#endif
    if (FABS8(p_new[i]) < p_cut )
      p_new[i] = 0.0 ;

#pragma aspen control ignore
    if ( vnewc[i] >= eosvmax ) /* impossible condition here?
*/
      p_new[i] = 0.0 ;

#if ALLOW_ASPENIFSTMT == 1
#pragma aspen declare param(aspen_param_pnew2:0.1)
#pragma aspen control probability(aspen_param_pnew2)
#else
#pragma aspen control probability(1)
#endif
    if (p_new[i] < pmin)
      p_new[i] = pmin ;
  }
}

/* EOS energy update: iterates energy/pressure/viscosity through a half-step
   predictor (pHalfStep), two corrector passes, and a final q update, applying
   the e_cut/emin/q_cut cutoffs after each pass.  Temporary pHalfStep is
   allocated here and released at the end. */
static inline void CalcEnergyForElems(Real_t p_new[T_LENGTH], Real_t e_new[T_LENGTH],
    Real_t q_new[T_LENGTH], Real_t bvc[T_LENGTH], Real_t pbvc[T_LENGTH],
    Real_t p_old[T_LENGTH], Real_t e_old[T_LENGTH], Real_t q_old[T_LENGTH],
    Real_t compression[T_LENGTH], Real_t compHalfStep[T_LENGTH],
    Real_t vnewc[T_LENGTH], Real_t* work, Real_t* delvc,
    Real_t pmin, Real_t p_cut, Real_t e_cut, Real_t q_cut, Real_t emin,
    Real_t* qq, Real_t* ql, Real_t rho0, Real_t eosvmax,
    Index_t length)
{
  Index_t i;
#if LULESH_PRINT_SIZE
  printf("T_LENGTH\t%d\n", length);
#endif
#if LULESH_CHECK_SIZE
  if (length != T_LENGTH) {
    printf("T_LENGTH should be %d\n", length);
    exit(1);
  }
#endif
  Real_t *pHalfStep = Allocate(length) ;
#ifdef _OPENACC
#pragma acc data create(pHalfStep[0:length])
  {
#endif

#ifdef _OPENACC
#pragma acc parallel loop independent present(e_new, e_old, p_old, q_old, delvc, work)
#else
#pragma omp parallel for private(i) firstprivate(length, emin)
#endif
  for (i = 0 ; i < length ; ++i) {
    /* predictor: half-step energy from old p, q and the volume change */
    e_new[i] = e_old[i] - 0.5 * delvc[i] * (p_old[i] + q_old[i]) + 0.5 * work[i];
#if ALLOW_ASPENIFSTMT == 1
#pragma aspen declare param(aspen_param_e_new_prob:1)
#pragma aspen control probability(aspen_param_e_new_prob)
#else
#pragma aspen control probability(1)
#endif
    if (e_new[i] < emin ) {
      e_new[i] = emin ;
    }
  }

  CalcPressureForElems(pHalfStep, bvc, pbvc, e_new, compHalfStep, vnewc,
      pmin, p_cut, eosvmax, length);

#ifdef _OPENACC
#pragma acc parallel loop independent present(compHalfStep, pHalfStep, delvc, p_old,\
 q_old, ql, qq, q_new, pbvc, bvc, e_new)
#else
#pragma omp parallel for private(i) firstprivate(length, rho0)
#endif
  for (i = 0 ; i < length ; ++i) {
    Real_t vhalf = 1. / (1.
+ compHalfStep[i]) ;

#if ALLOW_ASPENIFSTMT == 1
#pragma aspen declare param(aspen_param_delvc:0.1)
#pragma aspen control probability(aspen_param_delvc)
#else
#pragma aspen control probability(1)
#endif
    if ( delvc[i] > 0. ) {
      /* expansion: no artificial viscosity */
      q_new[i] /* = qq[i] = ql[i] */ = 0. ;
    }
    else {
      /* sound-speed-squared term; floored to keep it positive */
      Real_t ssc = ( pbvc[i] * e_new[i]
                   + vhalf * vhalf * bvc[i] * pHalfStep[i] ) / rho0 ;

#pragma aspen control ignore
      if ( ssc <= 0. ) {
        ssc =.333333e-36 ;
      } else {
        ssc = SQRT8(ssc) ;
      }

      q_new[i] = (ssc*ql[i] + qq[i]) ;
    }

    e_new[i] = e_new[i] + 0.5 * delvc[i]
      * ( 3.0*(p_old[i] + q_old[i]) - 4.0*(pHalfStep[i] + q_new[i])) ;
  }

#ifdef _OPENACC
#pragma acc parallel loop present(e_new, work)
#else
#pragma omp parallel for private(i) firstprivate(length, emin, e_cut)
#endif
  for (i = 0 ; i < length ; ++i) {
    e_new[i] += 0.5 * work[i];

#if ALLOW_ASPENIFSTMT == 1
#pragma aspen declare param(aspen_param_enew:0.1)
#pragma aspen control probability(aspen_param_enew)
#else
#pragma aspen control probability(1)
#endif
    if (FABS8(e_new[i]) < e_cut) {
      e_new[i] = 0. ;
    }
#if ALLOW_ASPENIFSTMT == 1
#pragma aspen declare param(aspen_param_enew2:0.1)
#pragma aspen control probability(aspen_param_enew2)
#else
#pragma aspen control probability(1)
#endif
    if ( e_new[i] < emin ) {
      e_new[i] = emin ;
    }
  }

  CalcPressureForElems(p_new, bvc, pbvc, e_new, compression, vnewc,
      pmin, p_cut, eosvmax, length);

#ifdef _OPENACC
#pragma acc parallel loop present(pHalfStep, delvc, pbvc, e_new, bvc, ql,\
 qq, p_old, q_old, p_new, q_new, vnewc)
#else
#pragma omp parallel for private(i) firstprivate(length, rho0, emin, e_cut)
#endif
  for (i = 0 ; i < length ; ++i){
    const Real_t sixth = 1.0 / 6.0 ;
    Real_t q_tilde ;

#if ALLOW_ASPENIFSTMT == 1
#pragma aspen declare param(aspen_param_delvc:0.1)
#pragma aspen control probability(aspen_param_delvc)
#else
#pragma aspen control probability(1)
#endif
    if (delvc[i] > 0.) {
      q_tilde = 0.
;
    }
    else {
      Real_t ssc = ( pbvc[i] * e_new[i]
                   + vnewc[i] * vnewc[i] * bvc[i] * p_new[i] ) / rho0 ;

#pragma aspen control ignore
      if ( ssc <= 0. ) {
        ssc = .333333e-36 ;
      } else {
        ssc = SQRT8(ssc) ;
      }

      q_tilde = (ssc*ql[i] + qq[i]) ;
    }

    /* corrector: blend old, half-step and new pressures/viscosities */
    e_new[i] = e_new[i] - ( 7.0*(p_old[i] + q_old[i])
                          - 8.0*(pHalfStep[i] + q_new[i])
                          + (p_new[i] + q_tilde)) * delvc[i]*sixth ;

#if ALLOW_ASPENIFSTMT == 1
#pragma aspen declare param(aspen_param_enew3:0.1)
#pragma aspen control probability(aspen_param_enew3)
#else
#pragma aspen control probability(1)
#endif
    if (FABS8(e_new[i]) < e_cut) {
      e_new[i] = 0. ;
    }
#if ALLOW_ASPENIFSTMT == 1
#pragma aspen declare param(aspen_param_enew4:0.1)
#pragma aspen control probability(aspen_param_enew4)
#else
#pragma aspen control probability(1)
#endif
    if ( e_new[i] < emin ) {
      e_new[i] = emin ;
    }
  }

  CalcPressureForElems(p_new, bvc, pbvc, e_new, compression, vnewc,
      pmin, p_cut, eosvmax, length);

#ifdef _OPENACC
#pragma acc parallel loop present(delvc, pbvc, e_new, vnewc, bvc, ql,\
 qq, p_new, q_new)
#else
#pragma omp parallel for private(i) firstprivate(length, rho0, q_cut)
#endif
  for (i = 0 ; i < length ; ++i){

#if ALLOW_ASPENIFSTMT == 1
#pragma aspen declare param(aspen_param_delvc2:0.1)
#pragma aspen control probability(aspen_param_delvc2)
#else
#pragma aspen control probability(1)
#endif
    /* final q update for compressing elements only */
    if ( delvc[i] <= 0. ) {
      Real_t ssc = ( pbvc[i] * e_new[i]
                   + vnewc[i] * vnewc[i] * bvc[i] * p_new[i] ) / rho0 ;

#pragma aspen control ignore
      if ( ssc <= 0. ) {
        ssc = .333333e-36 ;
      } else {
        ssc = SQRT8(ssc) ;
      }

      q_new[i] = (ssc*ql[i] + qq[i]) ;

#if ALLOW_ASPENIFSTMT == 1
#pragma aspen declare param(aspen_param_qnew:0.1)
#pragma aspen control probability(aspen_param_qnew)
#else
#pragma aspen control probability(1)
#endif
      if (FABS8(q_new[i]) < q_cut) q_new[i] = 0.
;
    }
  }

#ifdef _OPENACC
  } //end acc data
#endif

  Release(&pHalfStep) ;

  return ;
}

/* Per-material-element sound speed: ss = sqrt((pbvc*e + v^2*bvc*p)/rho0),
   floored at a tiny positive value; scattered to p_ss via the material list. */
static inline void CalcSoundSpeedForElems(Real_t vnewc[T_LENGTH], Real_t rho0,
    Real_t *enewc, Real_t *pnewc, Real_t *pbvc, Real_t *bvc,
    Real_t ss4o3, Index_t nz,
    Index_t p_matElemlist[T_NUMELEM], Real_t p_ss[T_NUMELEM])
{
  Index_t i;
#ifdef _OPENACC
#pragma acc parallel loop present(vnewc, \
 p_matElemlist, \
 pbvc, \
 enewc, \
 bvc, \
 pnewc, \
 p_ss) \
 firstprivate(rho0)
#else
#pragma omp parallel for private(i) firstprivate(nz, rho0)
#endif
  for (i = 0; i < nz ; ++i) {
    Index_t iz = p_matElemlist[i];
    Real_t ssTmp = (pbvc[i] * enewc[i]
                  + vnewc[i] * vnewc[i] * bvc[i] * pnewc[i]) / rho0;
    /* floor keeps the sound speed strictly positive before the sqrt */
    if (ssTmp <= 1.111111e-36) {
      ssTmp = 1.111111e-36;
    }
    p_ss[iz] = SQRT8(ssTmp);
  }
}

/* EOS driver for the material element set: gathers state into compacted
   work arrays, computes compressions, runs the energy/pressure update, then
   scatters p/e/q back and refreshes the sound speed. */
static inline void EvalEOSForElems(Real_t vnewc[T_LENGTH], Index_t length,
    Index_t p_matElemlist[T_NUMELEM], Real_t p_e[T_NUMELEM],
    Real_t p_delv[T_NUMELEM], Real_t p_p[T_NUMELEM], Real_t p_q[T_NUMELEM],
    Real_t p_qq[T_NUMELEM], Real_t p_ql[T_NUMELEM], Real_t p_ss[T_NUMELEM])
{
  Real_t e_cut = m_e_cut;
  Real_t p_cut = m_p_cut;
  Real_t ss4o3 = m_ss4o3;
  Real_t q_cut = m_q_cut;

  Real_t eosvmax = m_eosvmax ;
  Real_t eosvmin = m_eosvmin ;
  Real_t pmin = m_pmin ;
  Real_t emin = m_emin ;
  Real_t rho0 = m_refdens ;

#if LULESH_PRINT_SIZE
  printf("T_LENGTH\t%d\n", length);
#endif
#if LULESH_CHECK_SIZE
  if (length != T_LENGTH) {
    printf("T_LENGTH should be %d\n", length);
    exit(1);
  }
#endif
  /* work arrays are statically sized globals in this variant:
  Real_t *e_old = Allocate(length) ;
  Real_t *delvc = Allocate(length) ;
  Real_t *p_old = Allocate(length) ;
  Real_t *q_old = Allocate(length) ;
  Real_t *compression = Allocate(length) ;
  Real_t *compHalfStep = Allocate(length) ;
  Real_t *qq = Allocate(length) ;
  Real_t *ql = Allocate(length) ;
  Real_t *work = Allocate(length) ;
  Real_t *p_new = Allocate(length) ;
  Real_t *e_new = Allocate(length) ;
  Real_t *q_new = Allocate(length) ;
  Real_t *bvc = Allocate(length) ;
  Real_t *pbvc = Allocate(length) ;
  */
  Index_t i;

  /* compress data, minimal set */
#ifdef _OPENACC
#pragma acc data \
present(e_old, delvc, p_old, compression,\
 compHalfStep, qq, ql, work, p_new, e_new, q_new, \
 bvc, pbvc) present(p_matElemlist)
# else
#pragma omp parallel
#endif
  {
    /* Gather EOS state for the material set into the compacted arrays. */
#ifdef _OPENACC
#pragma acc parallel loop independent present(e_old, delvc, p_old, q_old, \
 p_e, p_delv, p_p, p_q, p_matElemlist)
#else
#pragma omp for private(i) firstprivate(length)
#endif
    for (i=0; i<length; ++i) {
      Index_t zidx = p_matElemlist[i] ;
      e_old[i] = p_e[zidx] ;
      delvc[i] = p_delv[zidx] ;
      p_old[i] = p_p[zidx] ;
      q_old[i] = p_q[zidx] ;
    }

    /* Full-step and half-step compressions: 1/v - 1. */
#ifdef _OPENACC
#pragma acc parallel loop independent present(compression, vnewc, delvc, compHalfStep)
#else
#pragma omp for private(i) firstprivate(length)
#endif
    for (i = 0; i < length ; ++i) {
      Real_t vchalf ;
      compression[i] = 1. / vnewc[i] - 1.;
      vchalf = vnewc[i] - delvc[i] * .5;
      compHalfStep[i] = 1. / vchalf - 1.;
    }

    /* Check for v > eosvmax or v < eosvmin */
#ifdef _OPENACC
#pragma acc parallel loop independent present(vnewc, compHalfStep, compression, \
 p_matElemlist, qq, ql, p_qq, p_ql, p_old, work)
#else
#pragma omp for private(i) firstprivate(length, eosvmax, eosvmin)
#endif
    for (i = 0 ; i < length ; ++i) {
      Index_t zidx = p_matElemlist[i] ;
      qq[i] = p_qq[zidx] ;
      ql[i] = p_ql[zidx] ;
      work[i] = 0. ;

#if ALLOW_ASPENIFSTMT == 1
#pragma aspen declare param(aspen_param_vnewc:0)
#pragma aspen control probability(aspen_param_vnewc)
#else
#pragma aspen control probability(1)
#endif
      if( (eosvmin != 0.0) && (vnewc[i] <= eosvmin) ) { /* impossible due to v-clamp? */
        compHalfStep[i] = compression[i] ;
      }
#if ALLOW_ASPENIFSTMT == 1
#pragma aspen declare param(aspen_param_vnewc2:0)
#pragma aspen control probability(aspen_param_vnewc2)
#else
#pragma aspen control probability(1)
#endif
      if( (eosvmax != 0.0) && (vnewc[i] >= eosvmax) ) {
        p_old[i] = 0. ;
        compression[i] = 0. ;
        compHalfStep[i] = 0.
;
      }
    }
  }

  CalcEnergyForElems(p_new, e_new, q_new, bvc, pbvc,
      p_old, e_old, q_old, compression, compHalfStep,
      vnewc, work, delvc, pmin,
      p_cut, e_cut, q_cut, emin,
      qq, ql, rho0, eosvmax, length);

  /* Scatter updated pressure, energy, and viscosity back to global arrays. */
#ifdef _OPENACC
#pragma acc parallel loop independent present(p_new, e_new, q_new, p_p, p_e, p_q, p_matElemlist)
#else
#pragma omp parallel for firstprivate(length)
#endif
  for (i=0; i<length; ++i) {
    Index_t zidx = p_matElemlist[i] ;
    p_p[zidx] = p_new[i] ;
    p_e[zidx] = e_new[i] ;
    p_q[zidx] = q_new[i] ;
  }

  CalcSoundSpeedForElems(vnewc, rho0, e_new, p_new,
      pbvc, bvc, ss4o3, length,
      p_matElemlist, p_ss) ;

  /*
  Release(&pbvc) ;
  Release(&bvc) ;
  Release(&q_new) ;
  Release(&e_new) ;
  Release(&p_new) ;
  Release(&work) ;
  Release(&ql) ;
  Release(&qq) ;
  Release(&compHalfStep) ;
  Release(&compression) ;
  Release(&q_old) ;
  Release(&p_old) ;
  Release(&delvc) ;
  Release(&e_old) ;
  */
}

/* Material model driver: clamps relative volumes into [eosvmin, eosvmax],
   validates volumes are positive (VolumeError otherwise), then runs the EOS
   evaluation on the material element set. */
static inline void ApplyMaterialPropertiesForElems(Index_t p_matElemlist[T_NUMELEM],
    Real_t p_vnew[T_NUMELEM], Real_t p_v[T_NUMELEM], Real_t p_e[T_NUMELEM],
    Real_t p_delv[T_NUMELEM], Real_t p_p[T_NUMELEM], Real_t p_q[T_NUMELEM],
    Real_t p_qq[T_NUMELEM], Real_t p_ql[T_NUMELEM], Real_t p_ss[T_NUMELEM])
{
  Index_t i;
  Index_t length = m_numElem;

#pragma aspen control probability(1)
  if (length != 0) {
    /* Expose all of the variables needed for material evaluation */
    Real_t eosvmin = m_eosvmin;
    Real_t eosvmax = m_eosvmax;
#if LULESH_PRINT_SIZE
    printf("T_LENGTH\t%d\n", length);
#endif
#if LULESH_CHECK_SIZE
    if (length != T_LENGTH) {
      printf("T_LENGTH should be %d\n", length);
      exit(1);
    }
#endif
    /* Real_t *vnewc = Allocate(length) ; */
    Real_t vc = 1.0;

#ifdef _OPENACC
#pragma acc data present(vnewc[0:m_numElem]) present(p_v, p_matElemlist)
#else
#pragma omp parallel firstprivate(length, eosvmin, eosvmax)
#endif
    {
      /* Gather new relative volumes and clamp into [eosvmin, eosvmax]. */
#ifdef _OPENACC
#pragma acc parallel loop independent
#else
#pragma omp for private(i)
#endif
      for (i=0 ; i<length ; ++i) {
        Index_t zn = p_matElemlist[i] ;
        vnewc[i] = p_vnew[zn] ;

#if ALLOW_ASPENIFSTMT == 1
#pragma aspen declare \
param(aspen_param_vnewc3:0.1)
#pragma aspen control probability(aspen_param_vnewc3)
#else
#pragma aspen control probability(1)
#endif
        if( (eosvmin != 0.0) && (vnewc[i] < eosvmin) ) {
          vnewc[i] = eosvmin ;
        }
#if ALLOW_ASPENIFSTMT == 1
#pragma aspen declare param(aspen_param_vnewc4:0.1)
#pragma aspen control probability(aspen_param_vnewc4)
#else
#pragma aspen control probability(1)
#endif
        if( (eosvmax != 0.0) && (vnewc[i] > eosvmax) ) {
          vnewc[i] = eosvmax ;
        }
      }

      /* Validate the previous-step volumes (clamped the same way). */
#ifdef _OPENACC
#pragma acc parallel loop reduction(min: vc) present(p_v, p_matElemlist)
#else
//#pragma omp for private(i) reduction(min:vc) //min is not recognized by GCC.
#pragma omp for private(i)
#endif
      for (i=0; i<length; ++i) {
        Index_t zn = p_matElemlist[i] ;
        vc = p_v[zn] ;
#pragma aspen control execute flops(2:traits(dp))
        if (eosvmin != 0.) {
          if (vc < eosvmin)
            vc = eosvmin ;
        }
#pragma aspen control execute flops(2:traits(dp))
        if (eosvmax != 0.) {
          if (vc > eosvmax)
            vc = eosvmax ;
        }

#ifndef _OPENACC
        /* host path checks inside the loop; the ACC path checks after */
        if (vc <= 0.) {
          fprintf(stderr, "VolumeError in ApplyMaterialPropertiesForElems(); exit\n");
          exit(VolumeError) ;
        }
#endif
      }
    }

#ifdef _OPENACC
#pragma aspen control ignore
    if (vc <= 0.)
    {
      fprintf(stderr, "VolumeError in ApplyMaterialPropertiesForElems(); exit\n");
      exit(VolumeError) ;
    }
#endif

    EvalEOSForElems(vnewc,length,p_matElemlist,p_e,p_delv,p_p,p_q,p_qq,p_ql,p_ss);

    /* Release(&vnewc) ; */
  }
}

/* Commits the new relative volumes: copies vnew into v, snapping values
   within v_cut of 1.0 exactly to 1.0 to suppress roundoff drift. */
static inline void UpdateVolumesForElems(Real_t p_vnew[T_NUMELEM],
    Real_t p_v[T_NUMELEM])
{
  Index_t i;
  Index_t numElem = m_numElem;
#pragma aspen control probability(1)
  if (numElem != 0) {
    Real_t v_cut = m_v_cut;

#ifdef _OPENACC
#pragma acc parallel loop present(p_vnew, p_v)
#else
#pragma omp parallel for private(i) firstprivate(numElem,v_cut)
#endif
    for(i=0 ; i<numElem ; ++i) {
      Real_t tmpV ;
      tmpV = p_vnew[i] ;

#if ALLOW_ASPENIFSTMT == 1
#pragma aspen declare param(aspen_param_tmpV:0.1)
#pragma aspen control probability(aspen_param_tmpV)
#else
#pragma aspen control probability(1)
#endif
      if ( FABS8(tmpV - 1.0) < v_cut )
        tmpV = 1.0 ;
      p_v[i] = tmpV ;
    }
  }

  return ;
}

/* Lagrange element phase: kinematics/strain rates, artificial viscosity (Q),
   material/EOS update, then the volume commit. */
static inline void LagrangeElements()
{
  const Real_t deltatime = m_deltatime;

  CalcLagrangeElements(deltatime,m_vnew,m_vdov,m_dxx,m_dyy,m_dzz) ;

  /* Calculate Q.  (Monotonic q option requires communication) */
  CalcQForElems() ;

  ApplyMaterialPropertiesForElems(m_matElemlist,m_vnew,m_v,m_e,m_delv,m_p,m_q,
      m_qq,m_ql,m_ss);

  UpdateVolumesForElems(m_vnew,m_v) ;
}

/* Courant (sound-speed) timestep constraint: minimizes arealg/dtf over all
   active elements (vdov != 0) and records the result in m_dtcourant. */
static inline void CalcCourantConstraintForElems(Index_t p_matElemlist[T_NUMELEM],Real_t p_ss[T_NUMELEM],
    Real_t p_vdov[T_NUMELEM], Real_t p_arealg[T_NUMELEM])
{
  Index_t i;
  Real_t dtcourant = 1.0e+20 ;
  Index_t courant_elem = -1 ;
  Real_t qqc = m_qqc;
  Index_t length = m_numElem ;

  Real_t qqc2 = 64.0 * qqc * qqc ;

  //[FIXME] OpenMP pragma is temporarily disabled due to a bug.
//#pragma omp parallel for private(i) firstprivate(length,qqc2) shared(dtcourant,courant_elem, p_matElemlist, p_ss, p_vdov, p_arealg) for (i = 0 ; i < length ; ++i) { Index_t indx = p_matElemlist[i] ; Real_t dtf = p_ss[indx] * p_ss[indx] ; #if ALLOW_ASPENIFSTMT == 1 #pragma aspen declare param(aspen_param_vdov3:0.1) #pragma aspen control probability(aspen_param_vdov3) #else #pragma aspen control probability(1) #endif if ( p_vdov[indx] < 0. ) { dtf = dtf + qqc2 * p_arealg[indx] * p_arealg[indx] * p_vdov[indx] * p_vdov[indx] ; } dtf = SQRT8(dtf) ; dtf = p_arealg[indx] / dtf ; /* determine minimum timestep with its corresponding elem */ #pragma aspen control execute flops(2:traits(dp)) loads(1*aspen_param_double:from(p_vdov):traits(stride(1))) if (p_vdov[indx] != 0.) { if ( dtf < dtcourant ) { //#pragma omp critical { dtcourant = dtf ; courant_elem = indx ; } } } } /* Don't try to register a time constraint if none of the elements * were active */ if (courant_elem != -1) { m_dtcourant = dtcourant ; } return ; } static inline void CalcHydroConstraintForElems(Index_t p_matElemlist[T_NUMELEM], Real_t p_vdov[T_NUMELEM]) { Index_t i; Real_t dthydro = 1.0e+20 ; Index_t hydro_elem = -1 ; Real_t dvovmax = m_dvovmax; Index_t length = m_numElem; //[FIXME] OpenMP pragma is temporarily disabled due to a bug. //#pragma omp parallel for private(i) firstprivate(length) shared(dthydro,hydro_elem, p_matElemlist, p_vdov) for (i = 0 ; i < length ; ++i) { Index_t indx = p_matElemlist[i] ; #if ALLOW_ASPENIFSTMT == 1 #pragma aspen declare param(aspen_param_vdov:0.1) #pragma aspen control probability(aspen_param_vdov) #else #pragma aspen control probability(1) #endif if (p_vdov[indx] != 0.) 
{ Real_t dtdvov = dvovmax / (FABS8(p_vdov[indx])+1.e-20) ; #pragma aspen control ignore if ( dthydro > dtdvov ) { //#pragma omp critical { dthydro = dtdvov ; hydro_elem = indx ; } } } } if (hydro_elem != -1) { m_dthydro = dthydro ; } return ; } static inline void CalcTimeConstraintsForElems() { #ifdef _OPENACC #pragma acc update host(m_vdov, m_ss, m_arealg) #endif /* evaluate time constraint */ CalcCourantConstraintForElems(m_matElemlist,m_ss,m_vdov,m_arealg); /* check hydro constraint */ CalcHydroConstraintForElems(m_matElemlist,m_vdov); } static inline void LagrangeLeapFrog() { //#pragma acc wait /* calculate nodal forces, accelerations, velocities, positions, with * applied boundary conditions and slide surface considerations */ LagrangeNodal(); /* calculate element quantities (i.e. velocity gradient & q), and update * material states */ LagrangeElements(); CalcTimeConstraintsForElems(); // LagrangeRelease() ; Creation/destruction of temps may be important to capture } int main(int argc, char *argv[]) { Index_t plane, row, col, i, lnode, j; // Real_t ds = Real_t(1.125)/Real_t(edgeElems) ; /* may accumulate roundoff */ Real_t tx, ty, tz ; Index_t nidx, zidx ; Index_t domElems ; #if LULESH_STORE_OUTPUT FILE *fp; #endif #if LULESH_MEASURE_TIME double strt_time1, end_time1; double strt_time2, end_time2; strt_time1 = my_timer(); #endif edgeElems = T_EDGEELEM ; edgeNodes = edgeElems+1 ; /* get run options to measure various metrics */ /* ... 
*/ /****************************/ /* Initialize Sedov Mesh */ /****************************/ /* construct a uniform box for this processor */ m_sizeX = edgeElems ; m_sizeY = edgeElems ; m_sizeZ = edgeElems ; m_numElem = edgeElems*edgeElems*edgeElems ; m_numElem8 = m_numElem * 8 ; m_numNode = edgeNodes*edgeNodes*edgeNodes ; domElems = m_numElem; /* allocate field memory */ #if LULESH_PRINT_SIZE printf("T_NUMELEM\t%d\n", m_numElem); printf("T_NUMNODE\t%d\n", m_numNode); printf("T_NUMNODESETS\t%d\n", edgeNodes*edgeNodes); #endif #if LULESH_CHECK_SIZE if (m_numElem != T_NUMELEM) { printf("T_NUMELEM should be %d\n", m_numElem); exit(1); } if (m_numNode != T_NUMNODE) { printf("T_NUMNODE should be %d\n", m_numNode); exit(1); } if ((edgeNodes*edgeNodes) != T_NUMNODESETS) { printf("T_NUMNODESETS should be %d\n", (edgeNodes*edgeNodes)); exit(1); } #endif AllocateElemPersistent(m_numElem) ; AllocateElemTemporary (m_numElem) ; AllocateNodalPersistent(m_numNode) ; AllocateNodesets(edgeNodes*edgeNodes) ; AllocateTemporary(m_numElem8); AllocateTemporary2(m_numElem); /* initialize nodal coordinates */ nidx = 0 ; tz = 0. ; for (plane=0; plane<edgeNodes; ++plane) { ty = 0. ; for (row=0; row<edgeNodes; ++row) { tx = 0. ; for (col=0; col<edgeNodes; ++col) { m_x[nidx] = tx ; m_y[nidx] = ty ; m_z[nidx] = tz ; ++nidx ; // tx += ds ; /* may accumulate roundoff... */ tx = 1.125*((Real_t)(col+1))/((Real_t)edgeElems) ; } // ty += ds ; /* may accumulate roundoff... */ ty = 1.125*((Real_t)(row+1))/((Real_t)edgeElems) ; } // tz += ds ; /* may accumulate roundoff... 
*/ tz = 1.125*((Real_t)(plane+1))/((Real_t)edgeElems) ; } /* embed hexehedral elements in nodal point lattice */ nidx = 0 ; zidx = 0 ; for (plane=0; plane<edgeElems; ++plane) { for (row=0; row<edgeElems; ++row) { for (col=0; col<edgeElems; ++col) { Index_t *localNode = &m_nodelist[8*zidx]; localNode[0] = nidx ; localNode[1] = nidx + 1 ; localNode[2] = nidx + edgeNodes + 1 ; localNode[3] = nidx + edgeNodes ; localNode[4] = nidx + edgeNodes*edgeNodes ; localNode[5] = nidx + edgeNodes*edgeNodes + 1 ; localNode[6] = nidx + edgeNodes*edgeNodes + edgeNodes + 1 ; localNode[7] = nidx + edgeNodes*edgeNodes + edgeNodes ; ++zidx ; ++nidx ; } ++nidx ; } nidx += edgeNodes ; } AllocateNodeElemIndexes() ; /* Create a material IndexSet (entire domain same material for now) */ for (i=0; i<domElems; ++i) { m_matElemlist[i] = i ; } /* initialize material parameters */ m_dtfixed = -1.0e-7 ; m_deltatime = 1.0e-7 ; m_deltatimemultlb = 1.1 ; m_deltatimemultub = 1.2 ; m_stoptime = 1.0e-2 ; m_dtcourant = 1.0e+20 ; m_dthydro = 1.0e+20 ; m_dtmax = 1.0e-2 ; m_time = 0. ; m_cycle = 0 ; m_e_cut = 1.0e-7 ; m_p_cut = 1.0e-7 ; m_q_cut = 1.0e-7 ; m_u_cut = 1.0e-7 ; m_v_cut = 1.0e-10 ; m_hgcoef = 3.0 ; m_ss4o3 = 4.0/3.0 ; m_qstop = 1.0e+12 ; m_monoq_max_slope = 1.0 ; m_monoq_limiter_mult = 2.0 ; m_qlc_monoq = 0.5 ; m_qqc_monoq = 2.0/3.0 ; m_qqc = 2.0 ; m_pmin = 0. 
; m_emin = -1.0e+15 ; m_dvovmax = 0.1 ; m_eosvmax = 1.0e+9 ; m_eosvmin = 1.0e-9 ; m_refdens = (1.0) ; /* initialize field data */ for (i=0; i<domElems; ++i) { Real_t x_local[8], y_local[8], z_local[8] ; #pragma aspen declare data(elemToNode:traits(Array(8,aspen_param_int))) Index_t *elemToNode = &m_nodelist[8*i] ; Real_t volume; for( lnode=0 ; lnode<8 ; ++lnode ) { Index_t gnode = elemToNode[lnode]; x_local[lnode] = m_x[gnode]; y_local[lnode] = m_y[gnode]; z_local[lnode] = m_z[gnode]; } // volume calculations volume = CalcElemVolume(x_local, y_local, z_local ); m_volo[i] = volume ; m_elemMass[i] = volume ; for (j=0; j<8; ++j) { Index_t idx = elemToNode[j] ; m_nodalMass[idx] += volume / (8.0) ; } } /* deposit energy */ m_e[0] = (3.948746e+7) ; /* set up symmetry nodesets */ nidx = 0 ; for (i=0; i<edgeNodes; ++i) { Index_t planeInc = i*edgeNodes*edgeNodes ; Index_t rowInc = i*edgeNodes ; for (j=0; j<edgeNodes; ++j) { m_symmX[nidx] = planeInc + j*edgeNodes ; m_symmY[nidx] = planeInc + j ; m_symmZ[nidx] = rowInc + j ; ++nidx ; } } /* set up elemement connectivity information */ m_lxim[0] = 0 ; for (i=1; i<domElems; ++i) { m_lxim[i] = i-1 ; m_lxip[i-1] = i ; } m_lxip[domElems-1] = domElems-1 ; for (i=0; i<edgeElems; ++i) { m_letam[i] = i ; m_letap[domElems-edgeElems+i] = domElems-edgeElems+i ; } for (i=edgeElems; i<domElems; ++i) { m_letam[i] = i-edgeElems ; m_letap[i-edgeElems] = i ; } for (i=0; i<edgeElems*edgeElems; ++i) { m_lzetam[i] = i ; m_lzetap[domElems-edgeElems*edgeElems+i] = domElems-edgeElems*edgeElems+i ; } for (i=edgeElems*edgeElems; i<domElems; ++i) { m_lzetam[i] = i - edgeElems*edgeElems ; m_lzetap[i-edgeElems*edgeElems] = i ; } /* set up boundary condition information */ for (i=0; i<domElems; ++i) { m_elemBC[i] = 0 ; /* clear BCs by default */ } /* faces on "external" boundaries will be */ /* symmetry plane or free surface BCs */ for (i=0; i<edgeElems; ++i) { Index_t planeInc = i*edgeElems*edgeElems ; Index_t rowInc = i*edgeElems ; for (j=0; 
j<edgeElems; ++j) { m_elemBC[planeInc+j*edgeElems] |= XI_M_SYMM ; m_elemBC[planeInc+j*edgeElems+edgeElems-1] |= XI_P_FREE ; m_elemBC[planeInc+j] |= ETA_M_SYMM ; m_elemBC[planeInc+j+edgeElems*edgeElems-edgeElems] |= ETA_P_FREE ; m_elemBC[rowInc+j] |= ZETA_M_SYMM ; m_elemBC[rowInc+j+domElems-edgeElems*edgeElems] |= ZETA_P_FREE ; } } #if LULESH_MEASURE_TIME end_time1 = my_timer(); strt_time2 = my_timer(); #endif i = 0; /* timestep to solution */ //[DEBUG] while-loop count = 593 for edgeElem == 10 // = 1041 for edgeElem == 20 // = 1248 for edgeElem == 30 // = 1420 for edgeElem == 40 // = 1566 for edgeElem == 50 // Use linear regression to estimate loop count using edgeElems. #pragma aspen declare param(aspen_param_whilecnt:23.25*edgeElems+476.1) #ifdef _OPENACCM acc_init(acc_device_default); #endif #pragma aspen modelregion #ifdef _OPENACC #pragma acc data create(m_fx[0:m_numNode], \ m_fy[0:m_numNode], \ m_fz[0:m_numNode], \ fx_elem[0:m_numElem8], \ fy_elem[0:m_numElem8], \ fz_elem[0:m_numElem8], \ dvdx[0:m_numElem8], \ dvdy[0:m_numElem8], \ dvdz[0:m_numElem8], \ x8n[0:m_numElem8], \ y8n[0:m_numElem8], \ z8n[0:m_numElem8], \ sigxx[0:m_numElem], \ sigyy[0:m_numElem], \ sigzz[0:m_numElem], \ determ[0:m_numElem], \ m_dxx[0:m_numElem], \ m_dyy[0:m_numElem], \ m_dzz[0:m_numElem], \ m_vnew[0:m_numElem], \ m_delx_xi[0:m_numElem], \ m_delx_eta[0:m_numElem], \ m_delx_zeta[0:m_numElem], \ m_delv_xi[0:m_numElem], \ m_delv_eta[0:m_numElem], \ m_delv_zeta[0:m_numElem], \ e_old[0:m_numElem], \ delvc[0:m_numElem], \ p_old[0:m_numElem], \ q_old[0:m_numElem], \ compression[0:m_numElem], \ compHalfStep[0:m_numElem], \ qq[0:m_numElem], \ ql[0:m_numElem], \ work[0:m_numElem], \ p_new[0:m_numElem], \ e_new[0:m_numElem], \ q_new[0:m_numElem], \ bvc[0:m_numElem], \ pbvc[0:m_numElem], \ vnewc[0:m_numElem]) \ copy(m_x[0:m_numNode], \ m_y[0:m_numNode], \ m_z[0:m_numNode], \ m_xd[0:m_numNode], \ m_yd[0:m_numNode], \ m_zd[0:m_numNode], \ m_p[0:m_numElem], \ m_e[0:m_numElem]) \ create( 
m_arealg[0:m_numElem], \ m_delv[0:m_numElem], \ m_q[0:m_numElem], \ m_ql[0:m_numElem], \ m_qq[0:m_numElem], \ m_ss[0:m_numElem], \ m_vdov[0:m_numElem] \ ) \ copyin(m_symmX[0:edgeNodes*edgeNodes], \ m_symmY[0:edgeNodes*edgeNodes], \ m_symmZ[0:edgeNodes*edgeNodes], \ m_xdd[0:m_numNode], \ m_ydd[0:m_numNode], \ m_zdd[0:m_numNode], \ m_v[0:m_numElem], \ m_volo[0:m_numElem], \ m_nodalMass[0:m_numNode], \ m_elemMass[0:m_numElem], \ m_lxim[0:m_numElem], \ m_lxip[0:m_numElem], \ m_letam[0:m_numElem], \ m_letap[0:m_numElem], \ m_lzetam[0:m_numElem], \ m_lzetap[0:m_numElem], \ m_nodelist[0:m_numElem8], \ m_nodeElemCount[0:m_numNode], \ m_nodeElemStart[0:m_numNode], \ m_nodeElemCornerList[0:m_nCorner], \ m_matElemlist[0:m_numElem], \ m_elemBC[m_numElem]) #endif { #pragma aspen control loop(aspen_param_whilecnt) while(m_time < m_stoptime ) { TimeIncrement() ; LagrangeLeapFrog() ; i++; /* problem->commNodes->Transfer(CommNodes::syncposvel) ; */ #if LULESH_SHOW_PROGRESS printf("time = %e, dt=%e\n", (double)m_time, (double)m_deltatime ) ; #endif } printf("iterations: %d\n",i); } #ifdef _OPENACCM acc_shutdown(acc_device_default); #endif #if LULESH_MEASURE_TIME end_time2 = my_timer(); printf ("Init time = %lf sec\n", end_time1 - strt_time1); printf ("Main Comp. time = %lf sec\n", end_time2 - strt_time2); printf ("Total elapsed time = %lf sec\n", (end_time1 - strt_time1) + (end_time2 - strt_time2)); #endif #if LULESH_STORE_OUTPUT fp = fopen("lulesh.out", "w"); for (i=0; i<m_numElem; i++) { fprintf(fp, "%.6f\n",m_x[i]); } for (i=0; i<m_numElem; i++) { fprintf(fp, "%.6f\n",m_y[i]); } for (i=0; i<m_numElem; i++) { fprintf(fp, "%.6f\n",m_z[i]); } fclose(fp); #endif return 0 ; }
VariableSizeMatrix.h
// Copyright (c) 2017, Lawrence Livermore National Security, LLC and
// UT-Battelle, LLC.
// Produced at the Lawrence Livermore National Laboratory and the Oak Ridge
// National Laboratory.
// LLNL-CODE-743438
// All rights reserved.
// This file is part of MGmol. For details, see https://github.com/llnl/mgmol.
// Please also read this link https://github.com/llnl/mgmol/LICENSE

/*!
 * Variable size csr/csc matrix used for data transfer operations
 */
#ifndef MGMOL_VARIABLESIZEMATRIX_H_
#define MGMOL_VARIABLESIZEMATRIX_H_

#include "LocalMatrices.h"
#include "SparseRow.h"
#include "SparseRowAndTable.h"
#include "SquareSubMatrix.h"
#include "Table.h"
#include "VariableSizeMatrixInterface.h"

#include <iostream>
#include <set>
#include <vector>

// typedef enum INSERTMODE {INSERT, ADD} INSERTMODE;

/* define maximum and minimum local matrix size */
#define MAX_MAT_SIZE 10000
#define MIN_MAT_SIZE 10
/* define default tolerance for pruning matrix entries */
#define MAT_TOL 1.0e-14
/* define maximum number of print rows */
#define MAX_PRINT_ROWS 100
/* define default number of print rows for diagnostics */
#define NUM_PRINT_ROWS 5

class DataDistribution;

/* define matrix row datatype */
typedef SparseRow sparserow;
typedef SparseRowAndTable sparserowtab;

/*!
 * Sparse square matrix stored as a vector of row objects (type T, e.g.
 * SparseRow or SparseRowAndTable), with a hash table mapping global row
 * indices to local row indices. Row/column indices are global; "lrindex"
 * arguments are local row indices.
 * NOTE(review): sort_col_tm_ used below is not declared in this class —
 * presumably a timer inherited from VariableSizeMatrixInterface; confirm.
 */
template <class T>
class VariableSizeMatrix : public VariableSizeMatrixInterface
{
    typedef typename std::vector<T*>::iterator TvecIterator;
    typedef typename std::vector<T*>::const_iterator const_TvecIterator;

    const std::string name_;

    int n_; // the dimension of the matrix
    int nzmax_; // max. nnz in each row
    int totnnz_; // total nnz of matrix
    std::vector<int> lvars_; // Local variables in global indices
    Table* table_; // Hash table for holding global, local index pairs

    std::vector<T*> data_; // one heap-allocated row object per local row

public:
    VariableSizeMatrix(
        const std::string& name, const int alloc_size); // setup data structures
    VariableSizeMatrix(const VariableSizeMatrix& A,
        const bool copy_table = true); // Copy constructor
    template <class T2>
    VariableSizeMatrix(const VariableSizeMatrix<T2>& A,
        const bool copy_table = true); // Copy constructor

    VariableSizeMatrix<T>& operator=(const VariableSizeMatrix<T>& a);

    /* initialize a local row of the local matrix */
    void updateLocalRowSquareMatrix(const int count, const int lrindex,
        const int* const cols, const double* const vals, const INSERTMODE mode);
    /* update current local row by considering only column indices for which
       there are rows in the local matrix. ie. ensure a square matrix is
       preserved */
    void insertNewRow(const int ncols, const int row, const int* cols,
        const double* vals, const bool append);
    /* Augment current matrix by inserting a new row */
    /* initialize matrix data from square local matrix object */
    void insertMatrixElements(
        const LocalMatrices<MATDTYPE, MemorySpace::Host>& ss,
        const std::vector<std::vector<int>>& global_indexes, const int numst,
        const double tol = MAT_TOL);
    void insertMatrixElements(
        const SquareSubMatrix<MATDTYPE>& ss, const double tol);

    void sparsify(const std::vector<bool>& keeprow);
    void sparsify(const std::vector<int>& gids);

    void print(std::ostream& os, const std::vector<int>& locfcns,
        int nrows = NUM_PRINT_ROWS) const;
    void printMatCSR(const char* fname); /* print CSR matrix */
    void printMatBlock2(const int gid0, const int gid1, std::ostream& os);
    // void printMatMM(ofstream& outfile); /* print
    // MM matrix */
    void reset(); /* reset CSR matrix to be reused */
    void clear();
    void setupSparseRows(const std::vector<int>& rows); /* reset/ initialize
                                                           matrix with sparse
                                                           rows */
    void copyData(const VariableSizeMatrix<T>& A,
        const int n); /* Copy data from matrix A. Copies n rows of A */
    void set2Identity(); /* Set matrix to identity */

    ~VariableSizeMatrix() override; // destructor

    void printMat(const char* fname,
        std::vector<int>& lvec); /* print select rows of CSR matrix */
    template <typename T2>
    double AmultSymBdiag(VariableSizeMatrix<T2>* B, const int row);
    double AmultSymB_ij(VariableSizeMatrix<T>* B, const int row,
        const int col); /* compute ij-th entry of A*B */
    double trace(); /* compute the trace of the matrix */
    double trace(const std::vector<int>& rows); /* compute the trace of
                                                   selected rows of the
                                                   matrix */
    double getTraceDiagProductWithMat(const std::vector<double>&
            ddiag); /* return sum_i ( ddiag[i]*Mat[i][i] ) */
    void copyDataToArray(int* locvars, int* colidx, double* colvals);

    /* get table value */
    void* getTableValue(const int key) const { return (*table_).get_value(key); }
    /* update/ insert key into table */
    void updateTableValue(const int key) { (*table_).insert(key); }
    /* update/ insert key, value into table */
    void updateTableValue(const int key, const int value)
    {
        (*table_).insert(key, value);
    }
    /* get local size */
    int n() const { return n_; }
    /* get nzmax (max nnz over all local rows, computed by scanning rows) */
    int nzmax() const
    {
        int nzmax = 0;
        const_TvecIterator it;
        for (it = data_.begin(); it != data_.end(); ++it)
            nzmax = nzmax > (int)(*it)->nnz() ? nzmax : (int)(*it)->nnz();
        return nzmax;
    }
    /* get nzmin (min nnz over all local rows; starts at n_ as upper bound) */
    int nzmin() const
    {
        int nzmin = n_;
        const_TvecIterator it;
        for (it = data_.begin(); it != data_.end(); ++it)
            nzmin = nzmin < (int)(*it)->nnz() ? nzmin : (int)(*it)->nnz();
        return nzmin;
    }
    /* get nzmax of submatrix from row begin to row end
     * (despite the name, this returns the SUM of nnz over rows [begin,end]) */
    int getNzmaxSubmat(const int begin, const int end)
    {
        if (end >= n_) return 0;
        int nzmax = 0;
        for (int i = begin; i <= end; i++)
            nzmax += (int)data_[i]->nnz();
        return nzmax;
    }
    /* get totnnz */
    int nnzmat() const { return totnnz_; }
    /* get number of nonzeros for a local row */
    int nnzrow(const int row) const
    {
        if (row >= n_) return 0;
        return (int)data_[row]->nnz();
    }
    /* get global index of local variable */
    int getLocalVariableGlobalIndex(const int lrindex) const
    {
        return lvars_[lrindex];
    }
    /* get column position on local row. Return -1 if not on local row */
    bool isColumnHere(const int lrindex, const int col) const
    {
        int colpos = data_[lrindex]->getColumnPosition(col);
        if (colpos != -1)
            return true;
        else
            return false;
    }
    /* set pointer to array of global index of local variables */
    int* rowIndexes() { return &lvars_[0]; }
    /* get (global) column index */
    int getColumnIndex(const int lrindex, const int pos) const
    {
        return data_[lrindex]->getColumnIndex(pos);
    }
    void getColumnIndexes(const int lrindex, std::vector<int>& indexes) const
    {
        indexes = data_[lrindex]->getColumnIndexes();
    }
    void getAllColumnIndexes(std::vector<int>& indexes) const;
    /* get value on local row */
    double getRowEntry(const int lrindex, const int pos) const
    {
        assert(lrindex < n_);
        return data_[lrindex]->getEntryFromPosition(pos);
    }
    void getRowEntries(const int lrindex, std::vector<double>& values) const
    {
        assert(lrindex < n_);
        values = data_[lrindex]->getColumnEntries();
    }
    int getMaxAbsOffDiagonalRowEntry(const int gid, double& value) const;

    int getColumnPos(const int lrindex, const int col)
    {
        return data_[lrindex]->getColumnPosition(col);
    }
    /* y[0:size) += alpha * row(lrindex) */
    void row_daxpy(
        const int lrindex, const int size, const double alpha, double* y)
    {
        data_[lrindex]->axpy(size, alpha, y);
    }
    Table* getTable() { return table_; }

    /* initialize a local row of the local matrix */
    /* Assumes nnzrow is initially zero - matrix has been reset */
    void initializeLocalRow(
        const int ncols, const int lrindex, const int* cols, const double* vals)
    {
        if (ncols)
        {
            data_[lrindex]->assign(ncols, cols, vals);
/* update local matrix variables */
#ifdef _OPENMP
#pragma omp atomic
#endif
            totnnz_ += ncols;
        }
        return;
    }
    /* Update current local rows by adding or inserting new columns. */
    void updateLocalRow(const int count, const int lrindex,
        const int* const cols, const double* const vals, const INSERTMODE mode)
    {
        // updateRow_tm_.start();
        totnnz_ += data_[lrindex]->updateRow(count, cols, vals, mode);
        // updateRow_tm_.stop();
        return;
    }
    void updateLocalRowAdd(const int count, const int lrindex,
        const int* const cols, const double* const vals)
    {
        // updateRow_tm_.start();
        const int newnnz = data_[lrindex]->updateRowAdd(count, cols, vals);
/* only this overload guards the totnnz_ accumulation with an atomic */
#ifdef _OPENMP
#pragma omp atomic
#endif
        totnnz_ += newnnz;
        // updateRow_tm_.stop();
        return;
    }
    void updateLocalRowInsert(const int count, const int lrindex,
        const int* const cols, const double* const vals)
    {
        // updateRow_tm_.start();
        totnnz_ += data_[lrindex]->updateRowInsert(count, cols, vals);
        // updateRow_tm_.stop();
        return;
    }
    /* Update current local row by adding or inserting a new column. */
    void updateLocalRow(const int lrindex, const int col, const double val,
        const INSERTMODE mode)
    {
        // updateRow_tm_.start();
        totnnz_ += data_[lrindex]->updateRow(col, val, mode);
        // updateRow_tm_.stop();
        return;
    }
    void updateLocalRowAdd(const int lrindex, const int col, const double val)
    {
        // updateRow_tm_.start();
        totnnz_ += data_[lrindex]->updateRowAdd(col, val);
        // updateRow_tm_.stop();
        return;
    }
    void updateLocalRowInsert(
        const int lrindex, const int col, const double val)
    {
        // updateRow_tm_.start();
        totnnz_ += data_[lrindex]->updateRowInsert(col, val);
        // updateRow_tm_.stop();
        return;
    }
    /* Update current local entry by adding or inserting a new value.
     * Assumes that the local row index and column position of the entry
     * is known.
     */
    void updateLocalEntry(const int lrindex, const int pos, const double val,
        const INSERTMODE mode)
    {
        /* begin */
        /* Add or insert entry */
        data_[lrindex]->updateEntry(pos, val, mode);
        return;
    }
    void updateLocalEntryAdd(const int lrindex, const int pos, const double val)
    {
        /* begin */
        /* Add or insert entry */
        data_[lrindex]->updateEntryAdd(pos, val);
        return;
    }
    void updateLocalEntryInsert(
        const int lrindex, const int pos, const double val)
    {
        /* begin */
        /* Add or insert entry */
        data_[lrindex]->updateEntryInsert(pos, val);
        return;
    }

    /* Insert entry into matrix (global row/col); creates the row if absent.
     * Serialized under OpenMP via a named critical section. */
    void insertMatrixElement(const int row, const int col, const double val,
        const INSERTMODE mode, const bool append)
    {
#ifdef _OPENMP
#pragma omp critical(insertMatrixElement)
#endif
        {
            /* begin */
            /* check if row exists */
            int* rindex = (int*)getTableValue(row);
            if (rindex != nullptr) /* row exists */
            {
                /* insert column */
                updateLocalRow(*rindex, col, val, mode);
            }
            else /* insert new row */
            {
                insertNewRow(1, row, &col, &val, append);
            }
        }
    }
    /* get matrix entry (0.0 when the row is not local) */
    double get_value(const int row, const int col) const
    {
        double value = 0.0;
        int* rindex = (int*)getTableValue(row);
        if (rindex != nullptr) value = data_[*rindex]->getColumnEntry(col);
        return value;
    }
    /* get matrix entries from a local row = lrindex */
    void getLocalRowValues(const int lrindex, const std::vector<int>& cols,
        std::vector<double>& vals) const
    {
        vals.reserve(cols.size());
        for (std::vector<int>::const_iterator it = cols.begin();
             it != cols.end(); ++it)
        {
            vals.push_back(data_[lrindex]->getColumnEntry(*it));
        }
        assert(vals.size() == cols.size());
    }
    /* get matrix entries from a global row; zero-fills when row not local */
    void getRowValues(const int row, const std::vector<int>& cols,
        std::vector<double>& vals) const
    {
        int* rindex = (int*)getTableValue(row);
        if (rindex != nullptr)
        {
            const int lrindex = *rindex;
            getLocalRowValues(lrindex, cols, vals);
            /* T* data=data_[*rindex];
               vals.reserve(cols.size());
               for(std::vector<int>::const_iterator it =cols.begin();
                   it!=cols.end(); ++it)
               {
                   vals.push_back( data->getColumnEntry(*it) );
               }
            */
        }
        else
        {
            vals.resize(cols.size());
            memset(&vals[0], 0, vals.size() * sizeof(double));
        }
        assert(vals.size() == cols.size());
    }
    /* get matrix entries from a sorted row (sorts the row data first;
     * missing columns yield 0.0; zero-fills when row not local) */
    void getSortedRowValues(const int row, const std::vector<int>& cols,
        std::vector<double>& vals) const
    {
        int* rindex = (int*)getTableValue(row);
        if (rindex != nullptr)
        {
            T* data = data_[*rindex];
            vals.reserve(cols.size());
            sort_col_tm_.start();
            data_[*rindex]->sortData();
            sort_col_tm_.stop();
            for (std::vector<int>::const_iterator it = cols.begin();
                 it != cols.end(); ++it)
            {
                int pos = data->getSortedDataColumnPosition(*it);
                if (pos != -1)
                    vals.push_back(data->getEntryFromPosition(pos));
                else
                    vals.push_back(0.0);
            }
        }
        else
        {
            vals.resize(cols.size());
            memset(&vals[0], 0, vals.size() * sizeof(double));
        }
        assert(vals.size() == cols.size());
    }
    /* Scale the row of the CSR matrix (no-op when row not local) */
    void scaleRow(const int row, const double coeff)
    {
        int* rindex = (int*)getTableValue(row);
        if (rindex == nullptr) return;
        data_[*rindex]->scale(coeff);
    }
    /* Scale the CSR matrix */
    void scale(const double coeff)
    {
        const int n = n_;
        for (int lrindex = 0; lrindex < n; lrindex++)
            data_[lrindex]->scale(coeff);
    }

    // matrix multiplication operations (locally centered contributions only)
    // flag== true => compute entries for specific nonzero pattern only
    void AmultSymBLocal(VariableSizeMatrix<T>* B, VariableSizeMatrix<T>& C,
        const std::vector<int>& locfcns,
        VariableSizeMatrix<SparseRowAndTable>& pattern, bool flag = true);
    // matrix multiplication operations
    void AmultSymB(VariableSizeMatrix<T>* B, VariableSizeMatrix<T>& C,
        VariableSizeMatrix<SparseRowAndTable>& pattern, bool flag = true);

    const std::vector<int>& lvars() const { return lvars_; }

    // get reference to local row at index rindex
    T& getRow(int rindex) const { return *data_[rindex]; }

    // sort the column data of every local row
    void sortColumnIndexes()
    {
        sort_col_tm_.start();
        for (const_TvecIterator it = data_.begin(); it != data_.end(); ++it)
            (*it)->sortData();
        sort_col_tm_.stop();
    }

    // get pointer to row data
    double* getRowEntries(const int lrindex)
    {
        assert(lrindex < n_);
        return data_[lrindex]->getPtrToColumnEntries();
    }

    void axpy(const double alpha, const VariableSizeMatrix<T>& B);
    void gemv(const double alpha, const std::vector<double>& x,
        const double beta, std::vector<double>& y);

    // compute dot product of matrix row with an array
    double rowDotVec(const int row, const double* x)
    {
        return data_[row]->dotVec(x);
    }
    double pnorm(const int row, const int p) { return data_[row]->pnorm(p); }

    VariableSizeMatrix<T>& operator+=(const VariableSizeMatrix<T>& a)
    {
        axpy(1.0, a);
        return *this;
    }
    VariableSizeMatrix<T>& operator-=(const VariableSizeMatrix<T>& a)
    {
        axpy(-1.0, a);
        return *this;
    }

    std::string name() { return name_; }
};

#endif
Sema.h
//===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the Sema class, which performs semantic analysis and // builds ASTs. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_SEMA_SEMA_H #define LLVM_CLANG_SEMA_SEMA_H #include "clang/AST/Attr.h" #include "clang/AST/Availability.h" #include "clang/AST/ComparisonCategories.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/DeclarationName.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/ExternalASTSource.h" #include "clang/AST/LocInfoType.h" #include "clang/AST/MangleNumberingContext.h" #include "clang/AST/NSAPI.h" #include "clang/AST/PrettyPrinter.h" #include "clang/AST/StmtCXX.h" #include "clang/AST/TypeLoc.h" #include "clang/AST/TypeOrdering.h" #include "clang/Basic/ExpressionTraits.h" #include "clang/Basic/Module.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/PragmaKinds.h" #include "clang/Basic/Specifiers.h" #include "clang/Basic/TemplateKinds.h" #include "clang/Basic/TypeTraits.h" #include "clang/Sema/AnalysisBasedWarnings.h" #include "clang/Sema/CleanupInfo.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/ExternalSemaSource.h" #include "clang/Sema/IdentifierResolver.h" #include "clang/Sema/ObjCMethodList.h" #include "clang/Sema/Ownership.h" #include "clang/Sema/Scope.h" #include "clang/Sema/TypoCorrection.h" #include "clang/Sema/Weak.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/Optional.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallBitVector.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallVector.h" 
#include "llvm/ADT/TinyPtrVector.h" #include <deque> #include <memory> #include <string> #include <vector> namespace llvm { class APSInt; template <typename ValueT> struct DenseMapInfo; template <typename ValueT, typename ValueInfoT> class DenseSet; class SmallBitVector; struct InlineAsmIdentifierInfo; } namespace clang { class ADLResult; class ASTConsumer; class ASTContext; class ASTMutationListener; class ASTReader; class ASTWriter; class ArrayType; class ParsedAttr; class BindingDecl; class BlockDecl; class CapturedDecl; class CXXBasePath; class CXXBasePaths; class CXXBindTemporaryExpr; typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath; class CXXConstructorDecl; class CXXConversionDecl; class CXXDeleteExpr; class CXXDestructorDecl; class CXXFieldCollector; class CXXMemberCallExpr; class CXXMethodDecl; class CXXScopeSpec; class CXXTemporary; class CXXTryStmt; class CallExpr; class ClassTemplateDecl; class ClassTemplatePartialSpecializationDecl; class ClassTemplateSpecializationDecl; class VarTemplatePartialSpecializationDecl; class CodeCompleteConsumer; class CodeCompletionAllocator; class CodeCompletionTUInfo; class CodeCompletionResult; class CoroutineBodyStmt; class Decl; class DeclAccessPair; class DeclContext; class DeclRefExpr; class DeclaratorDecl; class DeducedTemplateArgument; class DependentDiagnostic; class DesignatedInitExpr; class Designation; class EnableIfAttr; class EnumConstantDecl; class Expr; class ExtVectorType; class FormatAttr; class FriendDecl; class FunctionDecl; class FunctionProtoType; class FunctionTemplateDecl; class ImplicitConversionSequence; typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList; class InitListExpr; class InitializationKind; class InitializationSequence; class InitializedEntity; class IntegerLiteral; class LabelStmt; class LambdaExpr; class LangOptions; class LocalInstantiationScope; class LookupResult; class MacroInfo; typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> 
ModuleIdPath; class ModuleLoader; class MultiLevelTemplateArgumentList; class NamedDecl; class ObjCCategoryDecl; class ObjCCategoryImplDecl; class ObjCCompatibleAliasDecl; class ObjCContainerDecl; class ObjCImplDecl; class ObjCImplementationDecl; class ObjCInterfaceDecl; class ObjCIvarDecl; template <class T> class ObjCList; class ObjCMessageExpr; class ObjCMethodDecl; class ObjCPropertyDecl; class ObjCProtocolDecl; class OMPThreadPrivateDecl; class OMPRequiresDecl; class OMPDeclareReductionDecl; class OMPDeclareSimdDecl; class OMPClause; struct OMPVarListLocTy; struct OverloadCandidate; class OverloadCandidateSet; class OverloadExpr; class ParenListExpr; class ParmVarDecl; class Preprocessor; class PseudoDestructorTypeStorage; class PseudoObjectExpr; class QualType; class StandardConversionSequence; class Stmt; class StringLiteral; class SwitchStmt; class TemplateArgument; class TemplateArgumentList; class TemplateArgumentLoc; class TemplateDecl; class TemplateInstantiationCallback; class TemplateParameterList; class TemplatePartialOrderingContext; class TemplateTemplateParmDecl; class Token; class TypeAliasDecl; class TypedefDecl; class TypedefNameDecl; class TypeLoc; class TypoCorrectionConsumer; class UnqualifiedId; class UnresolvedLookupExpr; class UnresolvedMemberExpr; class UnresolvedSetImpl; class UnresolvedSetIterator; class UsingDecl; class UsingShadowDecl; class ValueDecl; class VarDecl; class VarTemplateSpecializationDecl; class VisibilityAttr; class VisibleDeclConsumer; class IndirectFieldDecl; struct DeductionFailureInfo; class TemplateSpecCandidateSet; namespace sema { class AccessedEntity; class BlockScopeInfo; class Capture; class CapturedRegionScopeInfo; class CapturingScopeInfo; class CompoundScopeInfo; class DelayedDiagnostic; class DelayedDiagnosticPool; class FunctionScopeInfo; class LambdaScopeInfo; class PossiblyUnreachableDiag; class SemaPPCallbacks; class TemplateDeductionInfo; } namespace threadSafety { class BeforeSet; void 
threadSafetyCleanup(BeforeSet* Cache); } // FIXME: No way to easily map from TemplateTypeParmTypes to // TemplateTypeParmDecls, so we have this horrible PointerUnion. typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>, SourceLocation> UnexpandedParameterPack; /// Describes whether we've seen any nullability information for the given /// file. struct FileNullability { /// The first pointer declarator (of any pointer kind) in the file that does /// not have a corresponding nullability annotation. SourceLocation PointerLoc; /// The end location for the first pointer declarator in the file. Used for /// placing fix-its. SourceLocation PointerEndLoc; /// Which kind of pointer declarator we saw. uint8_t PointerKind; /// Whether we saw any type nullability annotations in the given file. bool SawTypeNullability = false; }; /// A mapping from file IDs to a record of whether we've seen nullability /// information in that file. class FileNullabilityMap { /// A mapping from file IDs to the nullability information for each file ID. llvm::DenseMap<FileID, FileNullability> Map; /// A single-element cache based on the file ID. struct { FileID File; FileNullability Nullability; } Cache; public: FileNullability &operator[](FileID file) { // Check the single-element cache. if (file == Cache.File) return Cache.Nullability; // It's not in the single-element cache; flush the cache if we have one. if (!Cache.File.isInvalid()) { Map[Cache.File] = Cache.Nullability; } // Pull this entry into the cache. Cache.File = file; Cache.Nullability = Map[file]; return Cache.Nullability; } }; /// Keeps track of expected type during expression parsing. The type is tied to /// a particular token, all functions that update or consume the type take a /// start location of the token they are looking at as a parameter. This allows /// to avoid updating the type on hot paths in the parser. 
class PreferredTypeBuilder {
public:
  PreferredTypeBuilder() = default;
  explicit PreferredTypeBuilder(QualType Type) : Type(Type) {}

  void enterCondition(Sema &S, SourceLocation Tok);
  void enterReturn(Sema &S, SourceLocation Tok);
  void enterVariableInit(SourceLocation Tok, Decl *D);
  /// Computing a type for the function argument may require running
  /// overloading, so we postpone its computation until it is actually needed.
  ///
  /// Clients should be very careful when using this function, as it stores a
  /// function_ref, clients should make sure all calls to get() with the same
  /// location happen while function_ref is alive.
  void enterFunctionArgument(SourceLocation Tok,
                             llvm::function_ref<QualType()> ComputeType);

  void enterParenExpr(SourceLocation Tok, SourceLocation LParLoc);
  void enterUnary(Sema &S, SourceLocation Tok, tok::TokenKind OpKind,
                  SourceLocation OpLoc);
  void enterBinary(Sema &S, SourceLocation Tok, Expr *LHS, tok::TokenKind Op);
  void enterMemAccess(Sema &S, SourceLocation Tok, Expr *Base);
  void enterSubscript(Sema &S, SourceLocation Tok, Expr *LHS);
  /// Handles all type casts, including C-style cast, C++ casts, etc.
  void enterTypeCast(SourceLocation Tok, QualType CastType);

  /// Returns the expected type for the token at \p Tok, or a null QualType
  /// if \p Tok is not the token the stored expectation refers to.
  QualType get(SourceLocation Tok) const {
    if (Tok != ExpectedLoc)
      return QualType();
    if (!Type.isNull())
      return Type;
    // Fall back to the lazily-computed type (see enterFunctionArgument()).
    if (ComputeType)
      return ComputeType();
    return QualType();
  }

private:
  /// Start position of a token for which we store expected type.
  SourceLocation ExpectedLoc;
  /// Expected type for a token starting at ExpectedLoc.
  QualType Type;
  /// A function to compute expected type at ExpectedLoc. It is only considered
  /// if Type is null.
  llvm::function_ref<QualType()> ComputeType;
};

/// Sema - This implements semantic analysis and AST building for C.
class Sema {
  Sema(const Sema &) = delete;
  void operator=(const Sema &) = delete;

  /// Source of additional semantic information.
  ExternalSemaSource *ExternalSource;

  /// Whether Sema has generated a multiplexer and has to delete it.
bool isMultiplexExternalSource;

static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD);

bool isVisibleSlow(const NamedDecl *D);

/// Determine whether two declarations should be linked together, given that
/// the old declaration might not be visible and the new declaration might
/// not have external linkage.
bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old,
                                  const NamedDecl *New) {
  if (isVisible(Old))
    return true;

  // See comment in below overload for why it's safe to compute the linkage
  // of the new declaration here.
  if (New->isExternallyDeclarable()) {
    assert(Old->isExternallyDeclarable() &&
           "should not have found a non-externally-declarable previous decl");
    return true;
  }

  return false;
}
bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New);

void setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem,
                                    QualType ResultTy,
                                    ArrayRef<QualType> Args);

public:
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;
typedef OpaquePtr<QualType> TypeTy;

OpenCLOptions OpenCLFeatures;
FPOptions FPFeatures;

// Core references into the rest of the compiler; held, not owned.
const LangOptions &LangOpts;
Preprocessor &PP;
ASTContext &Context;
ASTConsumer &Consumer;
DiagnosticsEngine &Diags;
SourceManager &SourceMgr;

/// Flag indicating whether or not to collect detailed statistics.
bool CollectStats;

/// Code-completion consumer.
CodeCompleteConsumer *CodeCompleter;

/// CurContext - This is the current declaration context of parsing.
DeclContext *CurContext;

/// Generally null except when we temporarily switch decl contexts,
/// like in \see ActOnObjCTemporaryExitContainerContext.
DeclContext *OriginalLexicalContext;

/// VAListTagName - The declaration name corresponding to __va_list_tag.
/// This is used as part of a hack to omit that class from ADL results.
DeclarationName VAListTagName;

bool MSStructPragmaOn; // True when \#pragma ms_struct on

/// Controls member pointer representation format under the MS ABI.
LangOptions::PragmaMSPointersToMembersKind
    MSPointerToMemberRepresentationMethod;

/// Stack of active SEH __finally scopes.  Can be empty.
SmallVector<Scope*, 2> CurrentSEHFinally;

/// Source location for newly created implicit MSInheritanceAttrs
SourceLocation ImplicitMSInheritanceAttrLoc;

/// pragma clang section kind
enum PragmaClangSectionKind {
  PCSK_Invalid      = 0,
  PCSK_BSS          = 1,
  PCSK_Data         = 2,
  PCSK_Rodata       = 3,
  PCSK_Text         = 4
};

enum PragmaClangSectionAction {
  PCSA_Set   = 0,
  PCSA_Clear = 1
};

/// State for one \#pragma clang section kind (BSS/data/rodata/text).
struct PragmaClangSection {
  std::string SectionName;
  bool Valid = false;
  SourceLocation PragmaLocation;

  void Act(SourceLocation PragmaLocation,
           PragmaClangSectionAction Action,
           StringLiteral* Name);
};

PragmaClangSection PragmaClangBSSSection;
PragmaClangSection PragmaClangDataSection;
PragmaClangSection PragmaClangRodataSection;
PragmaClangSection PragmaClangTextSection;

enum PragmaMsStackAction {
  PSK_Reset     = 0x0,                // #pragma ()
  PSK_Set       = 0x1,                // #pragma (value)
  PSK_Push      = 0x2,                // #pragma (push[, id])
  PSK_Pop       = 0x4,                // #pragma (pop[, id])
  PSK_Show      = 0x8,                // #pragma (show) -- only for "pack"!
  PSK_Push_Set  = PSK_Push | PSK_Set, // #pragma (push[, id], value)
  PSK_Pop_Set   = PSK_Pop | PSK_Set,  // #pragma (pop[, id], value)
};

/// Generic push/pop stack for MSVC-style \#pragma state (pack, vtordisp,
/// segment names, ...), keyed by optional string slot labels.
template<typename ValueType>
struct PragmaStack {
  struct Slot {
    llvm::StringRef StackSlotLabel;
    ValueType Value;
    SourceLocation PragmaLocation;
    SourceLocation PragmaPushLocation;
    Slot(llvm::StringRef StackSlotLabel, ValueType Value,
         SourceLocation PragmaLocation, SourceLocation PragmaPushLocation)
        : StackSlotLabel(StackSlotLabel), Value(Value),
          PragmaLocation(PragmaLocation),
          PragmaPushLocation(PragmaPushLocation) {}
  };
  void Act(SourceLocation PragmaLocation, PragmaMsStackAction Action,
           llvm::StringRef StackSlotLabel, ValueType Value);

  // MSVC seems to add artificial slots to #pragma stacks on entering a C++
  // method body to restore the stacks on exit, so it works like this:
  //
  // struct S {
  //   #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>)
  //   void Method {}
  //   #pragma <name>(pop, InternalPragmaSlot)
  // };
  //
  // It works even with #pragma vtordisp, although MSVC doesn't support
  //   #pragma vtordisp(push [, id], n)
  // syntax.
  //
  // Push / pop a named sentinel slot.
  void SentinelAction(PragmaMsStackAction Action, StringRef Label) {
    assert((Action == PSK_Push || Action == PSK_Pop) &&
           "Can only push / pop #pragma stack sentinels!");
    Act(CurrentPragmaLocation, Action, Label, CurrentValue);
  }

  // Constructors.
  explicit PragmaStack(const ValueType &Default)
      : DefaultValue(Default), CurrentValue(Default) {}

  bool hasValue() const { return CurrentValue != DefaultValue; }

  SmallVector<Slot, 2> Stack;
  ValueType DefaultValue; // Value used for PSK_Reset action.
  ValueType CurrentValue;
  SourceLocation CurrentPragmaLocation;
};
// FIXME: We should serialize / deserialize these if they occur in a PCH (but
// we shouldn't do so if they're in a module).

/// Whether to insert vtordisps prior to virtual bases in the Microsoft
/// C++ ABI.
/// Possible values are 0, 1, and 2, which mean:
///
/// 0: Suppress all vtordisps
/// 1: Insert vtordisps in the presence of vbase overrides and non-trivial
///    structors
/// 2: Always insert vtordisps to support RTTI on partially constructed
///    objects
PragmaStack<MSVtorDispAttr::Mode> VtorDispStack;
// #pragma pack.
// Sentinel to represent when the stack is set to mac68k alignment.
static const unsigned kMac68kAlignmentSentinel = ~0U;
PragmaStack<unsigned> PackStack;
// The current #pragma pack values and locations at each #include.
struct PackIncludeState {
  unsigned CurrentValue;
  SourceLocation CurrentPragmaLocation;
  bool HasNonDefaultValue, ShouldWarnOnInclude;
};
SmallVector<PackIncludeState, 8> PackIncludeStack;
// Segment #pragmas.
PragmaStack<StringLiteral *> DataSegStack;
PragmaStack<StringLiteral *> BSSSegStack;
PragmaStack<StringLiteral *> ConstSegStack;
PragmaStack<StringLiteral *> CodeSegStack;

// RAII object to push / pop sentinel slots for all MS #pragma stacks.
// Actions should be performed only if we enter / exit a C++ method body.
class PragmaStackSentinelRAII {
public:
  PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct);
  ~PragmaStackSentinelRAII();

private:
  Sema &S;
  StringRef SlotLabel;
  bool ShouldAct;
};

/// A mapping that describes the nullability we've seen in each header file.
FileNullabilityMap NullabilityMap;

/// Last section used with #pragma init_seg.
StringLiteral *CurInitSeg;
SourceLocation CurInitSegLoc;

/// VisContext - Manages the stack for \#pragma GCC visibility.
void *VisContext; // Really a "PragmaVisStack*"

/// This is an attribute introduced by \#pragma clang attribute.
struct PragmaAttributeEntry {
  SourceLocation Loc;
  ParsedAttr *Attribute;
  SmallVector<attr::SubjectMatchRule, 4> MatchRules;
  bool IsUsed;
};

/// A push'd group of PragmaAttributeEntries.
struct PragmaAttributeGroup {
  /// The location of the push attribute.
  SourceLocation Loc;
  /// The namespace of this push group.
  const IdentifierInfo *Namespace;
  SmallVector<PragmaAttributeEntry, 2> Entries;
};

SmallVector<PragmaAttributeGroup, 2> PragmaAttributeStack;

/// The declaration that is currently receiving an attribute from the
/// #pragma attribute stack.
const Decl *PragmaAttributeCurrentTargetDecl;

/// This represents the last location of a "#pragma clang optimize off"
/// directive if such a directive has not been closed by an "on" yet. If
/// optimizations are currently "on", this is set to an invalid location.
SourceLocation OptimizeOffPragmaLocation;

/// Flag indicating if Sema is building a recovery call expression.
///
/// This flag is used to avoid building recovery call expressions
/// if Sema is already doing so, which would cause infinite recursions.
bool IsBuildingRecoveryCallExpr;

/// Used to control the generation of ExprWithCleanups.
CleanupInfo Cleanup;

/// ExprCleanupObjects - This is the stack of objects requiring
/// cleanup that are created by the current full expression.  The
/// element type here is ExprWithCleanups::Object.
SmallVector<BlockDecl*, 8> ExprCleanupObjects;

/// Store a set of either DeclRefExprs or MemberExprs that contain a reference
/// to a variable (constant) that may or may not be odr-used in this Expr, and
/// we won't know until all lvalue-to-rvalue and discarded value conversions
/// have been applied to all subexpressions of the enclosing full expression.
/// This is cleared at the end of each full expression.
using MaybeODRUseExprSet = llvm::SmallPtrSet<Expr *, 2>;
MaybeODRUseExprSet MaybeODRUseExprs;

std::unique_ptr<sema::FunctionScopeInfo> CachedFunctionScope;

/// Stack containing information about each of the nested
/// function, block, and method scopes that are currently active.
SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes;

typedef LazyVector<TypedefNameDecl *, ExternalSemaSource,
                   &ExternalSemaSource::ReadExtVectorDecls, 2, 2>
  ExtVectorDeclsType;

/// ExtVectorDecls - This is a list of all the extended vector types.
/// This allows
/// us to associate a raw vector type with one of the ext_vector type names.
/// This is only necessary for issuing pretty diagnostics.
ExtVectorDeclsType ExtVectorDecls;

/// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes.
std::unique_ptr<CXXFieldCollector> FieldCollector;

typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType;

/// Set containing all declared private fields that are not used.
NamedDeclSetType UnusedPrivateFields;

/// Set containing all typedefs that are likely unused.
llvm::SmallSetVector<const TypedefNameDecl *, 4>
    UnusedLocalTypedefNameCandidates;

/// Delete-expressions to be analyzed at the end of translation unit
///
/// This list contains class members, and locations of delete-expressions
/// that could not be proven as to whether they mismatch with new-expression
/// used in initializer of the field.
typedef std::pair<SourceLocation, bool> DeleteExprLoc;
typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs;
llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs;

typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy;

/// PureVirtualClassDiagSet - a set of class declarations which we have
/// emitted a list of pure virtual functions. Used to prevent emitting the
/// same list more than once.
std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet;

/// ParsingInitForAutoVars - a set of declarations with auto types for which
/// we are currently parsing the initializer.
llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars;

/// Look for a locally scoped extern "C" declaration by the given name.
NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name);

typedef LazyVector<VarDecl *, ExternalSemaSource,
                   &ExternalSemaSource::ReadTentativeDefinitions, 2, 2>
  TentativeDefinitionsType;

/// All the tentative definitions encountered in the TU.
TentativeDefinitionsType TentativeDefinitions;

typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource,
                   &ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2>
  UnusedFileScopedDeclsType;

/// The set of file scoped decls seen so far that have not been used
/// and must warn if not used. Only contains the first declaration.
UnusedFileScopedDeclsType UnusedFileScopedDecls;

typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource,
                   &ExternalSemaSource::ReadDelegatingConstructors, 2, 2>
  DelegatingCtorDeclsType;

/// All the delegating constructors seen so far in the file, used for
/// cycle detection at the end of the TU.
DelegatingCtorDeclsType DelegatingCtorDecls;

/// All the overriding functions seen during a class definition
/// that had their exception spec checks delayed, plus the overridden
/// function.
SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2>
  DelayedOverridingExceptionSpecChecks;

/// All the function redeclarations seen during a class definition that had
/// their exception spec checks delayed, plus the prior declaration they
/// should be checked against. Except during error recovery, the new decl
/// should always be a friend declaration, as that's the only valid way to
/// redeclare a special member before its class is complete.
SmallVector<std::pair<FunctionDecl*, FunctionDecl*>, 2>
  DelayedEquivalentExceptionSpecChecks;

typedef llvm::MapVector<const FunctionDecl *,
                        std::unique_ptr<LateParsedTemplate>>
    LateParsedTemplateMapT;
LateParsedTemplateMapT LateParsedTemplateMap;

/// Callback to the parser to parse templated functions when needed.
typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT);
typedef void LateTemplateParserCleanupCB(void *P);
LateTemplateParserCB *LateTemplateParser;
LateTemplateParserCleanupCB *LateTemplateParserCleanup;
void *OpaqueParser;

/// Register the parser callbacks used for -fdelayed-template-parsing;
/// OpaqueParser is handed back to the callbacks untouched.
void SetLateTemplateParser(LateTemplateParserCB *LTP,
                           LateTemplateParserCleanupCB *LTPCleanup,
                           void *P) {
  LateTemplateParser = LTP;
  LateTemplateParserCleanup = LTPCleanup;
  OpaqueParser = P;
}

class DelayedDiagnostics;

class DelayedDiagnosticsState {
  sema::DelayedDiagnosticPool *SavedPool;
  friend class Sema::DelayedDiagnostics;
};
typedef DelayedDiagnosticsState ParsingDeclState;
typedef DelayedDiagnosticsState ProcessingContextState;

/// A class which encapsulates the logic for delaying diagnostics
/// during parsing and other processing.
class DelayedDiagnostics {
  /// The current pool of diagnostics into which delayed
  /// diagnostics should go.
  sema::DelayedDiagnosticPool *CurPool;

public:
  DelayedDiagnostics() : CurPool(nullptr) {}

  /// Adds a delayed diagnostic.
  void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h

  /// Determines whether diagnostics should be delayed.
  bool shouldDelayDiagnostics() { return CurPool != nullptr; }

  /// Returns the current delayed-diagnostics pool.
  sema::DelayedDiagnosticPool *getCurrentPool() const {
    return CurPool;
  }

  /// Enter a new scope.  Access and deprecation diagnostics will be
  /// collected in this pool.
  DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) {
    DelayedDiagnosticsState state;
    state.SavedPool = CurPool;
    CurPool = &pool;
    return state;
  }

  /// Leave a delayed-diagnostic state that was previously pushed.
  /// Do not emit any of the diagnostics.  This is performed as part
  /// of the bookkeeping of popping a pool "properly".
  void popWithoutEmitting(DelayedDiagnosticsState state) {
    CurPool = state.SavedPool;
  }

  /// Enter a new scope where access and deprecation diagnostics are
  /// not delayed.
  DelayedDiagnosticsState pushUndelayed() {
    DelayedDiagnosticsState state;
    state.SavedPool = CurPool;
    CurPool = nullptr;
    return state;
  }

  /// Undo a previous pushUndelayed().
  void popUndelayed(DelayedDiagnosticsState state) {
    assert(CurPool == nullptr);
    CurPool = state.SavedPool;
  }
} DelayedDiagnostics;

/// A RAII object to temporarily push a declaration context.
class ContextRAII {
private:
  Sema &S;
  DeclContext *SavedContext;
  ProcessingContextState SavedContextState;
  QualType SavedCXXThisTypeOverride;

public:
  ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true)
    : S(S), SavedContext(S.CurContext),
      SavedContextState(S.DelayedDiagnostics.pushUndelayed()),
      SavedCXXThisTypeOverride(S.CXXThisTypeOverride) {
    assert(ContextToPush && "pushing null context");
    S.CurContext = ContextToPush;
    if (NewThisContext)
      S.CXXThisTypeOverride = QualType();
  }

  /// Restore the saved context; safe to call more than once (the
  /// nulled-out SavedContext makes subsequent calls no-ops).
  void pop() {
    if (!SavedContext) return;
    S.CurContext = SavedContext;
    S.DelayedDiagnostics.popUndelayed(SavedContextState);
    S.CXXThisTypeOverride = SavedCXXThisTypeOverride;
    SavedContext = nullptr;
  }

  ~ContextRAII() {
    pop();
  }
};

/// Used to change context to isConstantEvaluated without pushing a heavy
/// ExpressionEvaluationContextRecord object.
bool isConstantEvaluatedOverride;

bool isConstantEvaluated() {
  return ExprEvalContexts.back().isConstantEvaluated() ||
         isConstantEvaluatedOverride;
}

/// RAII object to handle the state changes required to synthesize
/// a function body.
class SynthesizedFunctionScope {
  Sema &S;
  Sema::ContextRAII SavedContext;
  bool PushedCodeSynthesisContext = false;

public:
  SynthesizedFunctionScope(Sema &S, DeclContext *DC)
      : S(S), SavedContext(S, DC) {
    S.PushFunctionScope();
    S.PushExpressionEvaluationContext(
        Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
    if (auto *FD = dyn_cast<FunctionDecl>(DC))
      FD->setWillHaveBody(true);
    else
      assert(isa<ObjCMethodDecl>(DC));
  }

  /// Push a "while defining the synthesized function" note context;
  /// may be called at most once per scope.
  void addContextNote(SourceLocation UseLoc) {
    assert(!PushedCodeSynthesisContext);

    Sema::CodeSynthesisContext Ctx;
    Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction;
    Ctx.PointOfInstantiation = UseLoc;
    Ctx.Entity = cast<Decl>(S.CurContext);
    S.pushCodeSynthesisContext(Ctx);

    PushedCodeSynthesisContext = true;
  }

  ~SynthesizedFunctionScope() {
    if (PushedCodeSynthesisContext)
      S.popCodeSynthesisContext();
    if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext))
      FD->setWillHaveBody(false);
    S.PopExpressionEvaluationContext();
    S.PopFunctionScopeInfo();
  }
};

/// WeakUndeclaredIdentifiers - Identifiers contained in
/// \#pragma weak before declared. rare. may alias another
/// identifier, declared or undeclared
llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers;

/// ExtnameUndeclaredIdentifiers - Identifiers contained in
/// \#pragma redefine_extname before declared.  Used in Solaris system headers
/// to define functions that occur in multiple standards to call the version
/// in the currently selected standard.
llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers;

/// Load weak undeclared identifiers from the external source.
void LoadExternalWeakUndeclaredIdentifiers();

/// WeakTopLevelDecl - Translation-unit scoped declarations generated by
/// \#pragma weak during processing of other Decls.
/// I couldn't figure out a clean way to generate these in-line, so
/// we store them here and handle separately -- which is a hack.
/// It would be best to refactor this.
SmallVector<Decl*,2> WeakTopLevelDecl;

IdentifierResolver IdResolver;

/// Translation Unit Scope - useful to Objective-C actions that need
/// to lookup file scope declarations in the "ordinary" C decl namespace.
/// For example, user-defined classes, built-in "id" type, etc.
Scope *TUScope;

/// The C++ "std" namespace, where the standard library resides.
LazyDeclPtr StdNamespace;

/// The C++ "std::bad_alloc" class, which is defined by the C++
/// standard library.
LazyDeclPtr StdBadAlloc;

/// The C++ "std::align_val_t" enum class, which is defined by the C++
/// standard library.
LazyDeclPtr StdAlignValT;

/// The C++ "std::experimental" namespace, where the experimental parts
/// of the standard library resides.
NamespaceDecl *StdExperimentalNamespaceCache;

/// The C++ "std::initializer_list" template, which is defined in
/// \<initializer_list>.
ClassTemplateDecl *StdInitializerList;

/// The C++ "std::coroutine_traits" template, which is defined in
/// \<coroutine_traits>
ClassTemplateDecl *StdCoroutineTraitsCache;

/// The C++ "type_info" declaration, which is defined in \<typeinfo>.
RecordDecl *CXXTypeInfoDecl;

/// The MSVC "_GUID" struct, which is defined in MSVC header files.
RecordDecl *MSVCGuidDecl;

/// Caches identifiers/selectors for NSFoundation APIs.
std::unique_ptr<NSAPI> NSAPIObj;

/// The declaration of the Objective-C NSNumber class.
ObjCInterfaceDecl *NSNumberDecl;

/// The declaration of the Objective-C NSValue class.
ObjCInterfaceDecl *NSValueDecl;

/// Pointer to NSNumber type (NSNumber *).
QualType NSNumberPointer;

/// Pointer to NSValue type (NSValue *).
QualType NSValuePointer;

/// The Objective-C NSNumber methods used to create NSNumber literals.
ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods];

/// The declaration of the Objective-C NSString class.
ObjCInterfaceDecl *NSStringDecl;

/// Pointer to NSString type (NSString *).
QualType NSStringPointer;

/// The declaration of the stringWithUTF8String: method.
ObjCMethodDecl *StringWithUTF8StringMethod;

/// The declaration of the valueWithBytes:objCType: method.
ObjCMethodDecl *ValueWithBytesObjCTypeMethod;

/// The declaration of the Objective-C NSArray class.
ObjCInterfaceDecl *NSArrayDecl;

/// The declaration of the arrayWithObjects:count: method.
ObjCMethodDecl *ArrayWithObjectsMethod;

/// The declaration of the Objective-C NSDictionary class.
ObjCInterfaceDecl *NSDictionaryDecl;

/// The declaration of the dictionaryWithObjects:forKeys:count: method.
ObjCMethodDecl *DictionaryWithObjectsMethod;

/// id<NSCopying> type.
QualType QIDNSCopying;

/// will hold 'respondsToSelector:'
Selector RespondsToSelectorSel;

/// A flag to remember whether the implicit forms of operator new and delete
/// have been declared.
bool GlobalNewDeleteDeclared;

/// A flag to indicate that we're in a context that permits abstract
/// references to fields.  This is really a
bool AllowAbstractFieldReference;

/// Describes how the expressions currently being parsed are
/// evaluated at run-time, if at all.
enum class ExpressionEvaluationContext {
  /// The current expression and its subexpressions occur within an
  /// unevaluated operand (C++11 [expr]p7), such as the subexpression of
  /// \c sizeof, where the type of the expression may be significant but
  /// no code will be generated to evaluate the value of the expression at
  /// run time.
  Unevaluated,

  /// The current expression occurs within a braced-init-list within
  /// an unevaluated operand. This is mostly like a regular unevaluated
  /// context, except that we still instantiate constexpr functions that are
  /// referenced here so that we can perform narrowing checks correctly.
  UnevaluatedList,

  /// The current expression occurs within a discarded statement.
  /// This behaves largely similarly to an unevaluated operand in preventing
  /// definitions from being required, but not in other ways.
  DiscardedStatement,

  /// The current expression occurs within an unevaluated
  /// operand that unconditionally permits abstract references to
  /// fields, such as a SIZE operator in MS-style inline assembly.
  UnevaluatedAbstract,

  /// The current context is "potentially evaluated" in C++11 terms,
  /// but the expression is evaluated at compile-time (like the values of
  /// cases in a switch statement).
  ConstantEvaluated,

  /// The current expression is potentially evaluated at run time,
  /// which means that code may be generated to evaluate the value of the
  /// expression at run time.
  PotentiallyEvaluated,

  /// The current expression is potentially evaluated, but any
  /// declarations referenced inside that expression are only used if
  /// in fact the current expression is used.
  ///
  /// This value is used when parsing default function arguments, for which
  /// we would like to provide diagnostics (e.g., passing non-POD arguments
  /// through varargs) but do not want to mark declarations as "referenced"
  /// until the default argument is used.
  PotentiallyEvaluatedIfUsed
};

/// Data structure used to record current or nested
/// expression evaluation contexts.
struct ExpressionEvaluationContextRecord {
  /// The expression evaluation context.
  ExpressionEvaluationContext Context;

  /// Whether the enclosing context needed a cleanup.
  CleanupInfo ParentCleanup;

  /// Whether we are in a decltype expression.
  bool IsDecltype;

  /// The number of active cleanup objects when we entered
  /// this expression evaluation context.
  unsigned NumCleanupObjects;

  /// The number of typos encountered during this expression evaluation
  /// context (i.e. the number of TypoExprs created).
  unsigned NumTypos;

  MaybeODRUseExprSet SavedMaybeODRUseExprs;

  /// The lambdas that are present within this context, if it
  /// is indeed an unevaluated context.
  SmallVector<LambdaExpr *, 2> Lambdas;

  /// The declaration that provides context for lambda expressions
  /// and block literals if the normal declaration context does not
  /// suffice, e.g., in a default function argument.
  Decl *ManglingContextDecl;

  /// The context information used to mangle lambda expressions
  /// and block literals within this context.
  ///
  /// This mangling information is allocated lazily, since most contexts
  /// do not have lambda expressions or block literals.
  std::unique_ptr<MangleNumberingContext> MangleNumbering;

  /// If we are processing a decltype type, a set of call expressions
  /// for which we have deferred checking the completeness of the return type.
  SmallVector<CallExpr *, 8> DelayedDecltypeCalls;

  /// If we are processing a decltype type, a set of temporary binding
  /// expressions for which we have deferred checking the destructor.
  SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds;

  llvm::SmallPtrSet<const Expr *, 8> PossibleDerefs;

  /// \brief Describes whether we are in an expression context which we have
  /// to handle differently.
  enum ExpressionKind {
    EK_Decltype, EK_TemplateArgument, EK_Other
  } ExprContext;

  ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
                                    unsigned NumCleanupObjects,
                                    CleanupInfo ParentCleanup,
                                    Decl *ManglingContextDecl,
                                    ExpressionKind ExprContext)
      : Context(Context), ParentCleanup(ParentCleanup),
        NumCleanupObjects(NumCleanupObjects), NumTypos(0),
        ManglingContextDecl(ManglingContextDecl), MangleNumbering(),
        ExprContext(ExprContext) {}

  /// Retrieve the mangling numbering context, used to consistently
  /// number constructs like lambdas for mangling.
  MangleNumberingContext &getMangleNumberingContext(ASTContext &Ctx);

  bool isUnevaluated() const {
    return Context == ExpressionEvaluationContext::Unevaluated ||
           Context == ExpressionEvaluationContext::UnevaluatedAbstract ||
           Context == ExpressionEvaluationContext::UnevaluatedList;
  }
  bool isConstantEvaluated() const {
    return Context == ExpressionEvaluationContext::ConstantEvaluated;
  }
};

/// A stack of expression evaluation contexts.
SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts;

/// Emit a warning for all pending noderef expressions that we recorded.
void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec);

/// Compute the mangling number context for a lambda expression or
/// block literal.
///
/// \param DC - The DeclContext containing the lambda expression or
/// block literal.
/// \param[out] ManglingContextDecl - Returns the ManglingContextDecl
/// associated with the context, if relevant.
MangleNumberingContext *getCurrentMangleNumberContext(
    const DeclContext *DC, Decl *&ManglingContextDecl);

/// SpecialMemberOverloadResult - The overloading result for a special member
/// function.
///
/// This is basically a wrapper around PointerIntPair. The lowest bits of the
/// integer are used to determine whether overload resolution succeeded.
class SpecialMemberOverloadResult {
public:
  enum Kind {
    NoMemberOrDeleted,
    Ambiguous,
    Success
  };

private:
  llvm::PointerIntPair<CXXMethodDecl*, 2> Pair;

public:
  SpecialMemberOverloadResult() : Pair() {}
  SpecialMemberOverloadResult(CXXMethodDecl *MD)
      : Pair(MD, MD->isDeleted() ?
             NoMemberOrDeleted : Success) {}

  CXXMethodDecl *getMethod() const { return Pair.getPointer(); }
  void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); }

  Kind getKind() const { return static_cast<Kind>(Pair.getInt()); }
  void setKind(Kind K) { Pair.setInt(K); }
};

class SpecialMemberOverloadResultEntry
  : public llvm::FastFoldingSetNode,
    public SpecialMemberOverloadResult {
public:
  SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID)
    : FastFoldingSetNode(ID)
  {}
};

/// A cache of special member function overload resolution results
/// for C++ records.
llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache;

/// A cache of the flags available in enumerations with the flag_bits
/// attribute.
mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache;

/// The kind of translation unit we are processing.
///
/// When we're processing a complete translation unit, Sema will perform
/// end-of-translation-unit semantic tasks (such as creating
/// initializers for tentative definitions in C) once parsing has
/// completed. Modules and precompiled headers perform different kinds of
/// checks.
TranslationUnitKind TUKind;

llvm::BumpPtrAllocator BumpAlloc;

/// The number of SFINAE diagnostics that have been trapped.
unsigned NumSFINAEErrors;

typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>>
    UnparsedDefaultArgInstantiationsMap;

/// A mapping from parameters with unparsed default arguments to the
/// set of instantiations of each parameter.
///
/// This mapping is a temporary data structure used when parsing
/// nested class templates or nested classes of class templates,
/// where we might end up instantiating an inner class before the
/// default arguments of its methods have been parsed.
UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations;

// Contains the locations of the beginning of unparsed default
// argument locations.
llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs;

/// UndefinedInternals - all the used, undefined objects which require a
/// definition in this translation unit.
llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed;

/// Determine if VD, which must be a variable or function, is an external
/// symbol that nonetheless can't be referenced from outside this translation
/// unit because its type has no linkage and it's not extern "C".
bool isExternalWithNoLinkageType(ValueDecl *VD);

/// Obtain a sorted list of functions that are undefined but ODR-used.
void getUndefinedButUsed(
    SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined);

/// Retrieves list of suspicious delete-expressions that will be checked at
/// the end of translation unit.
const llvm::MapVector<FieldDecl *, DeleteLocs> &
getMismatchingDeleteExpressions() const;

typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods;
typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool;

/// Method Pool - allows efficient lookup when typechecking messages to "id".
/// We need to maintain a list, since selectors can have differing signatures
/// across classes. In Cocoa, this happens to be extremely uncommon (only 1%
/// of selectors are "overloaded").
/// At the head of the list it is recorded whether there were 0, 1, or >= 2
/// methods inside categories with a particular selector.
GlobalMethodPool MethodPool;

/// Method selectors used in a \@selector expression. Used for implementation
/// of -Wselector.
llvm::MapVector<Selector, SourceLocation> ReferencedSelectors;

/// List of SourceLocations where 'self' is implicitly retained inside a
/// block.
llvm::SmallVector<std::pair<SourceLocation, const BlockDecl *>, 1>
    ImplicitlyRetainedSelfLocs;

/// Kinds of C++ special members.
enum CXXSpecialMember { CXXDefaultConstructor, CXXCopyConstructor, CXXMoveConstructor, CXXCopyAssignment, CXXMoveAssignment, CXXDestructor, CXXInvalid }; typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember> SpecialMemberDecl; /// The C++ special members which we are currently in the process of /// declaring. If this process recursively triggers the declaration of the /// same special member, we should act as if it is not yet declared. llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared; /// The function definitions which were renamed as part of typo-correction /// to match their respective declarations. We want to keep track of them /// to ensure that we don't emit a "redefinition" error if we encounter a /// correctly named definition after the renamed definition. llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions; /// Stack of types that correspond to the parameter entities that are /// currently being copy-initialized. Can be empty. llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes; void ReadMethodPool(Selector Sel); void updateOutOfDateSelector(Selector Sel); /// Private Helper predicate to check for 'self'. bool isSelfExpr(Expr *RExpr); bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method); /// Cause the active diagnostic on the DiagosticsEngine to be /// emitted. This is closely coupled to the SemaDiagnosticBuilder class and /// should not be used elsewhere. void EmitCurrentDiagnostic(unsigned DiagID); /// Records and restores the FP_CONTRACT state on entry/exit of compound /// statements. 
/// RAII object that snapshots the floating-point pragma state (FP_CONTRACT
/// et al.) on construction and restores it on destruction, so compound
/// statements cannot leak pragma changes into enclosing scopes.
class FPContractStateRAII {
public:
  FPContractStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.FPFeatures) {}
  ~FPContractStateRAII() { S.FPFeatures = OldFPFeaturesState; }

private:
  Sema& S;
  /// The floating-point feature state in effect when this object was created.
  FPOptions OldFPFeaturesState;
};

/// Add an implicit typedef named \p Name of type \p T to the translation
/// unit scope.
void addImplicitTypedef(StringRef Name, QualType T);

public:
Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
     TranslationUnitKind TUKind = TU_Complete,
     CodeCompleteConsumer *CompletionConsumer = nullptr);
~Sema();

/// Perform initialization that occurs after the parser has been
/// initialized but before it parses anything.
void Initialize();

// Trivial accessors for state Sema was constructed with.
const LangOptions &getLangOpts() const { return LangOpts; }
OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
FPOptions &getFPOptions() { return FPFeatures; }

DiagnosticsEngine &getDiagnostics() const { return Diags; }
SourceManager &getSourceManager() const { return SourceMgr; }
Preprocessor &getPreprocessor() const { return PP; }
ASTContext &getASTContext() const { return Context; }
ASTConsumer &getASTConsumer() const { return Consumer; }
ASTMutationListener *getASTMutationListener() const;
ExternalSemaSource* getExternalSource() const { return ExternalSource; }

/// Registers an external source. If an external source already exists,
/// creates a multiplex external source and appends to it.
///
/// \param[in] E - A non-null external sema source.
///
void addExternalSource(ExternalSemaSource *E);

/// Print out statistics about the semantic analysis.
void PrintStats() const;

/// Helper class that creates diagnostics with optional
/// template instantiation stacks.
///
/// This class provides a wrapper around the basic DiagnosticBuilder
/// class that emits diagnostics. SemaDiagnosticBuilder is
/// responsible for emitting the diagnostic (as DiagnosticBuilder
/// does) and, if the diagnostic comes from inside a template
/// instantiation, printing the template instantiation stack as
/// well.
class SemaDiagnosticBuilder : public DiagnosticBuilder {
  /// The Sema instance that will ultimately emit the diagnostic (and any
  /// template instantiation notes) in our destructor.
  Sema &SemaRef;
  /// The ID of the diagnostic being built, forwarded to Sema on emission.
  unsigned DiagID;

public:
  SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
    : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { }

  // This is a cunning lie. DiagnosticBuilder actually performs move
  // construction in its copy constructor (but due to varied uses, it's not
  // possible to conveniently express this as actual move construction). So
  // the default copy ctor here is fine, because the base class disables the
  // source anyway, so the user-defined ~SemaDiagnosticBuilder is a safe no-op
  // in that case anyway.
  SemaDiagnosticBuilder(const SemaDiagnosticBuilder&) = default;

  ~SemaDiagnosticBuilder() {
    // If we aren't active, there is nothing to do.
    if (!isActive()) return;

    // Otherwise, we need to emit the diagnostic. First flush the underlying
    // DiagnosticBuilder data, and clear the diagnostic builder itself so it
    // won't emit the diagnostic in its own destructor.
    //
    // This seems wasteful, in that as written the DiagnosticBuilder dtor will
    // do its own needless checks to see if the diagnostic needs to be
    // emitted. However, because we take care to ensure that the builder
    // objects never escape, a sufficiently smart compiler will be able to
    // eliminate that code.
    FlushCounts();
    Clear();

    // Dispatch to Sema to emit the diagnostic.
    SemaRef.EmitCurrentDiagnostic(DiagID);
  }

  /// Teach operator<< to produce an object of the correct type.
  template<typename T>
  friend const SemaDiagnosticBuilder &operator<<(
      const SemaDiagnosticBuilder &Diag, const T &Value) {
    // Stream through the base class, but return the derived reference so
    // chained '<<' stays in SemaDiagnosticBuilder.
    const DiagnosticBuilder &BaseDiag = Diag;
    BaseDiag << Value;
    return Diag;
  }
};

/// Emit a diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) {
  DiagnosticBuilder DB = Diags.Report(Loc, DiagID);
  return SemaDiagnosticBuilder(DB, *this, DiagID);
}

/// Emit a partial diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD);

/// Build a partial diagnostic.
PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h bool findMacroSpelling(SourceLocation &loc, StringRef name); /// Get a string to suggest for zero-initialization of a type. std::string getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const; std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const; /// Calls \c Lexer::getLocForEndOfToken() SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0); /// Retrieve the module loader associated with the preprocessor. ModuleLoader &getModuleLoader() const; void emitAndClearUnusedLocalTypedefWarnings(); enum TUFragmentKind { /// The global module fragment, between 'module;' and a module-declaration. Global, /// A normal translation unit fragment. For a non-module unit, this is the /// entire translation unit. Otherwise, it runs from the module-declaration /// to the private-module-fragment (if any) or the end of the TU (if not). Normal, /// The private module fragment, between 'module :private;' and the end of /// the translation unit. Private }; void ActOnStartOfTranslationUnit(); void ActOnEndOfTranslationUnit(); void ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind); void CheckDelegatingCtorCycles(); Scope *getScopeForContext(DeclContext *Ctx); void PushFunctionScope(); void PushBlockScope(Scope *BlockScope, BlockDecl *Block); sema::LambdaScopeInfo *PushLambdaScope(); /// This is used to inform Sema what the current TemplateParameterDepth /// is during Parsing. Currently it is used to pass on the depth /// when parsing generic lambda 'auto' parameters. void RecordParsingTemplateParameterDepth(unsigned Depth); void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD, RecordDecl *RD, CapturedRegionKind K); /// Custom deleter to allow FunctionScopeInfos to be kept alive for a short /// time after they've been popped. 
class PoppedFunctionScopeDeleter {
  /// The Sema instance that owns and recycles popped function scopes.
  Sema *Self;

public:
  explicit PoppedFunctionScopeDeleter(Sema *Self) : Self(Self) {}
  void operator()(sema::FunctionScopeInfo *Scope) const;
};

using PoppedFunctionScopePtr =
    std::unique_ptr<sema::FunctionScopeInfo, PoppedFunctionScopeDeleter>;

/// Pop the innermost function scope; the returned smart pointer keeps the
/// scope alive briefly so callers can still inspect it before it is recycled.
PoppedFunctionScopePtr
PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr,
                     const Decl *D = nullptr,
                     QualType BlockType = QualType());

/// Retrieve the innermost function scope, or null when not inside one.
sema::FunctionScopeInfo *getCurFunction() const {
  return FunctionScopes.empty() ? nullptr : FunctionScopes.back();
}

sema::FunctionScopeInfo *getEnclosingFunction() const;

// Record control-flow facts about the current function scope.
void setFunctionHasBranchIntoScope();
void setFunctionHasBranchProtectedScope();
void setFunctionHasIndirectGoto();

void PushCompoundScope(bool IsStmtExpr);
void PopCompoundScope();

sema::CompoundScopeInfo &getCurCompoundScope() const;

bool hasAnyUnrecoverableErrorsInThisFunction() const;

/// Retrieve the current block, if any.
sema::BlockScopeInfo *getCurBlock();

/// Retrieve the current lambda scope info, if any.
/// \param IgnoreNonLambdaCapturingScope true if should find the top-most
/// lambda scope info ignoring all inner capturing scopes that are not
/// lambda scopes.
sema::LambdaScopeInfo *
getCurLambda(bool IgnoreNonLambdaCapturingScope = false);

/// Retrieve the current generic lambda info, if any.
sema::LambdaScopeInfo *getCurGenericLambda();

/// Retrieve the current captured region, if any.
sema::CapturedRegionScopeInfo *getCurCapturedRegion();

/// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls
SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; }

void ActOnComment(SourceRange Comment);

//===--------------------------------------------------------------------===//
// Type Analysis / Processing: SemaType.cpp.
// QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs, const DeclSpec *DS = nullptr); QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA, const DeclSpec *DS = nullptr); QualType BuildPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); QualType BuildReferenceType(QualType T, bool LValueRef, SourceLocation Loc, DeclarationName Entity); QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM, Expr *ArraySize, unsigned Quals, SourceRange Brackets, DeclarationName Entity); QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc); QualType BuildExtVectorType(QualType T, Expr *ArraySize, SourceLocation AttrLoc); QualType BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace, SourceLocation AttrLoc); /// Same as above, but constructs the AddressSpace index if not provided. QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace, SourceLocation AttrLoc); bool CheckFunctionReturnType(QualType T, SourceLocation Loc); /// Build a function type. /// /// This routine checks the function type according to C++ rules and /// under the assumption that the result type and parameter types have /// just been instantiated from a template. It therefore duplicates /// some of the behavior of GetTypeForDeclarator, but in a much /// simpler form that is only suitable for this narrow use case. /// /// \param T The return type of the function. /// /// \param ParamTypes The parameter types of the function. This array /// will be modified to account for adjustments to the types of the /// function parameters. /// /// \param Loc The location of the entity whose type involves this /// function type or, if there is no such entity, the location of the /// type that will have function type. /// /// \param Entity The name of the entity that involves the function /// type, if known. /// /// \param EPI Extra information about the function type. 
Usually this will /// be taken from an existing function with the same prototype. /// /// \returns A suitable function type, if there are no errors. The /// unqualified type will always be a FunctionProtoType. /// Otherwise, returns a NULL type. QualType BuildFunctionType(QualType T, MutableArrayRef<QualType> ParamTypes, SourceLocation Loc, DeclarationName Entity, const FunctionProtoType::ExtProtoInfo &EPI); QualType BuildMemberPointerType(QualType T, QualType Class, SourceLocation Loc, DeclarationName Entity); QualType BuildBlockPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); QualType BuildParenType(QualType T); QualType BuildAtomicType(QualType T, SourceLocation Loc); QualType BuildReadPipeType(QualType T, SourceLocation Loc); QualType BuildWritePipeType(QualType T, SourceLocation Loc); TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S); TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy); /// Package the given type and TSI into a ParsedType. 
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo); DeclarationNameInfo GetNameForDeclarator(Declarator &D); DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name); static QualType GetTypeFromParser(ParsedType Ty, TypeSourceInfo **TInfo = nullptr); CanThrowResult canThrow(const Expr *E); const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc, const FunctionProtoType *FPT); void UpdateExceptionSpec(FunctionDecl *FD, const FunctionProtoType::ExceptionSpecInfo &ESI); bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range); bool CheckDistantExceptionSpec(QualType T); bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New); bool CheckEquivalentExceptionSpec( const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool CheckEquivalentExceptionSpec( const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID, const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool handlerCanCatch(QualType HandlerType, QualType ExceptionType); bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID, const PartialDiagnostic &NestedDiagID, const PartialDiagnostic &NoteID, const PartialDiagnostic &NoThrowDiagID, const FunctionProtoType *Superset, SourceLocation SuperLoc, const FunctionProtoType *Subset, SourceLocation SubLoc); bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID, const PartialDiagnostic &NoteID, const FunctionProtoType *Target, SourceLocation TargetLoc, const FunctionProtoType *Source, SourceLocation SourceLoc); TypeResult ActOnTypeName(Scope *S, Declarator &D); /// The parser has parsed the context-sensitive type 'instancetype' /// in an Objective-C message declaration. Return the appropriate type. ParsedType ActOnObjCInstanceType(SourceLocation Loc); /// Abstract class used to diagnose incomplete types. 
struct TypeDiagnoser {
  TypeDiagnoser() {}

  /// Emit the diagnostic for the problematic type \p T at \p Loc.
  virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0;
  virtual ~TypeDiagnoser() {}
};

// Overload set that normalizes common argument types into something the
// diagnostic builder can stream with 'operator<<'.
static int getPrintable(int I) { return I; }
static unsigned getPrintable(unsigned I) { return I; }
static bool getPrintable(bool B) { return B; }
static const char * getPrintable(const char *S) { return S; }
static StringRef getPrintable(StringRef S) { return S; }
static const std::string &getPrintable(const std::string &S) { return S; }
static const IdentifierInfo *getPrintable(const IdentifierInfo *II) {
  return II;
}
static DeclarationName getPrintable(DeclarationName N) { return N; }
static QualType getPrintable(QualType T) { return T; }
static SourceRange getPrintable(SourceRange R) { return R; }
static SourceRange getPrintable(SourceLocation L) { return L; }
static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); }
static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();}

/// A TypeDiagnoser bound to a diagnostic ID plus a tuple of extra arguments
/// that are streamed into the diagnostic ahead of the offending type.
template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser {
  unsigned DiagID;
  // Stored by reference: callers must keep the arguments alive for the
  // lifetime of the diagnoser.
  std::tuple<const Ts &...> Args;

  template <std::size_t... Is>
  void emit(const SemaDiagnosticBuilder &DB,
            std::index_sequence<Is...>) const {
    // Apply all tuple elements to the builder in order.
    bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...};
    (void)Dummy;
  }

public:
  BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args)
      : TypeDiagnoser(), DiagID(DiagID), Args(Args...) {
    assert(DiagID != 0 && "no diagnostic for type diagnoser");
  }

  void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
    const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID);
    emit(DB, std::index_sequence_for<Ts...>());
    DB << T;
  }
};

private:
/// Methods for marking which expressions involve dereferencing a pointer
/// marked with the 'noderef' attribute. Expressions are checked bottom up as
/// they are parsed, meaning that a noderef pointer may not be accessed. For
/// example, in `&*p` where `p` is a noderef pointer, we will first parse the
/// `*p`, but need to check that `address of` is called on it. This requires
/// keeping a container of all pending expressions and checking if the address
/// of them are eventually taken.
void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E);
void CheckAddressOfNoDeref(const Expr *E);
void CheckMemberAccessOfNoDeref(const MemberExpr *E);

bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
                             TypeDiagnoser *Diagnoser);

/// Per-module parse state pushed while parsing a module.
struct ModuleScope {
  SourceLocation BeginLoc;
  clang::Module *Module = nullptr;
  bool ModuleInterface = false;
  bool ImplicitGlobalModuleFragment = false;
  /// The set of modules that were visible in the enclosing scope.
  VisibleModuleSet OuterVisibleModules;
};
/// The modules we're currently parsing.
llvm::SmallVector<ModuleScope, 16> ModuleScopes;

/// Namespace definitions that we will export when they finish.
llvm::SmallPtrSet<const NamespaceDecl*, 8> DeferredExportedNamespaces;

/// Get the module whose scope we are currently within.
Module *getCurrentModule() const {
  return ModuleScopes.empty() ? nullptr : ModuleScopes.back().Module;
}

VisibleModuleSet VisibleModules;

public:
/// Get the module owning an entity.
Module *getOwningModule(Decl *Entity) { return Entity->getOwningModule(); }

/// Make a merged definition of an existing hidden definition \p ND
/// visible at the specified location.
void makeMergedDefinitionVisible(NamedDecl *ND);

bool isModuleVisible(const Module *M, bool ModulePrivate = false);

/// Determine whether a declaration is visible to name lookup.
bool isVisible(const NamedDecl *D) {
  // Fast path for the common non-hidden case; defer to the slow path only
  // for declarations hidden behind modules.
  return !D->isHidden() || isVisibleSlow(D);
}

/// Determine whether any declaration of an entity is visible.
bool hasVisibleDeclaration(const NamedDecl *D,
                           llvm::SmallVectorImpl<Module *> *Modules = nullptr) {
  // Fast path first; only fall back to the slow module walk when the quick
  // visibility check fails.
  return isVisible(D) || hasVisibleDeclarationSlow(D, Modules);
}
bool hasVisibleDeclarationSlow(const NamedDecl *D,
                               llvm::SmallVectorImpl<Module *> *Modules);

bool hasVisibleMergedDefinition(NamedDecl *Def);

bool hasMergedDefinitionInCurrentModule(NamedDecl *Def);

/// Determine if \p D and \p Suggested have a structurally compatible
/// layout as described in C11 6.2.7/1.
bool hasStructuralCompatLayout(Decl *D, Decl *Suggested);

/// Determine if \p D has a visible definition. If not, suggest a declaration
/// that should be made visible to expose the definition.
bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
                          bool OnlyNeedComplete = false);
bool hasVisibleDefinition(const NamedDecl *D) {
  // Convenience overload for callers that don't care which declaration
  // would need to be made visible.
  NamedDecl *Hidden;
  return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden);
}

/// Determine if the template parameter \p D has a visible default argument.
bool hasVisibleDefaultArgument(const NamedDecl *D,
                               llvm::SmallVectorImpl<Module *> *Modules = nullptr);

/// Determine if there is a visible declaration of \p D that is an explicit
/// specialization declaration for a specialization of a template. (For a
/// member specialization, use hasVisibleMemberSpecialization.)
bool hasVisibleExplicitSpecialization(
    const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);

/// Determine if there is a visible declaration of \p D that is a member
/// specialization declaration (as opposed to an instantiated declaration).
bool hasVisibleMemberSpecialization(
    const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);

/// Determine if \p A and \p B are equivalent internal linkage declarations
/// from different modules, and thus an ambiguity error can be downgraded to
/// an extension warning.
bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A,
                                            const NamedDecl *B);
void diagnoseEquivalentInternalLinkageDeclarations(
    SourceLocation Loc, const NamedDecl *D,
    ArrayRef<const NamedDecl *> Equiv);

bool isUsualDeallocationFunction(const CXXMethodDecl *FD);

/// Determine whether \p T is complete at \p Loc without emitting any
/// diagnostic when it is not.
bool isCompleteType(SourceLocation Loc, QualType T) {
  return !RequireCompleteTypeImpl(Loc, T, nullptr);
}
bool RequireCompleteType(SourceLocation Loc, QualType T,
                         TypeDiagnoser &Diagnoser);
bool RequireCompleteType(SourceLocation Loc, QualType T,
                         unsigned DiagID);

/// Convenience wrapper: bind \p DiagID and extra diagnostic arguments into
/// a BoundTypeDiagnoser before checking for completeness.
template <typename... Ts>
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID,
                         const Ts &...Args) {
  BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
  return RequireCompleteType(Loc, T, Diagnoser);
}

void completeExprArrayBound(Expr *E);
bool RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser);
bool RequireCompleteExprType(Expr *E, unsigned DiagID);

/// Convenience wrapper mirroring the RequireCompleteType variadic overload.
template <typename... Ts>
bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) {
  BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
  return RequireCompleteExprType(E, Diagnoser);
}

bool RequireLiteralType(SourceLocation Loc, QualType T,
                        TypeDiagnoser &Diagnoser);
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID);

/// Convenience wrapper mirroring the RequireCompleteType variadic overload.
template <typename... Ts>
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID,
                        const Ts &...Args) {
  BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
  return RequireLiteralType(Loc, T, Diagnoser);
}

QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
                           const CXXScopeSpec &SS, QualType T,
                           TagDecl *OwnedTagDecl = nullptr);

QualType BuildTypeofExprType(Expr *E, SourceLocation Loc);
/// If AsUnevaluated is false, E is treated as though it were an evaluated
/// context, such as when building a type for decltype(auto).
QualType BuildDecltypeType(Expr *E, SourceLocation Loc, bool AsUnevaluated = true); QualType BuildUnaryTransformType(QualType BaseType, UnaryTransformType::UTTKind UKind, SourceLocation Loc); //===--------------------------------------------------------------------===// // Symbol table / Decl tracking callbacks: SemaDecl.cpp. // struct SkipBodyInfo { SkipBodyInfo() : ShouldSkip(false), CheckSameAsPrevious(false), Previous(nullptr), New(nullptr) {} bool ShouldSkip; bool CheckSameAsPrevious; NamedDecl *Previous; NamedDecl *New; }; DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr); void DiagnoseUseOfUnimplementedSelectors(); bool isSimpleTypeSpecifier(tok::TokenKind Kind) const; ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec *SS = nullptr, bool isClassName = false, bool HasTrailingDot = false, ParsedType ObjectType = nullptr, bool IsCtorOrDtorName = false, bool WantNontrivialTypeSourceInfo = false, bool IsClassTemplateDeductionContext = true, IdentifierInfo **CorrectedII = nullptr); TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S); bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S); void DiagnoseUnknownTypeName(IdentifierInfo *&II, SourceLocation IILoc, Scope *S, CXXScopeSpec *SS, ParsedType &SuggestedType, bool IsTemplateName = false); /// Attempt to behave like MSVC in situations where lookup of an unqualified /// type name has failed in a dependent context. In these situations, we /// automatically form a DependentTypeName that will retry lookup in a related /// scope during instantiation. ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II, SourceLocation NameLoc, bool IsTemplateTypeArg); /// Describes the result of the name lookup and resolution performed /// by \c ClassifyName(). 
/// The outcome categories produced by Sema::ClassifyName().
enum NameClassificationKind {
  NC_Unknown,
  NC_Error,
  NC_Keyword,
  NC_Type,
  NC_Expression,
  NC_NestedNameSpecifier,
  NC_TypeTemplate,
  NC_VarTemplate,
  NC_FunctionTemplate,
  NC_UndeclaredTemplate,
};

/// A tagged result of name classification: the kind plus whichever payload
/// (expression, parsed type, or template name) that kind carries. The
/// accessors assert that the payload requested matches the stored kind.
class NameClassification {
  NameClassificationKind Kind;
  ExprResult Expr;
  TemplateName Template;
  ParsedType Type;

  explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {}

public:
  NameClassification(ExprResult Expr) : Kind(NC_Expression), Expr(Expr) {}

  NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {}

  NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {}

  static NameClassification Error() {
    return NameClassification(NC_Error);
  }

  static NameClassification Unknown() {
    return NameClassification(NC_Unknown);
  }

  static NameClassification NestedNameSpecifier() {
    return NameClassification(NC_NestedNameSpecifier);
  }

  static NameClassification TypeTemplate(TemplateName Name) {
    NameClassification Result(NC_TypeTemplate);
    Result.Template = Name;
    return Result;
  }

  static NameClassification VarTemplate(TemplateName Name) {
    NameClassification Result(NC_VarTemplate);
    Result.Template = Name;
    return Result;
  }

  static NameClassification FunctionTemplate(TemplateName Name) {
    NameClassification Result(NC_FunctionTemplate);
    Result.Template = Name;
    return Result;
  }

  static NameClassification UndeclaredTemplate(TemplateName Name) {
    NameClassification Result(NC_UndeclaredTemplate);
    Result.Template = Name;
    return Result;
  }

  NameClassificationKind getKind() const { return Kind; }

  /// The classified type; only valid for NC_Type results.
  ParsedType getType() const {
    assert(Kind == NC_Type);
    return Type;
  }

  /// The classified expression; only valid for NC_Expression results.
  ExprResult getExpression() const {
    assert(Kind == NC_Expression);
    return Expr;
  }

  /// The classified template name; only valid for the template kinds.
  TemplateName getTemplateName() const {
    assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate ||
           Kind == NC_VarTemplate || Kind == NC_UndeclaredTemplate);
    return Template;
  }

  /// Map the classification kind onto the parser-facing TemplateNameKind.
  TemplateNameKind getTemplateNameKind() const {
    switch (Kind) {
    case NC_TypeTemplate:
      return TNK_Type_template;
    case NC_FunctionTemplate:
      return TNK_Function_template;
    case NC_VarTemplate:
      return TNK_Var_template;
    case NC_UndeclaredTemplate:
      return TNK_Undeclared_template;
    default:
      llvm_unreachable("unsupported name classification.");
    }
  }
};

/// Perform name lookup on the given name, classifying it based on
/// the results of name lookup and the following token.
///
/// This routine is used by the parser to resolve identifiers and help direct
/// parsing. When the identifier cannot be found, this routine will attempt
/// to correct the typo and classify based on the resulting name.
///
/// \param S The scope in which we're performing name lookup.
///
/// \param SS The nested-name-specifier that precedes the name.
///
/// \param Name The identifier. If typo correction finds an alternative name,
/// this pointer parameter will be updated accordingly.
///
/// \param NameLoc The location of the identifier.
///
/// \param NextToken The token following the identifier. Used to help
/// disambiguate the name.
///
/// \param IsAddressOfOperand True if this name is the operand of a unary
/// address of ('&') expression, assuming it is classified as an
/// expression.
///
/// \param CCC The correction callback, if typo correction is desired.
NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS,
                                IdentifierInfo *&Name, SourceLocation NameLoc,
                                const Token &NextToken,
                                bool IsAddressOfOperand,
                                CorrectionCandidateCallback *CCC = nullptr);

/// Describes the detailed kind of a template name. Used in diagnostics.
enum class TemplateNameKindForDiagnostics {
  ClassTemplate,
  FunctionTemplate,
  VarTemplate,
  AliasTemplate,
  TemplateTemplateParam,
  Concept,
  DependentTemplate
};
TemplateNameKindForDiagnostics
getTemplateNameKindForDiagnostics(TemplateName Name);

/// Determine whether it's plausible that E was intended to be a
/// template-name.
bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) {
  // Template syntax only exists in C++, and an invalid expression cannot
  // plausibly name anything.
  if (!getLangOpts().CPlusPlus || E.isInvalid())
    return false;
  Dependent = false;
  // A reference that already carries an explicit template argument list was
  // clearly not intended to be followed by another '<...>'.
  if (auto *DRE = dyn_cast<DeclRefExpr>(E.get()))
    return !DRE->hasExplicitTemplateArgs();
  if (auto *ME = dyn_cast<MemberExpr>(E.get()))
    return !ME->hasExplicitTemplateArgs();
  // The remaining recognized forms are dependent references.
  Dependent = true;
  if (auto *DSDRE = dyn_cast<DependentScopeDeclRefExpr>(E.get()))
    return !DSDRE->hasExplicitTemplateArgs();
  if (auto *DSME = dyn_cast<CXXDependentScopeMemberExpr>(E.get()))
    return !DSME->hasExplicitTemplateArgs();
  // Any additional cases recognized here should also be handled by
  // diagnoseExprIntendedAsTemplateName.
  return false;
}
void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName,
                                        SourceLocation Less,
                                        SourceLocation Greater);

Decl *ActOnDeclarator(Scope *S, Declarator &D);

NamedDecl *HandleDeclarator(Scope *S, Declarator &D,
                            MultiTemplateParamsArg TemplateParameterLists);
void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S);
bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info);
bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
                                  DeclarationName Name, SourceLocation Loc,
                                  bool IsTemplateId);
void
diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals,
                          SourceLocation FallbackLoc,
                          SourceLocation ConstQualLoc = SourceLocation(),
                          SourceLocation VolatileQualLoc = SourceLocation(),
                          SourceLocation RestrictQualLoc = SourceLocation(),
                          SourceLocation AtomicQualLoc = SourceLocation(),
                          SourceLocation UnalignedQualLoc = SourceLocation());

static bool adjustContextForLocalExternDecl(DeclContext *&DC);
void DiagnoseFunctionSpecifiers(const DeclSpec &DS);
NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D,
                                  const LookupResult &R);
NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R);
void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl,
                 const LookupResult &R);
void CheckShadow(Scope *S, VarDecl *D);

/// Warn if 'E', which is an expression that is
about to be modified, refers /// to a shadowing declaration. void CheckShadowingDeclModification(Expr *E, SourceLocation Loc); void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI); private: /// Map of current shadowing declarations to shadowed declarations. Warn if /// it looks like the user is trying to modify the shadowing declaration. llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls; public: void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange); void handleTagNumbering(const TagDecl *Tag, Scope *TagScope); void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec, TypedefNameDecl *NewTD); void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D); NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous); NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D, LookupResult &Previous, bool &Redeclaration); NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope, ArrayRef<BindingDecl *> Bindings = None); NamedDecl * ActOnDecompositionDeclarator(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParamLists); // Returns true if the variable declaration is a redeclaration bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous); void CheckVariableDeclarationType(VarDecl *NewVD); bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit, Expr *Init); void CheckCompleteVariableDeclaration(VarDecl *VD); void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD); void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D); NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope); bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD); 
/// The kind of constexpr validation to perform on a function definition.
enum class CheckConstexprKind {
  /// Diagnose issues that are non-constant or that are extensions.
  Diagnose,
  /// Identify whether this function satisfies the formal rules for constexpr
  /// functions in the current language mode (with no extensions).
  CheckValid
};

bool CheckConstexprFunctionDefinition(const FunctionDecl *FD,
                                      CheckConstexprKind Kind);

// Hidden-virtual-method diagnostics: detect, collect, and note methods of a
// base class that are hidden (not overridden) by \p MD.
void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD);
void FindHiddenVirtualMethods(CXXMethodDecl *MD,
                              SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
void NoteHiddenVirtualMethods(CXXMethodDecl *MD,
                              SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);

// Returns true if the function declaration is a redeclaration
bool CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD,
                              LookupResult &Previous,
                              bool IsMemberSpecialization);
bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl);
bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD,
                                    QualType NewT, QualType OldT);
void CheckMain(FunctionDecl *FD, const DeclSpec &D);
void CheckMSVCRTEntryPoint(FunctionDecl *FD);
Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD,
                                                 bool IsDefinition);

// Parameter declarations.
void CheckFunctionOrTemplateParamDeclarator(Scope *S, Declarator &D);
Decl *ActOnParamDeclarator(Scope *S, Declarator &D);
ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC,
                                        SourceLocation Loc, QualType T);
ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc,
                            SourceLocation NameLoc, IdentifierInfo *Name,
                            QualType T, TypeSourceInfo *TSInfo,
                            StorageClass SC);

// Default arguments for parameters.
void ActOnParamDefaultArgument(Decl *param, SourceLocation EqualLoc,
                               Expr *defarg);
void ActOnParamUnparsedDefaultArgument(Decl *param, SourceLocation EqualLoc,
                                       SourceLocation ArgLoc);
void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc);
bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
                             SourceLocation EqualLoc);

// Initializers.
void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit);
void ActOnUninitializedDecl(Decl *dcl);
void ActOnInitializerError(Decl *Dcl);

void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc);
void ActOnCXXForRangeDecl(Decl *D);
StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc,
                                      IdentifierInfo *Ident,
                                      ParsedAttributes &Attrs,
                                      SourceLocation AttrEnd);
void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc);
void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc);
void CheckStaticLocalForDllExport(VarDecl *VD);
void FinalizeDeclaration(Decl *D);
DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
                                       ArrayRef<Decl *> Group);
DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group);

/// Should be called on all declarations that might have attached
/// documentation comments.
void ActOnDocumentableDecl(Decl *D);
void ActOnDocumentableDecls(ArrayRef<Decl *> Group);

void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
                                     SourceLocation LocAfterDecls);
void CheckForFunctionRedefinition(
    FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr,
    SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D,
                              MultiTemplateParamsArg TemplateParamLists,
                              SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D,
                              SkipBodyInfo *SkipBody = nullptr);
void ActOnStartOfObjCMethodDef(Scope *S, Decl *D);

/// Returns true when \p D is a non-null Objective-C method declaration.
bool isObjCMethodDecl(Decl *D) {
  return D && isa<ObjCMethodDecl>(D);
}

/// Determine whether we can delay parsing the body of a function or
/// function template until it is used, assuming we don't care about emitting
/// code for that function.
///
/// This will be \c false if we may need the body of the function in
/// the middle of parsing an expression (where it's impractical to switch to
/// parsing a different function), for instance, if it's constexpr in C++11
/// or has an 'auto' return type in C++14. These cases are essentially bugs.
bool canDelayFunctionBody(const Declarator &D); /// Determine whether we can skip parsing the body of a function /// definition, assuming we don't care about analyzing its body or emitting /// code for that function. /// /// This will be \c false only if we may need the body of the function in /// order to parse the rest of the program (for instance, if it is /// \c constexpr in C++11 or has an 'auto' return type in C++14). bool canSkipFunctionBody(Decl *D); void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation); Decl *ActOnSkippedFunctionBody(Decl *Decl); void ActOnFinishInlineFunctionDef(FunctionDecl *D); /// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an /// attribute for which parsing is delayed. void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs); /// Diagnose any unused parameters in the given sequence of /// ParmVarDecl pointers. void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters); /// Diagnose whether the size of parameters or return value of a /// function or obj-c method definition is pass-by-value and larger than a /// specified threshold. void DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters, QualType ReturnTy, NamedDecl *D); void DiagnoseInvalidJumps(Stmt *Body); Decl *ActOnFileScopeAsmDecl(Expr *expr, SourceLocation AsmLoc, SourceLocation RParenLoc); /// Handle a C++11 empty-declaration and attribute-declaration. Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList, SourceLocation SemiLoc); enum class ModuleDeclKind { Interface, ///< 'export module X;' Implementation, ///< 'module X;' }; /// The parser has processed a module-declaration that begins the definition /// of a module interface or implementation. 
DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc, SourceLocation ModuleLoc, ModuleDeclKind MDK, ModuleIdPath Path, bool IsFirstDecl); /// The parser has processed a global-module-fragment declaration that begins /// the definition of the global module fragment of the current module unit. /// \param ModuleLoc The location of the 'module' keyword. DeclGroupPtrTy ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc); /// The parser has processed a private-module-fragment declaration that begins /// the definition of the private module fragment of the current module unit. /// \param ModuleLoc The location of the 'module' keyword. /// \param PrivateLoc The location of the 'private' keyword. DeclGroupPtrTy ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc, SourceLocation PrivateLoc); /// The parser has processed a module import declaration. /// /// \param StartLoc The location of the first token in the declaration. This /// could be the location of an '@', 'export', or 'import'. /// \param ExportLoc The location of the 'export' keyword, if any. /// \param ImportLoc The location of the 'import' keyword. /// \param Path The module access path. DeclResult ActOnModuleImport(SourceLocation StartLoc, SourceLocation ExportLoc, SourceLocation ImportLoc, ModuleIdPath Path); DeclResult ActOnModuleImport(SourceLocation StartLoc, SourceLocation ExportLoc, SourceLocation ImportLoc, Module *M, ModuleIdPath Path = {}); /// The parser has processed a module import translated from a /// #include or similar preprocessing directive. void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod); void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod); /// The parsed has entered a submodule. void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod); /// The parser has left a submodule. 
void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod);

/// Create an implicit import of the given module at the given
/// source location, for error recovery, if possible.
///
/// This routine is typically used when an entity found by name lookup
/// is actually hidden within a module that we know about but the user
/// has forgotten to import.
void createImplicitModuleImportForErrorRecovery(SourceLocation Loc,
                                                Module *Mod);

/// Kinds of missing import. Note, the values of these enumerators correspond
/// to %select values in diagnostics.
enum class MissingImportKind {
  Declaration,
  Definition,
  DefaultArgument,
  ExplicitSpecialization,
  PartialSpecialization
};

/// Diagnose that the specified declaration needs to be visible but
/// isn't, and suggest a module import that would resolve the problem.
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
                           MissingImportKind MIK, bool Recover = true);
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
                           SourceLocation DeclLoc, ArrayRef<Module *> Modules,
                           MissingImportKind MIK, bool Recover);

/// The parser has begun / finished an export-declaration (braces given by
/// LBraceLoc / RBraceLoc).
Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc,
                           SourceLocation LBraceLoc);
Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl,
                            SourceLocation RBraceLoc);

/// We've found a use of a templated declaration that would trigger an
/// implicit instantiation. Check that any relevant explicit specializations
/// and partial specializations are visible, and diagnose if not.
void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec);

/// We've found a use of a template specialization that would select a
/// partial specialization. Check that the partial specialization is visible,
/// and diagnose if not.
void checkPartialSpecializationVisibility(SourceLocation Loc,
                                          NamedDecl *Spec);

/// Retrieve a suitable printing policy for diagnostics.
PrintingPolicy getPrintingPolicy() const {
  // Delegates to the static overload using this Sema's context and
  // preprocessor.
  return getPrintingPolicy(Context, PP);
}

/// Retrieve a suitable printing policy for diagnostics.
static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx,
                                        const Preprocessor &PP);

/// Scope actions.
void ActOnPopScope(SourceLocation Loc, Scope *S);
void ActOnTranslationUnitScope(Scope *S);

/// Handle a free-standing declaration specifier (no declarator); an
/// anonymous record introduced by it, if any, is returned via \p AnonRecord.
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
                                 RecordDecl *&AnonRecord);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
                                 MultiTemplateParamsArg TemplateParams,
                                 bool IsExplicitInstantiation,
                                 RecordDecl *&AnonRecord);

Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS, AccessSpecifier AS,
                                  RecordDecl *Record,
                                  const PrintingPolicy &Policy);

Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
                                     RecordDecl *Record);

/// Common ways to introduce type names without a tag for use in diagnostics.
/// Keep in sync with err_tag_reference_non_tag.
enum NonTagKind {
  NTK_NonStruct,
  NTK_NonClass,
  NTK_NonUnion,
  NTK_NonEnum,
  NTK_Typedef,
  NTK_TypeAlias,
  NTK_Template,
  NTK_TypeAliasTemplate,
  NTK_TemplateTemplateArgument,
};

/// Given a non-tag type declaration, returns an enum useful for indicating
/// what kind of non-tag type this is.
NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK); bool isAcceptableTagRedeclaration(const TagDecl *Previous, TagTypeKind NewTag, bool isDefinition, SourceLocation NewTagLoc, const IdentifierInfo *Name); enum TagUseKind { TUK_Reference, // Reference to a tag: 'struct foo *X;' TUK_Declaration, // Fwd decl of a tag: 'struct foo;' TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;' TUK_Friend // Friend declaration: 'friend struct foo;' }; Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, AccessSpecifier AS, SourceLocation ModulePrivateLoc, MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl, bool &IsDependent, SourceLocation ScopedEnumKWLoc, bool ScopedEnumUsesClassTag, TypeResult UnderlyingType, bool IsTypeSpecifier, bool IsTemplateParamOrArg, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc, unsigned TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, MultiTemplateParamsArg TempParamLists); TypeResult ActOnDependentTag(Scope *S, unsigned TagSpec, TagUseKind TUK, const CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation TagLoc, SourceLocation NameLoc); void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart, IdentifierInfo *ClassName, SmallVectorImpl<Decl *> &Decls); Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth); FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS); MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS, const ParsedAttr &MSPropertyAttr); FieldDecl 
*CheckFieldDecl(DeclarationName Name, QualType T, TypeSourceInfo *TInfo, RecordDecl *Record, SourceLocation Loc, bool Mutable, Expr *BitfieldWidth, InClassInitStyle InitStyle, SourceLocation TSSL, AccessSpecifier AS, NamedDecl *PrevDecl, Declarator *D = nullptr); bool CheckNontrivialField(FieldDecl *FD); void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM); enum TrivialABIHandling { /// The triviality of a method unaffected by "trivial_abi". TAH_IgnoreTrivialABI, /// The triviality of a method affected by "trivial_abi". TAH_ConsiderTrivialABI }; bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM, TrivialABIHandling TAH = TAH_IgnoreTrivialABI, bool Diagnose = false); CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD); void ActOnLastBitfield(SourceLocation DeclStart, SmallVectorImpl<Decl *> &AllIvarDecls); Decl *ActOnIvar(Scope *S, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, tok::ObjCKeywordKind visibility); // This is used for both record definitions and ObjC interface declarations. void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl, ArrayRef<Decl *> Fields, SourceLocation LBrac, SourceLocation RBrac, const ParsedAttributesView &AttrList); /// ActOnTagStartDefinition - Invoked when we have entered the /// scope of a tag's definition (e.g., for an enumeration, class, /// struct, or union). void ActOnTagStartDefinition(Scope *S, Decl *TagDecl); /// Perform ODR-like check for C/ObjC when merging tag types from modules. /// Differently from C++, actually parse the body and reject / error out /// in case of a structural mismatch. bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev, SkipBodyInfo &SkipBody); typedef void *SkippedDefinitionContext; /// Invoked when we enter a tag definition that we're skipping. 
SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD); Decl *ActOnObjCContainerStartDefinition(Decl *IDecl); /// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a /// C++ record definition's base-specifiers clause and are starting its /// member declarations. void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl, SourceLocation FinalLoc, bool IsFinalSpelledSealed, SourceLocation LBraceLoc); /// ActOnTagFinishDefinition - Invoked once we have finished parsing /// the definition of a tag (enumeration, class, struct, or union). void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl, SourceRange BraceRange); void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context); void ActOnObjCContainerFinishDefinition(); /// Invoked when we must temporarily exit the objective-c container /// scope for parsing/looking-up C constructs. /// /// Must be followed by a call to \see ActOnObjCReenterContainerContext void ActOnObjCTemporaryExitContainerContext(DeclContext *DC); void ActOnObjCReenterContainerContext(DeclContext *DC); /// ActOnTagDefinitionError - Invoked when there was an unrecoverable /// error parsing the definition of a tag. void ActOnTagDefinitionError(Scope *S, Decl *TagDecl); EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum, EnumConstantDecl *LastEnumConst, SourceLocation IdLoc, IdentifierInfo *Id, Expr *val); bool CheckEnumUnderlyingType(TypeSourceInfo *TI); bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped, QualType EnumUnderlyingTy, bool IsFixed, const EnumDecl *Prev); /// Determine whether the body of an anonymous enumeration should be skipped. /// \param II The name of the first enumerator. 
SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II, SourceLocation IILoc); Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant, SourceLocation IdLoc, IdentifierInfo *Id, const ParsedAttributesView &Attrs, SourceLocation EqualLoc, Expr *Val); void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange, Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S, const ParsedAttributesView &Attr); DeclContext *getContainingDC(DeclContext *DC); /// Set the current declaration context until it gets popped. void PushDeclContext(Scope *S, DeclContext *DC); void PopDeclContext(); /// EnterDeclaratorContext - Used when we must lookup names in the context /// of a declarator's nested name specifier. void EnterDeclaratorContext(Scope *S, DeclContext *DC); void ExitDeclaratorContext(Scope *S); /// Push the parameters of D, which must be a function, into scope. void ActOnReenterFunctionContext(Scope* S, Decl* D); void ActOnExitFunctionContext(); DeclContext *getFunctionLevelDeclContext(); /// getCurFunctionDecl - If inside of a function body, this returns a pointer /// to the function decl for the function being parsed. If we're currently /// in a 'block', this returns the containing context. FunctionDecl *getCurFunctionDecl(); /// getCurMethodDecl - If inside of a method body, this returns a pointer to /// the method decl for the method being parsed. If we're currently /// in a 'block', this returns the containing context. ObjCMethodDecl *getCurMethodDecl(); /// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method /// or C function we're in, otherwise return null. If we're currently /// in a 'block', this returns the containing context. NamedDecl *getCurFunctionOrMethodDecl(); /// Add this decl to the scope shadowed decl chains. 
void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true);

/// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true
/// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns
/// true if 'D' belongs to the given declaration context.
///
/// \param AllowInlineNamespace If \c true, allow the declaration to be in the
///        enclosing namespace set of the context, rather than contained
///        directly within it.
bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr,
                   bool AllowInlineNamespace = false);

/// Finds the scope corresponding to the given decl context, if it
/// happens to be an enclosing scope. Otherwise return NULL.
static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC);

/// Subroutines of ActOnDeclarator().
TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
                              TypeSourceInfo *TInfo);
bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);

/// Describes the kind of merge to perform for availability
/// attributes (including "deprecated", "unavailable", and "availability").
enum AvailabilityMergeKind {
  /// Don't merge availability attributes at all.
  AMK_None,
  /// Merge availability attributes for a redeclaration, which requires
  /// an exact match.
  AMK_Redeclaration,
  /// Merge availability attributes for an override, which requires
  /// an exact match or a weakening of constraints.
  AMK_Override,
  /// Merge availability attributes for an implementation of
  /// a protocol requirement.
  AMK_ProtocolImplementation,
};

/// Describes the kind of priority given to an availability attribute.
///
/// The sum of priorities determines the final priority of the attribute.
/// The final priority determines how the attribute will be merged.
/// An attribute with a lower priority will always remove higher priority
/// attributes for the specified platform when it is being applied. An
/// attribute with a higher priority will not be applied if the declaration
/// already has an availability attribute with a lower priority for the
/// specified platform. The final priority values are not expected to match
/// the values in this enumeration, but instead should be treated as a plain
/// integer value. This enumeration just names the priority weights that are
/// used to calculate that final value.
enum AvailabilityPriority : int {
  /// The availability attribute was specified explicitly next to the
  /// declaration.
  AP_Explicit = 0,
  /// The availability attribute was applied using '#pragma clang attribute'.
  AP_PragmaClangAttribute = 1,
  /// The availability attribute for a specific platform was inferred from
  /// an availability attribute for another platform.
  AP_InferredFromOtherPlatform = 2
};

/// Attribute merging methods. Return the new attribute if one was added,
/// null otherwise.
AvailabilityAttr *mergeAvailabilityAttr(
    NamedDecl *D, SourceRange Range, IdentifierInfo *Platform, bool Implicit,
    VersionTuple Introduced, VersionTuple Deprecated, VersionTuple Obsoleted,
    bool IsUnavailable, StringRef Message, bool IsStrict,
    StringRef Replacement, AvailabilityMergeKind AMK, int Priority,
    unsigned AttrSpellingListIndex);
TypeVisibilityAttr *
mergeTypeVisibilityAttr(Decl *D, SourceRange Range,
                        TypeVisibilityAttr::VisibilityType Vis,
                        unsigned AttrSpellingListIndex);
VisibilityAttr *mergeVisibilityAttr(Decl *D, SourceRange Range,
                                    VisibilityAttr::VisibilityType Vis,
                                    unsigned AttrSpellingListIndex);
UuidAttr *mergeUuidAttr(Decl *D, SourceRange Range,
                        unsigned AttrSpellingListIndex, StringRef Uuid);
DLLImportAttr *mergeDLLImportAttr(Decl *D, SourceRange Range,
                                  unsigned AttrSpellingListIndex);
DLLExportAttr *mergeDLLExportAttr(Decl *D, SourceRange Range,
                                  unsigned AttrSpellingListIndex);
MSInheritanceAttr *
mergeMSInheritanceAttr(Decl *D, SourceRange Range, bool BestCase,
                       unsigned AttrSpellingListIndex,
                       MSInheritanceAttr::Spelling SemanticSpelling);
FormatAttr
*mergeFormatAttr(Decl *D, SourceRange Range, IdentifierInfo *Format, int FormatIdx, int FirstArg, unsigned AttrSpellingListIndex); SectionAttr *mergeSectionAttr(Decl *D, SourceRange Range, StringRef Name, unsigned AttrSpellingListIndex); CodeSegAttr *mergeCodeSegAttr(Decl *D, SourceRange Range, StringRef Name, unsigned AttrSpellingListIndex); AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D, SourceRange Range, IdentifierInfo *Ident, unsigned AttrSpellingListIndex); MinSizeAttr *mergeMinSizeAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); NoSpeculativeLoadHardeningAttr * mergeNoSpeculativeLoadHardeningAttr(Decl *D, const NoSpeculativeLoadHardeningAttr &AL); SpeculativeLoadHardeningAttr * mergeSpeculativeLoadHardeningAttr(Decl *D, const SpeculativeLoadHardeningAttr &AL); OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL); InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const InternalLinkageAttr &AL); CommonAttr *mergeCommonAttr(Decl *D, const ParsedAttr &AL); CommonAttr *mergeCommonAttr(Decl *D, const CommonAttr &AL); void mergeDeclAttributes(NamedDecl *New, Decl *Old, AvailabilityMergeKind AMK = AMK_Redeclaration); void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New, LookupResult &OldDecls); bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S, bool MergeTypeWithOld); bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old, Scope *S, bool MergeTypeWithOld); void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old); void MergeVarDecl(VarDecl *New, LookupResult &Previous); void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld); void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old); bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn); void notePreviousDefinition(const NamedDecl *Old, SourceLocation New); bool 
MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S); // AssignmentAction - This is used by all the assignment diagnostic functions // to represent what is actually causing the operation enum AssignmentAction { AA_Assigning, AA_Passing, AA_Returning, AA_Converting, AA_Initializing, AA_Sending, AA_Casting, AA_Passing_CFAudited }; /// C++ Overloading. enum OverloadKind { /// This is a legitimate overload: the existing declarations are /// functions or function templates with different signatures. Ovl_Overload, /// This is not an overload because the signature exactly matches /// an existing declaration. Ovl_Match, /// This is not an overload because the lookup results contain a /// non-function. Ovl_NonFunction }; OverloadKind CheckOverload(Scope *S, FunctionDecl *New, const LookupResult &OldDecls, NamedDecl *&OldDecl, bool IsForUsingDecl); bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl, bool ConsiderCudaAttrs = true); ImplicitConversionSequence TryImplicitConversion(Expr *From, QualType ToType, bool SuppressUserConversions, bool AllowExplicit, bool InOverloadResolution, bool CStyle, bool AllowObjCWritebackConversion); bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType); bool IsFloatingPointPromotion(QualType FromType, QualType ToType); bool IsComplexPromotion(QualType FromType, QualType ToType); bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCWritebackConversion(QualType FromType, QualType ToType, QualType &ConvertedType); bool IsBlockPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType); bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType, const FunctionProtoType *NewType, unsigned *ArgPos = nullptr); void 
HandleFunctionTypeMismatch(PartialDiagnostic &PDiag, QualType FromType, QualType ToType); void maybeExtendBlockObject(ExprResult &E); CastKind PrepareCastToObjCObjectPointer(ExprResult &E); bool CheckPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath& BasePath, bool IgnoreBaseAccess, bool Diagnose = true); bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType &ConvertedType); bool CheckMemberPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath &BasePath, bool IgnoreBaseAccess); bool IsQualificationConversion(QualType FromType, QualType ToType, bool CStyle, bool &ObjCLifetimeConversion); bool IsFunctionConversion(QualType FromType, QualType ToType, QualType &ResultTy); bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType); bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg); ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity, const VarDecl *NRVOCandidate, QualType ResultType, Expr *Value, bool AllowNRVO = true); bool CanPerformCopyInitialization(const InitializedEntity &Entity, ExprResult Init); ExprResult PerformCopyInitialization(const InitializedEntity &Entity, SourceLocation EqualLoc, ExprResult Init, bool TopLevelOfInitList = false, bool AllowExplicit = false); ExprResult PerformObjectArgumentInitialization(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, CXXMethodDecl *Method); /// Check that the lifetime of the initializer (and its subobjects) is /// sufficient for initializing the entity, and perform lifetime extension /// (when permitted) if not. void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init); ExprResult PerformContextuallyConvertToBool(Expr *From); ExprResult PerformContextuallyConvertToObjCPointer(Expr *From); /// Contexts in which a converted constant expression is required. enum CCEKind { CCEK_CaseValue, ///< Expression in a case label. 
CCEK_Enumerator,  ///< Enumerator value with fixed underlying type.
CCEK_TemplateArg, ///< Value of a non-type template parameter.
CCEK_NewExpr,     ///< Constant expression in a noptr-new-declarator.
CCEK_ConstexprIf, ///< Condition in a constexpr if statement.
CCEK_ExplicitBool ///< Condition in an explicit(bool) specifier.
};

/// Check a converted constant expression in context \p CCE; the evaluated
/// result is reported through \p Value.
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
                                            llvm::APSInt &Value, CCEKind CCE);
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
                                            APValue &Value, CCEKind CCE);

/// Abstract base class used to perform a contextual implicit
/// conversion from an expression to any type passing a filter.
class ContextualImplicitConverter {
public:
  // Diagnostic-suppression flags consumed by the conversion machinery
  // (see PerformContextualImplicitConversion).
  bool Suppress;
  bool SuppressConversion;

  ContextualImplicitConverter(bool Suppress = false,
                              bool SuppressConversion = false)
      : Suppress(Suppress), SuppressConversion(SuppressConversion) {}

  /// Determine whether the specified type is a valid destination type
  /// for this conversion.
  virtual bool match(QualType T) = 0;

  /// Emits a diagnostic complaining that the expression does not have
  /// integral or enumeration type.
  virtual SemaDiagnosticBuilder
  diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0;

  /// Emits a diagnostic when the expression has incomplete class type.
  virtual SemaDiagnosticBuilder
  diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0;

  /// Emits a diagnostic when the only matching conversion function
  /// is explicit.
  virtual SemaDiagnosticBuilder diagnoseExplicitConv(
      Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;

  /// Emits a note for the explicit conversion function.
  virtual SemaDiagnosticBuilder
  noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;

  /// Emits a diagnostic when there are multiple possible conversion
  /// functions.
  virtual SemaDiagnosticBuilder
  diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0;

  /// Emits a note for one of the candidate conversions.
  virtual SemaDiagnosticBuilder
  noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;

  /// Emits a diagnostic when we picked a conversion function
  /// (for cases when we are not allowed to pick a conversion function).
  virtual SemaDiagnosticBuilder diagnoseConversion(
      Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;

  virtual ~ContextualImplicitConverter() {}
};

/// Converter used for contextual conversions to an integral or (possibly
/// scoped) enumeration type (integral constant expressions and friends).
class ICEConvertDiagnoser : public ContextualImplicitConverter {
  bool AllowScopedEnumerations;

public:
  ICEConvertDiagnoser(bool AllowScopedEnumerations, bool Suppress,
                      bool SuppressConversion)
      : ContextualImplicitConverter(Suppress, SuppressConversion),
        AllowScopedEnumerations(AllowScopedEnumerations) {}

  /// Match an integral or (possibly scoped) enumeration type.
  bool match(QualType T) override;

  SemaDiagnosticBuilder
  diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override {
    // "No match" here always means "not an integer-like type".
    return diagnoseNotInt(S, Loc, T);
  }

  /// Emits a diagnostic complaining that the expression does not have
  /// integral or enumeration type.
  virtual SemaDiagnosticBuilder
  diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0;
};

/// Perform a contextual implicit conversion.
ExprResult PerformContextualImplicitConversion(
    SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter);

enum ObjCSubscriptKind {
  OS_Array,
  OS_Dictionary,
  OS_Error
};
ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE);

// Note that LK_String is intentionally after the other literals, as
// this is used for diagnostics logic.
enum ObjCLiteralKind {
  LK_Array,
  LK_Dictionary,
  LK_Numeric,
  LK_Boxed,
  LK_String,
  LK_Block,
  LK_None
};
ObjCLiteralKind CheckLiteralKind(Expr *FromE);

ExprResult PerformObjectMemberConversion(Expr *From,
                                         NestedNameSpecifier *Qualifier,
                                         NamedDecl *FoundDecl,
                                         NamedDecl *Member);

// Members have to be NamespaceDecl* or TranslationUnitDecl*.
// TODO: make this a typesafe union.
/// Sets used by argument-dependent lookup to accumulate the namespaces and
/// classes associated with a call's argument types.
typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet;
typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet;

using ADLCallKind = CallExpr::ADLCallKind;

//===--------------------------------------------------------------------===//
// Overload-candidate construction: each AddXxxCandidate entry point appends
// candidates for one kind of callee to \p CandidateSet.
//===--------------------------------------------------------------------===//

/// Add a single non-template function as an overload candidate.
void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl,
                          ArrayRef<Expr *> Args,
                          OverloadCandidateSet &CandidateSet,
                          bool SuppressUserConversions = false,
                          bool PartialOverloading = false,
                          bool AllowExplicit = true,
                          bool AllowExplicitConversion = false,
                          ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
                          ConversionSequenceList EarlyConversions = None);

/// Add every function in \p Functions (functions and function templates)
/// as overload candidates.
void AddFunctionCandidates(const UnresolvedSetImpl &Functions,
                           ArrayRef<Expr *> Args,
                           OverloadCandidateSet &CandidateSet,
                           TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
                           bool SuppressUserConversions = false,
                           bool PartialOverloading = false,
                           bool FirstArgumentIsBase = false);

/// Add a member-function candidate found via lookup.
// NOTE(review): this overload spells the flag 'SuppressUserConversion'
// (singular) while the sibling overloads use the plural — confirm whether the
// name mismatch is intentional.
void AddMethodCandidate(DeclAccessPair FoundDecl, QualType ObjectType,
                        Expr::Classification ObjectClassification,
                        ArrayRef<Expr *> Args,
                        OverloadCandidateSet& CandidateSet,
                        bool SuppressUserConversion = false);

/// Add a specific C++ method as an overload candidate, with \p ActingContext
/// as the class through which the member is accessed.
void AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl,
                        CXXRecordDecl *ActingContext, QualType ObjectType,
                        Expr::Classification ObjectClassification,
                        ArrayRef<Expr *> Args,
                        OverloadCandidateSet& CandidateSet,
                        bool SuppressUserConversions = false,
                        bool PartialOverloading = false,
                        ConversionSequenceList EarlyConversions = None);

/// Add a member function template as an overload candidate.
void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl,
                                DeclAccessPair FoundDecl,
                                CXXRecordDecl *ActingContext,
                                TemplateArgumentListInfo *ExplicitTemplateArgs,
                                QualType ObjectType,
                                Expr::Classification ObjectClassification,
                                ArrayRef<Expr *> Args,
                                OverloadCandidateSet& CandidateSet,
                                bool SuppressUserConversions = false,
                                bool PartialOverloading = false);

/// Add a (non-member) function template as an overload candidate.
void AddTemplateOverloadCandidate(
    FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
    TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
    OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false,
    bool PartialOverloading = false, bool AllowExplicit = true,
    ADLCallKind IsADLCandidate = ADLCallKind::NotADL);

/// Check the conversions of a candidate's non-dependent parameters,
/// filling in \p Conversions.
bool CheckNonDependentConversions(FunctionTemplateDecl *FunctionTemplate,
                                  ArrayRef<QualType> ParamTypes,
                                  ArrayRef<Expr *> Args,
                                  OverloadCandidateSet &CandidateSet,
                                  ConversionSequenceList &Conversions,
                                  bool SuppressUserConversions,
                                  CXXRecordDecl *ActingContext = nullptr,
                                  QualType ObjectType = QualType(),
                                  Expr::Classification ObjectClassification = {});

/// Add a conversion function as a candidate for converting \p From
/// to \p ToType.
void AddConversionCandidate(
    CXXConversionDecl *Conversion, DeclAccessPair FoundDecl,
    CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
    OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
    bool AllowExplicit, bool AllowResultConversion = true);

/// Add a conversion function template as a candidate.
void AddTemplateConversionCandidate(
    FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
    CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
    OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
    bool AllowExplicit, bool AllowResultConversion = true);

/// Add a surrogate call candidate: calling \p Object through the
/// conversion-to-function-pointer \p Conversion.
void AddSurrogateCandidate(CXXConversionDecl *Conversion,
                           DeclAccessPair FoundDecl,
                           CXXRecordDecl *ActingContext,
                           const FunctionProtoType *Proto, Expr *Object,
                           ArrayRef<Expr *> Args,
                           OverloadCandidateSet& CandidateSet);

/// Add the member operators of the first argument's class type as
/// candidates for operator \p Op.
void AddMemberOperatorCandidates(OverloadedOperatorKind Op,
                                 SourceLocation OpLoc, ArrayRef<Expr *> Args,
                                 OverloadCandidateSet& CandidateSet,
                                 SourceRange OpRange = SourceRange());

/// Add a single built-in operator candidate with the given parameter types.
void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args,
                         OverloadCandidateSet& CandidateSet,
                         bool IsAssignmentOperator = false,
                         unsigned NumContextualBoolArguments = 0);

/// Add all applicable built-in operator candidates for \p Op.
void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
                                  SourceLocation OpLoc, ArrayRef<Expr *> Args,
                                  OverloadCandidateSet& CandidateSet);

/// Add candidates found by argument-dependent lookup of \p Name.
void AddArgumentDependentLookupCandidates(DeclarationName Name,
                                          SourceLocation Loc,
                                          ArrayRef<Expr *> Args,
                                          TemplateArgumentListInfo *ExplicitTemplateArgs,
                                          OverloadCandidateSet& CandidateSet,
                                          bool PartialOverloading = false);

// Emit as a 'note' the specific overload candidate.
'note' the specific overload candidate void NoteOverloadCandidate(NamedDecl *Found, FunctionDecl *Fn, QualType DestType = QualType(), bool TakingAddress = false); // Emit as a series of 'note's all template and non-templates identified by // the expression Expr void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(), bool TakingAddress = false); /// Check the enable_if expressions on the given function. Returns the first /// failing attribute, or NULL if they were all successful. EnableIfAttr *CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args, bool MissingImplicitThis = false); /// Find the failed Boolean condition within a given Boolean /// constant expression, and describe it with a string. std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond); /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any /// non-ArgDependent DiagnoseIfAttrs. /// /// Argument-dependent diagnose_if attributes should be checked each time a /// function is used as a direct callee of a function call. /// /// Returns true if any errors were emitted. bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function, const Expr *ThisArg, ArrayRef<const Expr *> Args, SourceLocation Loc); /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any /// ArgDependent DiagnoseIfAttrs. /// /// Argument-independent diagnose_if attributes should be checked on every use /// of a function. /// /// Returns true if any errors were emitted. bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND, SourceLocation Loc); /// Returns whether the given function's address can be taken or not, /// optionally emitting a diagnostic if the address can't be taken. /// /// Returns false if taking the address of the function is illegal. 
bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function, bool Complain = false, SourceLocation Loc = SourceLocation()); // [PossiblyAFunctionType] --> [Return] // NonFunctionType --> NonFunctionType // R (A) --> R(A) // R (*)(A) --> R (A) // R (&)(A) --> R (A) // R (S::*)(A) --> R (A) QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType); FunctionDecl * ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr, QualType TargetType, bool Complain, DeclAccessPair &Found, bool *pHadMultipleCandidates = nullptr); FunctionDecl * resolveAddressOfOnlyViableOverloadCandidate(Expr *E, DeclAccessPair &FoundResult); bool resolveAndFixAddressOfOnlyViableOverloadCandidate( ExprResult &SrcExpr, bool DoFunctionPointerConversion = false); FunctionDecl * ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl, bool Complain = false, DeclAccessPair *Found = nullptr); bool ResolveAndFixSingleFunctionTemplateSpecialization( ExprResult &SrcExpr, bool DoFunctionPointerConverion = false, bool Complain = false, SourceRange OpRangeForComplaining = SourceRange(), QualType DestTypeForComplaining = QualType(), unsigned DiagIDForComplaining = 0); Expr *FixOverloadedFunctionReference(Expr *E, DeclAccessPair FoundDecl, FunctionDecl *Fn); ExprResult FixOverloadedFunctionReference(ExprResult, DeclAccessPair FoundDecl, FunctionDecl *Fn); void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool PartialOverloading = false); // An enum used to represent the different possible results of building a // range-based for loop. 
enum ForRangeStatus { FRS_Success, FRS_NoViableFunction, FRS_DiagnosticIssued }; ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc, SourceLocation RangeLoc, const DeclarationNameInfo &NameInfo, LookupResult &MemberLookup, OverloadCandidateSet *CandidateSet, Expr *Range, ExprResult *CallExpr); ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc, Expr *ExecConfig, bool AllowTypoCorrection=true, bool CalleesAddressIsTaken=false); bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, MultiExprArg Args, SourceLocation RParenLoc, OverloadCandidateSet *CandidateSet, ExprResult *Result); ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, const UnresolvedSetImpl &Fns, Expr *input, bool RequiresADL = true); ExprResult CreateOverloadedBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc, const UnresolvedSetImpl &Fns, Expr *LHS, Expr *RHS, bool RequiresADL = true); ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc, SourceLocation RLoc, Expr *Base,Expr *Idx); ExprResult BuildCallToMemberFunction(Scope *S, Expr *MemExpr, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc); ExprResult BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc); ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc, bool *NoArrowOperatorFound = nullptr); /// CheckCallReturnType - Checks that a call expression's return type is /// complete. Returns true on failure. The location passed in is the location /// that best represents the call. bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc, CallExpr *CE, FunctionDecl *FD); /// Helpers for dealing with blocks and functions. 
/// Validate the parameters of a function definition; optionally checks that
/// parameters are named.
bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters,
                              bool CheckParameterNames);
void CheckCXXDefaultArguments(FunctionDecl *FD);
void CheckExtraCXXDefaultArguments(Declarator &D);
/// Return the nearest enclosing scope that is not a class/record scope.
Scope *getNonFieldDeclScope(Scope *S);

/// \name Name lookup
///
/// These routines provide name lookup that is used during semantic
/// analysis to resolve the various kinds of names (identifiers,
/// overloaded operator names, constructor names, etc.) into zero or
/// more declarations within a particular scope. The major entry
/// points are LookupName, which performs unqualified name lookup,
/// and LookupQualifiedName, which performs qualified name lookup.
///
/// All name lookup is performed based on some specific criteria,
/// which specify what names will be visible to name lookup and how
/// far name lookup should work. These criteria are important both
/// for capturing language semantics (certain lookups will ignore
/// certain names, for example) and for performance, since name
/// lookup is often a bottleneck in the compilation of C++. Name
/// lookup criteria is specified via the LookupCriteria enumeration.
///
/// The results of name lookup can vary based on the kind of name
/// lookup performed, the current language, and the translation
/// unit. In C, for example, name lookup will either return nothing
/// (no entity found) or a single declaration. In C++, name lookup
/// can additionally refer to a set of overloaded functions or
/// result in an ambiguity. All of the possible results of name
/// lookup are captured by the LookupResult class, which provides
/// the ability to distinguish among them.
//@{

/// Describes the kind of name lookup to perform.
enum LookupNameKind {
  /// Ordinary name lookup, which finds ordinary names (functions,
  /// variables, typedefs, etc.) in C and most kinds of names
  /// (functions, variables, members, types, etc.) in C++.
  LookupOrdinaryName = 0,
  /// Tag name lookup, which finds the names of enums, classes,
  /// structs, and unions.
  LookupTagName,
  /// Label name lookup.
  LookupLabel,
  /// Member name lookup, which finds the names of
  /// class/struct/union members.
  LookupMemberName,
  /// Look up of an operator name (e.g., operator+) for use with
  /// operator overloading. This lookup is similar to ordinary name
  /// lookup, but will ignore any declarations that are class members.
  LookupOperatorName,
  /// Look up of a name that precedes the '::' scope resolution
  /// operator in C++. This lookup completely ignores operator, object,
  /// function, and enumerator names (C++ [basic.lookup.qual]p1).
  LookupNestedNameSpecifierName,
  /// Look up a namespace name within a C++ using directive or
  /// namespace alias definition, ignoring non-namespace names (C++
  /// [basic.lookup.udir]p1).
  LookupNamespaceName,
  /// Look up all declarations in a scope with the given name,
  /// including resolved using declarations. This is appropriate
  /// for checking redeclarations for a using declaration.
  LookupUsingDeclName,
  /// Look up an ordinary name that is going to be redeclared as a
  /// name with linkage. This lookup ignores any declarations that
  /// are outside of the current scope unless they have linkage. See
  /// C99 6.2.2p4-5 and C++ [basic.link]p6.
  LookupRedeclarationWithLinkage,
  /// Look up a friend of a local class. This lookup does not look
  /// outside the innermost non-class scope. See C++11 [class.friend]p11.
  LookupLocalFriendName,
  /// Look up the name of an Objective-C protocol.
  LookupObjCProtocolName,
  /// Look up implicit 'self' parameter of an objective-c method.
  LookupObjCImplicitSelfParam,
  /// Look up the name of an OpenMP user-defined reduction operation.
  LookupOMPReductionName,
  /// Look up the name of an OpenMP user-defined mapper.
  LookupOMPMapperName,
  /// Look up any declaration with any name.
  LookupAnyName
};

/// Specifies whether (or how) name lookup is being performed for a
/// redeclaration (vs. a reference).
/// Specifies whether (or how) name lookup is being performed for a
/// redeclaration (vs. a reference).
enum RedeclarationKind {
  /// The lookup is a reference to this name that is not for the
  /// purpose of redeclaring the name.
  NotForRedeclaration = 0,
  /// The lookup results will be used for redeclaration of a name,
  /// if an entity by that name already exists and is visible.
  ForVisibleRedeclaration,
  /// The lookup results will be used for redeclaration of a name
  /// with external linkage; non-visible lookup results with external linkage
  /// may also be found.
  ForExternalRedeclaration
};

/// Pick the redeclaration-lookup kind appropriate for the current context.
RedeclarationKind forRedeclarationInCurContext() {
  // A declaration with an owning module for linkage can never link against
  // anything that is not visible. We don't need to check linkage here; if
  // the context has internal linkage, redeclaration lookup won't find things
  // from other TUs, and we can't safely compute linkage yet in general.
  if (cast<Decl>(CurContext)
          ->getOwningModuleForLinkage(/*IgnoreLinkage*/true))
    return ForVisibleRedeclaration;
  return ForExternalRedeclaration;
}

/// The possible outcomes of name lookup for a literal operator.
enum LiteralOperatorLookupResult {
  /// The lookup resulted in an error.
  LOLR_Error,
  /// The lookup found no match but no diagnostic was issued.
  LOLR_ErrorNoDiagnostic,
  /// The lookup found a single 'cooked' literal operator, which
  /// expects a normal literal to be built and passed to it.
  LOLR_Cooked,
  /// The lookup found a single 'raw' literal operator, which expects
  /// a string literal containing the spelling of the literal token.
  LOLR_Raw,
  /// The lookup found an overload set of literal operator templates,
  /// which expect the characters of the spelling of the literal token to be
  /// passed as a non-type template argument pack.
  LOLR_Template,
  /// The lookup found an overload set of literal operator templates,
  /// which expect the character type and characters of the spelling of the
  /// string literal token to be passed as template arguments.
  LOLR_StringTemplate
};

/// Look up the special member function \p SM of class \p D with the given
/// qualifiers on the arguments/object.
SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D,
                                               CXXSpecialMember SM,
                                               bool ConstArg,
                                               bool VolatileArg,
                                               bool RValueThis,
                                               bool ConstThis,
                                               bool VolatileThis);

// Callback types used by the delayed typo-correction machinery.
typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator;
typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)>
    TypoRecoveryCallback;

private:
bool CppLookupName(LookupResult &R, Scope *S);

/// State tracked for each pending TypoExpr: the candidate consumer plus the
/// diagnostic and recovery callbacks to invoke once a correction is chosen.
struct TypoExprState {
  std::unique_ptr<TypoCorrectionConsumer> Consumer;
  TypoDiagnosticGenerator DiagHandler;
  TypoRecoveryCallback RecoveryHandler;
  TypoExprState();
  TypoExprState(TypoExprState &&other) noexcept;
  TypoExprState &operator=(TypoExprState &&other) noexcept;
};

/// The set of unhandled TypoExprs and their associated state.
llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos;

/// Creates a new TypoExpr AST node.
TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC,
                            TypoDiagnosticGenerator TDG,
                            TypoRecoveryCallback TRC);

// The set of known/encountered (unique, canonicalized) NamespaceDecls.
//
// The boolean value will be true to indicate that the namespace was loaded
// from an AST/PCH file, or false otherwise.
llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces;

/// Whether we have already loaded known namespaces from an external
/// source.
bool LoadedExternalKnownNamespaces;

/// Helper for CorrectTypo and CorrectTypoDelayed used to create and
/// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction
/// should be skipped entirely.
std::unique_ptr<TypoCorrectionConsumer>
makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo,
                           Sema::LookupNameKind LookupKind, Scope *S,
                           CXXScopeSpec *SS,
                           CorrectionCandidateCallback &CCC,
                           DeclContext *MemberContext, bool EnteringContext,
                           const ObjCObjectPointerType *OPT,
                           bool ErrorRecovery);

public:
const TypoExprState &getTypoExprState(TypoExpr *TE) const;

/// Clears the state of the given TypoExpr.
/// Clears the state of the given TypoExpr.
void clearDelayedTypo(TypoExpr *TE);

/// Look up a name, looking for a single declaration. Return
/// null if the results were absent, ambiguous, or overloaded.
///
/// It is preferable to use the elaborated form and explicitly handle
/// ambiguity and overloaded.
NamedDecl *LookupSingleName(Scope *S, DeclarationName Name,
                            SourceLocation Loc,
                            LookupNameKind NameKind,
                            RedeclarationKind Redecl = NotForRedeclaration);
/// Perform unqualified name lookup in \p S for the name in \p R.
bool LookupName(LookupResult &R, Scope *S,
                bool AllowBuiltinCreation = false);
/// Perform qualified name lookup into \p LookupCtx.
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
                         bool InUnqualifiedLookup = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
                         CXXScopeSpec &SS);
/// Perform lookup for a name optionally qualified by \p SS as written in
/// the source.
bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
                      bool AllowBuiltinCreation = false,
                      bool EnteringContext = false);
ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc,
                                 RedeclarationKind Redecl
                                   = NotForRedeclaration);
bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class);

/// Collect the overloaded operator \p Op functions visible for operand
/// types \p T1 / \p T2 into \p Functions.
void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
                                  QualType T1, QualType T2,
                                  UnresolvedSetImpl &Functions);

LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc,
                               SourceLocation GnuLabelLoc = SourceLocation());

// Lookup helpers for the implicitly-declarable special member functions.
DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class);
CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class);
CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class,
                                             unsigned Quals);
CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals,
                                       bool RValueThis, unsigned ThisQuals);
CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class,
                                            unsigned Quals);
CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals,
                                      bool RValueThis, unsigned ThisQuals);
CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class);

/// Check that the literal-operator identifier in \p Id is well-formed.
bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id);
/// Look up a literal operator for the given argument types; see
/// LiteralOperatorLookupResult for the possible outcomes.
LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R,
                                                  ArrayRef<QualType> ArgTys,
                                                  bool AllowRaw,
                                                  bool AllowTemplate,
                                                  bool AllowStringTemplate,
                                                  bool DiagnoseMissing);
bool isKnownName(StringRef name);

/// Perform argument-dependent lookup of \p Name, accumulating viable
/// functions into \p Functions.
void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc,
                             ArrayRef<Expr *> Args, ADLResult &Functions);

/// Enumerate every declaration visible from \p S (or \p Ctx) through
/// \p Consumer; used e.g. for code completion and typo correction.
void LookupVisibleDecls(Scope *S, LookupNameKind Kind,
                        VisibleDeclConsumer &Consumer,
                        bool IncludeGlobalScope = true,
                        bool LoadExternal = true);
void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind,
                        VisibleDeclConsumer &Consumer,
                        bool IncludeGlobalScope = true,
                        bool IncludeDependentBases = false,
                        bool LoadExternal = true);

enum CorrectTypoKind {
  CTK_NonError,     // CorrectTypo used in a non error recovery situation.
  CTK_ErrorRecovery // CorrectTypo used in normal error recovery.
};

/// Attempt to correct a mistyped name immediately.
TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo,
                           Sema::LookupNameKind LookupKind,
                           Scope *S, CXXScopeSpec *SS,
                           CorrectionCandidateCallback &CCC,
                           CorrectTypoKind Mode,
                           DeclContext *MemberContext = nullptr,
                           bool EnteringContext = false,
                           const ObjCObjectPointerType *OPT = nullptr,
                           bool RecordFailure = true);

/// Attempt to correct a mistyped name lazily, producing a TypoExpr that is
/// resolved later via the given callbacks.
TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo,
                             Sema::LookupNameKind LookupKind, Scope *S,
                             CXXScopeSpec *SS,
                             CorrectionCandidateCallback &CCC,
                             TypoDiagnosticGenerator TDG,
                             TypoRecoveryCallback TRC, CorrectTypoKind Mode,
                             DeclContext *MemberContext = nullptr,
                             bool EnteringContext = false,
                             const ObjCObjectPointerType *OPT = nullptr);

/// Process any TypoExprs in the given Expr and its children,
/// generating diagnostics as appropriate and returning a new Expr if there
/// were typos that were all successfully corrected and ExprError if one or
/// more typos could not be corrected.
///
/// \param E The Expr to check for TypoExprs.
///
/// \param InitDecl A VarDecl to avoid because the Expr being corrected is its
/// initializer.
///
/// \param Filter A function applied to a newly rebuilt Expr to determine if
/// it is an acceptable/usable result from a single combination of typo
/// corrections.
As long as the filter returns ExprError, different /// combinations of corrections will be tried until all are exhausted. ExprResult CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl = nullptr, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }); ExprResult CorrectDelayedTyposInExpr(Expr *E, llvm::function_ref<ExprResult(Expr *)> Filter) { return CorrectDelayedTyposInExpr(E, nullptr, Filter); } ExprResult CorrectDelayedTyposInExpr(ExprResult ER, VarDecl *InitDecl = nullptr, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }) { return ER.isInvalid() ? ER : CorrectDelayedTyposInExpr(ER.get(), Filter); } ExprResult CorrectDelayedTyposInExpr(ExprResult ER, llvm::function_ref<ExprResult(Expr *)> Filter) { return CorrectDelayedTyposInExpr(ER, nullptr, Filter); } void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, bool ErrorRecovery = true); void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, const PartialDiagnostic &PrevNote, bool ErrorRecovery = true); void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F); void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc, ArrayRef<Expr *> Args, AssociatedNamespaceSet &AssociatedNamespaces, AssociatedClassSet &AssociatedClasses); void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S, bool ConsiderLinkage, bool AllowInlineNamespace); bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old); void DiagnoseAmbiguousLookup(LookupResult &Result); //@} ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id, SourceLocation IdLoc, bool TypoCorrection = false); NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID, Scope *S, bool ForRedeclaration, SourceLocation Loc); NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II, Scope *S); void AddKnownFunctionAttributes(FunctionDecl *FD); // More parsing and symbol 
table subroutines. void ProcessPragmaWeak(Scope *S, Decl *D); // Decl attributes - this routine is the top level dispatcher. void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD); // Helper for delayed processing of attributes. void ProcessDeclAttributeDelayed(Decl *D, const ParsedAttributesView &AttrList); void ProcessDeclAttributeList(Scope *S, Decl *D, const ParsedAttributesView &AL, bool IncludeCXX11Attributes = true); bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl, const ParsedAttributesView &AttrList); void checkUnusedDeclAttributes(Declarator &D); /// Determine if type T is a valid subject for a nonnull and similar /// attributes. By default, we look through references (the behavior used by /// nonnull), but if the second parameter is true, then we treat a reference /// type as valid. bool isValidPointerAttrType(QualType T, bool RefOkay = false); bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value); bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC, const FunctionDecl *FD = nullptr); bool CheckAttrTarget(const ParsedAttr &CurrAttr); bool CheckAttrNoArgs(const ParsedAttr &CurrAttr); bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum, StringRef &Str, SourceLocation *ArgLocation = nullptr); bool checkSectionName(SourceLocation LiteralLoc, StringRef Str); bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str); bool checkMSInheritanceAttrOnDefinition( CXXRecordDecl *RD, SourceRange Range, bool BestCase, MSInheritanceAttr::Spelling SemanticSpelling); void CheckAlignasUnderalignment(Decl *D); /// Adjust the calling convention of a method to be the ABI default if it /// wasn't specified explicitly. This handles method types formed from /// function type typedefs and typename template arguments. void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor, SourceLocation Loc); // Check if there is an explicit attribute, but only look through parens. 
// The intent is to look for an attribute on the current declarator, but not // one that came from a typedef. bool hasExplicitCallingConv(QualType T); /// Get the outermost AttributedType node that sets a calling convention. /// Valid types should not have multiple attributes with different CCs. const AttributedType *getCallingConvAttributedType(QualType T) const; /// Stmt attributes - this routine is the top level dispatcher. StmtResult ProcessStmtAttributes(Stmt *Stmt, const ParsedAttributesView &Attrs, SourceRange Range); void WarnConflictingTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl); void CheckConflictingOverridingMethod(ObjCMethodDecl *Method, ObjCMethodDecl *Overridden, bool IsProtocolMethodDecl); /// WarnExactTypedMethods - This routine issues a warning if method /// implementation declaration matches exactly that of its declaration. void WarnExactTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl); typedef llvm::SmallPtrSet<Selector, 8> SelectorSet; /// CheckImplementationIvars - This routine checks if the instance variables /// listed in the implelementation match those listed in the interface. void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl, ObjCIvarDecl **Fields, unsigned nIvars, SourceLocation Loc); /// ImplMethodsVsClassMethods - This is main routine to warn if any method /// remains unimplemented in the class or category \@implementation. void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl, bool IncompleteImpl = false); /// DiagnoseUnimplementedProperties - This routine warns on those properties /// which must be implemented by this implementation. void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl, ObjCContainerDecl *CDecl, bool SynthesizeProperties); /// Diagnose any null-resettable synthesized setters. 
void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl); /// DefaultSynthesizeProperties - This routine default synthesizes all /// properties which must be synthesized in the class's \@implementation. void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl, ObjCInterfaceDecl *IDecl, SourceLocation AtEnd); void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd); /// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is /// an ivar synthesized for 'Method' and 'Method' is a property accessor /// declared in class 'IFace'. bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace, ObjCMethodDecl *Method, ObjCIvarDecl *IV); /// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if ivar which /// backs the property is not used in the property's accessor. void DiagnoseUnusedBackingIvarInAccessor(Scope *S, const ObjCImplementationDecl *ImplD); /// GetIvarBackingPropertyAccessor - If method is a property setter/getter and /// it property has a backing ivar, returns this ivar; otherwise, returns NULL. /// It also returns ivar's property on success. ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method, const ObjCPropertyDecl *&PDecl) const; /// Called by ActOnProperty to handle \@property declarations in /// class extensions. ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, Selector GetterSel, SourceLocation GetterNameLoc, Selector SetterSel, SourceLocation SetterNameLoc, const bool isReadWrite, unsigned &Attributes, const unsigned AttributesAsWritten, QualType T, TypeSourceInfo *TSI, tok::ObjCKeywordKind MethodImplKind); /// Called by ActOnProperty and HandlePropertyInClassExtension to /// handle creating the ObjcPropertyDecl for a category or \@interface. 
ObjCPropertyDecl *CreatePropertyDecl(Scope *S, ObjCContainerDecl *CDecl, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, Selector GetterSel, SourceLocation GetterNameLoc, Selector SetterSel, SourceLocation SetterNameLoc, const bool isReadWrite, const unsigned Attributes, const unsigned AttributesAsWritten, QualType T, TypeSourceInfo *TSI, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr); /// AtomicPropertySetterGetterRules - This routine enforces the rule (via /// warning) when atomic property has one but not the other user-declared /// setter or getter. void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl, ObjCInterfaceDecl* IDecl); void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D); void DiagnoseMissingDesignatedInitOverrides( const ObjCImplementationDecl *ImplD, const ObjCInterfaceDecl *IFD); void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID); enum MethodMatchStrategy { MMS_loose, MMS_strict }; /// MatchTwoMethodDeclarations - Checks if two methods' type match and returns /// true, or false, accordingly. bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method, const ObjCMethodDecl *PrevMethod, MethodMatchStrategy strategy = MMS_strict); /// MatchAllMethodDeclarations - Check methods declaraed in interface or /// or protocol against those declared in their implementations. void MatchAllMethodDeclarations(const SelectorSet &InsMap, const SelectorSet &ClsMap, SelectorSet &InsMapSeen, SelectorSet &ClsMapSeen, ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl, bool &IncompleteImpl, bool ImmediateClass, bool WarnCategoryMethodImpl=false); /// CheckCategoryVsClassMethodMatches - Checks that methods implemented in /// category matches with those implemented in its primary class and /// warns each time an exact match is found. void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP); /// Add the given method to the list of globally-known methods. 
void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method); private: /// AddMethodToGlobalPool - Add an instance or factory method to the global /// pool. See descriptoin of AddInstanceMethodToGlobalPool. void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance); /// LookupMethodInGlobalPool - Returns the instance or factory method and /// optionally warns if there are multiple signatures. ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass, bool instance); public: /// - Returns instance or factory methods in global method pool for /// given selector. It checks the desired kind first, if none is found, and /// parameter checkTheOther is set, it then checks the other kind. If no such /// method or only one method is found, function returns false; otherwise, it /// returns true. bool CollectMultipleMethodsInGlobalPool(Selector Sel, SmallVectorImpl<ObjCMethodDecl*>& Methods, bool InstanceFirst, bool CheckTheOther, const ObjCObjectType *TypeBound = nullptr); bool AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod, SourceRange R, bool receiverIdOrClass, SmallVectorImpl<ObjCMethodDecl*>& Methods); void DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods, Selector Sel, SourceRange R, bool receiverIdOrClass); private: /// - Returns a selector which best matches given argument list or /// nullptr if none could be found ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args, bool IsInstance, SmallVectorImpl<ObjCMethodDecl*>& Methods); /// Record the typo correction failure and return an empty correction. TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc, bool RecordFailure = true) { if (RecordFailure) TypoCorrectionFailures[Typo].insert(TypoLoc); return TypoCorrection(); } public: /// AddInstanceMethodToGlobalPool - All instance methods in a translation /// unit are added to a global pool. 
This allows us to efficiently associate /// a selector with a method declaraation for purposes of typechecking /// messages sent to "id" (where the class of the object is unknown). void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) { AddMethodToGlobalPool(Method, impl, /*instance*/true); } /// AddFactoryMethodToGlobalPool - Same as above, but for factory methods. void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) { AddMethodToGlobalPool(Method, impl, /*instance*/false); } /// AddAnyMethodToGlobalPool - Add any method, instance or factory to global /// pool. void AddAnyMethodToGlobalPool(Decl *D); /// LookupInstanceMethodInGlobalPool - Returns the method and warns if /// there are multiple signatures. ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass=false) { return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass, /*instance*/true); } /// LookupFactoryMethodInGlobalPool - Returns the method and warns if /// there are multiple signatures. ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass=false) { return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass, /*instance*/false); } const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel, QualType ObjectType=QualType()); /// LookupImplementedMethodInGlobalPool - Returns the method which has an /// implementation. ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel); /// CollectIvarsToConstructOrDestruct - Collect those ivars which require /// initialization. void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI, SmallVectorImpl<ObjCIvarDecl*> &Ivars); //===--------------------------------------------------------------------===// // Statement Parsing Callbacks: SemaStmt.cpp. 
public: class FullExprArg { public: FullExprArg() : E(nullptr) { } FullExprArg(Sema &actions) : E(nullptr) { } ExprResult release() { return E; } Expr *get() const { return E; } Expr *operator->() { return E; } private: // FIXME: No need to make the entire Sema class a friend when it's just // Sema::MakeFullExpr that needs access to the constructor below. friend class Sema; explicit FullExprArg(Expr *expr) : E(expr) {} Expr *E; }; FullExprArg MakeFullExpr(Expr *Arg) { return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation()); } FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) { return FullExprArg( ActOnFinishFullExpr(Arg, CC, /*DiscardedValue*/ false).get()); } FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) { ExprResult FE = ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(), /*DiscardedValue*/ true); return FullExprArg(FE.get()); } StmtResult ActOnExprStmt(ExprResult Arg, bool DiscardedValue = true); StmtResult ActOnExprStmtError(); StmtResult ActOnNullStmt(SourceLocation SemiLoc, bool HasLeadingEmptyMacro = false); void ActOnStartOfCompoundStmt(bool IsStmtExpr); void ActOnFinishOfCompoundStmt(); StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R, ArrayRef<Stmt *> Elts, bool isStmtExpr); /// A RAII object to enter scope of a compound statement. class CompoundScopeRAII { public: CompoundScopeRAII(Sema &S, bool IsStmtExpr = false) : S(S) { S.ActOnStartOfCompoundStmt(IsStmtExpr); } ~CompoundScopeRAII() { S.ActOnFinishOfCompoundStmt(); } private: Sema &S; }; /// An RAII helper that pops function a function scope on exit. 
  struct FunctionScopeRAII {
    Sema &S;
    bool Active; // cleared by disable(); skips the pop in the destructor
    FunctionScopeRAII(Sema &S) : S(S), Active(true) {}
    ~FunctionScopeRAII() {
      if (Active)
        S.PopFunctionScopeInfo();
    }
    void disable() { Active = false; }
  };

  StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl, SourceLocation StartLoc,
                           SourceLocation EndLoc);
  void ActOnForEachDeclStmt(DeclGroupPtrTy Decl);
  StmtResult ActOnForEachLValueExpr(Expr *E);
  ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val);
  StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS,
                           SourceLocation DotDotDotLoc, ExprResult RHS,
                           SourceLocation ColonLoc);
  void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt);

  StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc,
                              SourceLocation ColonLoc,
                              Stmt *SubStmt, Scope *CurScope);
  StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
                            SourceLocation ColonLoc, Stmt *SubStmt);

  StmtResult ActOnAttributedStmt(SourceLocation AttrLoc,
                                 ArrayRef<const Attr*> Attrs,
                                 Stmt *SubStmt);

  class ConditionResult;
  StmtResult ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr,
                         Stmt *InitStmt,
                         ConditionResult Cond, Stmt *ThenVal,
                         SourceLocation ElseLoc, Stmt *ElseVal);
  StmtResult BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr,
                         Stmt *InitStmt,
                         ConditionResult Cond, Stmt *ThenVal,
                         SourceLocation ElseLoc, Stmt *ElseVal);
  StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc,
                                    Stmt *InitStmt,
                                    ConditionResult Cond);
  StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc,
                                   Stmt *Switch, Stmt *Body);
  StmtResult ActOnWhileStmt(SourceLocation WhileLoc, ConditionResult Cond,
                            Stmt *Body);
  StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
                         SourceLocation WhileLoc, SourceLocation CondLParen,
                         Expr *Cond, SourceLocation CondRParen);
  StmtResult ActOnForStmt(SourceLocation ForLoc,
                          SourceLocation LParenLoc,
                          Stmt *First,
                          ConditionResult Second,
                          FullExprArg Third,
                          SourceLocation RParenLoc,
                          Stmt *Body);
  ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc,
                                           Expr *collection);
  // Return type of the declaration continuing on the next source line.
  StmtResult
ActOnObjCForCollectionStmt(SourceLocation ForColLoc, Stmt *First, Expr *collection, SourceLocation RParenLoc); StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body); enum BuildForRangeKind { /// Initial building of a for-range statement. BFRK_Build, /// Instantiation or recovery rebuild of a for-range statement. Don't /// attempt any typo-correction. BFRK_Rebuild, /// Determining whether a for-range statement could be built. Avoid any /// unnecessary or irreversible actions. BFRK_Check }; StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, Stmt *LoopVar, SourceLocation ColonLoc, Expr *Collection, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, SourceLocation ColonLoc, Stmt *RangeDecl, Stmt *Begin, Stmt *End, Expr *Cond, Expr *Inc, Stmt *LoopVarDecl, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body); StmtResult ActOnGotoStmt(SourceLocation GotoLoc, SourceLocation LabelLoc, LabelDecl *TheDecl); StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc, SourceLocation StarLoc, Expr *DestExp); StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope); StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope); void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, unsigned NumParams); typedef std::pair<StringRef, QualType> CapturedParamNameType; void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, ArrayRef<CapturedParamNameType> Params); StmtResult ActOnCapturedRegionEnd(Stmt *S); void ActOnCapturedRegionError(); RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD, SourceLocation Loc, unsigned NumParams); enum CopyElisionSemanticsKind { CES_Strict = 0, CES_AllowParameters = 1, CES_AllowDifferentTypes = 2, CES_AllowExceptionVariables 
= 4, CES_FormerDefault = (CES_AllowParameters), CES_Default = (CES_AllowParameters | CES_AllowDifferentTypes), CES_AsIfByStdMove = (CES_AllowParameters | CES_AllowDifferentTypes | CES_AllowExceptionVariables), }; VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E, CopyElisionSemanticsKind CESK); bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD, CopyElisionSemanticsKind CESK); StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp, Scope *CurScope); StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple, bool IsVolatile, unsigned NumOutputs, unsigned NumInputs, IdentifierInfo **Names, MultiExprArg Constraints, MultiExprArg Exprs, Expr *AsmString, MultiExprArg Clobbers, unsigned NumLabels, SourceLocation RParenLoc); void FillInlineAsmIdentifierInfo(Expr *Res, llvm::InlineAsmIdentifierInfo &Info); ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool IsUnevaluatedContext); bool LookupInlineAsmField(StringRef Base, StringRef Member, unsigned &Offset, SourceLocation AsmLoc); ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member, SourceLocation AsmLoc); StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc, ArrayRef<Token> AsmToks, StringRef AsmString, unsigned NumOutputs, unsigned NumInputs, ArrayRef<StringRef> Constraints, ArrayRef<StringRef> Clobbers, ArrayRef<Expr*> Exprs, SourceLocation EndLoc); LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName, SourceLocation Location, bool AlwaysCreate); VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id, bool Invalid = false); Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D); StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, 
SourceLocation RParen, Decl *Parm, Stmt *Body); StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body); StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try, MultiStmtArg Catch, Stmt *Finally); StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw); StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw, Scope *CurScope); ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc, Expr *operand); StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc, Expr *SynchExpr, Stmt *SynchBody); StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body); VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id); Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D); StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc, Decl *ExDecl, Stmt *HandlerBlock); StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock, ArrayRef<Stmt *> Handlers); StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ? SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); StmtResult ActOnSEHExceptBlock(SourceLocation Loc, Expr *FilterExpr, Stmt *Block); void ActOnStartSEHFinallyBlock(); void ActOnAbortSEHFinallyBlock(); StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block); StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope); void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock); bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const; /// If it's a file scoped decl that must warn if not used, keep track /// of it. void MarkUnusedFileScopedDecl(const DeclaratorDecl *D); /// DiagnoseUnusedExprResult - If the statement passed in is an expression /// whose result is unused, warn. 
void DiagnoseUnusedExprResult(const Stmt *S); void DiagnoseUnusedNestedTypedefs(const RecordDecl *D); void DiagnoseUnusedDecl(const NamedDecl *ND); /// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null /// statement as a \p Body, and it is located on the same line. /// /// This helps prevent bugs due to typos, such as: /// if (condition); /// do_stuff(); void DiagnoseEmptyStmtBody(SourceLocation StmtLoc, const Stmt *Body, unsigned DiagID); /// Warn if a for/while loop statement \p S, which is followed by /// \p PossibleBody, has a suspicious null statement as a body. void DiagnoseEmptyLoopBody(const Stmt *S, const Stmt *PossibleBody); /// Warn if a value is moved to itself. void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr, SourceLocation OpLoc); /// Warn if we're implicitly casting from a _Nullable pointer type to a /// _Nonnull one. void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType, SourceLocation Loc); /// Warn when implicitly casting 0 to nullptr. 
  void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E);

  /// Begin collecting delayed diagnostics into \p pool for the declaration
  /// currently being parsed; balanced by PopParsingDeclaration.
  ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) {
    return DelayedDiagnostics.push(pool);
  }
  void PopParsingDeclaration(ParsingDeclState state, Decl *decl);

  typedef ProcessingContextState ParsingClassState;
  /// Suspend delayed-diagnostic tracking while parsing a class body;
  /// balanced by PopParsingClass.
  ParsingClassState PushParsingClass() {
    return DelayedDiagnostics.pushUndelayed();
  }
  void PopParsingClass(ParsingClassState state) {
    DelayedDiagnostics.popUndelayed(state);
  }

  void redelayDiagnostics(sema::DelayedDiagnosticPool &pool);

  void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
                                  const ObjCInterfaceDecl *UnknownObjCClass,
                                  bool ObjCPropertyAccess,
                                  bool AvoidPartialAvailabilityChecks = false,
                                  ObjCInterfaceDecl *ClassReceiver = nullptr);

  bool makeUnavailableInSystemHeader(SourceLocation loc,
                                     UnavailableAttr::ImplicitReason reason);

  /// Issue any -Wunguarded-availability warnings in \c FD
  void DiagnoseUnguardedAvailabilityViolations(Decl *FD);

  //===--------------------------------------------------------------------===//
  // Expression Parsing Callbacks: SemaExpr.cpp.
bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid); bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs, const ObjCInterfaceDecl *UnknownObjCClass = nullptr, bool ObjCPropertyAccess = false, bool AvoidPartialAvailabilityChecks = false, ObjCInterfaceDecl *ClassReciever = nullptr); void NoteDeletedFunction(FunctionDecl *FD); void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD); bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD, ObjCMethodDecl *Getter, SourceLocation Loc); void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc, ArrayRef<Expr *> Args); void PushExpressionEvaluationContext( ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr, ExpressionEvaluationContextRecord::ExpressionKind Type = ExpressionEvaluationContextRecord::EK_Other); enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl }; void PushExpressionEvaluationContext( ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t, ExpressionEvaluationContextRecord::ExpressionKind Type = ExpressionEvaluationContextRecord::EK_Other); void PopExpressionEvaluationContext(); void DiscardCleanupsInEvaluationContext(); ExprResult TransformToPotentiallyEvaluated(Expr *E); ExprResult HandleExprEvaluationContextForTypeof(Expr *E); ExprResult ActOnConstantExpression(ExprResult Res); // Functions for marking a declaration referenced. These functions also // contain the relevant logic for marking if a reference to a function or // variable is an odr-use (in the C++11 sense). There are separate variants // for expressions referring to a decl; these exist because odr-use marking // needs to be delayed for some constant variables when we build one of the // named expressions. // // MightBeOdrUse indicates whether the use could possibly be an odr-use, and // should usually be true. 
This only needs to be set to false if the lack of // odr-use cannot be determined from the current context (for instance, // because the name denotes a virtual function and was written without an // explicit nested-name-specifier). void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse); void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func, bool MightBeOdrUse = true); void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var); void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr); void MarkMemberReferenced(MemberExpr *E); void MarkFunctionParmPackReferenced(FunctionParmPackExpr *E); void MarkCaptureUsedInEnclosingContext(VarDecl *Capture, SourceLocation Loc, unsigned CapturingScopeIndex); ExprResult CheckLValueToRValueConversionOperand(Expr *E); void CleanupVarDeclMarking(); enum TryCaptureKind { TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef }; /// Try to capture the given variable. /// /// \param Var The variable to capture. /// /// \param Loc The location at which the capture occurs. /// /// \param Kind The kind of capture, which may be implicit (for either a /// block or a lambda), or explicit by-value or by-reference (for a lambda). /// /// \param EllipsisLoc The location of the ellipsis, if one is provided in /// an explicit lambda capture. /// /// \param BuildAndDiagnose Whether we are actually supposed to add the /// captures or diagnose errors. If false, this routine merely check whether /// the capture can occur without performing the capture itself or complaining /// if the variable cannot be captured. /// /// \param CaptureType Will be set to the type of the field used to capture /// this variable in the innermost block or lambda. Only valid when the /// variable can be captured. /// /// \param DeclRefType Will be set to the type of a reference to the capture /// from within the current scope. Only valid when the variable can be /// captured. 
/// /// \param FunctionScopeIndexToStopAt If non-null, it points to the index /// of the FunctionScopeInfo stack beyond which we do not attempt to capture. /// This is useful when enclosing lambdas must speculatively capture /// variables that may or may not be used in certain specializations of /// a nested generic lambda. /// /// \returns true if an error occurred (i.e., the variable cannot be /// captured) and false if the capture succeeded. bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind, SourceLocation EllipsisLoc, bool BuildAndDiagnose, QualType &CaptureType, QualType &DeclRefType, const unsigned *const FunctionScopeIndexToStopAt); /// Try to capture the given variable. bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind = TryCapture_Implicit, SourceLocation EllipsisLoc = SourceLocation()); /// Checks if the variable must be captured. bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc); /// Given a variable, determine the type that a reference to that /// variable will have in the given scope. QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc); /// Mark all of the declarations referenced within a particular AST node as /// referenced. Used when template instantiation instantiates a non-dependent /// type -- entities referenced by the type are now referenced. void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T); void MarkDeclarationsReferencedInExpr(Expr *E, bool SkipLocalVariables = false); /// Try to recover by turning the given expression into a /// call. Returns true if recovery was attempted or an error was /// emitted; this may also leave the ExprResult invalid. bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD, bool ForceComplain = false, bool (*IsPlausibleResult)(QualType) = nullptr); /// Figure out if an expression could be turned into a call. 
bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy, UnresolvedSetImpl &NonTemplateOverloads); /// Conditionally issue a diagnostic based on the current /// evaluation context. /// /// \param Statement If Statement is non-null, delay reporting the /// diagnostic until the function body is parsed, and then do a basic /// reachability analysis to determine if the statement is reachable. /// If it is unreachable, the diagnostic will not be emitted. bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement, const PartialDiagnostic &PD); /// Similar, but diagnostic is only produced if all the specified statements /// are reachable. bool DiagRuntimeBehavior(SourceLocation Loc, ArrayRef<const Stmt*> Stmts, const PartialDiagnostic &PD); // Primary Expressions. SourceRange getExprRange(Expr *E) const; ExprResult ActOnIdExpression( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand, CorrectionCandidateCallback *CCC = nullptr, bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr); void DecomposeUnqualifiedId(const UnqualifiedId &Id, TemplateArgumentListInfo &Buffer, DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *&TemplateArgs); bool DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R, CorrectionCandidateCallback &CCC, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr); ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S, IdentifierInfo *II, bool AllowBuiltinCreation=false); ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, bool isAddressOfOperand, const TemplateArgumentListInfo *TemplateArgs); /// If \p D cannot be odr-used in the current expression evaluation context, /// return a reason explaining why. Otherwise, return NOUR_None. 
NonOdrUseReason getNonOdrUseReasonInCurrentContext(ValueDecl *D); DeclRefExpr *BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, SourceLocation Loc, const CXXScopeSpec *SS = nullptr); DeclRefExpr * BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, const CXXScopeSpec *SS = nullptr, NamedDecl *FoundD = nullptr, SourceLocation TemplateKWLoc = SourceLocation(), const TemplateArgumentListInfo *TemplateArgs = nullptr); DeclRefExpr * BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, NestedNameSpecifierLoc NNS, NamedDecl *FoundD = nullptr, SourceLocation TemplateKWLoc = SourceLocation(), const TemplateArgumentListInfo *TemplateArgs = nullptr); ExprResult BuildAnonymousStructUnionMemberReference( const CXXScopeSpec &SS, SourceLocation nameLoc, IndirectFieldDecl *indirectField, DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none), Expr *baseObjectExpr = nullptr, SourceLocation opLoc = SourceLocation()); ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S); ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, bool IsDefiniteInstance, const Scope *S); bool UseArgumentDependentLookup(const CXXScopeSpec &SS, const LookupResult &R, bool HasTrailingLParen); ExprResult BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, bool IsAddressOfOperand, const Scope *S, TypeSourceInfo **RecoveryTSI = nullptr); ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS, LookupResult &R, bool NeedsADL, bool AcceptInvalidDecl = false); ExprResult 
BuildDeclarationNameExpr( const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D, NamedDecl *FoundD = nullptr, const TemplateArgumentListInfo *TemplateArgs = nullptr, bool AcceptInvalidDecl = false); ExprResult BuildLiteralOperatorCall(LookupResult &R, DeclarationNameInfo &SuffixInfo, ArrayRef<Expr *> Args, SourceLocation LitEndLoc, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr); ExprResult BuildPredefinedExpr(SourceLocation Loc, PredefinedExpr::IdentKind IK); ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind); ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val); bool CheckLoopHintExpr(Expr *E, SourceLocation Loc); ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnCharacterConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E); ExprResult ActOnParenListExpr(SourceLocation L, SourceLocation R, MultiExprArg Val); /// ActOnStringLiteral - The specified tokens were lexed as pasted string /// fragments (e.g. "foo" "bar" L"baz"). ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks, Scope *UDLScope = nullptr); ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<ParsedType> ArgTypes, ArrayRef<Expr *> ArgExprs); ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<TypeSourceInfo *> Types, ArrayRef<Expr *> Exprs); // Binary/Unary Operators. 'Tok' is the token for the operator. 
ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *InputExpr); ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *Input); ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Op, Expr *Input); bool isQualifiedMemberAccess(Expr *E); QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc); ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, SourceRange R); ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, bool IsType, void *TyOrEx, SourceRange ArgRange); ExprResult CheckPlaceholderExpr(Expr *E); bool CheckVecStepExpr(Expr *E); bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind); bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc, SourceRange ExprRange, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnSizeofParameterPackExpr(Scope *S, SourceLocation OpLoc, IdentifierInfo &Name, SourceLocation NameLoc, SourceLocation RParenLoc); ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Kind, Expr *Input); ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc, Expr *LowerBound, SourceLocation ColonLoc, Expr *Length, SourceLocation RBLoc); // This struct is for use by ActOnMemberAccess to allow // BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after // changing the access operator from a '.' to a '->' (to see if that is the // change needed to fix an error about an unknown member, e.g. when the class // defines a custom operator->). 
struct ActOnMemberAccessExtraArgs { Scope *S; UnqualifiedId &Id; Decl *ObjCImpDecl; }; ExprResult BuildMemberReferenceExpr( Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, bool SuppressQualifierCheck = false, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, FieldDecl *Field, DeclAccessPair FoundDecl, const DeclarationNameInfo &MemberNameInfo); ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow); bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType, const CXXScopeSpec &SS, const LookupResult &R); ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Member, Decl *ObjCImpDecl); MemberExpr * BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec *SS, SourceLocation TemplateKWLoc, ValueDecl *Member, DeclAccessPair FoundDecl, bool HadMultipleCandidates, const DeclarationNameInfo &MemberNameInfo, QualType Ty, ExprValueKind VK, ExprObjectKind OK, const TemplateArgumentListInfo *TemplateArgs = nullptr); MemberExpr * 
BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc, NestedNameSpecifierLoc NNS, SourceLocation TemplateKWLoc, ValueDecl *Member, DeclAccessPair FoundDecl, bool HadMultipleCandidates, const DeclarationNameInfo &MemberNameInfo, QualType Ty, ExprValueKind VK, ExprObjectKind OK, const TemplateArgumentListInfo *TemplateArgs = nullptr); void ActOnDefaultCtorInitializers(Decl *CDtorDecl); bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn, FunctionDecl *FDecl, const FunctionProtoType *Proto, ArrayRef<Expr *> Args, SourceLocation RParenLoc, bool ExecConfig = false); void CheckStaticArrayArgument(SourceLocation CallLoc, ParmVarDecl *Param, const Expr *ArgExpr); /// ActOnCallExpr - Handle a call to Fn with the specified array of arguments. /// This provides the location of the left/right parens and a list of comma /// locations. ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc, MultiExprArg ArgExprs, SourceLocation RParenLoc, Expr *ExecConfig = nullptr); ExprResult BuildCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc, MultiExprArg ArgExprs, SourceLocation RParenLoc, Expr *ExecConfig = nullptr, bool IsExecConfig = false); ExprResult BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc, ArrayRef<Expr *> Arg, SourceLocation RParenLoc, Expr *Config = nullptr, bool IsExecConfig = false, ADLCallKind UsesADL = ADLCallKind::NotADL); ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc, MultiExprArg ExecConfig, SourceLocation GGGLoc); ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc, Declarator &D, ParsedType &Ty, SourceLocation RParenLoc, Expr *CastExpr); ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc, TypeSourceInfo *Ty, SourceLocation RParenLoc, Expr *Op); CastKind PrepareScalarCast(ExprResult &src, QualType destType); /// Build an altivec or OpenCL literal. 
ExprResult BuildVectorLiteral(SourceLocation LParenLoc, SourceLocation RParenLoc, Expr *E, TypeSourceInfo *TInfo); ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME); ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc, Expr *InitExpr); ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc, TypeSourceInfo *TInfo, SourceLocation RParenLoc, Expr *LiteralExpr); ExprResult ActOnInitList(SourceLocation LBraceLoc, MultiExprArg InitArgList, SourceLocation RBraceLoc); ExprResult ActOnDesignatedInitializer(Designation &Desig, SourceLocation Loc, bool GNUSyntax, ExprResult Init); private: static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind); public: ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc, tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr); ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr); ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr); void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc); /// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null /// in the case of a the GNU conditional expr extension. ExprResult ActOnConditionalOp(SourceLocation QuestionLoc, SourceLocation ColonLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr); /// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo". ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc, LabelDecl *TheDecl); void ActOnStartStmtExpr(); ExprResult ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt, SourceLocation RPLoc); // "({..})" // Handle the final expression in a statement expression. 
ExprResult ActOnStmtExprResult(ExprResult E); void ActOnStmtExprError(); // __builtin_offsetof(type, identifier(.identifier|[expr])*) struct OffsetOfComponent { SourceLocation LocStart, LocEnd; bool isBrackets; // true if [expr], false if .ident union { IdentifierInfo *IdentInfo; Expr *E; } U; }; /// __builtin_offsetof(type, a.b[123][456].c) ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc, TypeSourceInfo *TInfo, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); ExprResult ActOnBuiltinOffsetOf(Scope *S, SourceLocation BuiltinLoc, SourceLocation TypeLoc, ParsedType ParsedArgTy, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); // __builtin_choose_expr(constExpr, expr1, expr2) ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr, SourceLocation RPLoc); // __builtin_va_arg(expr, type) ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty, SourceLocation RPLoc); ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E, TypeSourceInfo *TInfo, SourceLocation RPLoc); // __builtin_LINE(), __builtin_FUNCTION(), __builtin_FILE(), // __builtin_COLUMN() ExprResult ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind, SourceLocation BuiltinLoc, SourceLocation RPLoc); // Build a potentially resolved SourceLocExpr. ExprResult BuildSourceLocExpr(SourceLocExpr::IdentKind Kind, SourceLocation BuiltinLoc, SourceLocation RPLoc, DeclContext *ParentContext); // __null ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc); bool CheckCaseExpression(Expr *E); /// Describes the result of an "if-exists" condition check. enum IfExistsResult { /// The symbol exists. IER_Exists, /// The symbol does not exist. IER_DoesNotExist, /// The name is a dependent name, so the results will differ /// from one instantiation to the next. IER_Dependent, /// An error occurred. 
IER_Error }; IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS, const DeclarationNameInfo &TargetNameInfo); IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name); StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, NestedNameSpecifierLoc QualifierLoc, DeclarationNameInfo NameInfo, Stmt *Nested); StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name, Stmt *Nested); //===------------------------- "Block" Extension ------------------------===// /// ActOnBlockStart - This callback is invoked when a block literal is /// started. void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockArguments - This callback allows processing of block arguments. /// If there are no arguments, this is still invoked. void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo, Scope *CurScope); /// ActOnBlockError - If there is an error parsing a block, this callback /// is invoked to pop the information about the block from the action impl. void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockStmtExpr - This is called when the body of a block statement /// literal was successfully completed. ^(int x){...} ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body, Scope *CurScope); //===---------------------------- Clang Extensions ----------------------===// /// __builtin_convertvector(...) ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy, SourceLocation BuiltinLoc, SourceLocation RParenLoc); //===---------------------------- OpenCL Features -----------------------===// /// __builtin_astype(...) 
ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy, SourceLocation BuiltinLoc, SourceLocation RParenLoc); //===---------------------------- C++ Features --------------------------===// // Act on C++ namespaces Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc, SourceLocation NamespaceLoc, SourceLocation IdentLoc, IdentifierInfo *Ident, SourceLocation LBrace, const ParsedAttributesView &AttrList, UsingDirectiveDecl *&UsingDecl); void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace); NamespaceDecl *getStdNamespace() const; NamespaceDecl *getOrCreateStdNamespace(); NamespaceDecl *lookupStdExperimentalNamespace(); CXXRecordDecl *getStdBadAlloc() const; EnumDecl *getStdAlignValT() const; private: // A cache representing if we've fully checked the various comparison category // types stored in ASTContext. The bit-index corresponds to the integer value // of a ComparisonCategoryType enumerator. llvm::SmallBitVector FullyCheckedComparisonCategories; ValueDecl *tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl, CXXScopeSpec &SS, ParsedType TemplateTypeTy, IdentifierInfo *MemberOrBase); public: /// Lookup the specified comparison category types in the standard /// library, an check the VarDecls possibly returned by the operator<=> /// builtins for that type. /// /// \return The type of the comparison category type corresponding to the /// specified Kind, or a null type if an error occurs QualType CheckComparisonCategoryType(ComparisonCategoryType Kind, SourceLocation Loc); /// Tests whether Ty is an instance of std::initializer_list and, if /// it is and Element is not NULL, assigns the element type to Element. bool isStdInitializerList(QualType Ty, QualType *Element); /// Looks for the std::initializer_list template and instantiates it /// with Element, or emits an error if it's not found. /// /// \returns The instantiated template, or null on error. 
QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);

/// Determine whether Ctor is an initializer-list constructor, as
/// defined in [dcl.init.list]p2.
bool isInitListConstructor(const FunctionDecl *Ctor);

Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc,
                          SourceLocation NamespcLoc, CXXScopeSpec &SS,
                          SourceLocation IdentLoc, IdentifierInfo *NamespcName,
                          const ParsedAttributesView &AttrList);

void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);

Decl *ActOnNamespaceAliasDef(Scope *CurScope, SourceLocation NamespaceLoc,
                             SourceLocation AliasLoc, IdentifierInfo *Alias,
                             CXXScopeSpec &SS, SourceLocation IdentLoc,
                             IdentifierInfo *Ident);

void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);

bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target,
                          const LookupResult &PreviousDecls,
                          UsingShadowDecl *&PrevShadow);

UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD,
                                      NamedDecl *Target,
                                      UsingShadowDecl *PrevDecl);

bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
                                 bool HasTypenameKeyword,
                                 const CXXScopeSpec &SS,
                                 SourceLocation NameLoc,
                                 const LookupResult &Previous);

bool CheckUsingDeclQualifier(SourceLocation UsingLoc, bool HasTypename,
                             const CXXScopeSpec &SS,
                             const DeclarationNameInfo &NameInfo,
                             SourceLocation NameLoc);

NamedDecl *BuildUsingDeclaration(
    Scope *S, AccessSpecifier AS, SourceLocation UsingLoc,
    bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS,
    DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc,
    const ParsedAttributesView &AttrList, bool IsInstantiation);

NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom,
                              ArrayRef<NamedDecl *> Expansions);

bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);

/// Given a derived-class using shadow declaration for a constructor and the
/// corresponding base class constructor, find or create the implicit
/// synthesized derived class constructor to use for this initialization.
CXXConstructorDecl * findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor, ConstructorUsingShadowDecl *DerivedShadow); Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS, SourceLocation UsingLoc, SourceLocation TypenameLoc, CXXScopeSpec &SS, UnqualifiedId &Name, SourceLocation EllipsisLoc, const ParsedAttributesView &AttrList); Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS, MultiTemplateParamsArg TemplateParams, SourceLocation UsingLoc, UnqualifiedId &Name, const ParsedAttributesView &AttrList, TypeResult Type, Decl *DeclFromDeclSpec); /// BuildCXXConstructExpr - Creates a complete call to a constructor, /// including handling of its default argument expressions. /// /// \param ConstructKind - a CXXConstructExpr::ConstructionKind ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, NamedDecl *FoundDecl, CXXConstructorDecl *Constructor, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); /// Build a CXXConstructExpr whose constructor has already been resolved if /// it denotes an inherited constructor. ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); // FIXME: Can we remove this and have the above BuildCXXConstructExpr check if // the constructor can be elidable? 
ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc,
                                 QualType DeclInitType, NamedDecl *FoundDecl,
                                 CXXConstructorDecl *Constructor, bool Elidable,
                                 MultiExprArg Exprs, bool HadMultipleCandidates,
                                 bool IsListInitialization,
                                 bool IsStdInitListInitialization,
                                 bool RequiresZeroInit, unsigned ConstructKind,
                                 SourceRange ParenRange);

ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field);

/// Instantiate or parse a C++ default argument expression as necessary.
/// Return true on error.
bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
                            ParmVarDecl *Param);

/// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating
/// the default expr if needed.
ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
                                  ParmVarDecl *Param);

/// FinalizeVarWithDestructor - Prepare for calling destructor on the
/// constructed variable.
void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType);

/// Helper class that collects exception specifications for
/// implicitly-declared special member functions.
class ImplicitExceptionSpecification {
  // Pointer to allow copying
  Sema *Self;
  // We order exception specifications thus:
  // noexcept is the most restrictive, but is only used in C++11.
  // throw() comes next.
  // Then a throw(collected exceptions)
  // Finally no specification, which is expressed as noexcept(false).
  // throw(...) is used instead if any called function uses it.
  ExceptionSpecificationType ComputedEST;
  llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
  SmallVector<QualType, 4> Exceptions;

  void ClearExceptions() {
    ExceptionsSeen.clear();
    Exceptions.clear();
  }

public:
  explicit ImplicitExceptionSpecification(Sema &Self)
      : Self(&Self), ComputedEST(EST_BasicNoexcept) {
    if (!Self.getLangOpts().CPlusPlus11)
      ComputedEST = EST_DynamicNone;
  }

  /// Get the computed exception specification type.
  ExceptionSpecificationType getExceptionSpecType() const {
    assert(!isComputedNoexcept(ComputedEST) &&
           "noexcept(expr) should not be a possible result");
    return ComputedEST;
  }

  /// The number of exceptions in the exception specification.
  unsigned size() const { return Exceptions.size(); }

  /// The set of exceptions in the exception specification.
  const QualType *data() const { return Exceptions.data(); }

  /// Integrate another called method into the collected data.
  void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);

  /// Integrate an invoked expression into the collected data.
  void CalledExpr(Expr *E);

  /// Overwrite an EPI's exception specification with this
  /// computed exception specification.
  FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const {
    FunctionProtoType::ExceptionSpecInfo ESI;
    ESI.Type = getExceptionSpecType();
    if (ESI.Type == EST_Dynamic) {
      ESI.Exceptions = Exceptions;
    } else if (ESI.Type == EST_None) {
      /// C++11 [except.spec]p14:
      ///   The exception-specification is noexcept(false) if the set of
      ///   potential exceptions of the special member function contains "any"
      ESI.Type = EST_NoexceptFalse;
      ESI.NoexceptExpr =
          Self->ActOnCXXBoolLiteral(SourceLocation(), tok::kw_false).get();
    }
    return ESI;
  }
};

/// Determine what sort of exception specification a defaulted
/// default constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc,
                                         CXXMethodDecl *MD);

/// Determine what sort of exception specification a defaulted
/// copy constructor of a class will have, and whether the parameter
/// will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD);

/// Determine what sort of exception specification a defaulted
/// copy assignment operator of a class will have, and whether the
/// parameter will be const.
ImplicitExceptionSpecification ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted move /// constructor of a class will have. ImplicitExceptionSpecification ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted move /// assignment operator of a class will have. ImplicitExceptionSpecification ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted /// destructor of a class will have. ImplicitExceptionSpecification ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification an inheriting /// constructor of a class will have. ImplicitExceptionSpecification ComputeInheritingCtorExceptionSpec(SourceLocation Loc, CXXConstructorDecl *CD); /// Evaluate the implicit exception specification for a defaulted /// special member function. void EvaluateImplicitExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD); /// Check the given noexcept-specifier, convert its expression, and compute /// the appropriate ExceptionSpecificationType. ExprResult ActOnNoexceptSpec(SourceLocation NoexceptLoc, Expr *NoexceptExpr, ExceptionSpecificationType &EST); /// Check the given exception-specification and update the /// exception specification information with the results. void checkExceptionSpecification(bool IsTopLevel, ExceptionSpecificationType EST, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr, SmallVectorImpl<QualType> &Exceptions, FunctionProtoType::ExceptionSpecInfo &ESI); /// Determine if we're in a case where we need to (incorrectly) eagerly /// parse an exception specification to work around a libstdc++ bug. bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D); /// Add an exception-specification to the given member function /// (or member function template). 
The exception-specification was parsed /// after the method itself was declared. void actOnDelayedExceptionSpecification(Decl *Method, ExceptionSpecificationType EST, SourceRange SpecificationRange, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr); class InheritedConstructorInfo; /// Determine if a special member function should have a deleted /// definition when it is defaulted. bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM, InheritedConstructorInfo *ICI = nullptr, bool Diagnose = false); /// Declare the implicit default constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// default constructor will be added. /// /// \returns The implicitly-declared default constructor. CXXConstructorDecl *DeclareImplicitDefaultConstructor( CXXRecordDecl *ClassDecl); /// DefineImplicitDefaultConstructor - Checks for feasibility of /// defining this constructor as the default constructor. void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit destructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// destructor will be added. /// /// \returns The implicitly-declared destructor. CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl); /// DefineImplicitDestructor - Checks for feasibility of /// defining this destructor as the default destructor. void DefineImplicitDestructor(SourceLocation CurrentLocation, CXXDestructorDecl *Destructor); /// Build an exception spec for destructors that don't have one. /// /// C++11 says that user-defined destructors with no exception spec get one /// that looks as if the destructor was implicitly declared. void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor); /// Define the specified inheriting constructor. 
void DefineInheritingConstructor(SourceLocation UseLoc, CXXConstructorDecl *Constructor); /// Declare the implicit copy constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy constructor will be added. /// /// \returns The implicitly-declared copy constructor. CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitCopyConstructor - Checks for feasibility of /// defining this constructor as the copy constructor. void DefineImplicitCopyConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit move constructor for the given class. /// /// \param ClassDecl The Class declaration into which the implicit /// move constructor will be added. /// /// \returns The implicitly-declared move constructor, or NULL if it wasn't /// declared. CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitMoveConstructor - Checks for feasibility of /// defining this constructor as the move constructor. void DefineImplicitMoveConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit copy assignment operator for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy assignment operator will be added. /// /// \returns The implicitly-declared copy assignment operator. CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl); /// Defines an implicitly-declared copy assignment operator. void DefineImplicitCopyAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// Declare the implicit move assignment operator for the given class. /// /// \param ClassDecl The Class declaration into which the implicit /// move assignment operator will be added. /// /// \returns The implicitly-declared move assignment operator, or NULL if it /// wasn't declared. 
CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl);

/// Defines an implicitly-declared move assignment operator.
void DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
                                  CXXMethodDecl *MethodDecl);

/// Force the declaration of any implicitly-declared members of this
/// class.
void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class);

/// Check a completed declaration of an implicit special member.
void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD);

/// Determine whether the given function is an implicitly-deleted
/// special member function.
bool isImplicitlyDeleted(FunctionDecl *FD);

/// Check whether 'this' shows up in the type of a static member
/// function after the (naturally empty) cv-qualifier-seq would be.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method);

/// Check whether 'this' shows up in the exception specification of a static
/// member function.
bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method);

/// Check whether 'this' shows up in the attributes of the given
/// static member function.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method);

/// MaybeBindToTemporary - If the passed in expression has a record type with
/// a non-trivial destructor, this will return CXXBindTemporaryExpr. Otherwise
/// it simply returns the passed in expression.
ExprResult MaybeBindToTemporary(Expr *E); bool CompleteConstructorCall(CXXConstructorDecl *Constructor, MultiExprArg ArgsPtr, SourceLocation Loc, SmallVectorImpl<Expr*> &ConvertedArgs, bool AllowExplicit = false, bool IsListInitialization = false); ParsedType getInheritingConstructorName(CXXScopeSpec &SS, SourceLocation NameLoc, IdentifierInfo &Name); ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, bool EnteringContext); ParsedType getDestructorName(SourceLocation TildeLoc, IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, ParsedType ObjectType, bool EnteringContext); ParsedType getDestructorTypeForDecltype(const DeclSpec &DS, ParsedType ObjectType); // Checks that reinterpret casts don't have undefined behavior. void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType, bool IsDereference, SourceRange Range); /// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's. ExprResult ActOnCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, SourceLocation LAngleBracketLoc, Declarator &D, SourceLocation RAngleBracketLoc, SourceLocation LParenLoc, Expr *E, SourceLocation RParenLoc); ExprResult BuildCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, TypeSourceInfo *Ty, Expr *E, SourceRange AngleBrackets, SourceRange Parens); ExprResult ActOnBuiltinBitCastExpr(SourceLocation KWLoc, Declarator &Dcl, ExprResult Operand, SourceLocation RParenLoc); ExprResult BuildBuiltinBitCastExpr(SourceLocation KWLoc, TypeSourceInfo *TSI, Expr *Operand, SourceLocation RParenLoc); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXTypeid - Parse typeid( something ). 
ExprResult ActOnCXXTypeid(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXUuidof - Parse __uuidof( something ). ExprResult ActOnCXXUuidof(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); /// Handle a C++1z fold-expression: ( expr op ... op expr ). ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS, tok::TokenKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc); ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS, BinaryOperatorKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc, Optional<unsigned> NumExpansions); ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc, BinaryOperatorKind Operator); //// ActOnCXXThis - Parse 'this' pointer. ExprResult ActOnCXXThis(SourceLocation loc); /// Build a CXXThisExpr and mark it referenced in the current context. Expr *BuildCXXThisExpr(SourceLocation Loc, QualType Type, bool IsImplicit); void MarkThisReferenced(CXXThisExpr *This); /// Try to retrieve the type of the 'this' pointer. /// /// \returns The type of 'this', if possible. Otherwise, returns a NULL type. QualType getCurrentThisType(); /// When non-NULL, the C++ 'this' expression is allowed despite the /// current context not being a non-static member function. In such cases, /// this provides the type used for 'this'. QualType CXXThisTypeOverride; /// RAII object used to temporarily allow the C++ 'this' expression /// to be used, with the given qualifiers on the current class type. 
class CXXThisScopeRAII {
  Sema &S;
  QualType OldCXXThisTypeOverride;
  bool Enabled;

public:
  /// Introduce a new scope where 'this' may be allowed (when enabled),
  /// using the given declaration (which is either a class template or a
  /// class), along with the qualifiers placed on '*this'.
  CXXThisScopeRAII(Sema &S, Decl *ContextDecl, Qualifiers CXXThisTypeQuals,
                   bool Enabled = true);

  ~CXXThisScopeRAII();
};

/// Make sure the value of 'this' is actually available in the current
/// context, if it is a potentially evaluated context.
///
/// \param Loc The location at which the capture of 'this' occurs.
///
/// \param Explicit Whether 'this' is explicitly captured in a lambda
/// capture list.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// 'this' that may or may not be used in certain specializations of
/// a nested generic lambda (depending on whether the name resolves to
/// a non-static member function or a static function).
/// \return returns 'true' if failed, 'false' if success.
bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false,
    bool BuildAndDiagnose = true,
    const unsigned *const FunctionScopeIndexToStopAt = nullptr,
    bool ByCopy = false);

/// Determine whether the given type is the type of *this that is used
/// outside of the body of a member function for a type that is currently
/// being defined.
bool isThisOutsideMemberFunctionBody(QualType BaseType);

/// ActOnCXXBoolLiteral - Parse {true,false} literals.
ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);

/// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals.
ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); ExprResult ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef<AvailabilitySpec> AvailSpecs, SourceLocation AtLoc, SourceLocation RParen); /// ActOnCXXNullPtrLiteral - Parse 'nullptr'. ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc); //// ActOnCXXThrow - Parse throw expressions. ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr); ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex, bool IsThrownVarInScope); bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E); /// ActOnCXXTypeConstructExpr - Parse construction of a specified type. /// Can be interpreted either as function-style casting ("int(x)") /// or class type construction ("ClassType(x,y,z)") /// or creation of a value-initialized type ("int()"). ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep, SourceLocation LParenOrBraceLoc, MultiExprArg Exprs, SourceLocation RParenOrBraceLoc, bool ListInitialization); ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type, SourceLocation LParenLoc, MultiExprArg Exprs, SourceLocation RParenLoc, bool ListInitialization); /// ActOnCXXNew - Parsed a C++ 'new' expression. ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, Declarator &D, Expr *Initializer); ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, QualType AllocType, TypeSourceInfo *AllocTypeInfo, Optional<Expr *> ArraySize, SourceRange DirectInitRange, Expr *Initializer); /// Determine whether \p FD is an aligned allocation or deallocation /// function that is unavailable. 
bool isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const; /// Produce diagnostics if \p FD is an aligned allocation or deallocation /// function that is unavailable. void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD, SourceLocation Loc); bool CheckAllocatedType(QualType AllocType, SourceLocation Loc, SourceRange R); /// The scope in which to find allocation functions. enum AllocationFunctionScope { /// Only look for allocation functions in the global scope. AFS_Global, /// Only look for allocation functions in the scope of the /// allocated class. AFS_Class, /// Look for allocation functions in both the global scope /// and in the scope of the allocated class. AFS_Both }; /// Finds the overloads of operator new and delete that are appropriate /// for the allocation. bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range, AllocationFunctionScope NewScope, AllocationFunctionScope DeleteScope, QualType AllocType, bool IsArray, bool &PassAlignment, MultiExprArg PlaceArgs, FunctionDecl *&OperatorNew, FunctionDecl *&OperatorDelete, bool Diagnose = true); void DeclareGlobalNewDelete(); void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return, ArrayRef<QualType> Params); bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD, DeclarationName Name, FunctionDecl* &Operator, bool Diagnose = true); FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc, bool CanProvideSize, bool Overaligned, DeclarationName Name); FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc, CXXRecordDecl *RD); /// ActOnCXXDelete - Parsed a C++ 'delete' expression ExprResult ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal, bool ArrayForm, Expr *Operand); void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc, bool IsDelete, bool CallCanBeVirtual, bool WarnOnNonAbstractTypes, SourceLocation DtorLoc); ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, 
SourceLocation LParen, Expr *Operand, SourceLocation RParen); ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand, SourceLocation RParen); /// Parsed one of the type trait support pseudo-functions. ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<ParsedType> Args, SourceLocation RParenLoc); ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<TypeSourceInfo *> Args, SourceLocation RParenLoc); /// ActOnArrayTypeTrait - Parsed one of the binary type trait support /// pseudo-functions. ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, ParsedType LhsTy, Expr *DimExpr, SourceLocation RParen); ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, TypeSourceInfo *TSInfo, Expr *DimExpr, SourceLocation RParen); /// ActOnExpressionTrait - Parsed one of the unary type trait support /// pseudo-functions. ExprResult ActOnExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult BuildExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult ActOnStartCXXMemberReference(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, ParsedType &ObjectType, bool &MayBePseudoDestructor); ExprResult BuildPseudoDestructorExpr(Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, const CXXScopeSpec &SS, TypeSourceInfo *ScopeType, SourceLocation CCLoc, SourceLocation TildeLoc, PseudoDestructorTypeStorage DestroyedType); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, UnqualifiedId &FirstTypeName, SourceLocation CCLoc, SourceLocation TildeLoc, UnqualifiedId &SecondTypeName); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, SourceLocation TildeLoc, const DeclSpec& DS); /// MaybeCreateExprWithCleanups - If the current full-expression /// requires any 
cleanups, surround it with a ExprWithCleanups node. /// Otherwise, just returns the passed-in expression. Expr *MaybeCreateExprWithCleanups(Expr *SubExpr); Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt); ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr); MaterializeTemporaryExpr * CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary, bool BoundToLvalueReference); ExprResult ActOnFinishFullExpr(Expr *Expr, bool DiscardedValue) { return ActOnFinishFullExpr( Expr, Expr ? Expr->getExprLoc() : SourceLocation(), DiscardedValue); } ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC, bool DiscardedValue, bool IsConstexpr = false); StmtResult ActOnFinishFullStmt(Stmt *Stmt); // Marks SS invalid if it represents an incomplete type. bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC); DeclContext *computeDeclContext(QualType T); DeclContext *computeDeclContext(const CXXScopeSpec &SS, bool EnteringContext = false); bool isDependentScopeSpecifier(const CXXScopeSpec &SS); CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS); /// The parser has parsed a global nested-name-specifier '::'. /// /// \param CCLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS); /// The parser has parsed a '__super' nested-name-specifier. /// /// \param SuperLoc The location of the '__super' keyword. /// /// \param ColonColonLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. 
bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc, SourceLocation ColonColonLoc, CXXScopeSpec &SS); bool isAcceptableNestedNameSpecifier(const NamedDecl *SD, bool *CanCorrect = nullptr); NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS); /// Keeps information about an identifier in a nested-name-spec. /// struct NestedNameSpecInfo { /// The type of the object, if we're parsing nested-name-specifier in /// a member access expression. ParsedType ObjectType; /// The identifier preceding the '::'. IdentifierInfo *Identifier; /// The location of the identifier. SourceLocation IdentifierLoc; /// The location of the '::'. SourceLocation CCLoc; /// Creates info object for the most typical case. NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc, SourceLocation ColonColonLoc, ParsedType ObjectType = ParsedType()) : ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) { } NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc, SourceLocation ColonColonLoc, QualType ObjectType) : ObjectType(ParsedType::make(ObjectType)), Identifier(II), IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) { } }; bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo); bool BuildCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, NamedDecl *ScopeLookupResult, bool ErrorRecoveryLookup, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false); /// The parser has parsed a nested-name-specifier 'identifier::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param IdInfo Parser information about an identifier in the /// nested-name-spec. /// /// \param EnteringContext Whether we're entering the context nominated by /// this nested-name-specifier. 
/// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param ErrorRecoveryLookup If true, then this method is called to improve /// error recovery. In this case do not emit error message. /// /// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':' /// are allowed. The bool value pointed by this parameter is set to 'true' /// if the identifier is treated as if it was followed by ':', not '::'. /// /// \param OnlyNamespace If true, only considers namespaces in lookup. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, bool ErrorRecoveryLookup = false, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false); ExprResult ActOnDecltypeExpression(Expr *E); bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS, const DeclSpec &DS, SourceLocation ColonColonLoc); bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo, bool EnteringContext); /// The parser has parsed a nested-name-specifier /// 'template[opt] template-name < template-args >::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param TemplateKWLoc the location of the 'template' keyword, if any. /// \param TemplateName the template name. /// \param TemplateNameLoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). 
/// \param CCLoc The location of the '::'. /// /// \param EnteringContext Whether we're entering the context of the /// nested-name-specifier. /// /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateName, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, SourceLocation CCLoc, bool EnteringContext); /// Given a C++ nested-name-specifier, produce an annotation value /// that the parser can use later to reconstruct the given /// nested-name-specifier. /// /// \param SS A nested-name-specifier. /// /// \returns A pointer containing all of the information in the /// nested-name-specifier \p SS. void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS); /// Given an annotation pointer for a nested-name-specifier, restore /// the nested-name-specifier structure. /// /// \param Annotation The annotation pointer, produced by /// \c SaveNestedNameSpecifierAnnotation(). /// /// \param AnnotationRange The source range corresponding to the annotation. /// /// \param SS The nested-name-specifier that will be updated with the contents /// of the annotation pointer. void RestoreNestedNameSpecifierAnnotation(void *Annotation, SourceRange AnnotationRange, CXXScopeSpec &SS); bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global /// scope or nested-name-specifier) is parsed, part of a declarator-id. /// After this method is called, according to [C++ 3.4.3p3], names should be /// looked up in the declarator-id's scope, until the declarator is parsed and /// ActOnCXXExitDeclaratorScope is called. /// The 'SS' should be a non-empty valid CXXScopeSpec. 
bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS); /// ActOnCXXExitDeclaratorScope - Called when a declarator that previously /// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same /// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well. /// Used to indicate that names should revert to being looked up in the /// defining scope. void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an /// initializer for the declaration 'Dcl'. /// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a /// static data member of class X, names should be looked up in the scope of /// class X. void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl); /// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an /// initializer for the declaration 'Dcl'. void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl); /// Create a new lambda closure type. CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange, TypeSourceInfo *Info, bool KnownDependent, LambdaCaptureDefault CaptureDefault); /// Start the definition of a lambda expression. CXXMethodDecl * startLambdaDefinition(CXXRecordDecl *Class, SourceRange IntroducerRange, TypeSourceInfo *MethodType, SourceLocation EndLoc, ArrayRef<ParmVarDecl *> Params, ConstexprSpecKind ConstexprKind, Optional<std::pair<unsigned, Decl *>> Mangling = None); /// Endow the lambda scope info with the relevant properties. void buildLambdaScope(sema::LambdaScopeInfo *LSI, CXXMethodDecl *CallOperator, SourceRange IntroducerRange, LambdaCaptureDefault CaptureDefault, SourceLocation CaptureDefaultLoc, bool ExplicitParams, bool ExplicitResultType, bool Mutable); /// Perform initialization analysis of the init-capture and perform /// any implicit conversions such as an lvalue-to-rvalue conversion if /// not being used to initialize a reference. 
/// Parser-facing wrapper around buildLambdaInitCaptureInitialization:
/// forwards with no pack expansions (None) and treats any init kind other
/// than copy-init as direct-initialization, returning the deduced capture
/// type wrapped as a ParsedType.
ParsedType actOnLambdaInitCaptureInitialization(
    SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
    IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) {
  return ParsedType::make(buildLambdaInitCaptureInitialization(
      Loc, ByRef, EllipsisLoc, None, Id,
      InitKind != LambdaCaptureInitKind::CopyInit, Init));
}

/// Deduce the type of a lambda init-capture from its initializer (may
/// rewrite \p Init, e.g. applying implicit conversions — see the comment
/// preceding actOnLambdaInitCaptureInitialization above).
QualType buildLambdaInitCaptureInitialization(
    SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
    Optional<unsigned> NumExpansions, IdentifierInfo *Id, bool DirectInit,
    Expr *&Init);

/// Create a dummy variable within the declcontext of the lambda's
/// call operator, for name lookup purposes for a lambda init capture.
///
/// CodeGen handles emission of lambda captures, ignoring these dummy
/// variables appropriately.
VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc,
                                        QualType InitCaptureType,
                                        SourceLocation EllipsisLoc,
                                        IdentifierInfo *Id,
                                        unsigned InitStyle, Expr *Init);

/// Add an init-capture to a lambda scope.
void addInitCapture(sema::LambdaScopeInfo *LSI, VarDecl *Var);

/// Note that we have finished the explicit captures for the
/// given lambda.
void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI);

/// This is called after parsing the explicit template parameter list
/// on a lambda (if it exists) in C++2a.
void ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc,
                                              ArrayRef<NamedDecl *> TParams,
                                              SourceLocation RAngleLoc);

/// Introduce the lambda parameters into scope.
void addLambdaParameters(
    ArrayRef<LambdaIntroducer::LambdaCapture> Captures,
    CXXMethodDecl *CallOperator, Scope *CurScope);

/// Deduce a block or lambda's return type based on the return
/// statements present in the body.
void deduceClosureReturnType(sema::CapturingScopeInfo &CSI);

/// ActOnStartOfLambdaDefinition - This is called just before we start
/// parsing the body of a lambda; it analyzes the explicit captures and
/// arguments, and sets up various data-structures for the body of the
/// lambda.
void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro, Declarator &ParamInfo, Scope *CurScope); /// ActOnLambdaError - If there is an error parsing a lambda, this callback /// is invoked to pop the information about the lambda. void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope, bool IsInstantiation = false); /// ActOnLambdaExpr - This is called when the body of a lambda expression /// was successfully completed. ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body, Scope *CurScope); /// Does copying/destroying the captured variable have side effects? bool CaptureHasSideEffects(const sema::Capture &From); /// Diagnose if an explicit lambda capture is unused. Returns true if a /// diagnostic is emitted. bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange, const sema::Capture &From); /// Build a FieldDecl suitable to hold the given capture. FieldDecl *BuildCaptureField(RecordDecl *RD, const sema::Capture &Capture); /// Initialize the given capture with a suitable expression. ExprResult BuildCaptureInit(const sema::Capture &Capture, SourceLocation ImplicitCaptureLoc, bool IsOpenMPMapping = false); /// Complete a lambda-expression having processed and attached the /// lambda body. ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc, sema::LambdaScopeInfo *LSI); /// Get the return type to use for a lambda's conversion function(s) to /// function pointer type, given the type of the call operator. QualType getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType); /// Define the "body" of the conversion from a lambda object to a /// function pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. 
void DefineImplicitLambdaToFunctionPointerConversion( SourceLocation CurrentLoc, CXXConversionDecl *Conv); /// Define the "body" of the conversion from a lambda object to a /// block pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc, CXXConversionDecl *Conv); ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation, SourceLocation ConvLocation, CXXConversionDecl *Conv, Expr *Src); // ParseObjCStringLiteral - Parse Objective-C string literals. ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs, ArrayRef<Expr *> Strings); ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S); /// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the /// numeric literal expression. Type of the expression will be "NSNumber *" /// or "id" if NSNumber is unavailable. ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number); ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc, bool Value); ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements); /// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the /// '@' prefixed parenthesized expression. The type of the expression will /// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type /// of ValueType, which is allowed to be a built-in numeric type, "char *", /// "const char *" or C structure with attribute 'objc_boxable'. 
ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr); ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr, Expr *IndexExpr, ObjCMethodDecl *getterMethod, ObjCMethodDecl *setterMethod); ExprResult BuildObjCDictionaryLiteral(SourceRange SR, MutableArrayRef<ObjCDictionaryElement> Elements); ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc, TypeSourceInfo *EncodedTypeInfo, SourceLocation RParenLoc); ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl, CXXConversionDecl *Method, bool HadMultipleCandidates); ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc, SourceLocation EncodeLoc, SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc); /// ParseObjCSelectorExpression - Build selector expression for \@selector ExprResult ParseObjCSelectorExpression(Selector Sel, SourceLocation AtLoc, SourceLocation SelLoc, SourceLocation LParenLoc, SourceLocation RParenLoc, bool WarnMultipleSelectors); /// ParseObjCProtocolExpression - Build protocol expression for \@protocol ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName, SourceLocation AtLoc, SourceLocation ProtoLoc, SourceLocation LParenLoc, SourceLocation ProtoIdLoc, SourceLocation RParenLoc); //===--------------------------------------------------------------------===// // C++ Declarations // Decl *ActOnStartLinkageSpecification(Scope *S, SourceLocation ExternLoc, Expr *LangStr, SourceLocation LBraceLoc); Decl *ActOnFinishLinkageSpecification(Scope *S, Decl *LinkageSpec, SourceLocation RBraceLoc); //===--------------------------------------------------------------------===// // C++ Classes // CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS); bool isCurrentClassName(const IdentifierInfo &II, Scope *S, const CXXScopeSpec *SS = nullptr); bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS); bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc, SourceLocation ColonLoc, const 
ParsedAttributesView &Attrs); NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D, MultiTemplateParamsArg TemplateParameterLists, Expr *BitfieldWidth, const VirtSpecifiers &VS, InClassInitStyle InitStyle); void ActOnStartCXXInClassMemberInitializer(); void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl, SourceLocation EqualLoc, Expr *Init); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, SourceLocation LParenLoc, ArrayRef<Expr *> Args, SourceLocation RParenLoc, SourceLocation EllipsisLoc); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *InitList, SourceLocation EllipsisLoc); MemInitResult BuildMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *Init, SourceLocation EllipsisLoc); MemInitResult BuildMemberInitializer(ValueDecl *Member, Expr *Init, SourceLocation IdLoc); MemInitResult BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo, Expr *Init, CXXRecordDecl *ClassDecl, SourceLocation EllipsisLoc); MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo, Expr *Init, CXXRecordDecl *ClassDecl); bool SetDelegatingInitializer(CXXConstructorDecl *Constructor, CXXCtorInitializer *Initializer); bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors, ArrayRef<CXXCtorInitializer *> Initializers = None); void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation); /// MarkBaseAndMemberDestructorsReferenced - Given a record decl, /// mark all the non-trivial destructors of its members and bases as /// referenced. 
void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc, CXXRecordDecl *Record); /// The list of classes whose vtables have been used within /// this translation unit, and the source locations at which the /// first use occurred. typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse; /// The list of vtables that are required but have not yet been /// materialized. SmallVector<VTableUse, 16> VTableUses; /// The set of classes whose vtables have been used within /// this translation unit, and a bit that will be true if the vtable is /// required to be emitted (otherwise, it should be emitted only if needed /// by code generation). llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed; /// Load any externally-stored vtable uses. void LoadExternalVTableUses(); /// Note that the vtable for the given class was used at the /// given location. void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class, bool DefinitionRequired = false); /// Mark the exception specifications of all virtual member functions /// in the given class as needed. void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc, const CXXRecordDecl *RD); /// MarkVirtualMembersReferenced - Will mark all members of the given /// CXXRecordDecl referenced. void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD, bool ConstexprOnly = false); /// Define all of the vtables that have been used in this /// translation unit and reference any virtual members used by those /// vtables. /// /// \returns true if any work was done, false otherwise. bool DefineUsedVTables(); void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl); void ActOnMemInitializers(Decl *ConstructorDecl, SourceLocation ColonLoc, ArrayRef<CXXCtorInitializer*> MemInits, bool AnyErrors); /// Check class-level dllimport/dllexport attribute. The caller must /// ensure that referenceDLLExportedClassMethods is called some point later /// when all outer classes of Class are complete. 
void checkClassLevelDLLAttribute(CXXRecordDecl *Class); void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class); void referenceDLLExportedClassMethods(); void propagateDLLAttrToBaseClassTemplate( CXXRecordDecl *Class, Attr *ClassAttr, ClassTemplateSpecializationDecl *BaseTemplateSpec, SourceLocation BaseLoc); /// Add gsl::Pointer attribute to std::container::iterator /// \param ND The declaration that introduces the name /// std::container::iterator. \param UnderlyingRecord The record named by ND. void inferGslPointerAttribute(NamedDecl *ND, CXXRecordDecl *UnderlyingRecord); /// Add [[gsl::Owner]] and [[gsl::Pointer]] attributes for std:: types. void inferGslOwnerPointerAttribute(CXXRecordDecl *Record); /// Add [[gsl::Pointer]] attributes for std:: types. void inferGslPointerAttribute(TypedefNameDecl *TD); void CheckCompletedCXXClass(CXXRecordDecl *Record); /// Check that the C++ class annoated with "trivial_abi" satisfies all the /// conditions that are needed for the attribute to have an effect. 
void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD); void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc, Decl *TagDecl, SourceLocation LBrac, SourceLocation RBrac, const ParsedAttributesView &AttrList); void ActOnFinishCXXMemberDecls(); void ActOnFinishCXXNonNestedClass(Decl *D); void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param); unsigned ActOnReenterTemplateScope(Scope *S, Decl *Template); void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param); void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnFinishDelayedMemberInitializers(Decl *Record); void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD, CachedTokens &Toks); void UnmarkAsLateParsedTemplate(FunctionDecl *FD); bool IsInsideALocalClassWithinATemplateFunction(); Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, Expr *AssertMessageExpr, SourceLocation RParenLoc); Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, StringLiteral *AssertMessageExpr, SourceLocation RParenLoc, bool Failed); FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart, SourceLocation FriendLoc, TypeSourceInfo *TSInfo); Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS, MultiTemplateParamsArg TemplateParams); NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParams); QualType CheckConstructorDeclarator(Declarator &D, QualType R, StorageClass& SC); void CheckConstructor(CXXConstructorDecl *Constructor); QualType CheckDestructorDeclarator(Declarator &D, QualType R, StorageClass& SC); bool CheckDestructor(CXXDestructorDecl *Destructor); void CheckConversionDeclarator(Declarator &D, QualType &R, StorageClass& SC); Decl *ActOnConversionDeclarator(CXXConversionDecl 
*Conversion); void CheckDeductionGuideDeclarator(Declarator &D, QualType &R, StorageClass &SC); void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD); void CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD); void CheckDelayedMemberExceptionSpecs(); //===--------------------------------------------------------------------===// // C++ Derived Classes // /// ActOnBaseSpecifier - Parsed a base specifier CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class, SourceRange SpecifierRange, bool Virtual, AccessSpecifier Access, TypeSourceInfo *TInfo, SourceLocation EllipsisLoc); BaseResult ActOnBaseSpecifier(Decl *classdecl, SourceRange SpecifierRange, ParsedAttributes &Attrs, bool Virtual, AccessSpecifier Access, ParsedType basetype, SourceLocation BaseLoc, SourceLocation EllipsisLoc); bool AttachBaseSpecifiers(CXXRecordDecl *Class, MutableArrayRef<CXXBaseSpecifier *> Bases); void ActOnBaseSpecifiers(Decl *ClassDecl, MutableArrayRef<CXXBaseSpecifier *> Bases); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base, CXXBasePaths &Paths); // FIXME: I don't like this name. void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, SourceLocation Loc, SourceRange Range, CXXCastPath *BasePath = nullptr, bool IgnoreAccess = false); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, unsigned InaccessibleBaseID, unsigned AmbigiousBaseConvID, SourceLocation Loc, SourceRange Range, DeclarationName Name, CXXCastPath *BasePath, bool IgnoreAccess = false); std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths); bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionReturnType - Checks whether the return types are /// covariant, according to C++ [class.virtual]p5. 
bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New,
                                       const CXXMethodDecl *Old);

/// CheckOverridingFunctionExceptionSpec - Checks whether the exception
/// spec is a subset of base spec.
bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New,
                                          const CXXMethodDecl *Old);

/// Check a method declared pure ('= 0'); \p InitRange is the range of the
/// pure-specifier.
bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange);

/// CheckOverrideControl - Check C++11 override control semantics.
void CheckOverrideControl(NamedDecl *D);

/// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was
/// not used in the declaration of an overriding method.
void DiagnoseAbsenceOfOverrideControl(NamedDecl *D);

/// CheckForFunctionMarkedFinal - Checks whether a virtual member function
/// overrides a virtual member function marked 'final', according to
/// C++11 [class.virtual]p4.
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New,
                                            const CXXMethodDecl *Old);

//===--------------------------------------------------------------------===//
// C++ Access Control
//

/// The result of an access-control check.
enum AccessResult {
  AR_accessible,    ///< The entity is accessible.
  AR_inaccessible,  ///< The entity is not accessible.
  AR_dependent,     ///< Accessibility depends on template arguments.
  AR_delayed        ///< The check has been deferred (see the delayed
                    ///< access-check machinery declared further below).
};

bool SetMemberAccessSpecifier(NamedDecl *MemberDecl,
                              NamedDecl *PrevMemberDecl,
                              AccessSpecifier LexicalAS);

AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E,
                                         DeclAccessPair FoundDecl);
AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E,
                                         DeclAccessPair FoundDecl);
AccessResult CheckAllocationAccess(SourceLocation OperatorLoc,
                                   SourceRange PlacementRange,
                                   CXXRecordDecl *NamingClass,
                                   DeclAccessPair FoundDecl,
                                   bool Diagnose = true);
AccessResult CheckConstructorAccess(SourceLocation Loc,
                                    CXXConstructorDecl *D,
                                    DeclAccessPair FoundDecl,
                                    const InitializedEntity &Entity,
                                    bool IsCopyBindingRefToTemp = false);
AccessResult CheckConstructorAccess(SourceLocation Loc,
                                    CXXConstructorDecl *D,
                                    DeclAccessPair FoundDecl,
                                    const InitializedEntity &Entity,
                                    const PartialDiagnostic &PDiag);
// NOTE: this declaration continues on the next source line.
AccessResult CheckDestructorAccess(SourceLocation Loc,
                                   CXXDestructorDecl *Dtor,
                                   const PartialDiagnostic &PDiag,
                                   QualType objectType = QualType());

AccessResult CheckFriendAccess(NamedDecl *D);
AccessResult CheckMemberAccess(SourceLocation UseLoc,
                               CXXRecordDecl *NamingClass,
                               DeclAccessPair Found);
AccessResult
CheckStructuredBindingMemberAccess(SourceLocation UseLoc,
                                   CXXRecordDecl *DecomposedClass,
                                   DeclAccessPair Field);
AccessResult CheckMemberOperatorAccess(SourceLocation Loc,
                                       Expr *ObjectExpr,
                                       Expr *ArgExpr,
                                       DeclAccessPair FoundDecl);
AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr,
                                        DeclAccessPair FoundDecl);
AccessResult CheckBaseClassAccess(SourceLocation AccessLoc,
                                  QualType Base, QualType Derived,
                                  const CXXBasePath &Path,
                                  unsigned DiagID,
                                  bool ForceCheck = false,
                                  bool ForceUnprivileged = false);
void CheckLookupAccess(const LookupResult &R);
bool IsSimplyAccessible(NamedDecl *Decl, CXXRecordDecl *NamingClass,
                        QualType BaseType);
bool isSpecialMemberAccessibleForDeletion(CXXMethodDecl *decl,
                                          AccessSpecifier access,
                                          QualType objectType);

/// Perform an access check that was deferred because it depended on an
/// uninstantiated template context.
void HandleDependentAccessCheck(
    const DependentDiagnostic &DD,
    const MultiLevelTemplateArgumentList &TemplateArgs);
void PerformDependentDiagnostics(
    const DeclContext *Pattern,
    const MultiLevelTemplateArgumentList &TemplateArgs);

void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);

/// When true, access checking violations are treated as SFINAE
/// failures rather than hard errors.
bool AccessCheckingSFINAE;

/// Which declaration context an abstract type illegally appeared in;
/// selects the wording of the resulting diagnostic.
enum AbstractDiagSelID {
  AbstractNone = -1,           ///< No specific context.
  AbstractReturnType,          ///< As a function return type.
  AbstractParamType,           ///< As a function parameter type.
  AbstractVariableType,        ///< As a variable's type.
  AbstractFieldType,           ///< As a field's type.
  AbstractIvarType,            ///< As an Objective-C ivar's type.
  AbstractSynthesizedIvarType, ///< As a synthesized ivar's type.
  AbstractArrayType            ///< As an array element type.
};

bool isAbstractType(SourceLocation Loc, QualType T);
bool RequireNonAbstractType(SourceLocation Loc, QualType T,
                            TypeDiagnoser &Diagnoser);
// NOTE: this template header continues on the next source line.
template <typename...
Ts>
/// Convenience overload: packs \p DiagID and the remaining arguments into
/// a BoundTypeDiagnoser and forwards to the TypeDiagnoser overload above.
bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID,
                            const Ts &...Args) {
  BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
  return RequireNonAbstractType(Loc, T, Diagnoser);
}

void DiagnoseAbstractType(const CXXRecordDecl *RD);

//===--------------------------------------------------------------------===//
// C++ Overloaded Operators [C++ 13.5]
//

bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl);

bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl);

//===--------------------------------------------------------------------===//
// C++ Templates [C++ 14]
//
void FilterAcceptableTemplateNames(LookupResult &R,
                                   bool AllowFunctionTemplates = true,
                                   bool AllowDependent = true);
bool hasAnyAcceptableTemplateNames(LookupResult &R,
                                   bool AllowFunctionTemplates = true,
                                   bool AllowDependent = true,
                                   bool AllowNonTemplateFunctions = false);

/// Try to interpret the lookup result D as a template-name.
///
/// \param D A declaration found by name lookup.
/// \param AllowFunctionTemplates Whether function templates should be
///        considered valid results.
/// \param AllowDependent Whether unresolved using declarations (that might
///        name templates) should be considered valid results.
NamedDecl *getAsTemplateNameDecl(NamedDecl *D,
                                 bool AllowFunctionTemplates = true,
                                 bool AllowDependent = true);

enum class AssumedTemplateKind {
  /// This is not assumed to be a template name.
  None,
  /// This is assumed to be a template name because lookup found nothing.
  FoundNothing,
  /// This is assumed to be a template name because lookup found one or more
  /// functions (but no function templates).
FoundFunctions, }; bool LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS, QualType ObjectType, bool EnteringContext, bool &MemberOfUnknownSpecialization, SourceLocation TemplateKWLoc = SourceLocation(), AssumedTemplateKind *ATK = nullptr); TemplateNameKind isTemplateName(Scope *S, CXXScopeSpec &SS, bool hasTemplateKeyword, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool &MemberOfUnknownSpecialization); /// Try to resolve an undeclared template name as a type template. /// /// Sets II to the identifier corresponding to the template name, and updates /// Name to a corresponding (typo-corrected) type template name and TNK to /// the corresponding kind, if possible. void ActOnUndeclaredTypeTemplateName(Scope *S, TemplateTy &Name, TemplateNameKind &TNK, SourceLocation NameLoc, IdentifierInfo *&II); bool resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name, SourceLocation NameLoc, bool Diagnose = true); /// Determine whether a particular identifier might be the name in a C++1z /// deduction-guide declaration. 
bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name, SourceLocation NameLoc, ParsedTemplateTy *Template = nullptr); bool DiagnoseUnknownTemplateName(const IdentifierInfo &II, SourceLocation IILoc, Scope *S, const CXXScopeSpec *SS, TemplateTy &SuggestedTemplate, TemplateNameKind &SuggestedKind); bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation, NamedDecl *Instantiation, bool InstantiatedFromMember, const NamedDecl *Pattern, const NamedDecl *PatternDef, TemplateSpecializationKind TSK, bool Complain = true); void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl); TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl); NamedDecl *ActOnTypeParameter(Scope *S, bool Typename, SourceLocation EllipsisLoc, SourceLocation KeyLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedType DefaultArg); QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI, SourceLocation Loc); QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc); NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D, unsigned Depth, unsigned Position, SourceLocation EqualLoc, Expr *DefaultArg); NamedDecl *ActOnTemplateTemplateParameter(Scope *S, SourceLocation TmpLoc, TemplateParameterList *Params, SourceLocation EllipsisLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedTemplateArgument DefaultArg); TemplateParameterList * ActOnTemplateParameterList(unsigned Depth, SourceLocation ExportLoc, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ArrayRef<NamedDecl *> Params, SourceLocation RAngleLoc, Expr *RequiresClause); /// The context in which we are checking a template parameter list. 
enum TemplateParamListContext {
  TPC_ClassTemplate,
  TPC_VarTemplate,
  TPC_FunctionTemplate,
  TPC_ClassTemplateMember,
  TPC_FriendClassTemplate,
  TPC_FriendFunctionTemplate,
  TPC_FriendFunctionTemplateDefinition,
  TPC_TypeAliasTemplate
};

/// Check a new template parameter list against the previous declaration's
/// parameter list (if any), in the context indicated by \p TPC.
bool CheckTemplateParameterList(TemplateParameterList *NewParams,
                                TemplateParameterList *OldParams,
                                TemplateParamListContext TPC,
                                SkipBodyInfo *SkipBody = nullptr);

/// Match the given template parameter lists to the declaration's
/// nested-name-specifier, selecting the parameter list that applies to the
/// declaration itself.
TemplateParameterList *MatchTemplateParametersToScopeSpecifier(
    SourceLocation DeclStartLoc, SourceLocation DeclLoc,
    const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId,
    ArrayRef<TemplateParameterList *> ParamLists, bool IsFriend,
    bool &IsMemberSpecialization, bool &Invalid);

/// Check and act on a class template declaration or redeclaration.
DeclResult CheckClassTemplate(
    Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
    CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc,
    const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams,
    AccessSpecifier AS, SourceLocation ModulePrivateLoc,
    SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists,
    TemplateParameterList **OuterTemplateParamLists,
    SkipBodyInfo *SkipBody = nullptr);

/// Produce a TemplateArgumentLoc for \p Arg with trivial (location-only)
/// source information at \p Loc.
TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg,
                                                  QualType NTTPType,
                                                  SourceLocation Loc);

/// Translate parsed template arguments in \p In into semantic template
/// arguments in \p Out.
void translateTemplateArguments(const ASTTemplateArgsPtr &In,
                                TemplateArgumentListInfo &Out);

ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType);

/// Emit notes for all templates found for \p Name.
void NoteAllFoundTemplates(TemplateName Name);

/// Check a template-id naming a type and form the corresponding type.
QualType CheckTemplateIdType(TemplateName Template,
                             SourceLocation TemplateLoc,
                             TemplateArgumentListInfo &TemplateArgs);

/// Called when the parser has parsed a template-id that names a type.
TypeResult ActOnTemplateIdType(Scope *S, CXXScopeSpec &SS,
                               SourceLocation TemplateKWLoc,
                               TemplateTy Template,
                               IdentifierInfo *TemplateII,
                               SourceLocation TemplateIILoc,
                               SourceLocation LAngleLoc,
                               ASTTemplateArgsPtr TemplateArgs,
                               SourceLocation RAngleLoc,
                               bool IsCtorOrDtorName = false,
                               bool IsClassName = false);

/// Parsed an elaborated-type-specifier that refers to a template-id,
/// such as \c class T::template apply<U>.
TypeResult ActOnTagTemplateIdType(TagUseKind TUK, TypeSpecifierType TagSpec,
                                  SourceLocation TagLoc, CXXScopeSpec &SS,
                                  SourceLocation TemplateKWLoc,
                                  TemplateTy TemplateD,
                                  SourceLocation TemplateLoc,
                                  SourceLocation LAngleLoc,
                                  ASTTemplateArgsPtr TemplateArgsIn,
                                  SourceLocation RAngleLoc);

/// Act on a variable template (partial) specialization declarator.
DeclResult ActOnVarTemplateSpecialization(
    Scope *S, Declarator &D, TypeSourceInfo *DI,
    SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams,
    StorageClass SC, bool IsPartialSpecialization);

/// Check a template-id naming a variable template specialization.
DeclResult CheckVarTemplateId(VarTemplateDecl *Template,
                              SourceLocation TemplateLoc,
                              SourceLocation TemplateNameLoc,
                              const TemplateArgumentListInfo &TemplateArgs);

ExprResult CheckVarTemplateId(const CXXScopeSpec &SS,
                              const DeclarationNameInfo &NameInfo,
                              VarTemplateDecl *Template,
                              SourceLocation TemplateLoc,
                              const TemplateArgumentListInfo *TemplateArgs);

/// Check a template-id naming a concept and build the corresponding
/// expression.
ExprResult CheckConceptTemplateId(const CXXScopeSpec &SS,
                                  const DeclarationNameInfo &NameInfo,
                                  ConceptDecl *Template,
                                  SourceLocation TemplateLoc,
                                  const TemplateArgumentListInfo *TemplateArgs);

/// Diagnose a use of \p Name that requires template arguments but was
/// given none.
void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc);

/// Build an expression referring to a (possibly overloaded) template-id.
ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS,
                               SourceLocation TemplateKWLoc, LookupResult &R,
                               bool RequiresADL,
                               const TemplateArgumentListInfo *TemplateArgs);

ExprResult BuildQualifiedTemplateIdExpr(
    CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
    const DeclarationNameInfo &NameInfo,
    const TemplateArgumentListInfo *TemplateArgs);

/// Form a dependent template name (e.g., following the 'template' keyword
/// after a nested-name-specifier).
TemplateNameKind ActOnDependentTemplateName(
    Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
    const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext,
    TemplateTy &Template, bool AllowInjectedClassName = false);

/// Act on an explicit or partial specialization of a class template.
DeclResult ActOnClassTemplateSpecialization(
    Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
    SourceLocation ModulePrivateLoc, TemplateIdAnnotation &TemplateId,
    const ParsedAttributesView &Attr,
    MultiTemplateParamsArg TemplateParameterLists,
    SkipBodyInfo *SkipBody = nullptr);

bool
CheckTemplatePartialSpecializationArgs(SourceLocation Loc, TemplateDecl *PrimaryTemplate, unsigned NumExplicitArgs, ArrayRef<TemplateArgument> Args); void CheckTemplatePartialSpecialization( ClassTemplatePartialSpecializationDecl *Partial); void CheckTemplatePartialSpecialization( VarTemplatePartialSpecializationDecl *Partial); Decl *ActOnTemplateDeclarator(Scope *S, MultiTemplateParamsArg TemplateParameterLists, Declarator &D); bool CheckSpecializationInstantiationRedecl(SourceLocation NewLoc, TemplateSpecializationKind NewTSK, NamedDecl *PrevDecl, TemplateSpecializationKind PrevTSK, SourceLocation PrevPtOfInstantiation, bool &SuppressNew); bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD, const TemplateArgumentListInfo &ExplicitTemplateArgs, LookupResult &Previous); bool CheckFunctionTemplateSpecialization( FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs, LookupResult &Previous, bool QualifiedFriend = false); bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous); void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous); DeclResult ActOnExplicitInstantiation( Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS, TemplateTy Template, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, Declarator &D); TemplateArgumentLoc SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, Decl *Param, SmallVectorImpl<TemplateArgument> 
&Converted, bool &HasDefaultArg); /// Specifies the context in which a particular template /// argument is being checked. enum CheckTemplateArgumentKind { /// The template argument was specified in the code or was /// instantiated with some deduced template arguments. CTAK_Specified, /// The template argument was deduced via template argument /// deduction. CTAK_Deduced, /// The template argument was deduced from an array bound /// via template argument deduction. CTAK_DeducedFromArrayBound }; bool CheckTemplateArgument(NamedDecl *Param, TemplateArgumentLoc &Arg, NamedDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, unsigned ArgumentPackIndex, SmallVectorImpl<TemplateArgument> &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); /// Check that the given template arguments can be be provided to /// the given template, converting the arguments along the way. /// /// \param Template The template to which the template arguments are being /// provided. /// /// \param TemplateLoc The location of the template name in the source. /// /// \param TemplateArgs The list of template arguments. If the template is /// a template template parameter, this function may extend the set of /// template arguments to also include substituted, defaulted template /// arguments. /// /// \param PartialTemplateArgs True if the list of template arguments is /// intentionally partial, e.g., because we're checking just the initial /// set of template arguments. /// /// \param Converted Will receive the converted, canonicalized template /// arguments. /// /// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to /// contain the converted forms of the template arguments as written. /// Otherwise, \p TemplateArgs will not be modified. /// /// \returns true if an error occurred, false otherwise. 
bool CheckTemplateArgumentList(TemplateDecl *Template,
                               SourceLocation TemplateLoc,
                               TemplateArgumentListInfo &TemplateArgs,
                               bool PartialTemplateArgs,
                               SmallVectorImpl<TemplateArgument> &Converted,
                               bool UpdateArgsWithConversions = true);

/// Check a single template type argument against a template type
/// parameter, appending the converted argument to \p Converted.
bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
                               TemplateArgumentLoc &Arg,
                               SmallVectorImpl<TemplateArgument> &Converted);

/// Check a type argument against a template type parameter.
bool CheckTemplateArgument(TemplateTypeParmDecl *Param,
                           TypeSourceInfo *Arg);

/// Check a non-type template argument expression against the (already
/// instantiated) parameter type, producing the converted argument.
ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
                                 QualType InstantiatedParamType, Expr *Arg,
                                 TemplateArgument &Converted,
                                 CheckTemplateArgumentKind CTAK =
                                     CTAK_Specified);

/// Check a template argument given for a template template parameter with
/// the parameter list \p Params.
bool CheckTemplateTemplateArgument(TemplateParameterList *Params,
                                   TemplateArgumentLoc &Arg);

/// Build an expression from a declaration-valued template argument.
ExprResult
BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
                                        QualType ParamType,
                                        SourceLocation Loc);

/// Build an expression from an integral-valued template argument.
ExprResult
BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg,
                                            SourceLocation Loc);

/// Enumeration describing how template parameter lists are compared
/// for equality.
enum TemplateParameterListEqualKind {
  /// We are matching the template parameter lists of two templates
  /// that might be redeclarations.
  ///
  /// \code
  /// template<typename T> struct X;
  /// template<typename T> struct X;
  /// \endcode
  TPL_TemplateMatch,

  /// We are matching the template parameter lists of two template
  /// template parameters as part of matching the template parameter lists
  /// of two templates that might be redeclarations.
  ///
  /// \code
  /// template<template<int I> class TT> struct X;
  /// template<template<int Value> class Other> struct X;
  /// \endcode
  TPL_TemplateTemplateParmMatch,

  /// We are matching the template parameter lists of a template
  /// template argument against the template parameter lists of a template
  /// template parameter.
  ///
  /// \code
  /// template<template<int Value> class Metafun> struct X;
  /// template<int Value> struct integer_c;
  /// X<integer_c> xic;
  /// \endcode
  TPL_TemplateTemplateArgumentMatch
};

/// Determine whether two template parameter lists are equal, in the sense
/// required by \p Kind, optionally emitting diagnostics.
bool TemplateParameterListsAreEqual(TemplateParameterList *New,
                                    TemplateParameterList *Old,
                                    bool Complain,
                                    TemplateParameterListEqualKind Kind,
                                    SourceLocation TemplateArgLoc =
                                        SourceLocation());

/// Check that a template declaration appears in an acceptable scope.
bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams);

/// Called when the parser has parsed a C++ typename
/// specifier, e.g., "typename T::type".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param II the identifier we're retrieving (e.g., 'type' in the example).
/// \param IdLoc the location of the identifier.
TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
                             const CXXScopeSpec &SS,
                             const IdentifierInfo &II, SourceLocation IdLoc);

/// Called when the parser has parsed a C++ typename
/// specifier that ends in a template-id, e.g.,
/// "typename MetaFun::template apply<T1, T2>".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param TemplateLoc the location of the 'template' keyword, if any.
/// \param TemplateName The template name.
/// \param TemplateII The identifier used to name the template.
/// \param TemplateIILoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket  ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket  ('>').
TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
                             const CXXScopeSpec &SS,
                             SourceLocation TemplateLoc,
                             TemplateTy TemplateName,
                             IdentifierInfo *TemplateII,
                             SourceLocation TemplateIILoc,
                             SourceLocation LAngleLoc,
                             ASTTemplateArgsPtr TemplateArgs,
                             SourceLocation RAngleLoc);

/// Build the type named by a typename specifier
/// (keyword + nested-name-specifier + identifier).
QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
                           SourceLocation KeywordLoc,
                           NestedNameSpecifierLoc QualifierLoc,
                           const IdentifierInfo &II, SourceLocation IILoc);

/// Rebuild the given type within the context of the current instantiation.
TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T,
                                                  SourceLocation Loc,
                                                  DeclarationName Name);
bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS);
ExprResult RebuildExprInCurrentInstantiation(Expr *E);
bool RebuildTemplateParamsInCurrentInstantiation(
    TemplateParameterList *Params);

/// Produce a human-readable rendering of the bindings of the given
/// template arguments to the given template parameters.
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
                                const TemplateArgumentList &Args);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
                                const TemplateArgument *Args,
                                unsigned NumArgs);

// Concepts

/// Act on the definition of a concept.
Decl *ActOnConceptDefinition(
    Scope *S, MultiTemplateParamsArg TemplateParameterLists,
    IdentifierInfo *Name, SourceLocation NameLoc, Expr *ConstraintExpr);

//===--------------------------------------------------------------------===//
// C++ Variadic Templates (C++0x [temp.variadic])
//===--------------------------------------------------------------------===//

/// Determine whether an unexpanded parameter pack might be permitted in this
/// location. Useful for error recovery.
bool isUnexpandedParameterPackPermitted();

/// The context in which an unexpanded parameter pack is
/// being diagnosed.
///
/// Note that the values of this enumeration line up with the first
/// argument to the \c err_unexpanded_parameter_pack diagnostic.
enum UnexpandedParameterPackContext {
  /// An arbitrary expression.
  UPPC_Expression = 0,

  /// The base type of a class type.
  UPPC_BaseType,

  /// The type of an arbitrary declaration.
  UPPC_DeclarationType,

  /// The type of a data member.
  UPPC_DataMemberType,

  /// The size of a bit-field.
  UPPC_BitFieldWidth,

  /// The expression in a static assertion.
  UPPC_StaticAssertExpression,

  /// The fixed underlying type of an enumeration.
  UPPC_FixedUnderlyingType,

  /// The enumerator value.
  UPPC_EnumeratorValue,

  /// A using declaration.
  UPPC_UsingDeclaration,

  /// A friend declaration.
  UPPC_FriendDeclaration,

  /// A declaration qualifier.
  UPPC_DeclarationQualifier,

  /// An initializer.
  UPPC_Initializer,

  /// A default argument.
  UPPC_DefaultArgument,

  /// The type of a non-type template parameter.
  UPPC_NonTypeTemplateParameterType,

  /// The type of an exception.
  UPPC_ExceptionType,

  /// Partial specialization.
  UPPC_PartialSpecialization,

  /// Microsoft __if_exists.
  UPPC_IfExists,

  /// Microsoft __if_not_exists.
  UPPC_IfNotExists,

  /// Lambda expression.
  UPPC_Lambda,

  /// Block expression,
  UPPC_Block
};

/// Diagnose unexpanded parameter packs.
///
/// \param Loc The location at which we should emit the diagnostic.
///
/// \param UPPC The context in which we are diagnosing unexpanded
/// parameter packs.
///
/// \param Unexpanded the set of unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPacks(
    SourceLocation Loc, UnexpandedParameterPackContext UPPC,
    ArrayRef<UnexpandedParameterPack> Unexpanded);

/// If the given type contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The source location where a diagnostic should be emitted.
///
/// \param T The type that is being checked for unexpanded parameter
/// packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T,
                                     UnexpandedParameterPackContext UPPC);

/// If the given expression contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param E The expression that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(Expr *E,
                                     UnexpandedParameterPackContext UPPC =
                                         UPPC_Expression);

/// If the given nested-name-specifier contains an unexpanded
/// parameter pack, diagnose the error.
///
/// \param SS The nested-name-specifier that is being checked for
/// unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS,
                                     UnexpandedParameterPackContext UPPC);

/// If the given name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param NameInfo The name (with source location information) that
/// is being checked for unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo,
                                     UnexpandedParameterPackContext UPPC);

/// If the given template name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The location of the template name.
///
/// \param Template The template name that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc,
                                     TemplateName Template,
                                     UnexpandedParameterPackContext UPPC);

/// If the given template argument contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param Arg The template argument that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg,
                                     UnexpandedParameterPackContext UPPC);

/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(
    TemplateArgument Arg,
    SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);

/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(
    TemplateArgumentLoc Arg,
    SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);

/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param T The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(
    QualType T, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);

/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param TL The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(
    TypeLoc TL, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);

/// Collect the set of unexpanded parameter packs within the given
/// nested-name-specifier.
///
/// \param NNS The nested-name-specifier that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(
    NestedNameSpecifierLoc NNS,
    SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);

/// Collect the set of unexpanded parameter packs within the given
/// name.
///
/// \param NameInfo The name that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(
    const DeclarationNameInfo &NameInfo,
    SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);

/// Invoked when parsing a template argument followed by an
/// ellipsis, which creates a pack expansion.
///
/// \param Arg The template argument preceding the ellipsis, which
/// may already be invalid.
///
/// \param EllipsisLoc The location of the ellipsis.
ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg,
                                          SourceLocation EllipsisLoc);

/// Invoked when parsing a type followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Type The type preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc);

/// Construct a pack expansion type from the pattern of the pack
/// expansion.
TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern,
                                   SourceLocation EllipsisLoc,
                                   Optional<unsigned> NumExpansions);

/// Construct a pack expansion type from the pattern of the pack
/// expansion.
QualType CheckPackExpansion(QualType Pattern, SourceRange PatternRange,
                            SourceLocation EllipsisLoc,
                            Optional<unsigned> NumExpansions);

/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc);

/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc,
                              Optional<unsigned> NumExpansions);

/// Determine whether we could expand a pack expansion with the
/// given set of parameter packs into separate arguments by repeatedly
/// transforming the pattern.
///
/// \param EllipsisLoc The location of the ellipsis that identifies the
/// pack expansion.
///
/// \param PatternRange The source range that covers the entire pattern of
/// the pack expansion.
///
/// \param Unexpanded The set of unexpanded parameter packs within the
/// pattern.
///
/// \param ShouldExpand Will be set to \c true if the transformer should
/// expand the corresponding pack expansions into separate arguments. When
/// set, \c NumExpansions must also be set.
///
/// \param RetainExpansion Whether the caller should add an unexpanded
/// pack expansion after all of the expanded arguments. This is used
/// when extending explicitly-specified template argument packs per
/// C++0x [temp.arg.explicit]p9.
///
/// \param NumExpansions The number of separate arguments that will be in
/// the expanded form of the corresponding pack expansion. This is both an
/// input and an output parameter, which can be set by the caller if the
/// number of expansions is known a priori (e.g., due to a prior substitution)
/// and will be set by the callee when the number of expansions is known.
/// The callee must set this value when \c ShouldExpand is \c true; it may
/// set this value in other cases.
///
/// \returns true if an error occurred (e.g., because the parameter packs
/// are to be instantiated with arguments of different lengths), false
/// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions)
/// must be set.
bool CheckParameterPacksForExpansion(
    SourceLocation EllipsisLoc, SourceRange PatternRange,
    ArrayRef<UnexpandedParameterPack> Unexpanded,
    const MultiLevelTemplateArgumentList &TemplateArgs, bool &ShouldExpand,
    bool &RetainExpansion, Optional<unsigned> &NumExpansions);

/// Determine the number of arguments in the given pack expansion
/// type.
///
/// This routine assumes that the number of arguments in the expansion is
/// consistent across all of the unexpanded parameter packs in its pattern.
///
/// Returns an empty Optional if the type can't be expanded.
Optional<unsigned> getNumArgumentsInExpansion(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs); /// Determine whether the given declarator contains any unexpanded /// parameter packs. /// /// This routine is used by the parser to disambiguate function declarators /// with an ellipsis prior to the ')', e.g., /// /// \code /// void f(T...); /// \endcode /// /// To determine whether we have an (unnamed) function parameter pack or /// a variadic function. /// /// \returns true if the declarator contains any unexpanded parameter packs, /// false otherwise. bool containsUnexpandedParameterPacks(Declarator &D); /// Returns the pattern of the pack expansion for a template argument. /// /// \param OrigLoc The template argument to expand. /// /// \param Ellipsis Will be set to the location of the ellipsis. /// /// \param NumExpansions Will be set to the number of expansions that will /// be generated from this pack expansion, if known a priori. TemplateArgumentLoc getTemplateArgumentPackExpansionPattern( TemplateArgumentLoc OrigLoc, SourceLocation &Ellipsis, Optional<unsigned> &NumExpansions) const; /// Given a template argument that contains an unexpanded parameter pack, but /// which has already been substituted, attempt to determine the number of /// elements that will be produced once this argument is fully-expanded. /// /// This is intended for use when transforming 'sizeof...(Arg)' in order to /// avoid actually expanding the pack where possible. Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg); //===--------------------------------------------------------------------===// // C++ Template Argument Deduction (C++ [temp.deduct]) //===--------------------------------------------------------------------===// /// Adjust the type \p ArgFunctionType to match the calling convention, /// noreturn, and optionally the exception specification of \p FunctionType. /// Deduction often wants to ignore these properties when matching function /// types. 
QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType,
                             bool AdjustExceptionSpec = false);

/// Describes the result of template argument deduction.
///
/// The TemplateDeductionResult enumeration describes the result of
/// template argument deduction, as returned from
/// DeduceTemplateArguments(). The separate TemplateDeductionInfo
/// structure provides additional information about the results of
/// template argument deduction, e.g., the deduced template argument
/// list (if successful) or the specific template parameters or
/// deduced arguments that were involved in the failure.
enum TemplateDeductionResult {
  /// Template argument deduction was successful.
  TDK_Success = 0,

  /// The declaration was invalid; do nothing.
  TDK_Invalid,

  /// Template argument deduction exceeded the maximum template
  /// instantiation depth (which has already been diagnosed).
  TDK_InstantiationDepth,

  /// Template argument deduction did not deduce a value
  /// for every template parameter.
  TDK_Incomplete,

  /// Template argument deduction did not deduce a value for every
  /// expansion of an expanded template parameter pack.
  TDK_IncompletePack,

  /// Template argument deduction produced inconsistent
  /// deduced values for the given template parameter.
  TDK_Inconsistent,

  /// Template argument deduction failed due to inconsistent
  /// cv-qualifiers on a template parameter type that would
  /// otherwise be deduced, e.g., we tried to deduce T in "const T"
  /// but were given a non-const "X".
  TDK_Underqualified,

  /// Substitution of the deduced template argument values
  /// resulted in an error.
  TDK_SubstitutionFailure,

  /// After substituting deduced template arguments, a dependent
  /// parameter type did not match the corresponding argument.
  TDK_DeducedMismatch,

  /// After substituting deduced template arguments, an element of
  /// a dependent parameter type did not match the corresponding element
  /// of the corresponding argument (when deducing from an initializer list).
  TDK_DeducedMismatchNested,

  /// A non-dependent component of the parameter did not match the
  /// corresponding component of the argument.
  TDK_NonDeducedMismatch,

  /// When performing template argument deduction for a function
  /// template, there were too many call arguments.
  TDK_TooManyArguments,

  /// When performing template argument deduction for a function
  /// template, there were too few call arguments.
  TDK_TooFewArguments,

  /// The explicitly-specified template arguments were not valid
  /// template arguments for the given template.
  TDK_InvalidExplicitArguments,

  /// Checking non-dependent argument conversions failed.
  TDK_NonDependentConversionFailure,

  /// Deduction failed; that's all we know.
  TDK_MiscellaneousDeductionFailure,

  /// CUDA Target attributes do not match.
  TDK_CUDATargetMismatch
};

/// Deduce the template arguments of a class template partial
/// specialization from the given template argument list.
TemplateDeductionResult
DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
                        const TemplateArgumentList &TemplateArgs,
                        sema::TemplateDeductionInfo &Info);

/// Deduce the template arguments of a variable template partial
/// specialization from the given template argument list.
TemplateDeductionResult
DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
                        const TemplateArgumentList &TemplateArgs,
                        sema::TemplateDeductionInfo &Info);

/// Substitute the explicitly-specified template arguments into a function
/// template's signature prior to deduction of the remaining arguments.
TemplateDeductionResult SubstituteExplicitTemplateArguments(
    FunctionTemplateDecl *FunctionTemplate,
    TemplateArgumentListInfo &ExplicitTemplateArgs,
    SmallVectorImpl<DeducedTemplateArgument> &Deduced,
    SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType,
    sema::TemplateDeductionInfo &Info);

/// \brief A function argument from which we performed template argument
/// deduction for a call.
struct OriginalCallArg { OriginalCallArg(QualType OriginalParamType, bool DecomposedParam, unsigned ArgIdx, QualType OriginalArgType) : OriginalParamType(OriginalParamType), DecomposedParam(DecomposedParam), ArgIdx(ArgIdx), OriginalArgType(OriginalArgType) {} QualType OriginalParamType; bool DecomposedParam; unsigned ArgIdx; QualType OriginalArgType; }; TemplateDeductionResult FinishTemplateArgumentDeduction( FunctionTemplateDecl *FunctionTemplate, SmallVectorImpl<DeducedTemplateArgument> &Deduced, unsigned NumExplicitlySpecified, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr, bool PartialOverloading = false, llvm::function_ref<bool()> CheckNonDependent = []{ return false; }); TemplateDeductionResult DeduceTemplateArguments( FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool PartialOverloading, llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ArgFunctionType, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool IsAddressOfFunction = false); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, QualType ToType, CXXConversionDecl *&Specialization, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool IsAddressOfFunction = false); /// Substitute Replacement for \p auto in \p TypeWithAuto QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement); /// Substitute Replacement for auto in TypeWithAuto TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo 
*TypeWithAuto, QualType Replacement); /// Completely replace the \c auto in \p TypeWithAuto by /// \p Replacement. This does not retain any \c auto type sugar. QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement); /// Result type of DeduceAutoType. enum DeduceAutoResult { DAR_Succeeded, DAR_Failed, DAR_FailedAlreadyDiagnosed }; DeduceAutoResult DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result, Optional<unsigned> DependentDeductionDepth = None); DeduceAutoResult DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result, Optional<unsigned> DependentDeductionDepth = None); void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init); bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc, bool Diagnose = true); /// Declare implicit deduction guides for a class template if we've /// not already done so. void DeclareImplicitDeductionGuides(TemplateDecl *Template, SourceLocation Loc); QualType DeduceTemplateSpecializationFromInitializer( TypeSourceInfo *TInfo, const InitializedEntity &Entity, const InitializationKind &Kind, MultiExprArg Init); QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name, QualType Type, TypeSourceInfo *TSI, SourceRange Range, bool DirectInit, Expr *Init); TypeLoc getReturnTypeLoc(FunctionDecl *FD) const; bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD, SourceLocation ReturnLoc, Expr *&RetExpr, AutoType *AT); FunctionTemplateDecl *getMoreSpecializedTemplate(FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc, TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1, unsigned NumCallArguments2); UnresolvedSetIterator getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd, TemplateSpecCandidateSet &FailedCandidates, SourceLocation Loc, const PartialDiagnostic &NoneDiag, const PartialDiagnostic &AmbigDiag, const PartialDiagnostic &CandidateDiag, bool Complain = true, QualType TargetType = QualType()); 
ClassTemplatePartialSpecializationDecl *
getMoreSpecializedPartialSpecialization(
    ClassTemplatePartialSpecializationDecl *PS1,
    ClassTemplatePartialSpecializationDecl *PS2, SourceLocation Loc);

bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T,
                                  sema::TemplateDeductionInfo &Info);

VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization(
    VarTemplatePartialSpecializationDecl *PS1,
    VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc);

bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T,
                                  sema::TemplateDeductionInfo &Info);

bool isTemplateTemplateParameterAtLeastAsSpecializedAs(
    TemplateParameterList *P, TemplateDecl *AArg, SourceLocation Loc);

/// Mark which template parameters are used (or deducible) from the
/// given template argument list, recording the result in \p Used.
void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs,
                                bool OnlyDeduced, unsigned Depth,
                                llvm::SmallBitVector &Used);

/// Convenience overload that forwards to the static form below using
/// this Sema's ASTContext.
void MarkDeducedTemplateParameters(
    const FunctionTemplateDecl *FunctionTemplate,
    llvm::SmallBitVector &Deduced) {
  return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced);
}
static void MarkDeducedTemplateParameters(ASTContext &Ctx,
                                 const FunctionTemplateDecl *FunctionTemplate,
                                          llvm::SmallBitVector &Deduced);

//===--------------------------------------------------------------------===//
// C++ Template Instantiation
//

MultiLevelTemplateArgumentList getTemplateInstantiationArgs(
    NamedDecl *D, const TemplateArgumentList *Innermost = nullptr,
    bool RelativeToPrimary = false, const FunctionDecl *Pattern = nullptr);

/// A context in which code is being synthesized (where a source location
/// alone is not sufficient to identify the context). This covers template
/// instantiation and various forms of implicitly-generated functions.
struct CodeSynthesisContext {
  /// The kind of template instantiation we are performing.
  enum SynthesisKind {
    /// We are instantiating a template declaration. The entity is
    /// the declaration we're instantiating (e.g., a CXXRecordDecl).
TemplateInstantiation, /// We are instantiating a default argument for a template /// parameter. The Entity is the template parameter whose argument is /// being instantiated, the Template is the template, and the /// TemplateArgs/NumTemplateArguments provide the template arguments as /// specified. DefaultTemplateArgumentInstantiation, /// We are instantiating a default argument for a function. /// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs /// provides the template arguments as specified. DefaultFunctionArgumentInstantiation, /// We are substituting explicit template arguments provided for /// a function template. The entity is a FunctionTemplateDecl. ExplicitTemplateArgumentSubstitution, /// We are substituting template argument determined as part of /// template argument deduction for either a class template /// partial specialization or a function template. The /// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or /// a TemplateDecl. DeducedTemplateArgumentSubstitution, /// We are substituting prior template arguments into a new /// template parameter. The template parameter itself is either a /// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl. PriorTemplateArgumentSubstitution, /// We are checking the validity of a default template argument that /// has been used when naming a template-id. DefaultTemplateArgumentChecking, /// We are computing the exception specification for a defaulted special /// member function. ExceptionSpecEvaluation, /// We are instantiating the exception specification for a function /// template which was deferred until it was needed. ExceptionSpecInstantiation, /// We are declaring an implicit special member function. DeclaringSpecialMember, /// We are defining a synthesized function (such as a defaulted special /// member). DefiningSynthesizedFunction, /// Added for Template instantiation observation. 
/// Memoization means we are _not_ instantiating a template because /// it is already instantiated (but we entered a context where we /// would have had to if it was not already instantiated). Memoization } Kind; /// Was the enclosing context a non-instantiation SFINAE context? bool SavedInNonInstantiationSFINAEContext; /// The point of instantiation or synthesis within the source code. SourceLocation PointOfInstantiation; /// The entity that is being synthesized. Decl *Entity; /// The template (or partial specialization) in which we are /// performing the instantiation, for substitutions of prior template /// arguments. NamedDecl *Template; /// The list of template arguments we are substituting, if they /// are not part of the entity. const TemplateArgument *TemplateArgs; // FIXME: Wrap this union around more members, or perhaps store the // kind-specific members in the RAII object owning the context. union { /// The number of template arguments in TemplateArgs. unsigned NumTemplateArgs; /// The special member being declared or defined. CXXSpecialMember SpecialMember; }; ArrayRef<TemplateArgument> template_arguments() const { assert(Kind != DeclaringSpecialMember); return {TemplateArgs, NumTemplateArgs}; } /// The template deduction info object associated with the /// substitution or checking of explicit or deduced template arguments. sema::TemplateDeductionInfo *DeductionInfo; /// The source range that covers the construct that cause /// the instantiation, e.g., the template-id that causes a class /// template instantiation. SourceRange InstantiationRange; CodeSynthesisContext() : Kind(TemplateInstantiation), SavedInNonInstantiationSFINAEContext(false), Entity(nullptr), Template(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0), DeductionInfo(nullptr) {} /// Determines whether this template is an actual instantiation /// that should be counted toward the maximum instantiation depth. 
bool isInstantiationRecord() const; }; /// List of active code synthesis contexts. /// /// This vector is treated as a stack. As synthesis of one entity requires /// synthesis of another, additional contexts are pushed onto the stack. SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts; /// Specializations whose definitions are currently being instantiated. llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations; /// Non-dependent types used in templates that have already been instantiated /// by some template instantiation. llvm::DenseSet<QualType> InstantiatedNonDependentTypes; /// Extra modules inspected when performing a lookup during a template /// instantiation. Computed lazily. SmallVector<Module*, 16> CodeSynthesisContextLookupModules; /// Cache of additional modules that should be used for name lookup /// within the current template instantiation. Computed lazily; use /// getLookupModules() to get a complete set. llvm::DenseSet<Module*> LookupModulesCache; /// Get the set of additional modules that should be checked during /// name lookup. A module and its imports become visible when instanting a /// template defined within it. llvm::DenseSet<Module*> &getLookupModules(); /// Map from the most recent declaration of a namespace to the most /// recent visible declaration of that namespace. llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache; /// Whether we are in a SFINAE context that is not associated with /// template instantiation. /// /// This is used when setting up a SFINAE trap (\c see SFINAETrap) outside /// of a template instantiation or template argument deduction. bool InNonInstantiationSFINAEContext; /// The number of \p CodeSynthesisContexts that are not template /// instantiations and, therefore, should not be counted as part of the /// instantiation depth. /// /// When the instantiation depth reaches the user-configurable limit /// \p LangOptions::InstantiationDepth we will abort instantiation. 
// FIXME: Should we have a similar limit for other forms of synthesis? unsigned NonInstantiationEntries; /// The depth of the context stack at the point when the most recent /// error or warning was produced. /// /// This value is used to suppress printing of redundant context stacks /// when there are multiple errors or warnings in the same instantiation. // FIXME: Does this belong in Sema? It's tough to implement it anywhere else. unsigned LastEmittedCodeSynthesisContextDepth = 0; /// The template instantiation callbacks to trace or track /// instantiations (objects can be chained). /// /// This callbacks is used to print, trace or track template /// instantiations as they are being constructed. std::vector<std::unique_ptr<TemplateInstantiationCallback>> TemplateInstCallbacks; /// The current index into pack expansion arguments that will be /// used for substitution of parameter packs. /// /// The pack expansion index will be -1 to indicate that parameter packs /// should be instantiated as themselves. Otherwise, the index specifies /// which argument within the parameter pack will be used for substitution. int ArgumentPackSubstitutionIndex; /// RAII object used to change the argument pack substitution index /// within a \c Sema object. /// /// See \c ArgumentPackSubstitutionIndex for more information. class ArgumentPackSubstitutionIndexRAII { Sema &Self; int OldSubstitutionIndex; public: ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex) : Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) { Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex; } ~ArgumentPackSubstitutionIndexRAII() { Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex; } }; friend class ArgumentPackSubstitutionRAII; /// For each declaration that involved template argument deduction, the /// set of diagnostics that were suppressed during that template argument /// deduction. /// /// FIXME: Serialize this structure to the AST file. 
typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> > SuppressedDiagnosticsMap; SuppressedDiagnosticsMap SuppressedDiagnostics; /// A stack object to be created when performing template /// instantiation. /// /// Construction of an object of type \c InstantiatingTemplate /// pushes the current instantiation onto the stack of active /// instantiations. If the size of this stack exceeds the maximum /// number of recursive template instantiations, construction /// produces an error and evaluates true. /// /// Destruction of this object will pop the named instantiation off /// the stack. struct InstantiatingTemplate { /// Note that we are instantiating a class template, /// function template, variable template, alias template, /// or a member thereof. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, Decl *Entity, SourceRange InstantiationRange = SourceRange()); struct ExceptionSpecification {}; /// Note that we are instantiating an exception specification /// of a function template. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionDecl *Entity, ExceptionSpecification, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating a default argument in a /// template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateParameter Param, TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// Note that we are substituting either explicitly-specified or /// deduced template arguments during function template argument deduction. 
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionTemplateDecl *FunctionTemplate, ArrayRef<TemplateArgument> TemplateArgs, CodeSynthesisContext::SynthesisKind Kind, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a class template declaration. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a class template partial /// specialization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ClassTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a variable template partial /// specialization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, VarTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating a default argument for a function /// parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ParmVarDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// Note that we are substituting prior template arguments into a /// non-type parameter. 
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, NonTypeTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we are substituting prior template arguments into a /// template template parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, TemplateTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we are checking the default template argument /// against the template parameter for a given template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, NamedDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we have finished instantiating this template. void Clear(); ~InstantiatingTemplate() { Clear(); } /// Determines whether we have exceeded the maximum /// recursive template instantiations. bool isInvalid() const { return Invalid; } /// Determine whether we are already instantiating this /// specialization in some surrounding active instantiation. 
bool isAlreadyInstantiating() const { return AlreadyInstantiating; } private: Sema &SemaRef; bool Invalid; bool AlreadyInstantiating; bool CheckInstantiationDepth(SourceLocation PointOfInstantiation, SourceRange InstantiationRange); InstantiatingTemplate( Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind, SourceLocation PointOfInstantiation, SourceRange InstantiationRange, Decl *Entity, NamedDecl *Template = nullptr, ArrayRef<TemplateArgument> TemplateArgs = None, sema::TemplateDeductionInfo *DeductionInfo = nullptr); InstantiatingTemplate(const InstantiatingTemplate&) = delete; InstantiatingTemplate& operator=(const InstantiatingTemplate&) = delete; }; void pushCodeSynthesisContext(CodeSynthesisContext Ctx); void popCodeSynthesisContext(); /// Determine whether we are currently performing template instantiation. bool inTemplateInstantiation() const { return CodeSynthesisContexts.size() > NonInstantiationEntries; } void PrintContextStack() { if (!CodeSynthesisContexts.empty() && CodeSynthesisContexts.size() != LastEmittedCodeSynthesisContextDepth) { PrintInstantiationStack(); LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size(); } if (PragmaAttributeCurrentTargetDecl) PrintPragmaAttributeInstantiationPoint(); } void PrintInstantiationStack(); void PrintPragmaAttributeInstantiationPoint(); /// Determines whether we are currently in a context where /// template argument substitution failures are not considered /// errors. /// /// \returns An empty \c Optional if we're not in a SFINAE context. /// Otherwise, contains a pointer that, if non-NULL, contains the nearest /// template-deduction context object, which can be used to capture /// diagnostics that will be suppressed. Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const; /// Determines whether we are currently in a context that /// is not evaluated as per C++ [expr] p5. 
bool isUnevaluatedContext() const {
  assert(!ExprEvalContexts.empty() &&
         "Must be in an expression evaluation context");
  return ExprEvalContexts.back().isUnevaluated();
}

/// RAII class used to determine whether SFINAE has
/// trapped any errors that occur during template argument
/// deduction.
///
/// The constructor snapshots the SFINAE-related diagnostic state on the
/// Sema it wraps; the destructor restores that snapshot, so errors that
/// occur inside the trap's lifetime are discarded rather than emitted.
class SFINAETrap {
  Sema &SemaRef;
  unsigned PrevSFINAEErrors;
  bool PrevInNonInstantiationSFINAEContext;
  bool PrevAccessCheckingSFINAE;
  bool PrevLastDiagnosticIgnored;

public:
  explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false)
      : SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors),
        PrevInNonInstantiationSFINAEContext(
            SemaRef.InNonInstantiationSFINAEContext),
        PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE),
        PrevLastDiagnosticIgnored(
            SemaRef.getDiagnostics().isLastDiagnosticIgnored()) {
    // If we are not already inside a SFINAE context, flag that this trap
    // was opened outside of any template instantiation.
    if (!SemaRef.isSFINAEContext())
      SemaRef.InNonInstantiationSFINAEContext = true;
    SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE;
  }

  ~SFINAETrap() {
    // Restore every piece of state captured by the constructor.
    SemaRef.NumSFINAEErrors = PrevSFINAEErrors;
    SemaRef.InNonInstantiationSFINAEContext
      = PrevInNonInstantiationSFINAEContext;
    SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE;
    SemaRef.getDiagnostics().setLastDiagnosticIgnored(
        PrevLastDiagnosticIgnored);
  }

  /// Determine whether any SFINAE errors have been trapped.
  bool hasErrorOccurred() const {
    return SemaRef.NumSFINAEErrors > PrevSFINAEErrors;
  }
};

/// RAII class used to indicate that we are performing provisional
/// semantic analysis to determine the validity of a construct, so
/// typo-correction and diagnostics in the immediate context (not within
/// implicitly-instantiated templates) should be suppressed.
class TentativeAnalysisScope {
  Sema &SemaRef;
  // FIXME: Using a SFINAETrap for this is a hack.
SFINAETrap Trap; bool PrevDisableTypoCorrection; public: explicit TentativeAnalysisScope(Sema &SemaRef) : SemaRef(SemaRef), Trap(SemaRef, true), PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) { SemaRef.DisableTypoCorrection = true; } ~TentativeAnalysisScope() { SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection; } }; /// The current instantiation scope used to store local /// variables. LocalInstantiationScope *CurrentInstantiationScope; /// Tracks whether we are in a context where typo correction is /// disabled. bool DisableTypoCorrection; /// The number of typos corrected by CorrectTypo. unsigned TyposCorrected; typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet; typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations; /// A cache containing identifiers for which typo correction failed and /// their locations, so that repeated attempts to correct an identifier in a /// given location are ignored if typo correction already failed for it. IdentifierSourceLocations TypoCorrectionFailures; /// Worker object for performing CFG-based warnings. sema::AnalysisBasedWarnings AnalysisWarnings; threadSafety::BeforeSet *ThreadSafetyDeclCache; /// An entity for which implicit template instantiation is required. /// /// The source location associated with the declaration is the first place in /// the source code where the declaration was "used". It is not necessarily /// the point of instantiation (which will be either before or after the /// namespace-scope declaration that triggered this implicit instantiation), /// However, it is the location that diagnostics should generally refer to, /// because users will need to know what code triggered the instantiation. typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation; /// The queue of implicit template instantiations that are required /// but have not yet been performed. 
std::deque<PendingImplicitInstantiation> PendingInstantiations;

/// Queue of implicit template instantiations that cannot be performed
/// eagerly.
SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations;

/// RAII scope that isolates a region of eager instantiation.
///
/// When enabled, the constructor swaps the global PendingInstantiations
/// and VTableUses queues out into local storage; perform() processes
/// whatever accumulated inside the scope; the destructor asserts the
/// scope's queues were drained and swaps the saved queues back in.
class GlobalEagerInstantiationScope {
public:
  GlobalEagerInstantiationScope(Sema &S, bool Enabled)
      : S(S), Enabled(Enabled) {
    if (!Enabled) return;

    SavedPendingInstantiations.swap(S.PendingInstantiations);
    SavedVTableUses.swap(S.VTableUses);
  }

  void perform() {
    if (Enabled) {
      S.DefineUsedVTables();
      S.PerformPendingInstantiations();
    }
  }

  ~GlobalEagerInstantiationScope() {
    if (!Enabled) return;

    // Restore the set of pending vtables.
    assert(S.VTableUses.empty() &&
           "VTableUses should be empty before it is discarded.");
    S.VTableUses.swap(SavedVTableUses);

    // Restore the set of pending implicit instantiations.
    assert(S.PendingInstantiations.empty() &&
           "PendingInstantiations should be empty before it is discarded.");
    S.PendingInstantiations.swap(SavedPendingInstantiations);
  }

private:
  Sema &S;
  SmallVector<VTableUse, 16> SavedVTableUses;
  std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
  bool Enabled;
};

/// The queue of implicit template instantiations that are required
/// and must be performed within the current local scope.
///
/// This queue is only used for member functions of local classes in
/// templates, which must be instantiated in the same scope as their
/// enclosing function, so that they can reference function-local
/// types, static variables, enumerators, etc.
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations;

/// RAII scope that isolates local (function-scope) pending
/// instantiations: the constructor swaps the queue out, perform()
/// runs local-only instantiation, and the destructor asserts the queue
/// was drained before swapping the saved queue back in.
class LocalEagerInstantiationScope {
public:
  LocalEagerInstantiationScope(Sema &S) : S(S) {
    SavedPendingLocalImplicitInstantiations.swap(
        S.PendingLocalImplicitInstantiations);
  }

  void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); }

  ~LocalEagerInstantiationScope() {
    assert(S.PendingLocalImplicitInstantiations.empty() &&
           "there shouldn't be any pending local implicit instantiations");
    SavedPendingLocalImplicitInstantiations.swap(
        S.PendingLocalImplicitInstantiations);
  }

private:
  Sema &S;
  std::deque<PendingImplicitInstantiation>
      SavedPendingLocalImplicitInstantiations;
};

/// A helper class for building up ExtParameterInfos.
class ExtParameterInfoBuilder {
  SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos;
  bool HasInteresting = false;

public:
  /// Set the ExtParameterInfo for the parameter at the given index.
  ///
  /// Indices must be set in non-decreasing order; any skipped indices
  /// are filled with default-constructed ExtParameterInfo.
  void set(unsigned index, FunctionProtoType::ExtParameterInfo info) {
    assert(Infos.size() <= index);
    Infos.resize(index);
    Infos.push_back(info);

    if (!HasInteresting)
      HasInteresting = (info != FunctionProtoType::ExtParameterInfo());
  }

  /// Return a pointer (suitable for setting in an ExtProtoInfo) to the
  /// ExtParameterInfo array we've built up.
const FunctionProtoType::ExtParameterInfo * getPointerOrNull(unsigned numParams) { if (!HasInteresting) return nullptr; Infos.resize(numParams); return Infos.data(); } }; void PerformPendingInstantiations(bool LocalOnly = false); TypeSourceInfo *SubstType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, bool AllowDeducedTST = false); QualType SubstType(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstType(TypeLoc TL, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, CXXRecordDecl *ThisContext, Qualifiers ThisTypeQuals); void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto, const MultiLevelTemplateArgumentList &Args); bool SubstExceptionSpec(SourceLocation Loc, FunctionProtoType::ExceptionSpecInfo &ESI, SmallVectorImpl<QualType> &ExceptionStorage, const MultiLevelTemplateArgumentList &Args); ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, int indexAdjustment, Optional<unsigned> NumExpansions, bool ExpectParameterPack); bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params, const FunctionProtoType::ExtParameterInfo *ExtParamInfos, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<QualType> &ParamTypes, SmallVectorImpl<ParmVarDecl *> *OutParams, ExtParameterInfoBuilder &ParamInfos); ExprResult SubstExpr(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs); /// Substitute the given template arguments into a list of /// expressions, expanding pack expansions if required. /// /// \param Exprs The list of expressions to substitute into. 
/// /// \param IsCall Whether this is some form of call, in which case /// default arguments will be dropped. /// /// \param TemplateArgs The set of template arguments to substitute. /// /// \param Outputs Will receive all of the substituted arguments. /// /// \returns true if an error occurred, false otherwise. bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<Expr *> &Outputs); StmtResult SubstStmt(Stmt *S, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateParameterList * SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); Decl *SubstDecl(Decl *D, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); ExprResult SubstInitializer(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs, bool CXXDirectInit); bool SubstBaseSpecifiers(CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); bool InstantiateClass(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK, bool Complain = true); bool InstantiateEnum(SourceLocation PointOfInstantiation, EnumDecl *Instantiation, EnumDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); bool InstantiateInClassInitializer( SourceLocation PointOfInstantiation, FieldDecl *Instantiation, FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); struct LateInstantiatedAttribute { const Attr *TmplAttr; LocalInstantiationScope *Scope; Decl *NewDecl; LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S, Decl *D) : TmplAttr(A), Scope(S), NewDecl(D) { } }; typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec; void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, 
Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); void InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); bool usesPartialOrExplicitSpecialization( SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec); bool InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK, bool Complain = true); void InstantiateClassMembers(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); void InstantiateClassTemplateSpecializationMembers( SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK); NestedNameSpecifierLoc SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS, const MultiLevelTemplateArgumentList &TemplateArgs); DeclarationNameInfo SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateName SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name, SourceLocation Loc, const MultiLevelTemplateArgumentList &TemplateArgs); bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs, TemplateArgumentListInfo &Result, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateExceptionSpec(SourceLocation PointOfInstantiation, FunctionDecl *Function); FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD, const TemplateArgumentList *Args, SourceLocation Loc); void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation, FunctionDecl *Function, bool Recursive = false, bool DefinitionRequired = false, bool AtEndOfTU = false); VarTemplateSpecializationDecl 
*BuildVarTemplateInstantiation( VarTemplateDecl *VarTemplate, VarDecl *FromVar, const TemplateArgumentList &TemplateArgList, const TemplateArgumentListInfo &TemplateArgsInfo, SmallVectorImpl<TemplateArgument> &Converted, SourceLocation PointOfInstantiation, void *InsertPos, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *StartingScope = nullptr); VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl( VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl, const MultiLevelTemplateArgumentList &TemplateArgs); void BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs, LateInstantiatedAttrVec *LateAttrs, DeclContext *Owner, LocalInstantiationScope *StartingScope, bool InstantiatingVarTemplate = false, VarTemplateSpecializationDecl *PrevVTSD = nullptr); VarDecl *getVarTemplateSpecialization( VarTemplateDecl *VarTempl, const TemplateArgumentListInfo *TemplateArgs, const DeclarationNameInfo &MemberNameInfo, SourceLocation TemplateKWLoc); void InstantiateVariableInitializer( VarDecl *Var, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateVariableDefinition(SourceLocation PointOfInstantiation, VarDecl *Var, bool Recursive = false, bool DefinitionRequired = false, bool AtEndOfTU = false); void InstantiateMemInitializers(CXXConstructorDecl *New, const CXXConstructorDecl *Tmpl, const MultiLevelTemplateArgumentList &TemplateArgs); NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, bool FindingInstantiatedContext = false); DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC, const MultiLevelTemplateArgumentList &TemplateArgs); // Objective-C declarations. 
// Kind of the Objective-C container currently being parsed, if any.
enum ObjCContainerKind {
  OCK_None = -1,
  OCK_Interface = 0,
  OCK_Protocol,
  OCK_Category,
  OCK_ClassExtension,
  OCK_Implementation,
  OCK_CategoryImplementation
};
ObjCContainerKind getObjCContainerKind() const;

// Objective-C type parameter lists (lightweight generics).
DeclResult actOnObjCTypeParam(Scope *S, ObjCTypeParamVariance variance,
                              SourceLocation varianceLoc, unsigned index,
                              IdentifierInfo *paramName,
                              SourceLocation paramLoc,
                              SourceLocation colonLoc, ParsedType typeBound);

ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc,
                                          ArrayRef<Decl *> typeParams,
                                          SourceLocation rAngleLoc);

void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList);

// Parser action entry points for @interface / @protocol / @implementation.
Decl *ActOnStartClassInterface(
    Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
    SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
    IdentifierInfo *SuperName, SourceLocation SuperLoc,
    ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange,
    Decl *const *ProtoRefs, unsigned NumProtoRefs,
    const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
    const ParsedAttributesView &AttrList);

void ActOnSuperClassOfClassInterface(
    Scope *S, SourceLocation AtInterfaceLoc, ObjCInterfaceDecl *IDecl,
    IdentifierInfo *ClassName, SourceLocation ClassLoc,
    IdentifierInfo *SuperName, SourceLocation SuperLoc,
    ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange);

void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs,
                             SmallVectorImpl<SourceLocation> &ProtocolLocs,
                             IdentifierInfo *SuperName,
                             SourceLocation SuperLoc);

Decl *ActOnCompatibilityAlias(SourceLocation AtCompatibilityAliasLoc,
                              IdentifierInfo *AliasName,
                              SourceLocation AliasLocation,
                              IdentifierInfo *ClassName,
                              SourceLocation ClassLocation);

bool CheckForwardProtocolDeclarationForCircularDependency(
    IdentifierInfo *PName, SourceLocation &PLoc, SourceLocation PrevLoc,
    const ObjCList<ObjCProtocolDecl> &PList);

Decl *ActOnStartProtocolInterface(
    SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName,
    SourceLocation ProtocolLoc, Decl *const *ProtoRefNames,
    unsigned NumProtoRefs, const SourceLocation *ProtoLocs,
    SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList);

Decl *ActOnStartCategoryInterface(
    SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
    SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
    IdentifierInfo *CategoryName, SourceLocation CategoryLoc,
    Decl *const *ProtoRefs, unsigned NumProtoRefs,
    const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
    const ParsedAttributesView &AttrList);

Decl *ActOnStartClassImplementation(SourceLocation AtClassImplLoc,
                                    IdentifierInfo *ClassName,
                                    SourceLocation ClassLoc,
                                    IdentifierInfo *SuperClassname,
                                    SourceLocation SuperClassLoc,
                                    const ParsedAttributesView &AttrList);

Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc,
                                       IdentifierInfo *ClassName,
                                       SourceLocation ClassLoc,
                                       IdentifierInfo *CatName,
                                       SourceLocation CatLoc,
                                       const ParsedAttributesView &AttrList);

DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl,
                                             ArrayRef<Decl *> Decls);

// Forward declarations: @class / @protocol lists.
DeclGroupPtrTy
ActOnForwardClassDeclaration(SourceLocation Loc, IdentifierInfo **IdentList,
                             SourceLocation *IdentLocs,
                             ArrayRef<ObjCTypeParamList *> TypeParamLists,
                             unsigned NumElts);

DeclGroupPtrTy
ActOnForwardProtocolDeclaration(SourceLocation AtProtoclLoc,
                                ArrayRef<IdentifierLocPair> IdentList,
                                const ParsedAttributesView &attrList);

void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer,
                             ArrayRef<IdentifierLocPair> ProtocolId,
                             SmallVectorImpl<Decl *> &Protocols);

void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId,
                                  SourceLocation ProtocolLoc,
                                  IdentifierInfo *TypeArgId,
                                  SourceLocation TypeArgLoc,
                                  bool SelectProtocolFirst = false);

/// Given a list of identifiers (and their locations), resolve the
/// names to either Objective-C protocol qualifiers or type
/// arguments, as appropriate.
void actOnObjCTypeArgsOrProtocolQualifiers( Scope *S, ParsedType baseType, SourceLocation lAngleLoc, ArrayRef<IdentifierInfo *> identifiers, ArrayRef<SourceLocation> identifierLocs, SourceLocation rAngleLoc, SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs, SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc, SmallVectorImpl<Decl *> &protocols, SourceLocation &protocolRAngleLoc, bool warnOnIncompleteProtocols); /// Build a an Objective-C protocol-qualified 'id' type where no /// base type was specified. TypeResult actOnObjCProtocolQualifierType( SourceLocation lAngleLoc, ArrayRef<Decl *> protocols, ArrayRef<SourceLocation> protocolLocs, SourceLocation rAngleLoc); /// Build a specialized and/or protocol-qualified Objective-C type. TypeResult actOnObjCTypeArgsAndProtocolQualifiers( Scope *S, SourceLocation Loc, ParsedType BaseType, SourceLocation TypeArgsLAngleLoc, ArrayRef<ParsedType> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<Decl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc); /// Build an Objective-C type parameter type. QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl, SourceLocation ProtocolLAngleLoc, ArrayRef<ObjCProtocolDecl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc, bool FailOnError = false); /// Build an Objective-C object pointer type. QualType BuildObjCObjectType(QualType BaseType, SourceLocation Loc, SourceLocation TypeArgsLAngleLoc, ArrayRef<TypeSourceInfo *> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<ObjCProtocolDecl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc, bool FailOnError = false); /// Ensure attributes are consistent with type. /// \param [in, out] Attributes The attributes to check; they will /// be modified to be consistent with \p PropertyTy. 
void CheckObjCPropertyAttributes(Decl *PropertyPtrTy, SourceLocation Loc,
                                 unsigned &Attributes,
                                 bool propertyInPrimaryClass);

/// Process the specified property declaration and create decls for the
/// setters and getters as needed.
/// \param property The property declaration being processed
void ProcessPropertyDecl(ObjCPropertyDecl *property);

void DiagnosePropertyMismatch(ObjCPropertyDecl *Property,
                              ObjCPropertyDecl *SuperProperty,
                              const IdentifierInfo *Name,
                              bool OverridingProtocolProperty);

void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT,
                                      ObjCInterfaceDecl *ID);

Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd,
                 ArrayRef<Decl *> allMethods = None,
                 ArrayRef<DeclGroupPtrTy> allTUVars = None);

Decl *ActOnProperty(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc,
                    FieldDeclarator &FD, ObjCDeclSpec &ODS,
                    Selector GetterSel, Selector SetterSel,
                    tok::ObjCKeywordKind MethodImplKind,
                    DeclContext *lexicalDC = nullptr);

Decl *ActOnPropertyImplDecl(Scope *S, SourceLocation AtLoc,
                            SourceLocation PropertyLoc, bool ImplKind,
                            IdentifierInfo *PropertyId,
                            IdentifierInfo *PropertyIvar,
                            SourceLocation PropertyIvarLoc,
                            ObjCPropertyQueryKind QueryKind);

// Categories of methods with special retain/release-related semantics.
enum ObjCSpecialMethodKind {
  OSMK_None,
  OSMK_Alloc,
  OSMK_New,
  OSMK_Copy,
  OSMK_RetainingInit,
  OSMK_NonRetainingInit
};

// Per-argument information collected by the parser for a method declaration.
struct ObjCArgInfo {
  IdentifierInfo *Name;
  SourceLocation NameLoc;
  // The Type is null if no type was specified, and the DeclSpec is invalid
  // in this case.
  ParsedType Type;
  ObjCDeclSpec DeclSpec;

  /// ArgAttrs - Attribute list for this argument.
  ParsedAttributesView ArgAttrs;
};

Decl *ActOnMethodDeclaration(
    Scope *S,
    SourceLocation BeginLoc, // location of the + or -.
    SourceLocation EndLoc,   // location of the ; or {.
    tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType,
    ArrayRef<SourceLocation> SelectorLocs, Selector Sel,
    // optional arguments. The number of types/arguments is obtained
    // from the Sel.getNumArgs().
    ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo,
    unsigned CNumArgs, // c-style args
    const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind,
    bool isVariadic, bool MethodDefinition);

ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel,
                                            const ObjCObjectPointerType *OPT,
                                            bool IsInstance);
ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty,
                                         bool IsInstance);

bool CheckARCMethodDecl(ObjCMethodDecl *method);
bool inferObjCARCLifetime(ValueDecl *decl);

ExprResult HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
                                     Expr *BaseExpr, SourceLocation OpLoc,
                                     DeclarationName MemberName,
                                     SourceLocation MemberLoc,
                                     SourceLocation SuperLoc,
                                     QualType SuperType, bool Super);

ExprResult ActOnClassPropertyRefExpr(IdentifierInfo &receiverName,
                                     IdentifierInfo &propertyName,
                                     SourceLocation receiverNameLoc,
                                     SourceLocation propertyNameLoc);

ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc);

/// Describes the kind of message expression indicated by a message
/// send that starts with an identifier.
enum ObjCMessageKind {
  /// The message is sent to 'super'.
  ObjCSuperMessage,
  /// The message is an instance message.
  ObjCInstanceMessage,
  /// The message is a class message, and the identifier is a type
  /// name.
  ObjCClassMessage
};

ObjCMessageKind getObjCMessageKind(Scope *S, IdentifierInfo *Name,
                                   SourceLocation NameLoc, bool IsSuper,
                                   bool HasTrailingDot,
                                   ParsedType &ReceiverType);

// Message-send expression building: ActOn* entry points come from the
// parser; Build* overloads construct the AST (isImplicit marks
// compiler-generated sends).
ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc, Selector Sel,
                             SourceLocation LBracLoc,
                             ArrayRef<SourceLocation> SelectorLocs,
                             SourceLocation RBracLoc, MultiExprArg Args);

ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
                             QualType ReceiverType, SourceLocation SuperLoc,
                             Selector Sel, ObjCMethodDecl *Method,
                             SourceLocation LBracLoc,
                             ArrayRef<SourceLocation> SelectorLocs,
                             SourceLocation RBracLoc, MultiExprArg Args,
                             bool isImplicit = false);

ExprResult BuildClassMessageImplicit(QualType ReceiverType,
                                     bool isSuperReceiver, SourceLocation Loc,
                                     Selector Sel, ObjCMethodDecl *Method,
                                     MultiExprArg Args);

ExprResult ActOnClassMessage(Scope *S, ParsedType Receiver, Selector Sel,
                             SourceLocation LBracLoc,
                             ArrayRef<SourceLocation> SelectorLocs,
                             SourceLocation RBracLoc, MultiExprArg Args);

ExprResult BuildInstanceMessage(Expr *Receiver, QualType ReceiverType,
                                SourceLocation SuperLoc, Selector Sel,
                                ObjCMethodDecl *Method,
                                SourceLocation LBracLoc,
                                ArrayRef<SourceLocation> SelectorLocs,
                                SourceLocation RBracLoc, MultiExprArg Args,
                                bool isImplicit = false);

ExprResult BuildInstanceMessageImplicit(Expr *Receiver, QualType ReceiverType,
                                        SourceLocation Loc, Selector Sel,
                                        ObjCMethodDecl *Method,
                                        MultiExprArg Args);

ExprResult ActOnInstanceMessage(Scope *S, Expr *Receiver, Selector Sel,
                                SourceLocation LBracLoc,
                                ArrayRef<SourceLocation> SelectorLocs,
                                SourceLocation RBracLoc, MultiExprArg Args);

ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc,
                                ObjCBridgeCastKind Kind,
                                SourceLocation BridgeKeywordLoc,
                                TypeSourceInfo *TSInfo, Expr *SubExpr);

ExprResult ActOnObjCBridgedCast(Scope *S, SourceLocation LParenLoc,
                                ObjCBridgeCastKind Kind,
                                SourceLocation BridgeKeywordLoc,
                                ParsedType Type, SourceLocation RParenLoc,
                                Expr *SubExpr);

void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr);
void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr); bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr, CastKind &Kind); bool checkObjCBridgeRelatedComponents(SourceLocation Loc, QualType DestType, QualType SrcType, ObjCInterfaceDecl *&RelatedClass, ObjCMethodDecl *&ClassMethod, ObjCMethodDecl *&InstanceMethod, TypedefNameDecl *&TDNDecl, bool CfToNs, bool Diagnose = true); bool CheckObjCBridgeRelatedConversions(SourceLocation Loc, QualType DestType, QualType SrcType, Expr *&SrcExpr, bool Diagnose = true); bool ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&SrcExpr, bool Diagnose = true); bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall); /// Check whether the given new method is a valid override of the /// given overridden method, and set any properties that should be inherited. void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod, const ObjCMethodDecl *Overridden); /// Describes the compatibility of a result type with its method. enum ResultTypeCompatibilityKind { RTC_Compatible, RTC_Incompatible, RTC_Unknown }; void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod, ObjCInterfaceDecl *CurrentClass, ResultTypeCompatibilityKind RTC); enum PragmaOptionsAlignKind { POAK_Native, // #pragma options align=native POAK_Natural, // #pragma options align=natural POAK_Packed, // #pragma options align=packed POAK_Power, // #pragma options align=power POAK_Mac68k, // #pragma options align=mac68k POAK_Reset // #pragma options align=reset }; /// ActOnPragmaClangSection - Called on well formed \#pragma clang section void ActOnPragmaClangSection(SourceLocation PragmaLoc, PragmaClangSectionAction Action, PragmaClangSectionKind SecKind, StringRef SecName); /// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align. void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind, SourceLocation PragmaLoc); /// ActOnPragmaPack - Called on well formed \#pragma pack(...). 
void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action,
                     StringRef SlotLabel, Expr *Alignment);

// Situations in which a non-default #pragma pack state is diagnosed.
enum class PragmaPackDiagnoseKind {
  NonDefaultStateAtInclude,
  ChangedStateAtExit
};

void DiagnoseNonDefaultPragmaPack(PragmaPackDiagnoseKind Kind,
                                  SourceLocation IncludeLoc);

void DiagnoseUnterminatedPragmaPack();

/// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off].
void ActOnPragmaMSStruct(PragmaMSStructKind Kind);

/// ActOnPragmaMSComment - Called on well formed
/// \#pragma comment(kind, "arg").
void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind,
                          StringRef Arg);

/// ActOnPragmaMSPointersToMembers - called on well formed \#pragma
/// pointers_to_members(representation method[, general purpose
/// representation]).
void ActOnPragmaMSPointersToMembers(
    LangOptions::PragmaMSPointersToMembersKind Kind,
    SourceLocation PragmaLoc);

/// Called on well formed \#pragma vtordisp().
void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action,
                           SourceLocation PragmaLoc,
                           MSVtorDispAttr::Mode Value);

enum PragmaSectionKind {
  PSK_DataSeg,
  PSK_BSSSeg,
  PSK_ConstSeg,
  PSK_CodeSeg,
};

bool UnifySection(StringRef SectionName, int SectionFlags,
                  DeclaratorDecl *TheDecl);
bool UnifySection(StringRef SectionName, int SectionFlags,
                  SourceLocation PragmaSectionLocation);

/// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg.
void ActOnPragmaMSSeg(SourceLocation PragmaLocation,
                      PragmaMsStackAction Action,
                      llvm::StringRef StackSlotLabel,
                      StringLiteral *SegmentName, llvm::StringRef PragmaName);

/// Called on well formed \#pragma section().
void ActOnPragmaMSSection(SourceLocation PragmaLocation, int SectionFlags,
                          StringLiteral *SegmentName);

/// Called on well-formed \#pragma init_seg().
void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation,
                          StringLiteral *SegmentName);

/// Called on #pragma clang __debug dump II
void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II);

/// ActOnPragmaDetectMismatch - Call on well-formed \#pragma detect_mismatch
void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name,
                               StringRef Value);

/// ActOnPragmaUnused - Called on well-formed '\#pragma unused'.
void ActOnPragmaUnused(const Token &Identifier, Scope *curScope,
                       SourceLocation PragmaLoc);

/// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... .
void ActOnPragmaVisibility(const IdentifierInfo* VisType,
                           SourceLocation PragmaLoc);

NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
                               SourceLocation Loc);
void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W);

/// ActOnPragmaWeakID - Called on well formed \#pragma weak ident.
void ActOnPragmaWeakID(IdentifierInfo* WeakName, SourceLocation PragmaLoc,
                       SourceLocation WeakNameLoc);

/// ActOnPragmaRedefineExtname - Called on well formed
/// \#pragma redefine_extname oldname newname.
void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName,
                                IdentifierInfo* AliasName,
                                SourceLocation PragmaLoc,
                                SourceLocation WeakNameLoc,
                                SourceLocation AliasNameLoc);

/// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident.
void ActOnPragmaWeakAlias(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc); /// ActOnPragmaFPContract - Called on well formed /// \#pragma {STDC,OPENCL} FP_CONTRACT and /// \#pragma clang fp contract void ActOnPragmaFPContract(LangOptions::FPContractModeKind FPC); /// ActOnPragmaFenvAccess - Called on well formed /// \#pragma STDC FENV_ACCESS void ActOnPragmaFEnvAccess(LangOptions::FEnvAccessModeKind FPC); /// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to /// a the record decl, to handle '\#pragma pack' and '\#pragma options align'. void AddAlignmentAttributesForRecord(RecordDecl *RD); /// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record. void AddMsStructLayoutForRecord(RecordDecl *RD); /// FreePackedContext - Deallocate and null out PackContext. void FreePackedContext(); /// PushNamespaceVisibilityAttr - Note that we've entered a /// namespace with a visibility attribute. void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr, SourceLocation Loc); /// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used, /// add an appropriate visibility attribute. void AddPushedVisibilityAttribute(Decl *RD); /// PopPragmaVisibility - Pop the top element of the visibility stack; used /// for '\#pragma GCC visibility' and visibility attributes on namespaces. void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc); /// FreeVisContext - Deallocate and null out VisContext. void FreeVisContext(); /// AddCFAuditedAttribute - Check whether we're currently within /// '\#pragma clang arc_cf_code_audited' and, if so, consider adding /// the appropriate attribute. 
void AddCFAuditedAttribute(Decl *D);

// '#pragma clang attribute' handling.
void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute,
                                   SourceLocation PragmaLoc,
                                   attr::ParsedSubjectMatchRuleSet Rules);

void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc,
                                   const IdentifierInfo *Namespace);

/// Called on well-formed '\#pragma clang attribute pop'.
void ActOnPragmaAttributePop(SourceLocation PragmaLoc,
                             const IdentifierInfo *Namespace);

/// Adds the attributes that have been specified using the
/// '\#pragma clang attribute push' directives to the given declaration.
void AddPragmaAttributes(Scope *S, Decl *D);

void DiagnoseUnterminatedPragmaAttribute();

/// Called on well formed \#pragma clang optimize.
void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc);

/// Get the location for the currently active "\#pragma clang optimize
/// off". If this location is invalid, then the state of the pragma is "on".
SourceLocation getOptimizeOffPragmaLocation() const {
  return OptimizeOffPragmaLocation;
}

/// Only called on function definitions; if there is a pragma in scope
/// with the effect of a range-based optnone, consider marking the function
/// with attribute optnone.
void AddRangeBasedOptnone(FunctionDecl *FD);

/// Adds the 'optnone' attribute to the function declaration if there
/// are no conflicts; Loc represents the location causing the 'optnone'
/// attribute to be added (usually because of a pragma).
void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc);

/// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
void AddAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E,
                    unsigned SpellingListIndex, bool IsPackExpansion);
void AddAlignedAttr(SourceRange AttrRange, Decl *D, TypeSourceInfo *T,
                    unsigned SpellingListIndex, bool IsPackExpansion);

/// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular
/// declaration.
void AddAssumeAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E, Expr *OE,
                          unsigned SpellingListIndex);

/// AddAllocAlignAttr - Adds an alloc_align attribute to a particular
/// declaration.
void AddAllocAlignAttr(SourceRange AttrRange, Decl *D, Expr *ParamExpr,
                       unsigned SpellingListIndex);

/// AddAlignValueAttr - Adds an align_value attribute to a particular
/// declaration.
void AddAlignValueAttr(SourceRange AttrRange, Decl *D, Expr *E,
                       unsigned SpellingListIndex);

/// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular
/// declaration.
void AddLaunchBoundsAttr(SourceRange AttrRange, Decl *D, Expr *MaxThreads,
                         Expr *MinBlocks, unsigned SpellingListIndex);

/// AddModeAttr - Adds a mode attribute to a particular declaration.
void AddModeAttr(SourceRange AttrRange, Decl *D, IdentifierInfo *Name,
                 unsigned SpellingListIndex, bool InInstantiation = false);

void AddParameterABIAttr(SourceRange AttrRange, Decl *D, ParameterABI ABI,
                         unsigned SpellingListIndex);

// Ownership families for the *_consumed / *_returns_retained attributes.
enum class RetainOwnershipKind {NS, CF, OS};

void AddXConsumedAttr(Decl *D, SourceRange SR, unsigned SpellingIndex,
                      RetainOwnershipKind K, bool IsTemplateInstantiation);

/// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size
/// attribute to a particular declaration.
void addAMDGPUFlatWorkGroupSizeAttr(SourceRange AttrRange, Decl *D,
                                    Expr *Min, Expr *Max,
                                    unsigned SpellingListIndex);

/// addAMDGPUWavesPerEUAttr - Adds an amdgpu_waves_per_eu attribute to a
/// particular declaration.
void addAMDGPUWavesPerEUAttr(SourceRange AttrRange, Decl *D, Expr *Min, Expr *Max, unsigned SpellingListIndex); bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type); //===--------------------------------------------------------------------===// // C++ Coroutines TS // bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc, StringRef Keyword); ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E); ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E); StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E); ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E, bool IsImplicit = false); ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E, UnresolvedLookupExpr* Lookup); ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E); StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E, bool IsImplicit = false); StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs); bool buildCoroutineParameterMoves(SourceLocation Loc); VarDecl *buildCoroutinePromise(SourceLocation Loc); void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body); ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc, SourceLocation FuncLoc); //===--------------------------------------------------------------------===// // OpenCL extensions. // private: std::string CurrOpenCLExtension; /// Extensions required by an OpenCL type. llvm::DenseMap<const Type*, std::set<std::string>> OpenCLTypeExtMap; /// Extensions required by an OpenCL declaration. llvm::DenseMap<const Decl*, std::set<std::string>> OpenCLDeclExtMap; public: llvm::StringRef getCurrentOpenCLExtension() const { return CurrOpenCLExtension; } /// Check if a function declaration \p FD associates with any /// extensions present in OpenCLDeclExtMap and if so return the /// extension(s) name(s). 
std::string getOpenCLExtensionsFromDeclExtMap(FunctionDecl *FD);

/// Check if a function type \p FT associates with any
/// extensions present in OpenCLTypeExtMap and if so return the
/// extension(s) name(s).
std::string getOpenCLExtensionsFromTypeExtMap(FunctionType *FT);

/// Find an extension in an appropriate extension map and return its name
template<typename T, typename MapT>
std::string getOpenCLExtensionsFromExtMap(T* FT, MapT &Map);

void setCurrentOpenCLExtension(llvm::StringRef Ext) {
  CurrOpenCLExtension = Ext;
}

/// Set OpenCL extensions for a type which can only be used when these
/// OpenCL extensions are enabled. If \p Exts is empty, do nothing.
/// \param Exts A space separated list of OpenCL extensions.
void setOpenCLExtensionForType(QualType T, llvm::StringRef Exts);

/// Set OpenCL extensions for a declaration which can only be
/// used when these OpenCL extensions are enabled. If \p Exts is empty, do
/// nothing.
/// \param Exts A space separated list of OpenCL extensions.
void setOpenCLExtensionForDecl(Decl *FD, llvm::StringRef Exts);

/// Set current OpenCL extensions for a type which can only be used
/// when these OpenCL extensions are enabled. If current OpenCL extension is
/// empty, do nothing.
void setCurrentOpenCLExtensionForType(QualType T);

/// Set current OpenCL extensions for a declaration which
/// can only be used when these OpenCL extensions are enabled. If current
/// OpenCL extension is empty, do nothing.
void setCurrentOpenCLExtensionForDecl(Decl *FD);

bool isOpenCLDisabledDecl(Decl *FD);

/// Check if type \p T corresponding to declaration specifier \p DS
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if type is disabled.
bool checkOpenCLDisabledTypeDeclSpec(const DeclSpec &DS, QualType T);

/// Check if declaration \p D used by expression \p E
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if type is disabled.
bool checkOpenCLDisabledDecl(const NamedDecl &D, const Expr &E);

//===--------------------------------------------------------------------===//
// OpenMP directives and clauses.
//
private:
  // Opaque pointer to the OpenMP data-sharing attributes stack; managed by
  // Init/DestroyDataSharingAttributesStack below.
  void *VarDataSharingAttributesStack;
  /// Number of nested '#pragma omp declare target' directives.
  unsigned DeclareTargetNestingLevel = 0;
  /// Initialization of data-sharing attributes stack.
  void InitDataSharingAttributesStack();
  void DestroyDataSharingAttributesStack();
  ExprResult
  VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind,
                                        bool StrictlyPositive = true);

  /// Returns OpenMP nesting level for current directive.
  unsigned getOpenMPNestingLevel() const;

  /// Adjusts the function scopes index for the target-based regions.
  void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex,
                                    unsigned Level) const;

  /// Push new OpenMP function region for non-capturing function.
  void pushOpenMPFunctionRegion();

  /// Pop OpenMP function region for non-capturing function.
  void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI);

  /// Check whether we're allowed to call Callee from the current function.
  void checkOpenMPDeviceFunction(SourceLocation Loc, FunctionDecl *Callee);

  /// Check if the expression is allowed to be used in expressions for the
  /// OpenMP devices.
  void checkOpenMPDeviceExpr(const Expr *E);

  /// Checks if a type or a declaration is disabled due to the owning extension
  /// being disabled, and emits diagnostic messages if it is disabled.
  /// \param D type or declaration to be checked.
  /// \param DiagLoc source location for the diagnostic message.
  /// \param DiagInfo information to be emitted for the diagnostic message.
  /// \param SrcRange source range of the declaration.
  /// \param Map maps type or declaration to the extensions.
  /// \param Selector selects diagnostic message: 0 for type and 1 for
  /// declaration.
  /// \return true if the type or declaration is disabled.
template <typename T, typename DiagLocT, typename DiagInfoT, typename MapT>
bool checkOpenCLDisabledTypeOrDecl(T D, DiagLocT DiagLoc, DiagInfoT DiagInfo,
                                   MapT &Map, unsigned Selector = 0,
                                   SourceRange SrcRange = SourceRange());

public:
  /// Function tries to capture lambda's captured variables in the OpenMP
  /// region before the original lambda is captured.
  void tryCaptureOpenMPLambdas(ValueDecl *V);

  /// Return true if the provided declaration \a VD should be captured by
  /// reference.
  /// \param Level Relative level of nested OpenMP construct for that the check
  /// is performed.
  bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level) const;

  /// Check if the specified variable is used in one of the private
  /// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP
  /// constructs.
  VarDecl *isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo = false,
                                unsigned StopAt = 0);
  ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK,
                                   ExprObjectKind OK, SourceLocation Loc);

  /// If the current region is a loop-based region, mark the start of the loop
  /// construct.
  void startOpenMPLoop();

  /// Check if the specified variable is used in 'private' clause.
  /// \param Level Relative level of nested OpenMP construct for that the check
  /// is performed.
  bool isOpenMPPrivateDecl(const ValueDecl *D, unsigned Level) const;

  /// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.)
  /// for \p FD based on DSA for the provided corresponding captured
  /// declaration \p D.
  void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level);

  /// Check if the specified variable is captured by 'target' directive.
  /// \param Level Relative level of nested OpenMP construct for that the check
  /// is performed.
  bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level) const;

  ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc,
                                                    Expr *Op);

  /// Called on start of new data sharing attribute block.
  void StartOpenMPDSABlock(OpenMPDirectiveKind K,
                           const DeclarationNameInfo &DirName,
                           Scope *CurScope, SourceLocation Loc);
  /// Start analysis of clauses.
  void StartOpenMPClause(OpenMPClauseKind K);
  /// End analysis of clauses.
  void EndOpenMPClause();
  /// Called on end of data sharing attribute block.
  void EndOpenMPDSABlock(Stmt *CurDirective);

  /// Check if the current region is an OpenMP loop region and if it is,
  /// mark loop control variable, used in \p Init for loop initialization, as
  /// private by default.
  /// \param Init First part of the for loop.
  void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init);

  // OpenMP directives and clauses.
  /// Called on correct id-expression from the '#pragma omp
  /// threadprivate'.
  ExprResult ActOnOpenMPIdExpression(Scope *CurScope,
                                     CXXScopeSpec &ScopeSpec,
                                     const DeclarationNameInfo &Id,
                                     OpenMPDirectiveKind Kind);
  /// Called on well-formed '#pragma omp threadprivate'.
  DeclGroupPtrTy ActOnOpenMPThreadprivateDirective(SourceLocation Loc,
                                                   ArrayRef<Expr *> VarList);
  /// Builds a new OpenMPThreadPrivateDecl and checks its correctness.
  OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc,
                                                  ArrayRef<Expr *> VarList);
  /// Called on well-formed '#pragma omp allocate'.
  DeclGroupPtrTy ActOnOpenMPAllocateDirective(SourceLocation Loc,
                                              ArrayRef<Expr *> VarList,
                                              ArrayRef<OMPClause *> Clauses,
                                              DeclContext *Owner = nullptr);
  /// Called on well-formed '#pragma omp requires'.
  DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc,
                                              ArrayRef<OMPClause *> ClauseList);
  /// Check restrictions on Requires directive
  OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc,
                                        ArrayRef<OMPClause *> Clauses);
  /// Check if the specified type is allowed to be used in 'omp declare
  /// reduction' construct.
  QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc,
                                           TypeResult ParsedType);
  /// Called on start of '#pragma omp declare reduction'.
  DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart(
      Scope *S, DeclContext *DC, DeclarationName Name,
      ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes,
      AccessSpecifier AS, Decl *PrevDeclInScope = nullptr);
  /// Initialize declare reduction construct initializer.
  void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D);
  /// Finish current declare reduction construct initializer.
  void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner);
  /// Initialize declare reduction construct initializer.
  /// \return omp_priv variable.
  VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D);
  /// Finish current declare reduction construct initializer.
  void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer,
                                                 VarDecl *OmpPrivParm);
  /// Called at the end of '#pragma omp declare reduction'.
  DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd(
      Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid);

  /// Check variable declaration in 'omp declare mapper' construct.
  TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D);
  /// Check if the specified type is allowed to be used in 'omp declare
  /// mapper' construct.
  QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc,
                                        TypeResult ParsedType);
  /// Called on start of '#pragma omp declare mapper'.
  OMPDeclareMapperDecl *ActOnOpenMPDeclareMapperDirectiveStart(
      Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType,
      SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS,
      Decl *PrevDeclInScope = nullptr);
  /// Build the mapper variable of '#pragma omp declare mapper'.
  void ActOnOpenMPDeclareMapperDirectiveVarDecl(OMPDeclareMapperDecl *DMD,
                                                Scope *S, QualType MapperType,
                                                SourceLocation StartLoc,
                                                DeclarationName VN);
  /// Called at the end of '#pragma omp declare mapper'.
  DeclGroupPtrTy
  ActOnOpenMPDeclareMapperDirectiveEnd(OMPDeclareMapperDecl *D, Scope *S,
                                       ArrayRef<OMPClause *> ClauseList);

  /// Called on the start of target region i.e.
/// '#pragma omp declare target'.
  bool ActOnStartOpenMPDeclareTargetDirective(SourceLocation Loc);
  /// Called at the end of target region i.e. '#pragma omp end declare target'.
  void ActOnFinishOpenMPDeclareTargetDirective();
  /// Called on correct id-expression from the '#pragma omp declare target'.
  void ActOnOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec,
                                    const DeclarationNameInfo &Id,
                                    OMPDeclareTargetDeclAttr::MapTypeTy MT,
                                    NamedDeclSetType &SameDirectiveDecls);
  /// Check declaration inside target region.
  void
  checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
                                   SourceLocation IdLoc = SourceLocation());
  /// Return true inside OpenMP declare target region.
  bool isInOpenMPDeclareTargetContext() const {
    // Non-zero whenever at least one '#pragma omp declare target' is open.
    return DeclareTargetNestingLevel > 0;
  }
  /// Return true inside OpenMP target region.
  bool isInOpenMPTargetExecutionDirective() const;
  /// Return the number of captured regions created for an OpenMP directive.
  static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind);

  /// Initialization of captured region for OpenMP region.
  void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope);
  /// End of OpenMP region.
  ///
  /// \param S Statement associated with the current OpenMP region.
  /// \param Clauses List of clauses for the current OpenMP region.
  ///
  /// \returns Statement for finished OpenMP region.
  StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses);
  /// Generic entry point for an executable OpenMP directive of kind \p Kind
  /// with the parsed clauses \p Clauses and associated statement \p AStmt.
  StmtResult ActOnOpenMPExecutableDirective(
      OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName,
      OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses,
      Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
  /// Called on well-formed '\#pragma omp parallel' after parsing
  /// of the associated statement.
StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); using VarsWithInheritedDSAType = llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>; /// Called on well-formed '\#pragma omp simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp for' after parsing /// of the associated statement. StmtResult ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp for simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp sections' after parsing /// of the associated statement. StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp section' after parsing of the /// associated statement. StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp single' after parsing of the /// associated statement. StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp master' after parsing of the /// associated statement. StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp critical' after parsing of the /// associated statement. 
StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel for' after parsing /// of the associated statement. StmtResult ActOnOpenMPParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel for simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel sections' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp task' after parsing of the /// associated statement. StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskyield'. StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp barrier'. StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskwait'. StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskgroup'. StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp flush'. 
StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp ordered' after parsing of the /// associated statement. StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp atomic' after parsing of the /// associated statement. StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target data' after parsing of /// the associated statement. StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target enter data' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp target exit data' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp target parallel' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target parallel for' after /// parsing of the associated statement. 
StmtResult ActOnOpenMPTargetParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams' after parsing of the /// associated statement. StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp cancellation point'. StmtResult ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Called on well-formed '\#pragma omp cancel'. StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Called on well-formed '\#pragma omp taskloop' after parsing of the /// associated statement. StmtResult ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp taskloop simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPTaskLoopSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute' after parsing /// of the associated statement. StmtResult ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target update'. StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp distribute parallel for' after /// parsing of the associated statement. 
StmtResult ActOnOpenMPDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute parallel for simd' /// after parsing of the associated statement. StmtResult ActOnOpenMPDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target parallel for simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute' after parsing of /// the associated statement. StmtResult ActOnOpenMPTeamsDistributeDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute simd' after parsing /// of the associated statement. 
StmtResult ActOnOpenMPTeamsDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute parallel for simd' /// after parsing of the associated statement. StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute parallel for' /// after parsing of the associated statement. StmtResult ActOnOpenMPTeamsDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target teams distribute' after parsing /// of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute parallel for' /// after parsing of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute parallel for /// simd' after parsing of the associated statement. 
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Checks correctness of linear modifiers. bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind, SourceLocation LinLoc); /// Checks that the specified declaration matches requirements for the linear /// decls. bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc, OpenMPLinearClauseKind LinKind, QualType Type); /// Called on well-formed '\#pragma omp declare simd' after parsing of /// the associated method/function. DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective( DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS, Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds, ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears, ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR); OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'allocator' clause. OMPClause *ActOnOpenMPAllocatorClause(Expr *Allocator, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'if' clause. OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier, Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation NameModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'final' clause. 
OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'num_threads' clause. OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'safelen' clause. OMPClause *ActOnOpenMPSafelenClause(Expr *Length, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'simdlen' clause. OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'collapse' clause. OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'ordered' clause. OMPClause * ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc, SourceLocation LParenLoc = SourceLocation(), Expr *NumForLoops = nullptr); /// Called on well-formed 'grainsize' clause. OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'num_tasks' clause. OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'hint' clause. OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind, unsigned Argument, SourceLocation ArgumentLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'default' clause. OMPClause *ActOnOpenMPDefaultClause(OpenMPDefaultClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'proc_bind' clause. 
OMPClause *ActOnOpenMPProcBindClause(OpenMPProcBindClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSingleExprWithArgClause( OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc, SourceLocation EndLoc); /// Called on well-formed 'schedule' clause. OMPClause *ActOnOpenMPScheduleClause( OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2, OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'nowait' clause. OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'untied' clause. OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'mergeable' clause. OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'read' clause. OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'write' clause. OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'update' clause. OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'capture' clause. OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'seq_cst' clause. OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'threads' clause. 
OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc,
                                      SourceLocation EndLoc);
  /// Called on well-formed 'simd' clause.
  OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc,
                                   SourceLocation EndLoc);
  /// Called on well-formed 'nogroup' clause.
  OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc,
                                      SourceLocation EndLoc);
  /// Called on well-formed 'unified_address' clause.
  OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc,
                                             SourceLocation EndLoc);
  /// Called on well-formed 'unified_shared_memory' clause.
  OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc,
                                                  SourceLocation EndLoc);
  /// Called on well-formed 'reverse_offload' clause.
  OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc,
                                             SourceLocation EndLoc);
  /// Called on well-formed 'dynamic_allocators' clause.
  OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc,
                                                SourceLocation EndLoc);
  /// Called on well-formed 'atomic_default_mem_order' clause.
  OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause(
      OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc,
      SourceLocation StartLoc, SourceLocation LParenLoc,
      SourceLocation EndLoc);

  /// Generic entry point for a variable-list clause of kind \p Kind over the
  /// variables \p Vars; the remaining parameters carry the kind-specific
  /// payload (reduction/mapper id, dependency kind, linear kind, map-type
  /// modifiers, ...) consumed by the per-clause handlers.
  OMPClause *ActOnOpenMPVarListClause(
      OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *TailExpr,
      const OMPVarListLocTy &Locs, SourceLocation ColonLoc,
      CXXScopeSpec &ReductionOrMapperIdScopeSpec,
      DeclarationNameInfo &ReductionOrMapperId, OpenMPDependClauseKind DepKind,
      OpenMPLinearClauseKind LinKind,
      ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
      ArrayRef<SourceLocation> MapTypeModifiersLoc, OpenMPMapClauseKind MapType,
      bool IsMapTypeImplicit, SourceLocation DepLinMapLoc);
  /// Called on well-formed 'allocate' clause.
  OMPClause *
  ActOnOpenMPAllocateClause(Expr *Allocator, ArrayRef<Expr *> VarList,
                            SourceLocation StartLoc, SourceLocation ColonLoc,
                            SourceLocation LParenLoc, SourceLocation EndLoc);
  /// Called on well-formed 'private' clause.
OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'firstprivate' clause. OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'lastprivate' clause. OMPClause *ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'shared' clause. OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'reduction' clause. OMPClause *ActOnOpenMPReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'task_reduction' clause. OMPClause *ActOnOpenMPTaskReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'in_reduction' clause. OMPClause *ActOnOpenMPInReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'linear' clause. 
OMPClause * ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step, SourceLocation StartLoc, SourceLocation LParenLoc, OpenMPLinearClauseKind LinKind, SourceLocation LinLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'aligned' clause. OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList, Expr *Alignment, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'copyin' clause. OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'copyprivate' clause. OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'flush' pseudo clause. OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'depend' clause. OMPClause * ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind, SourceLocation DepLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'device' clause. OMPClause *ActOnOpenMPDeviceClause(Expr *Device, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'map' clause. OMPClause * ActOnOpenMPMapClause(ArrayRef<OpenMPMapModifierKind> MapTypeModifiers, ArrayRef<SourceLocation> MapTypeModifiersLoc, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, OpenMPMapClauseKind MapType, bool IsMapTypeImplicit, SourceLocation MapLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'num_teams' clause. 
OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'thread_limit' clause. OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'priority' clause. OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'dist_schedule' clause. OMPClause *ActOnOpenMPDistScheduleClause( OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); /// Called on well-formed 'defaultmap' clause. OMPClause *ActOnOpenMPDefaultmapClause( OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc, SourceLocation KindLoc, SourceLocation EndLoc); /// Called on well-formed 'to' clause. OMPClause * ActOnOpenMPToClause(ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'from' clause. OMPClause *ActOnOpenMPFromClause( ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'use_device_ptr' clause. OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs); /// Called on well-formed 'is_device_ptr' clause. OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs); /// The kind of conversion being performed. enum CheckedConversionKind { /// An implicit conversion. CCK_ImplicitConversion, /// A C-style cast. CCK_CStyleCast, /// A functional-style cast. 
CCK_FunctionalCast,
    /// A cast other than a C-style cast.
    CCK_OtherCast,
    /// A conversion for an operand of a builtin overloaded operator.
    CCK_ForBuiltinOverloadedOp
  };

  /// Return true if \p CCK names one of the explicit cast kinds (C-style,
  /// functional, or other), as opposed to an implicit conversion or a
  /// builtin-operator operand conversion.
  static bool isCast(CheckedConversionKind CCK) {
    return CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast ||
           CCK == CCK_OtherCast;
  }

  /// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
  /// cast. If there is already an implicit cast, merge into the existing one.
  /// If isLvalue, the result of the cast is an lvalue.
  ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK,
                               ExprValueKind VK = VK_RValue,
                               const CXXCastPath *BasePath = nullptr,
                               CheckedConversionKind CCK
                                  = CCK_ImplicitConversion);

  /// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
  /// to the conversion from scalar type ScalarTy to the Boolean type.
  static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy);

  /// IgnoredValueConversions - Given that an expression's result is
  /// syntactically ignored, perform any conversions that are
  /// required.
  ExprResult IgnoredValueConversions(Expr *E);

  // UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts
  // functions and arrays to their respective pointers (C99 6.3.2.1).
  ExprResult UsualUnaryConversions(Expr *E);

  /// CallExprUnaryConversions - a special case of a unary conversion
  /// performed on a function designator of a call expression.
  ExprResult CallExprUnaryConversions(Expr *E);

  // DefaultFunctionArrayConversion - converts functions and arrays
  // to their respective pointers (C99 6.3.2.1).
  ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true);

  // DefaultFunctionArrayLvalueConversion - converts functions and
  // arrays to their respective pointers and performs the
  // lvalue-to-rvalue conversion.
  ExprResult DefaultFunctionArrayLvalueConversion(Expr *E,
                                                  bool Diagnose = true);

  // DefaultLvalueConversion - performs lvalue-to-rvalue conversion on
  // the operand.
// This is DefaultFunctionArrayLvalueConversion,
  // except that it assumes the operand isn't of function or array
  // type.
  ExprResult DefaultLvalueConversion(Expr *E);

  // DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
  // do not have a prototype. Integer promotions are performed on each
  // argument, and arguments that have type float are promoted to double.
  ExprResult DefaultArgumentPromotion(Expr *E);

  /// If \p E is a prvalue denoting an unmaterialized temporary, materialize
  /// it as an xvalue. In C++98, the result will still be a prvalue, because
  /// we don't have xvalues there.
  ExprResult TemporaryMaterializationConversion(Expr *E);

  // Used for emitting the right warning by DefaultVariadicArgumentPromotion
  enum VariadicCallType {
    VariadicFunction,
    VariadicBlock,
    VariadicMethod,
    VariadicConstructor,
    VariadicDoesNotApply
  };

  /// Determine the variadic call type for a call to \p FDecl with prototype
  /// \p Proto through the callee expression \p Fn.
  VariadicCallType getVariadicCallType(FunctionDecl *FDecl,
                                       const FunctionProtoType *Proto,
                                       Expr *Fn);

  // Used for determining in which context a type is allowed to be passed to a
  // vararg function.
  enum VarArgKind {
    VAK_Valid,
    VAK_ValidInCXX11,
    VAK_Undefined,
    VAK_MSVCUndefined,
    VAK_Invalid
  };

  // Determines which VarArgKind fits an expression.
  VarArgKind isValidVarArgType(const QualType &Ty);

  /// Check to see if the given expression is a valid argument to a variadic
  /// function, issuing a diagnostic if not.
  void checkVariadicArgument(const Expr *E, VariadicCallType CT);

  /// Check to see if a given expression could have '.c_str()' called on it.
  bool hasCStrMethod(const Expr *E);

  /// GatherArgumentsForCall - Collects argument expressions for various
  /// forms of call prototypes.
bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
                            const FunctionProtoType *Proto,
                            unsigned FirstParam, ArrayRef<Expr *> Args,
                            SmallVectorImpl<Expr *> &AllArgs,
                            VariadicCallType CallType = VariadicDoesNotApply,
                            bool AllowExplicit = false,
                            bool IsListInitialization = false);

// DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
// will create a runtime trap if the resulting type is not a POD type.
ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
                                            FunctionDecl *FDecl);

// UsualArithmeticConversions - performs the UsualUnaryConversions on its
// operands and then handles various conversions that are common to binary
// operators (C99 6.3.1.8). If both operands aren't arithmetic, this
// routine returns the first non-arithmetic type found. The client is
// responsible for emitting appropriate error diagnostics.
QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
                                    bool IsCompAssign = false);

/// AssignConvertType - All of the 'assignment' semantic checks return this
/// enum to indicate whether the assignment was allowed. These checks are
/// done for simple assignments, as well as initialization, return from
/// function, argument passing, etc. The query is phrased in terms of a
/// source and destination type.
enum AssignConvertType {
  /// Compatible - the types are compatible according to the standard.
  Compatible,

  /// PointerToInt - The assignment converts a pointer to an int, which we
  /// accept as an extension.
  PointerToInt,

  /// IntToPointer - The assignment converts an int to a pointer, which we
  /// accept as an extension.
  IntToPointer,

  /// FunctionVoidPointer - The assignment is between a function pointer and
  /// void*, which the standard doesn't allow, but we accept as an extension.
  FunctionVoidPointer,

  /// IncompatiblePointer - The assignment is between two pointers types that
  /// are not compatible, but we accept them as an extension.
  IncompatiblePointer,

  /// IncompatiblePointerSign - The assignment is between two pointers types
  /// which point to integers which have a different sign, but are otherwise
  /// identical. This is a subset of the above, but broken out because it's by
  /// far the most common case of incompatible pointers.
  IncompatiblePointerSign,

  /// CompatiblePointerDiscardsQualifiers - The assignment discards
  /// c/v/r qualifiers, which we accept as an extension.
  CompatiblePointerDiscardsQualifiers,

  /// IncompatiblePointerDiscardsQualifiers - The assignment
  /// discards qualifiers that we don't permit to be discarded,
  /// like address spaces.
  IncompatiblePointerDiscardsQualifiers,

  /// IncompatibleNestedPointerAddressSpaceMismatch - The assignment
  /// changes address spaces in nested pointer types which is not allowed.
  /// For instance, converting __private int ** to __generic int ** is
  /// illegal even though __private could be converted to __generic.
  IncompatibleNestedPointerAddressSpaceMismatch,

  /// IncompatibleNestedPointerQualifiers - The assignment is between two
  /// nested pointer types, and the qualifiers other than the first two
  /// levels differ e.g. char ** -> const char **, but we accept them as an
  /// extension.
  IncompatibleNestedPointerQualifiers,

  /// IncompatibleVectors - The assignment is between two vector types that
  /// have the same size, which we accept as an extension.
  IncompatibleVectors,

  /// IntToBlockPointer - The assignment converts an int to a block
  /// pointer. We disallow this.
  IntToBlockPointer,

  /// IncompatibleBlockPointer - The assignment is between two block
  /// pointers types that are not compatible.
  IncompatibleBlockPointer,

  /// IncompatibleObjCQualifiedId - The assignment is between a qualified
  /// id type and something else (that is incompatible with it). For example,
  /// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol.
  IncompatibleObjCQualifiedId,

  /// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an
  /// object with __weak qualifier.
  IncompatibleObjCWeakRef,

  /// Incompatible - We reject this conversion outright, it is invalid to
  /// represent it in the AST.
  Incompatible
};

/// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the
/// assignment conversion type specified by ConvTy. This returns true if the
/// conversion was invalid or false if the conversion was accepted.
bool DiagnoseAssignmentResult(AssignConvertType ConvTy,
                              SourceLocation Loc,
                              QualType DstType, QualType SrcType,
                              Expr *SrcExpr, AssignmentAction Action,
                              bool *Complained = nullptr);

/// IsValueInFlagEnum - Determine if a value is allowed as part of a flag
/// enum. If AllowMask is true, then we also allow the complement of a valid
/// value, to be used as a mask.
bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val,
                       bool AllowMask) const;

/// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant
/// integer not in the range of enum values.
void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
                            Expr *SrcExpr);

/// CheckAssignmentConstraints - Perform type checking for assignment,
/// argument passing, variable initialization, and function return values.
/// C99 6.5.16.
AssignConvertType CheckAssignmentConstraints(SourceLocation Loc,
                                             QualType LHSType,
                                             QualType RHSType);

/// Check assignment constraints and optionally prepare for a conversion of
/// the RHS to the LHS type. The conversion is prepared for if ConvertRHS
/// is true.
AssignConvertType CheckAssignmentConstraints(QualType LHSType,
                                             ExprResult &RHS,
                                             CastKind &Kind,
                                             bool ConvertRHS = true);

/// Check assignment constraints for an assignment of RHS to LHSType.
///
/// \param LHSType The destination type for the assignment.
/// \param RHS The source expression for the assignment.
/// \param Diagnose If \c true, diagnostics may be produced when checking
///        for assignability.
/// (continued) If a diagnostic is produced, \p RHS will be
/// set to ExprError(). Note that this function may still return
/// without producing a diagnostic, even for an invalid assignment.
/// \param DiagnoseCFAudited If \c true, the target is a function parameter
///        in an audited Core Foundation API and does not need to be checked
///        for ARC retain issues.
/// \param ConvertRHS If \c true, \p RHS will be updated to model the
///        conversions necessary to perform the assignment. If \c false,
///        \p Diagnose must also be \c false.
AssignConvertType CheckSingleAssignmentConstraints(
    QualType LHSType, ExprResult &RHS, bool Diagnose = true,
    bool DiagnoseCFAudited = false, bool ConvertRHS = true);

// If the lhs type is a transparent union, check whether we
// can initialize the transparent union with the given expression.
AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType,
                                                           ExprResult &RHS);

/// Returns true for the string-literal-to-non-const-pointer conversion
/// that C++ deprecated/removed; used to gate diagnostics on it.
bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType);

/// Verify the exception specification of \p ToType is compatible with the
/// expression being converted (C++ function-pointer conversions).
bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType);

ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
                                     AssignmentAction Action,
                                     bool AllowExplicit = false);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
                                     AssignmentAction Action,
                                     bool AllowExplicit,
                                     ImplicitConversionSequence& ICS);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
                                     const ImplicitConversionSequence& ICS,
                                     AssignmentAction Action,
                                     CheckedConversionKind CCK
                                        = CCK_ImplicitConversion);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
                                     const StandardConversionSequence& SCS,
                                     AssignmentAction Action,
                                     CheckedConversionKind CCK);

/// Perform a qualification conversion on \p E to type \p Ty, producing an
/// expression of value kind \p VK.
ExprResult PerformQualificationConversion(
    Expr *E, QualType Ty, ExprValueKind VK = VK_RValue,
    CheckedConversionKind CCK = CCK_ImplicitConversion);

/// the following "Check" methods will return a valid/converted QualType
/// or a null QualType (indicating an error diagnostic was issued).
/// type checking binary operators (subroutines of CreateBuiltinBinOp). QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType CheckPointerToMemberOperands( // C++ 5.5 ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, SourceLocation OpLoc, bool isIndirect); QualType CheckMultiplyDivideOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool IsDivide); QualType CheckRemainderOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign = false); QualType CheckAdditionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr); QualType CheckSubtractionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, QualType* CompLHSTy = nullptr); QualType CheckShiftOperands( // C99 6.5.7 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, bool IsCompAssign = false); void CheckPtrComparisonWithNullChar(ExprResult &E, ExprResult &NullE); QualType CheckCompareOperands( // C99 6.5.8/9 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckBitwiseOperands( // C99 6.5.[10...12] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckLogicalOperands( // C99 6.5.[13,14] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); // CheckAssignmentOperands is used for both simple and compound assignment. // For simple assignment, pass both expressions and a null converted type. // For compound assignment, pass both expressions and the converted type. 
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2]
    Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc,
    QualType CompoundType);

// Rewriting helpers for pseudo-object expressions (Objective-C properties,
// subscripts, MS __declspec(property), etc.).
ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc,
                                   UnaryOperatorKind Opcode, Expr *Op);
ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc,
                                       BinaryOperatorKind Opcode,
                                       Expr *LHS, Expr *RHS);
ExprResult checkPseudoObjectRValue(Expr *E);
// NOTE(review): presumably reconstructs the syntactic form of a
// pseudo-object expression for further analysis — confirm at definition.
Expr *recreateSyntacticForm(PseudoObjectExpr *E);

QualType CheckConditionalOperands( // C99 6.5.15
    ExprResult &Cond, ExprResult &LHS, ExprResult &RHS,
    ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc);
QualType CXXCheckConditionalOperands( // C++ 5.16
    ExprResult &cond, ExprResult &lhs, ExprResult &rhs,
    ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc);
QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2,
                                  bool ConvertArgs = true);
// Convenience overload: unwraps the ExprResults, delegates to the Expr*&
// overload above, and writes back the possibly-converted expressions.
QualType FindCompositePointerType(SourceLocation Loc,
                                  ExprResult &E1, ExprResult &E2,
                                  bool ConvertArgs = true) {
  Expr *E1Tmp = E1.get(), *E2Tmp = E2.get();
  QualType Composite =
      FindCompositePointerType(Loc, E1Tmp, E2Tmp, ConvertArgs);
  E1 = E1Tmp;
  E2 = E2Tmp;
  return Composite;
}

QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS,
                                      SourceLocation QuestionLoc);

/// Diagnose a conditional operator in which only one arm is a null pointer
/// constant.
bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr,
                                SourceLocation QuestionLoc);

void DiagnoseAlwaysNonNullPointer(Expr *E,
                                  Expr::NullPointerConstantKind NullType,
                                  bool IsEqual, SourceRange Range);

/// type checking for vector binary operators.
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
                             SourceLocation Loc, bool IsCompAssign,
                             bool AllowBothBool, bool AllowBoolConversion);
/// Returns a vector type with the same width/element count as \p V but a
/// signed integer element type (used as the result of vector comparisons).
QualType GetSignedVectorType(QualType V);
QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
                                    SourceLocation Loc,
                                    BinaryOperatorKind Opc);
QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS,
                                    SourceLocation Loc);

/// Lax vector conversions: same-size vector types that are convertible
/// under -flax-vector-conversions-style rules.
bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType);
bool isLaxVectorConversion(QualType srcType, QualType destType);

/// type checking declaration initializers (C99 6.7.8)
bool CheckForConstantInitializer(Expr *e, QualType t);

// type checking C++ declaration initializers (C++ [dcl.init]).

/// ReferenceCompareResult - Expresses the result of comparing two
/// types (cv1 T1 and cv2 T2) to determine their compatibility for the
/// purposes of initialization by reference (C++ [dcl.init.ref]p4).
enum ReferenceCompareResult {
  /// Ref_Incompatible - The two types are incompatible, so direct
  /// reference binding is not possible.
  Ref_Incompatible = 0,
  /// Ref_Related - The two types are reference-related, which means
  /// that their unqualified forms (T1 and T2) are either the same
  /// or T1 is a base class of T2.
  Ref_Related,
  /// Ref_Compatible - The two types are reference-compatible.
  Ref_Compatible
};

ReferenceCompareResult CompareReferenceRelationship(SourceLocation Loc,
                                                    QualType T1, QualType T2,
                                                    bool &DerivedToBase,
                                                    bool &ObjCConversion,
                                                    bool &ObjCLifetimeConversion);

ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType,
                               Expr *CastExpr, CastKind &CastKind,
                               ExprValueKind &VK, CXXCastPath &Path);

/// Force an expression with unknown-type to an expression of the
/// given type.
ExprResult forceUnknownAnyToType(Expr *E, QualType ToType);

/// Type-check an expression that's being passed to an
/// __unknown_anytype parameter.
ExprResult checkUnknownAnyArg(SourceLocation callLoc,
                              Expr *result, QualType &paramType);

// CheckVectorCast - check type constraints for vectors.
// Since vectors are an extension, there are no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size.
// returns true if the cast is invalid
bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty,
                     CastKind &Kind);

/// Prepare `SplattedExpr` for a vector splat operation, adding
/// implicit casts if necessary.
ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr);

// CheckExtVectorCast - check type constraints for extended vectors.
// Since vectors are an extension, there are no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size,
// or vectors and the element type of that vector.
// returns the cast expr
ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr,
                              CastKind &Kind);

/// Build a C++ functional-style cast T(expr) after semantic checking.
ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type,
                                      SourceLocation LParenLoc,
                                      Expr *CastExpr,
                                      SourceLocation RParenLoc);

enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error };

/// Checks for invalid conversions and casts between
/// retainable pointers and other pointer kinds for ARC and Weak.
ARCConversionResult CheckObjCConversion(SourceRange castRange,
                                        QualType castType, Expr *&op,
                                        CheckedConversionKind CCK,
                                        bool Diagnose = true,
                                        bool DiagnoseCFAudited = false,
                                        BinaryOperatorKind Opc = BO_PtrMemD
                                        );

Expr *stripARCUnbridgedCast(Expr *e);
void diagnoseARCUnbridgedCast(Expr *e);

bool CheckObjCARCUnavailableWeakConversion(QualType castType,
                                           QualType ExprType);

/// checkRetainCycles - Check whether an Objective-C message send
/// might create an obvious retain cycle.
void checkRetainCycles(ObjCMessageExpr *msg);
void checkRetainCycles(Expr *receiver, Expr *argument);
void checkRetainCycles(VarDecl *Var, Expr *Init);

/// checkUnsafeAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained type.
bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS);

/// checkUnsafeExprAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained expression.
void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS);

/// CheckMessageArgumentTypes - Check types in an Obj-C message send.
/// \param Method - May be null.
/// \param [out] ReturnType - The return type of the send.
/// \return true iff there were any incompatible types.
bool CheckMessageArgumentTypes(const Expr *Receiver, QualType ReceiverType,
                               MultiExprArg Args, Selector Sel,
                               ArrayRef<SourceLocation> SelectorLocs,
                               ObjCMethodDecl *Method, bool isClassMessage,
                               bool isSuperMessage,
                               SourceLocation lbrac, SourceLocation rbrac,
                               SourceRange RecRange,
                               QualType &ReturnType, ExprValueKind &VK);

/// Determine the result of a message send expression based on
/// the type of the receiver, the method expected to receive the message,
/// and the form of the message send.
QualType getMessageSendResultType(const Expr *Receiver, QualType ReceiverType,
                                  ObjCMethodDecl *Method, bool isClassMessage,
                                  bool isSuperMessage);

/// If the given expression involves a message send to a method
/// with a related result type, emit a note describing what happened.
void EmitRelatedResultTypeNote(const Expr *E);

/// Given that we had incompatible pointer types in a return
/// statement, check whether we're in a method with a related result
/// type, and if so, emit a note describing what happened.
void EmitRelatedResultTypeNoteForReturn(QualType destType);

/// The checked result of analyzing a condition expression (and its optional
/// condition variable), possibly with a compile-time-known boolean value.
class ConditionResult {
  Decl *ConditionVar;   // The condition variable, if any.
  FullExprArg Condition; // The (full-expression) condition itself.
  bool Invalid;          // True if semantic analysis of the condition failed.
  bool HasKnownValue;    // True if the value was computable at compile time.
  bool KnownValue;       // The compile-time value; meaningful only if above.

  friend class Sema;

  ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition,
                  bool IsConstexpr)
      : ConditionVar(ConditionVar), Condition(Condition), Invalid(false),
        // Only evaluate for constexpr conditions with a non-dependent value.
        HasKnownValue(IsConstexpr && Condition.get() &&
                      !Condition.get()->isValueDependent()),
        KnownValue(HasKnownValue &&
                   !!Condition.get()->EvaluateKnownConstInt(S.Context)) {}
  explicit ConditionResult(bool Invalid)
      : ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid),
        HasKnownValue(false), KnownValue(false) {}

public:
  ConditionResult() : ConditionResult(false) {}

  bool isInvalid() const { return Invalid; }
  std::pair<VarDecl *, Expr *> get() const {
    return std::make_pair(cast_or_null<VarDecl>(ConditionVar),
                          Condition.get());
  }
  llvm::Optional<bool> getKnownValue() const {
    if (!HasKnownValue)
      return None;
    return KnownValue;
  }
};

static ConditionResult ConditionError() { return ConditionResult(true); }

enum class ConditionKind {
  Boolean,     ///< A boolean condition, from 'if', 'while', 'for', or 'do'.
  ConstexprIf, ///< A constant boolean condition from 'if constexpr'.
  Switch       ///< An integral condition for a 'switch' statement.
};

ConditionResult ActOnCondition(Scope *S, SourceLocation Loc,
                               Expr *SubExpr, ConditionKind CK);

ConditionResult ActOnConditionVariable(Decl *ConditionVar,
                                       SourceLocation StmtLoc,
                                       ConditionKind CK);

DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D);

ExprResult CheckConditionVariable(VarDecl *ConditionVar,
                                  SourceLocation StmtLoc,
                                  ConditionKind CK);
ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond);

/// CheckBooleanCondition - Diagnose problems involving the use of
/// the given expression as a boolean condition (e.g. in an if
/// statement). Also performs the standard function and array
/// decays, possibly changing the input variable.
/// /// \param Loc - A location associated with the condition, e.g. the /// 'if' keyword. /// \return true iff there were any errors ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E, bool IsConstexpr = false); /// ActOnExplicitBoolSpecifier - Build an ExplicitSpecifier from an expression /// found in an explicit(bool) specifier. ExplicitSpecifier ActOnExplicitBoolSpecifier(Expr *E); /// tryResolveExplicitSpecifier - Attempt to resolve the explict specifier. /// Returns true if the explicit specifier is now resolved. bool tryResolveExplicitSpecifier(ExplicitSpecifier &ExplicitSpec); /// DiagnoseAssignmentAsCondition - Given that an expression is /// being used as a boolean condition, warn if it's an assignment. void DiagnoseAssignmentAsCondition(Expr *E); /// Redundant parentheses over an equality comparison can indicate /// that the user intended an assignment used as condition. void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE); /// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid. ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false); /// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have /// the specified width and sign. If an overflow occurs, detect it and emit /// the specified diagnostic. void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal, unsigned NewWidth, bool NewSign, SourceLocation Loc, unsigned DiagID); /// Checks that the Objective-C declaration is declared in the global scope. /// Emits an error and marks the declaration as invalid if it's not declared /// in the global scope. bool CheckObjCDeclScope(Decl *D); /// Abstract base class used for diagnosing integer constant /// expression violations. 
class VerifyICEDiagnoser {
public:
  // If true, suppress all diagnostics from this verifier.
  bool Suppress;

  VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { }

  /// Diagnose that the expression is not an integer constant expression.
  virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) =0;
  /// Diagnose that the expression was folded to a constant (extension).
  virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR);
  virtual ~VerifyICEDiagnoser() { }
};

/// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
/// and reports the appropriate diagnostics. Returns false on success.
/// Can optionally return the value of the expression.
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
                                           VerifyICEDiagnoser &Diagnoser,
                                           bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
                                           unsigned DiagID,
                                           bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E,
                                           llvm::APSInt *Result = nullptr);

/// VerifyBitField - verifies that a bit field expression is an ICE and has
/// the correct width, and that the field type is valid.
/// Returns false on success.
/// Can optionally return whether the bit-field is of width 0
ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
                          QualType FieldTy, bool IsMsStruct,
                          Expr *BitWidth, bool *ZeroWidth = nullptr);

private:
unsigned ForceCUDAHostDeviceDepth = 0;

public:
/// Increments our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. So long as this count is greater
/// than zero, all functions encountered will be __host__ __device__.
void PushForceCUDAHostDevice();

/// Decrements our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. Returns false if the count is 0
/// before incrementing, so you can emit an error.
bool PopForceCUDAHostDevice();

/// Diagnostics that are emitted only if we discover that the given function
/// must be codegen'ed. Because handling these correctly adds overhead to
/// compilation, this is currently only enabled for CUDA compilations.
llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>,
               std::vector<PartialDiagnosticAt>>
    DeviceDeferredDiags;

/// A pair of a canonical FunctionDecl and a SourceLocation. When used as the
/// key in a hashtable, both the FD and location are hashed.
struct FunctionDeclAndLoc {
  CanonicalDeclPtr<FunctionDecl> FD;
  SourceLocation Loc;
};

/// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a
/// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the
/// same deferred diag twice.
llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags;

/// An inverse call graph, mapping known-emitted functions to one of their
/// known-emitted callers (plus the location of the call).
///
/// Functions that we can tell a priori must be emitted aren't added to this
/// map.
llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>,
               /* Caller = */ FunctionDeclAndLoc>
    DeviceKnownEmittedFns;

/// A partial call graph maintained during CUDA/OpenMP device code compilation
/// to support deferred diagnostics.
///
/// Functions are only added here if, at the time they're considered, they are
/// not known-emitted. As soon as we discover that a function is
/// known-emitted, we remove it and everything it transitively calls from this
/// set and add those functions to DeviceKnownEmittedFns.
llvm::DenseMap</* Caller = */ CanonicalDeclPtr<FunctionDecl>,
               /* Callees = */ llvm::MapVector<CanonicalDeclPtr<FunctionDecl>,
                                               SourceLocation>>
    DeviceCallGraph;

/// Diagnostic builder for CUDA/OpenMP devices errors which may or may not be
/// deferred.
///
/// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch)
/// which are not allowed to appear inside __device__ functions and are
/// allowed to appear in __host__ __device__ functions only if the host+device
/// function is never codegen'ed.
///
/// To handle this, we use the notion of "deferred diagnostics", where we
/// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed.
/// /// This class lets you emit either a regular diagnostic, a deferred /// diagnostic, or no diagnostic at all, according to an argument you pass to /// its constructor, thus simplifying the process of creating these "maybe /// deferred" diagnostics. class DeviceDiagBuilder { public: enum Kind { /// Emit no diagnostics. K_Nop, /// Emit the diagnostic immediately (i.e., behave like Sema::Diag()). K_Immediate, /// Emit the diagnostic immediately, and, if it's a warning or error, also /// emit a call stack showing how this function can be reached by an a /// priori known-emitted function. K_ImmediateWithCallStack, /// Create a deferred diagnostic, which is emitted only if the function /// it's attached to is codegen'ed. Also emit a call stack as with /// K_ImmediateWithCallStack. K_Deferred }; DeviceDiagBuilder(Kind K, SourceLocation Loc, unsigned DiagID, FunctionDecl *Fn, Sema &S); DeviceDiagBuilder(DeviceDiagBuilder &&D); DeviceDiagBuilder(const DeviceDiagBuilder &) = default; ~DeviceDiagBuilder(); /// Convertible to bool: True if we immediately emitted an error, false if /// we didn't emit an error or we created a deferred error. /// /// Example usage: /// /// if (DeviceDiagBuilder(...) << foo << bar) /// return ExprError(); /// /// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably /// want to use these instead of creating a DeviceDiagBuilder yourself. operator bool() const { return ImmediateDiag.hasValue(); } template <typename T> friend const DeviceDiagBuilder &operator<<(const DeviceDiagBuilder &Diag, const T &Value) { if (Diag.ImmediateDiag.hasValue()) *Diag.ImmediateDiag << Value; else if (Diag.PartialDiagId.hasValue()) Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second << Value; return Diag; } private: Sema &S; SourceLocation Loc; unsigned DiagID; FunctionDecl *Fn; bool ShowCallStack; // Invariant: At most one of these Optionals has a value. // FIXME: Switch these to a Variant once that exists. 
  llvm::Optional<SemaDiagnosticBuilder> ImmediateDiag;
  llvm::Optional<unsigned> PartialDiagId;
};

/// Indicate that this function (and thus everything it transitively calls)
/// will be codegen'ed, and emit any deferred diagnostics on this function and
/// its (transitive) callees.
void markKnownEmitted(
    Sema &S, FunctionDecl *OrigCaller, FunctionDecl *OrigCallee,
    SourceLocation OrigLoc,
    const llvm::function_ref<bool(Sema &, FunctionDecl *)> IsKnownEmitted);

/// Creates a DeviceDiagBuilder that emits the diagnostic if the current context
/// is "used as device code".
///
/// - If CurContext is a __host__ function, does not emit any diagnostics.
/// - If CurContext is a __device__ or __global__ function, emits the
///   diagnostics immediately.
/// - If CurContext is a __host__ __device__ function and we are compiling for
///   the device, creates a diagnostic which is emitted if and when we realize
///   that the function will be codegen'ed.
///
/// Example usage:
///
///  // Variable-length arrays are not allowed in CUDA device code.
///  if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget())
///    return ExprError();
///  // Otherwise, continue parsing as normal.
DeviceDiagBuilder CUDADiagIfDeviceCode(SourceLocation Loc, unsigned DiagID);

/// Creates a DeviceDiagBuilder that emits the diagnostic if the current context
/// is "used as host code".
///
/// Same as CUDADiagIfDeviceCode, with "host" and "device" switched.
DeviceDiagBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID);

/// Creates a DeviceDiagBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurContext is a `declare target` function or it is known that the
///   function is emitted for the device, emits the diagnostics immediately.
/// - If CurContext is a non-`declare target` function and we are compiling
///   for the device, creates a diagnostic which is emitted if and when we
///   realize that the function will be codegen'ed.
/// /// Example usage: /// /// // Variable-length arrays are not allowed in NVPTX device code. /// if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported)) /// return ExprError(); /// // Otherwise, continue parsing as normal. DeviceDiagBuilder diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID); DeviceDiagBuilder targetDiag(SourceLocation Loc, unsigned DiagID); enum CUDAFunctionTarget { CFT_Device, CFT_Global, CFT_Host, CFT_HostDevice, CFT_InvalidTarget }; /// Determines whether the given function is a CUDA device/host/kernel/etc. /// function. /// /// Use this rather than examining the function's attributes yourself -- you /// will get it wrong. Returns CFT_Host if D is null. CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D, bool IgnoreImplicitHDAttr = false); CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs); /// Gets the CUDA target for the current context. CUDAFunctionTarget CurrentCUDATarget() { return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext)); } // CUDA function call preference. Must be ordered numerically from // worst to best. enum CUDAFunctionPreference { CFP_Never, // Invalid caller/callee combination. CFP_WrongSide, // Calls from host-device to host or device // function that do not match current compilation // mode. CFP_HostDevice, // Any calls to host/device functions. CFP_SameSide, // Calls from host-device to host or device // function matching current compilation mode. CFP_Native, // host-to-host or device-to-device calls. }; /// Identifies relative preference of a given Caller/Callee /// combination, based on their host/device attributes. /// \param Caller function which needs address of \p Callee. /// nullptr in case of global context. /// \param Callee target function /// /// \returns preference value for particular Caller/Callee combination. 
CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller,
                                              const FunctionDecl *Callee);

/// Determines whether Caller may invoke Callee, based on their CUDA
/// host/device attributes. Returns false if the call is not allowed.
///
/// Note: Will return true for CFP_WrongSide calls. These may appear in
/// semantically correct CUDA programs, but only if they're never codegen'ed.
bool IsAllowedCUDACall(const FunctionDecl *Caller,
                       const FunctionDecl *Callee) {
  return IdentifyCUDAPreference(Caller, Callee) != CFP_Never;
}

/// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD,
/// depending on FD and the current compilation settings.
void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD,
                                 const LookupResult &Previous);

public:
/// Check whether we're allowed to call Callee from the current context.
///
/// - If the call is never allowed in a semantically-correct program
///   (CFP_Never), emits an error and returns false.
///
/// - If the call is allowed in semantically-correct programs, but only if
///   it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to
///   be emitted if and when the caller is codegen'ed, and returns true.
///
///   Will only create deferred diagnostics for a given SourceLocation once,
///   so you can safely call this multiple times without generating duplicate
///   deferred errors.
///
/// - Otherwise, returns true without emitting any diagnostics.
bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee);

/// Set __device__ or __host__ __device__ attributes on the given lambda
/// operator() method.
///
/// CUDA lambdas declared inside __device__ or __global__ functions inherit
/// the __device__ attribute. Similarly, lambdas inside __host__ __device__
/// functions become __host__ __device__ themselves.
void CUDASetLambdaAttrs(CXXMethodDecl *Method);

/// Finds a function in \p Matches with highest calling priority
/// from \p Caller context and erases all functions with lower
/// calling priority.
void EraseUnwantedCUDAMatches(
    const FunctionDecl *Caller,
    SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches);

/// Given an implicit special member, infer its CUDA target from the
/// calls it needs to make to underlying base/field special members.
/// \param ClassDecl the class for which the member is being created.
/// \param CSM the kind of special member.
/// \param MemberDecl the special member itself.
/// \param ConstRHS true if this is a copy operation with a const object on
///        its RHS.
/// \param Diagnose true if this call should emit diagnostics.
/// \return true if there was an error inferring.
/// The result of this call is implicit CUDA target attribute(s) attached to
/// the member declaration.
bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
                                             CXXSpecialMember CSM,
                                             CXXMethodDecl *MemberDecl,
                                             bool ConstRHS,
                                             bool Diagnose);

/// \return true if \p CD can be considered empty according to CUDA
/// (E.2.3.1 in CUDA 7.5 Programming guide).
bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD);
bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD);

// \brief Checks that initializers of \p Var satisfy CUDA restrictions. In
// case of error emits appropriate diagnostic and invalidates \p Var.
//
// \details CUDA allows only empty constructors as initializers for global
// variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all
// __shared__ variables whether they are local or not (they all are implicitly
// static in CUDA). One exception is that CUDA allows constant initializers
// for __constant__ and __device__ variables.
void checkAllowedCUDAInitializer(VarDecl *VD);

/// Check whether NewFD is a valid overload for CUDA. Emits
/// diagnostics and invalidates NewFD if not.
void checkCUDATargetOverload(FunctionDecl *NewFD,
                             const LookupResult &Previous);

/// Copies target attributes from the template TD to the function FD.
void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD); /// Returns the name of the launch configuration function. This is the name /// of the function that will be called to configure kernel call, with the /// parameters specified via <<<>>>. std::string getCudaConfigureFuncName() const; /// \name Code completion //@{ /// Describes the context in which code completion occurs. enum ParserCompletionContext { /// Code completion occurs at top-level or namespace context. PCC_Namespace, /// Code completion occurs within a class, struct, or union. PCC_Class, /// Code completion occurs within an Objective-C interface, protocol, /// or category. PCC_ObjCInterface, /// Code completion occurs within an Objective-C implementation or /// category implementation PCC_ObjCImplementation, /// Code completion occurs within the list of instance variables /// in an Objective-C interface, protocol, category, or implementation. PCC_ObjCInstanceVariableList, /// Code completion occurs following one or more template /// headers. PCC_Template, /// Code completion occurs following one or more template /// headers within a class. PCC_MemberTemplate, /// Code completion occurs within an expression. PCC_Expression, /// Code completion occurs within a statement, which may /// also be an expression or a declaration. PCC_Statement, /// Code completion occurs at the beginning of the /// initialization statement (or expression) in a for loop. PCC_ForInit, /// Code completion occurs within the condition of an if, /// while, switch, or for statement. PCC_Condition, /// Code completion occurs within the body of a function on a /// recovery path, where we do not have a specific handle on our position /// in the grammar. PCC_RecoveryInFunction, /// Code completion occurs where only a type is permitted. PCC_Type, /// Code completion occurs in a parenthesized expression, which /// might also be a type cast. 
PCC_ParenthesizedExpression, /// Code completion occurs within a sequence of declaration /// specifiers within a function, method, or block. PCC_LocalDeclarationSpecifiers }; void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path); void CodeCompleteOrdinaryName(Scope *S, ParserCompletionContext CompletionContext); void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS, bool AllowNonIdentifiers, bool AllowNestedNameSpecifiers); struct CodeCompleteExpressionData; void CodeCompleteExpression(Scope *S, const CodeCompleteExpressionData &Data); void CodeCompleteExpression(Scope *S, QualType PreferredType, bool IsParenthesized = false); void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase, SourceLocation OpLoc, bool IsArrow, bool IsBaseExprStatement, QualType PreferredType); void CodeCompletePostfixExpression(Scope *S, ExprResult LHS, QualType PreferredType); void CodeCompleteTag(Scope *S, unsigned TagSpec); void CodeCompleteTypeQualifiers(DeclSpec &DS); void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D, const VirtSpecifiers *VS = nullptr); void CodeCompleteBracketDeclarator(Scope *S); void CodeCompleteCase(Scope *S); /// Reports signatures for a call to CodeCompleteConsumer and returns the /// preferred type for the current argument. Returned type can be null. 
QualType ProduceCallSignatureHelp(Scope *S, Expr *Fn, ArrayRef<Expr *> Args, SourceLocation OpenParLoc); QualType ProduceConstructorSignatureHelp(Scope *S, QualType Type, SourceLocation Loc, ArrayRef<Expr *> Args, SourceLocation OpenParLoc); QualType ProduceCtorInitMemberSignatureHelp(Scope *S, Decl *ConstructorDecl, CXXScopeSpec SS, ParsedType TemplateTypeTy, ArrayRef<Expr *> ArgExprs, IdentifierInfo *II, SourceLocation OpenParLoc); void CodeCompleteInitializer(Scope *S, Decl *D); void CodeCompleteAfterIf(Scope *S); void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext, QualType BaseType, QualType PreferredType); void CodeCompleteUsing(Scope *S); void CodeCompleteUsingDirective(Scope *S); void CodeCompleteNamespaceDecl(Scope *S); void CodeCompleteNamespaceAliasDecl(Scope *S); void CodeCompleteOperatorName(Scope *S); void CodeCompleteConstructorInitializer( Decl *Constructor, ArrayRef<CXXCtorInitializer *> Initializers); void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro, bool AfterAmpersand); void CodeCompleteObjCAtDirective(Scope *S); void CodeCompleteObjCAtVisibility(Scope *S); void CodeCompleteObjCAtStatement(Scope *S); void CodeCompleteObjCAtExpression(Scope *S); void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS); void CodeCompleteObjCPropertyGetter(Scope *S); void CodeCompleteObjCPropertySetter(Scope *S); void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS, bool IsParameter); void CodeCompleteObjCMessageReceiver(Scope *S); void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression); void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, bool IsSuper = false); void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, ObjCInterfaceDecl *Super = nullptr); void 
CodeCompleteObjCForCollection(Scope *S, DeclGroupPtrTy IterationVar); void CodeCompleteObjCSelector(Scope *S, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCProtocolReferences( ArrayRef<IdentifierLocPair> Protocols); void CodeCompleteObjCProtocolDecl(Scope *S); void CodeCompleteObjCInterfaceDecl(Scope *S); void CodeCompleteObjCSuperclass(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationDecl(Scope *S); void CodeCompleteObjCInterfaceCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCPropertyDefinition(Scope *S); void CodeCompleteObjCPropertySynthesizeIvar(Scope *S, IdentifierInfo *PropertyName); void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod, ParsedType ReturnType); void CodeCompleteObjCMethodDeclSelector(Scope *S, bool IsInstanceMethod, bool AtParameterName, ParsedType ReturnType, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName, SourceLocation ClassNameLoc, bool IsBaseExprStatement); void CodeCompletePreprocessorDirective(bool InConditional); void CodeCompleteInPreprocessorConditionalExclusion(Scope *S); void CodeCompletePreprocessorMacroName(bool IsDefinition); void CodeCompletePreprocessorExpression(); void CodeCompletePreprocessorMacroArgument(Scope *S, IdentifierInfo *Macro, MacroInfo *MacroInfo, unsigned Argument); void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled); void CodeCompleteNaturalLanguage(); void CodeCompleteAvailabilityPlatformName(); void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator, CodeCompletionTUInfo &CCTUInfo, SmallVectorImpl<CodeCompletionResult> &Results); //@} //===--------------------------------------------------------------------===// // Extra semantic analysis beyond the C type system public: 
SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL, unsigned ByteNo) const; private: void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, const ArraySubscriptExpr *ASE=nullptr, bool AllowOnePastEnd=true, bool IndexNegated=false); void CheckArrayAccess(const Expr *E); // Used to grab the relevant information from a FormatAttr and a // FunctionDeclaration. struct FormatStringInfo { unsigned FormatIdx; unsigned FirstDataArg; bool HasVAListArg; }; static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember, FormatStringInfo *FSI); bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc, ArrayRef<const Expr *> Args); bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto); void CheckConstructorCall(FunctionDecl *FDecl, ArrayRef<const Expr *> Args, const FunctionProtoType *Proto, SourceLocation Loc); void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto, const Expr *ThisArg, ArrayRef<const Expr *> Args, bool IsMemberFunction, SourceLocation Loc, SourceRange Range, VariadicCallType CallType); bool CheckObjCString(Expr *Arg); ExprResult CheckOSLogFormatStringArg(Expr *Arg); ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID, CallExpr *TheCall); void checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD, CallExpr *TheCall); bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall, unsigned MaxWidth); bool CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinCpu(unsigned BuiltinID, CallExpr *TheCall); bool 
CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call); bool SemaBuiltinUnorderedCompare(CallExpr *TheCall); bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs); bool SemaBuiltinVSX(CallExpr *TheCall); bool SemaBuiltinOSLogFormat(CallExpr *TheCall); public: // Used by C++ template instantiation. ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall); ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo, SourceLocation BuiltinLoc, SourceLocation RParenLoc); private: bool SemaBuiltinPrefetch(CallExpr *TheCall); bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall); bool SemaBuiltinAssume(CallExpr *TheCall); bool SemaBuiltinAssumeAligned(CallExpr *TheCall); bool SemaBuiltinLongjmp(CallExpr *TheCall); bool SemaBuiltinSetjmp(CallExpr *TheCall); ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult); ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult); ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult, AtomicExpr::AtomicOp Op); ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult, bool IsDelete); bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum, llvm::APSInt &Result); bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low, int High, bool RangeIsError = true); bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum, unsigned Multiple); bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, 
CallExpr *TheCall, int ArgNum, unsigned ExpectedFieldNum, bool AllowName); bool SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall); public: enum FormatStringType { FST_Scanf, FST_Printf, FST_NSString, FST_Strftime, FST_Strfmon, FST_Kprintf, FST_FreeBSDKPrintf, FST_OSTrace, FST_OSLog, FST_Unknown }; static FormatStringType GetFormatStringType(const FormatAttr *Format); bool FormatStringHasSArg(const StringLiteral *FExpr); static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx); private: bool CheckFormatArguments(const FormatAttr *Format, ArrayRef<const Expr *> Args, bool IsCXXMember, VariadicCallType CallType, SourceLocation Loc, SourceRange Range, llvm::SmallBitVector &CheckedVarArgs); bool CheckFormatArguments(ArrayRef<const Expr *> Args, bool HasVAListArg, unsigned format_idx, unsigned firstDataArg, FormatStringType Type, VariadicCallType CallType, SourceLocation Loc, SourceRange range, llvm::SmallBitVector &CheckedVarArgs); void CheckAbsoluteValueFunction(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMemaccessArguments(const CallExpr *Call, unsigned BId, IdentifierInfo *FnName); void CheckStrlcpycatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckStrncatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckReturnValExpr(Expr *RetValExp, QualType lhsType, SourceLocation ReturnLoc, bool isObjCMethod = false, const AttrVec *Attrs = nullptr, const FunctionDecl *FD = nullptr); public: void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS); private: void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation()); void CheckBoolLikeConversion(Expr *E, SourceLocation CC); void CheckForIntOverflow(Expr *E); void CheckUnsequencedOperations(Expr *E); /// Perform semantic checks on a completed expression. This will either /// be a full-expression or a default argument expression. 
void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(), bool IsConstexpr = false); void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field, Expr *Init); /// Check if there is a field shadowing. void CheckShadowInheritedFields(const SourceLocation &Loc, DeclarationName FieldName, const CXXRecordDecl *RD, bool DeclIsField = true); /// Check if the given expression contains 'break' or 'continue' /// statement that produces control flow different from GCC. void CheckBreakContinueBinding(Expr *E); /// Check whether receiver is mutable ObjC container which /// attempts to add itself into the container void CheckObjCCircularContainer(ObjCMessageExpr *Message); void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE); void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc, bool DeleteWasArrayForm); public: /// Register a magic integral constant to be used as a type tag. void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind, uint64_t MagicValue, QualType Type, bool LayoutCompatible, bool MustBeNull); struct TypeTagData { TypeTagData() {} TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) : Type(Type), LayoutCompatible(LayoutCompatible), MustBeNull(MustBeNull) {} QualType Type; /// If true, \c Type should be compared with other expression's types for /// layout-compatibility. unsigned LayoutCompatible : 1; unsigned MustBeNull : 1; }; /// A pair of ArgumentKind identifier and magic value. This uniquely /// identifies the magic value. typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue; private: /// A map from magic value to type information. std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>> TypeTagForDatatypeMagicValues; /// Peform checks on a call of a function with argument_with_type_tag /// or pointer_with_type_tag attributes. 
void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr, const ArrayRef<const Expr *> ExprArgs, SourceLocation CallSiteLoc); /// Check if we are taking the address of a packed field /// as this may be a problem if the pointer value is dereferenced. void CheckAddressOfPackedMember(Expr *rhs); /// The parser's current scope. /// /// The parser maintains this state here. Scope *CurScope; mutable IdentifierInfo *Ident_super; mutable IdentifierInfo *Ident___float128; /// Nullability type specifiers. IdentifierInfo *Ident__Nonnull = nullptr; IdentifierInfo *Ident__Nullable = nullptr; IdentifierInfo *Ident__Null_unspecified = nullptr; IdentifierInfo *Ident_NSError = nullptr; /// The handler for the FileChanged preprocessor events. /// /// Used for diagnostics that implement custom semantic analysis for #include /// directives, like -Wpragma-pack. sema::SemaPPCallbacks *SemaPPCallbackHandler; protected: friend class Parser; friend class InitializationSequence; friend class ASTReader; friend class ASTDeclReader; friend class ASTWriter; public: /// Retrieve the keyword associated IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability); /// The struct behind the CFErrorRef pointer. RecordDecl *CFError = nullptr; /// Retrieve the identifier "NSError". IdentifierInfo *getNSErrorIdent(); /// Retrieve the parser's current scope. /// /// This routine must only be used when it is certain that semantic analysis /// and the parser are in precisely the same context, which is not the case /// when, e.g., we are performing any kind of template instantiation. /// Therefore, the only safe places to use this scope are in the parser /// itself and in routines directly invoked from the parser and *never* from /// template substitution or instantiation. 
  Scope *getCurScope() const { return CurScope; }

  /// Forwards to the current scope's incrementMSManglingNumber().
  void incrementMSManglingNumber() const {
    return CurScope->incrementMSManglingNumber();
  }

  IdentifierInfo *getSuperIdentifier() const;
  IdentifierInfo *getFloat128Identifier() const;

  Decl *getObjCDeclContext() const;

  /// The current lexical declaration context, preferring
  /// OriginalLexicalContext when one has been recorded.
  DeclContext *getCurLexicalContext() const {
    return OriginalLexicalContext ? OriginalLexicalContext : CurContext;
  }

  /// Like getCurLexicalContext(), but maps an ObjC category to the class
  /// interface it extends.
  const DeclContext *getCurObjCLexicalContext() const {
    const DeclContext *DC = getCurLexicalContext();
    // A category implicitly has the attribute of the interface.
    if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC))
      DC = CatD->getClassInterface();
    return DC;
  }

  /// To be used for checking whether the arguments being passed to
  /// function exceeds the number of parameters expected for it.
  static bool TooManyArguments(size_t NumParams, size_t NumArgs,
                               bool PartialOverloading = false) {
    // We check whether we're just after a comma in code-completion.
    if (NumArgs > 0 && PartialOverloading)
      return NumArgs + 1 > NumParams; // If so, we view as an extra argument.
    return NumArgs > NumParams;
  }

  // Emitting members of dllexported classes is delayed until the class
  // (including field initializers) is fully parsed.
SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses; SmallVector<CXXMethodDecl*, 4> DelayedDllExportMemberFunctions; private: class SavePendingParsedClassStateRAII { public: SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); } ~SavePendingParsedClassStateRAII() { assert(S.DelayedOverridingExceptionSpecChecks.empty() && "there shouldn't be any pending delayed exception spec checks"); assert(S.DelayedEquivalentExceptionSpecChecks.empty() && "there shouldn't be any pending delayed exception spec checks"); assert(S.DelayedDllExportClasses.empty() && "there shouldn't be any pending delayed DLL export classes"); swapSavedState(); } private: Sema &S; decltype(DelayedOverridingExceptionSpecChecks) SavedOverridingExceptionSpecChecks; decltype(DelayedEquivalentExceptionSpecChecks) SavedEquivalentExceptionSpecChecks; decltype(DelayedDllExportClasses) SavedDllExportClasses; void swapSavedState() { SavedOverridingExceptionSpecChecks.swap( S.DelayedOverridingExceptionSpecChecks); SavedEquivalentExceptionSpecChecks.swap( S.DelayedEquivalentExceptionSpecChecks); SavedDllExportClasses.swap(S.DelayedDllExportClasses); } }; /// Helper class that collects misaligned member designations and /// their location info for delayed diagnostics. struct MisalignedMember { Expr *E; RecordDecl *RD; ValueDecl *MD; CharUnits Alignment; MisalignedMember() : E(), RD(), MD(), Alignment() {} MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD, CharUnits Alignment) : E(E), RD(RD), MD(MD), Alignment(Alignment) {} explicit MisalignedMember(Expr *E) : MisalignedMember(E, nullptr, nullptr, CharUnits()) {} bool operator==(const MisalignedMember &m) { return this->E == m.E; } }; /// Small set of gathered accesses to potentially misaligned members /// due to the packed attribute. SmallVector<MisalignedMember, 4> MisalignedMembers; /// Adds an expression to the set of gathered misaligned members. 
  void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD,
                                     CharUnits Alignment);

public:
  /// Diagnoses the current set of gathered accesses. This typically
  /// happens at full expression level. The set is cleared after emitting the
  /// diagnostics.
  void DiagnoseMisalignedMembers();

  /// This function checks if the expression is in the set of potentially
  /// misaligned members and it is converted to some pointer type T with lower
  /// or equal alignment requirements. If so it removes it. This is used when
  /// we do not want to diagnose such misaligned access (e.g. in conversions to
  /// void*).
  void DiscardMisalignedMemberAddress(const Type *T, Expr *E);

  /// This function calls Action when it determines that E designates a
  /// misaligned member due to the packed attribute. This is used to emit
  /// local diagnostics like in reference binding.
  void RefersToMemberWithReducedAlignment(
      Expr *E,
      llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)>
          Action);

  /// Describes the reason a calling convention specification was ignored, used
  /// for diagnostics.
  enum class CallingConventionIgnoredReason {
    ForThisTarget = 0,
    VariadicFunction,
    ConstructorDestructor,
    BuiltinFunction
  };
}; // end class Sema

/// RAII object that enters a new expression evaluation context.
class EnterExpressionEvaluationContext {
  Sema &Actions;
  // Tracks whether a context was actually pushed, i.e. whether the destructor
  // owes a matching PopExpressionEvaluationContext().
  bool Entered = true;

public:
  EnterExpressionEvaluationContext(
      Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
      Decl *LambdaContextDecl = nullptr,
      Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
          Sema::ExpressionEvaluationContextRecord::EK_Other,
      bool ShouldEnter = true)
      : Actions(Actions), Entered(ShouldEnter) {
    // Push only when the caller asked to enter; Entered mirrors that choice.
    if (Entered)
      Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
                                              ExprContext);
  }
  EnterExpressionEvaluationContext(
      Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
      Sema::ReuseLambdaContextDecl_t,
      Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
          Sema::ExpressionEvaluationContextRecord::EK_Other)
      : Actions(Actions) {
    // Unconditional push (Entered keeps its default of true).
    Actions.PushExpressionEvaluationContext(
        NewContext, Sema::ReuseLambdaContextDecl, ExprContext);
  }

  enum InitListTag { InitList };
  EnterExpressionEvaluationContext(Sema &Actions, InitListTag,
                                   bool ShouldEnter = true)
      : Actions(Actions), Entered(false) {
    // In C++11 onwards, narrowing checks are performed on the contents of
    // braced-init-lists, even when they occur within unevaluated operands.
    // Therefore we still need to instantiate constexpr functions used in such
    // a context.
    if (ShouldEnter && Actions.isUnevaluatedContext() &&
        Actions.getLangOpts().CPlusPlus11) {
      Actions.PushExpressionEvaluationContext(
          Sema::ExpressionEvaluationContext::UnevaluatedList);
      Entered = true;
    }
  }

  ~EnterExpressionEvaluationContext() {
    if (Entered)
      Actions.PopExpressionEvaluationContext();
  }
};

/// Build a DeductionFailureInfo from a deduction result and its info record
/// (declaration only; defined elsewhere).
DeductionFailureInfo
MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK,
                         sema::TemplateDeductionInfo &Info);

/// Contains a late templated function.
/// Will be parsed at the end of the translation unit, used by Sema & Parser.
struct LateParsedTemplate {
  CachedTokens Toks;

  /// The template function declaration to be late parsed.
  Decl *D;
};

} // end namespace clang

namespace llvm {

// Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its
// SourceLocation.
template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> {
  using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc;
  using FDBaseInfo = DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>;

  // Empty/tombstone keys reuse the FunctionDecl sentinels paired with a
  // default-constructed SourceLocation.
  static FunctionDeclAndLoc getEmptyKey() {
    return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()};
  }

  static FunctionDeclAndLoc getTombstoneKey() {
    return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()};
  }

  static unsigned getHashValue(const FunctionDeclAndLoc &FDL) {
    return hash_combine(FDBaseInfo::getHashValue(FDL.FD),
                        FDL.Loc.getRawEncoding());
  }

  static bool isEqual(const FunctionDeclAndLoc &LHS,
                      const FunctionDeclAndLoc &RHS) {
    return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc;
  }
};

} // namespace llvm

#endif
seidel.pluto.par2d.c
#include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include <math.h> double A[N][N+17]; void init_arrays() { int i, j; for (i=0; i<N; i++) for (j=0; j<N; j++) A[i][j] = i*i+j*j; } double rtclock() { struct timezone tzp; struct timeval tp; int stat; gettimeofday (&tp, &tzp); return (tp.tv_sec + tp.tv_usec*1.0e-6); } int main() { init_arrays(); double annot_t_start=0, annot_t_end=0, annot_t_total=0; int annot_i; for (annot_i=0; annot_i<REPS; annot_i++) { annot_t_start = rtclock(); #include <math.h> #include <assert.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) #define S1(zT0,zT1,zT2,zT3,zT4,zT5,t,i,j) {A[i][j]=(A[1+i][1+j]+A[1+i][j]+A[1+i][j-1]+A[i][1+j]+A[i][j]+A[i][j-1]+A[i-1][1+j]+A[i-1][j]+A[i-1][j-1])/9;} int c1, c2, c3, c4, c5, c6, c7, c8, c9; register int lb, ub, lb1, ub1, lb2, ub2; register int lbv, ubv; omp_set_nested(1); omp_set_num_threads(2); /* Generated from PLuTo-produced CLooG file by CLooG v0.14.1 64 bits in 5.45s. 
*/ for (c1=-2;c1<=floord(4*T+3*N-10,256);c1++) { lb1=max(max(max(0,ceild(256*c1-2*T-N-251,512)),ceild(256*c1-3*T-2*N+7,256)),ceild(256*c1-N-761,1024)); ub1=min(min(min(floord(256*c1+2*N+505,1024),floord(256*c1+509,512)),floord(64*c1+127,64)),floord(T+N-3,256)); #pragma omp parallel for shared(c1,lb1,ub1) private(lb2,ub2,c2,c3,c4,c5,c6,c7,c8,c9) for (c2=lb1; c2<=ub1; c2++) { lb2=max(max(max(max(max(max(ceild(256*c1-256*c2-T+1,256),ceild(512*c1-512*c2-253,768)),0),ceild(512*c2-N-252,256)),ceild(128*c1-256*c2-127,128)),ceild(128*c2-127,128)),ceild(128*c1-127,256)); ub2=min(min(min(min(min(min(floord(256*c1-256*c2+255,256),floord(256*c1-512*c2+N+253,256)),floord(256*c2+T+N+252,256)),floord(T+N-3,128)),floord(256*c1+N+508,512)),floord(256*c1-256*c2+N+253,384)),floord(512*c2+N+507,256)); #pragma omp parallel for shared(c1,c2,lb1,ub1,lb2,ub2) private(c3,c4,c5,c6,c7,c8,c9) for (c3=lb2; c3<=ub2; c3++) { for (c4=max(max(max(max(0,ceild(-256*c2+256*c3-N-284,32)),8*c1-8*c2-8*c3),ceild(256*c2-N-29,32)),ceild(128*c3-N-29,32));c4<=min(min(min(min(8*c1-8*c2-8*c3+7,floord(256*c3+253,64)),floord(T-1,32)),floord(128*c2+127,16)),floord(-128*c2+128*c3+127,16));c4++) { for (c5=max(max(max(max(max(8*c2,ceild(16*c4-15,16)),ceild(256*c3-T-N-28,32)),0),ceild(256*c3-32*c4-N-60,32)),ceild(256*c3-N-59,64));c5<=min(min(min(min(min(floord(32*c4+N+29,32),floord(128*c3+127,16)),8*c2+7),floord(128*c3-16*c4+127,16)),floord(T+N-3,32)),floord(256*c3+N+252,64));c5++) { for (c6=max(max(max(max(max(ceild(64*c4-29,32),8*c3),ceild(16*c5-15,16)),ceild(16*c4+16*c5-15,16)),0),ceild(64*c5-N-28,32));c6<=min(min(min(min(min(8*c3+7,floord(T+N-3,16)),floord(32*c4+32*c5+N+60,32)),floord(32*c4+N+29,16)),floord(64*c5+N+59,32)),floord(32*c5+T+N+28,32));c6++) { for (c7=max(max(max(max(0,32*c4),32*c5-N+2),16*c6-N+2),-32*c5+32*c6-N-29);c7<=min(min(min(min(-32*c5+32*c6+30,floord(32*c6+29,2)),T-1),32*c5+30),32*c4+31);c7++) { /*@ begin Loop( transform UnrollJam(ufactor=8) for 
(c8=max(max(32*c5,c7+1),32*c6-c7-N+2);c8<=min(min(32*c6-c7+30,32*c5+31),c7+N-2);c8++) transform Unroll(ufactor=8) for (c9=max(c7+c8+1,32*c6);c9<=min(32*c6+31,c7+c8+N-2);c9++) { S1(c1-c2-c3,-c1+2*c2+c3,-c1+2*c3,c4,-c4+c5,-c4-c5+c6,c7,-c7+c8,-c7-c8+c9) ; } ) @*/ for (c8=max(max(32*c5,c7+1),32*c6-c7-N+2);c8<=min(min(32*c6-c7+30,32*c5+31),c7+N-2);c8++) { for (c9=max(c7+c8+1,32*c6);c9<=min(32*c6+31,c7+c8+N-2);c9++) { S1(c1-c2-c3,-c1+2*c2+c3,-c1+2*c3,c4,-c4+c5,-c4-c5+c6,c7,-c7+c8,-c7-c8+c9) ; } } /*@ end @*/ } } } } } } } /* End of CLooG code */ annot_t_end = rtclock(); annot_t_total += annot_t_end - annot_t_start; } annot_t_total = annot_t_total / REPS; printf("%f\n", annot_t_total); return ((int) A[0][0]); }
clangb.c
/** * * @file * * PLASMA is a software package provided by: * University of Tennessee, US, * University of Manchester, UK. * * @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/zlangb.c, normal z -> c, Fri Sep 28 17:38:07 2018 * **/ #include "plasma.h" #include "plasma_async.h" #include "plasma_context.h" #include "plasma_descriptor.h" #include "plasma_internal.h" #include "plasma_types.h" /***************************************************************************//** * * @ingroup plasma_langb * * Returns the norm of a general band matrix as * * clange = ( max(abs(A(i,j))), NORM = PlasmaMaxNorm * ( * ( norm1(A), NORM = PlasmaOneNorm * ( * ( normI(A), NORM = PlasmaInfNorm * ( * ( normF(A), NORM = PlasmaFrobeniusNorm * * where norm1 denotes the one norm of a matrix (maximum column sum), * normI denotes the infinity norm of a matrix (maximum row sum) and * normF denotes the Frobenius norm of a matrix (square root of sum * of squares). Note that max(abs(A(i,j))) is not a consistent matrix * norm. * ******************************************************************************* * * @param[in] norm * - PlasmaMaxNorm: max norm * - PlasmaOneNorm: one norm * - PlasmaInfNorm: infinity norm * - PlasmaFrobeniusNorm: Frobenius norm * * @param[in] m * The number of rows of the matrix A. m >= 0. When m = 0, * the returned value is set to zero. * * @param[in] n * The number of columns of the matrix A. n >= 0. When n = 0, * the returned value is set to zero. * * @param[in] kl * The number of subdiagonals within the band of A. kl >= 0. * * @param[in] ku * The number of superdiagonals within the band of A. ku >= 0. * * @param[in] pAB * The band matrix AB. * * @param[in] ldab * The leading dimension of the array AB. lda >= max(1,m). * ******************************************************************************* * * @retval float * The specified norm of the general band matrix A. 
* ******************************************************************************* * * @sa plasma_omp_clangb * @sa plasma_clangb * @sa plasma_dlangb * @sa plasma_slangb * ******************************************************************************/ float plasma_clangb(plasma_enum_t norm, int m, int n, int kl, int ku, plasma_complex32_t *pAB, int ldab) { // Get PLASMA context. plasma_context_t *plasma = plasma_context_self(); if (plasma == NULL) { plasma_error("PLASMA not initialized"); return PlasmaErrorNotInitialized; } // Check input arguments. if ((norm != PlasmaMaxNorm) && (norm != PlasmaOneNorm) && (norm != PlasmaInfNorm) && (norm != PlasmaFrobeniusNorm) ) { plasma_error("illegal value of norm"); return -1; } if (m < 0) { plasma_error("illegal value of m"); return -2; } if (n < 0) { plasma_error("illegal value of n"); return -3; } if (kl < 0) { plasma_error("illegal value of kl"); return -4; } if (ku < 0) { plasma_error("illegal value of ku"); return -5; } if (ldab < imax(1, 1+kl+ku)) { //printf("%d\n", ldab); plasma_error("illegal value of lda"); return -7; } // quick return if (imin(n, m) == 0) return 0.0; // Set tiling parameters. int nb = plasma->nb; // Create tile matrices. plasma_desc_t AB; int tku = (ku+kl+nb-1)/nb; // number of tiles in upper band (not including diagonal) int tkl = (kl+nb-1)/nb; // number of tiles in lower band (not including diagonal) int lm = (tku+tkl+1)*nb; int retval; retval = plasma_desc_general_band_create(PlasmaComplexFloat, PlasmaGeneral, nb, nb, lm, n, 0, 0, m, n, kl, ku, &AB); if (retval != PlasmaSuccess) { plasma_error("plasma_desc_general_create() failed"); return retval; } // Allocate workspace. float *work = NULL; switch (norm) { case PlasmaMaxNorm: work = (float*)malloc((size_t)(AB.klt+AB.kut-1)*AB.nt*sizeof(float)); break; case PlasmaOneNorm: work = (float*)calloc(((size_t)AB.n*(tku+tkl+1)+AB.n), sizeof(float)); //TODO: too much space. 
break; case PlasmaInfNorm: work = (float*)calloc(((size_t)AB.nt*AB.mt*AB.mb+AB.mb*AB.mt), sizeof(float)); break; case PlasmaFrobeniusNorm: work = (float*)calloc((size_t)2*(tku+tkl+1)*AB.nt, sizeof(float)); break; default: assert(0); } if (work == NULL) { plasma_error("malloc() failed"); return PlasmaErrorOutOfMemory; } // Create sequence. plasma_sequence_t sequence; retval = plasma_sequence_init(&sequence); if (retval != PlasmaSuccess) { plasma_error("plasma_sequence_create() failed"); return retval; } // Initialize request. plasma_request_t request; retval = plasma_request_init(&request); float value; // asynchronous block #pragma omp parallel #pragma omp master { // Translate to tile layout. plasma_omp_cpb2desc(pAB, ldab, AB, &sequence, &request); // Call tile async function. plasma_omp_clangb(norm, AB, work, &value, &sequence, &request); } // implicit synchronization free(work); // Free matrix in tile layout. plasma_desc_destroy(&AB); // Return the norm. //printf("[plasma_clangb]: value=%.3f\n", value); return value; } /***************************************************************************//** * * @ingroup plasma_langb * * Calculates the max, one, infinity or Frobenius norm of a general band matrix. * Non-blocking equivalent of plasma_clangb(). May return before the * computation is finished. Operates on matrices stored by tiles. All matrices * are passed through descriptors. All dimensions are taken from the * descriptors. Allows for pipelining of operations at runtime. * ******************************************************************************* * * @param[in] norm * - PlasmaMaxNorm: Max norm * - PlasmaOneNorm: One norm * - PlasmaInfNorm: Infinity norm * - PlasmaFrobeniusNorm: Frobenius norm * * @param[in] AB * The descriptor of matrix A. 
 *
 * @param[out] work
 *     Workspace of size:
 *     - PlasmaMaxNorm: (AB.klt+AB.kut-1)*A.nt
 *     - PlasmaOneNorm: AB.n*(tku+tkl+1)+AB.n
 *     - PlasmaInfNorm: AB.nt*AB.mt*AB.mb+AB.mb*AB.mt
 *     - PlasmaFrobeniusNorm: 2*(tku+tkl+1)*AB.nt
 *
 * @param[out] value
 *     The calculated value of the norm requested.
 *
 * @param[in] sequence
 *     Identifies the sequence of function calls that this call belongs to
 *     (for completion checks and exception handling purposes).
 *
 * @param[out] request
 *     Identifies this function call (for exception handling purposes).
 *
 * @retval void
 *     Errors are returned by setting sequence->status and
 *     request->status to error values. The sequence->status and
 *     request->status should never be set to PlasmaSuccess (the
 *     initial values) since another async call may be setting a
 *     failure value at the same time.
 *
 *******************************************************************************
 *
 * @sa plasma_clangb
 * @sa plasma_omp_clangb
 * @sa plasma_omp_dlangb
 * @sa plasma_omp_slangb
 *
 ******************************************************************************/
void plasma_omp_clangb(plasma_enum_t norm, plasma_desc_t AB,
                       float *work, float *value,
                       plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // Check input arguments.
    if ((norm != PlasmaMaxNorm) && (norm != PlasmaOneNorm) &&
        (norm != PlasmaInfNorm) && (norm != PlasmaFrobeniusNorm)) {
        plasma_error("illegal value of norm");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(AB) != PlasmaSuccess) {
        plasma_error("invalid descriptor AB");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (sequence == NULL) {
        plasma_error("NULL sequence");
        // NOTE(review): sequence is NULL on this path yet is still passed to
        // plasma_request_fail(); confirm plasma_request_fail tolerates a NULL
        // sequence, otherwise this dereferences a null pointer.
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (request == NULL) {
        plasma_error("NULL request");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // quick return
    if (imin(AB.m, AB.n) == 0) {
        *value = 0.0;
        return;
    }

    // Call the parallel function.
    plasma_pclangb(norm, AB, work, value, sequence, request);
}
trmm_x_coo_u_lo_row.c
#include "alphasparse/kernel.h" #include "alphasparse/util.h" #include "alphasparse/opt.h" #include <memory.h> alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_COO *mat, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, const ALPHA_Number beta, ALPHA_Number *y, const ALPHA_INT ldy) { ALPHA_INT m = mat->rows; ALPHA_INT n = columns; ALPHA_INT num_threads = alpha_get_thread_num(); #ifdef _OPENMP #pragma omp parallel for num_threads(num_threads) #endif for (ALPHA_INT r = 0; r < m; r++) for (ALPHA_INT c = 0; c < columns; c++) { ALPHA_Number t1, t2; alpha_mul(t1, y[r * ldy + c], beta); alpha_mul(t2, alpha, x[index2(r, c, ldx)]); alpha_add(y[r * ldy + c], t1, t2); } #ifdef _OPENMP #pragma omp parallel num_threads(num_threads) #endif { ALPHA_INT tid = alpha_get_thread_id(); for (ALPHA_INT ai = 0; ai < mat->nnz; ++ai) { ALPHA_INT cr = mat->row_indx[ai]; if (cr % num_threads != tid) continue; ALPHA_Number *Y = &y[index2(cr, 0, ldy)]; if (mat->col_indx[ai] < cr) { ALPHA_Number val; alpha_mul(val, alpha, mat->values[ai]); const ALPHA_Number *X = &x[index2(mat->col_indx[ai], 0, ldx)]; for (ALPHA_INT c = 0; c < n; ++c) alpha_madde(Y[c], val, X[c]); } } } return ALPHA_SPARSE_STATUS_SUCCESS; }
GB_unop__identity_int16_bool.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function:  GB (_unop_apply__identity_int16_bool)
// op(A') function: GB (_unop_tran__identity_int16_bool)

// C type:   int16_t
// A type:   bool
// cast:     int16_t cij = (int16_t) aij
// unaryop:  cij = aij

// NOTE: even though the operator is IDENTITY, the kernel is not a plain
// memcpy because the input (bool) and output (int16_t) types differ; each
// entry is typecast.

#define GB_ATYPE \
    bool

#define GB_CTYPE \
    int16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    bool aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    int16_t z = (int16_t) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    bool aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    int16_t z = (int16_t) aij ; \
    Cx [pC] = z ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_INT16 || GxB_NO_BOOL)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__identity_int16_bool)
(
    int16_t *Cx,                // Cx and Ax may be aliased
    const bool *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: every entry 0..anz-1 is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            bool aij = Ax [p] ;
            int16_t z = (int16_t) aij ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip entries not present in the bitmap
            if (!Ab [p]) continue ;
            bool aij = Ax [p] ;
            int16_t z = (int16_t) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__identity_int16_bool)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose kernel body is shared by all unary ops and is driven by
    // the GB_* macros defined above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
work.c
/******************************************************************** * BenchIT - Performance Measurement for Scientific Applications * Contact: developer@benchit.org * * $Id: work.c 1 2009-09-11 12:26:19Z william $ * $URL: svn+ssh://william@rupert.zih.tu-dresden.de/svn-base/benchit-root/BenchITv6/kernel/numerical/gemv/C/OpenMP/0/float/work.c $ * For license details see COPYING in the package base directory *******************************************************************/ /* Kernel: C SGEMV kernel *******************************************************************/ #include "work.h" void ij_(int sizeVector,int sizeAusgabe,float alpha,float beta, float *x, float *A, float *y) { int i,j; float temp = 0.0; #pragma omp parallel for for (j=0;j<sizeAusgabe;j++) { y[j]=beta*y[j]; } // // now : x=x, A=A, y=beta*y // #pragma omp parallel for private(temp) for (i=0;i<sizeVector;i++) { temp=alpha*x[i]; for (j=0;j<sizeAusgabe;j++) { y[j]=y[j]+A[i*sizeAusgabe+j]*temp; } } } void ji_(int sizeVector,int sizeAusgabe,float alpha,float beta, float *x, float *A, float *y) { int i,j; float temp = 0.0; #pragma omp parallel for for (j=0;j<sizeAusgabe;j++) { y[j]=beta*y[j]; } // // now : x=x, A=A, y=beta*y // #pragma omp parallel for private(temp) for (j=0;j<sizeAusgabe;j++) { temp=0.0; for (i=0;i<sizeVector;i++) { temp=temp+A[i*sizeAusgabe+j]*x[i]; } temp=temp*alpha; y[j]=y[j]+temp; } }
ClientComm.h
/*! @brief Flag for checking if this header has already been included. */ #ifndef YGGCLIENTCOMM_H_ #define YGGCLIENTCOMM_H_ #include <../tools.h> #include <CommBase.h> #include <DefaultComm.h> #include "../datatypes/datatypes.h" #ifdef __cplusplus /* If this is a C++ compiler, use C linkage */ extern "C" { #endif // Handle is send address // Info is response static unsigned _client_rand_seeded = 0; /*! @brief Structure for storing requests/responses. */ typedef struct responses_t { comm_t* comm; //!< Response comm. size_t nreq; //!< Number of requests sent. char** request_id; //!< Request ids. char** data; //!< Received responses size_t* len; //!< Lengths of received messages. } responses_t; /*! @brief Create a new registry of requests and responses. @returns responses_t* Structure containing a registry of requests and responses. */ static inline responses_t* client_new_responses() { responses_t* out = (responses_t*)malloc(sizeof(responses_t)); if (out != NULL) { out->comm = NULL; out->nreq = 0; out->request_id = NULL; out->data = NULL; out->len = NULL; } return out; }; /*! @brief Free a registry of requests and responses. @param[in] x responses_t** Pointer to structure containing a registry of requests and responses. */ static inline void client_free_responses(responses_t** x) { if (x[0] != NULL) { if (x[0]->comm != NULL) { free_default_comm(x[0]->comm); free_comm_base(x[0]->comm); } if (x[0]->data != NULL) { for (size_t i = 0; i < x[0]->nreq; i++) if (x[0]->data[i] != NULL) free(x[0]->data[i]); free(x[0]->data); } if (x[0]->len != NULL) free(x[0]->len); free(x[0]); x[0] = NULL; } }; /*! @brief Determine if there is a request in the registry. @param[in] x responses_t* Structure containing a registry of requests and responses. @param[in] request_id const char* ID associated with the request to check for. @returns int -1 if there is an error, otherwise the index of the request in the registry. 
*/ static inline int client_has_request(responses_t *x, const char* request_id) { if (x == NULL) return -1; for (size_t i = 0; i < x->nreq; i++) { if (strcmp(x->request_id[i], request_id) == 0) return (int)i; } return -1; }; /*! @brief Determine if there is a response in the registry. @param[in] x responses_t* Structure containing a registry of requests and responses. @param[in] request_id const char* ID associated with the response to check for. @returns int -1 if there is an error, otherwise the index of the response in the registry. */ static inline int client_has_response(responses_t *x, const char* request_id) { int idx = client_has_request(x, request_id); if (idx < 0) return idx; if (x->data[idx] != NULL) return idx; return -1; }; /*! @brief Add a request to the registry. @param[in] x responses_t* Structure containing a registry of requests and responses. @param[in] request_id const char* ID associated with the request being added. @returns int -1 if there is an error, 0 otherwise. */ static inline int client_add_request(responses_t *x, const char* request_id) { if (x == NULL) return -1; x->request_id = (char**)realloc(x->request_id, (x->nreq + 1) * sizeof(char*)); if (x->request_id == NULL) return -1; size_t request_len = strlen(request_id); x->request_id[x->nreq] = (char*)malloc(request_len + 1); if (x->request_id[x->nreq] == NULL) return -1; memcpy(x->request_id[x->nreq], request_id, request_len); x->request_id[x->nreq][request_len] = '\0'; x->data = (char**)realloc(x->data, (x->nreq + 1) * sizeof(char*)); if (x->data == NULL) return -1; x->data[x->nreq] = NULL; x->len = (size_t*)realloc(x->len, (x->nreq + 1) * sizeof(size_t)); if (x->len == NULL) return -1; x->len[x->nreq] = 0; x->nreq++; return 0; }; /*! @brief Add a response to the registry. @param[in] x responses_t* Structure containing a registry of requests and responses. @param[in] request_id const char* ID associated with the response being added. @param[in] data const char* Response message. 
@param[in] len size_t Size of the response message. @returns int -1 if there is an error, 0 otherwise. */ static inline int client_add_response(responses_t *x, const char* request_id, const char* data, const size_t len) { int idx = client_has_request(x, request_id); if (idx < 0) { ygglog_error("client_add_response: idx = %d", idx); return idx; } x->data[idx] = (char*)malloc(len + 1); if (x->data[idx] == NULL) { ygglog_error("client_add_response: failed to malloc data"); return -1; } memcpy(x->data[idx], data, len); x->data[idx][len] = '\0'; x->len[idx] = len; return 0; }; /*! @brief Remove a request/response from the registry. @param[in] x responses_t* Structure containing a registry of requests and responses. @param[in] request_id const char* ID associated with the request/response that should be removed. @returns int -1 if there is an error, 0 otherwise. */ static inline int client_remove_request(responses_t *x, const char* request_id) { if (x == NULL) return -1; int idx = client_has_request(x, request_id); if (idx < 0) return 0; int nrem = x->nreq - (idx + 1); free(x->request_id[idx]); if (x->data[idx] != NULL) free(x->data[idx]); if (nrem > 0) { memmove(x->request_id + idx, x->request_id + idx + 1, nrem * sizeof(char*)); memmove(x->data + idx, x->data + idx + 1, nrem * sizeof(char*)); memmove(x->len + idx, x->len + idx + 1, nrem * sizeof(size_t)); } x->nreq--; return 0; }; /*! @brief Remove and return a response from the registry after it has been received. @param[in] x responses_t* Structure containing a registry of requests and responses. @param[in] request_id const char* ID associated with the response that should be removed and returned. @param[in,out] data char** Pointer to memory where the response should be stored. @param[in] len const size_t Size of the existing buffer pointed to by data. 
@param[in] allow_realloc int If 1 and the response exceeds len, the buffer pointed to by data will be reallocated, if 0 and the response exceeds len, an error will be returned. @returns int -1 if there is an error, otherwise the size of the reponse message will be returned. */ static inline int client_pop_response(responses_t *x, const char* request_id, char **data, const size_t len, const int allow_realloc) { if (x == NULL) return -1; int idx = client_has_response(x, request_id); if (idx < 0) return -1; int ret = x->len[idx]; if ((ret + 1) > len) { if (allow_realloc) { ygglog_debug("client_pop_response: reallocating buffer from %d to %d bytes.", len, ret + 1); (*data) = (char*)realloc(*data, ret + 1); if (*data == NULL) { ygglog_error("client_pop_response: failed to realloc buffer."); return -1; } } else { ygglog_error("client_pop_response: buffer (%d bytes) is not large enough for message (%d bytes)", len, ret + 1); return -((int)(ret)); } } memcpy(*data, x->data[idx], ret); (*data)[ret] = '\0'; if (client_remove_request(x, request_id) < 0) return -1; return ret; }; /*! @brief Create a new channel. @param[in] comm comm_t * Comm structure initialized with new_comm_base. @returns int -1 if the address could not be created. */ static inline int new_client_address(comm_t *comm) { #ifdef _OPENMP #pragma omp critical (client) { #endif if (!(_client_rand_seeded)) { srand(ptr2seed(comm)); _client_rand_seeded = 1; } #ifdef _OPENMP } #endif comm->type = _default_comm; return new_default_address(comm); }; /*! @brief Initialize a client communicator. @param[in] comm comm_t * Comm structure initialized with init_comm_base. @returns int -1 if the comm could not be initialized. 
*/ static inline int init_client_comm(comm_t *comm) { int ret = 0; ygglog_debug("init_client_comm: Creating a client comm"); #ifdef _OPENMP #pragma omp critical (client) { #endif if (!(_client_rand_seeded)) { srand(ptr2seed(comm)); _client_rand_seeded = 1; } #ifdef _OPENMP } #endif // Called to create temp comm for send/recv if ((strlen(comm->name) == 0) && (strlen(comm->address) > 0)) { comm->type = _default_comm; return init_default_comm(comm); } // Called to initialize/create client comm dtype_t *dtype_out = NULL; if (strlen(comm->direction) > 0) { dtype_out = create_dtype_format(comm->direction, 0, false); if (dtype_out == NULL) { ygglog_error("init_client_comm: Failed to create output datatype."); return -1; } } comm_t *handle; if (strlen(comm->name) == 0) { handle = new_comm_base(comm->address, "send", _default_comm, dtype_out); sprintf(handle->name, "client_request.%s", comm->address); } else { handle = init_comm_base(comm->name, "send", _default_comm, dtype_out); } handle->flags = handle->flags | COMM_FLAG_CLIENT; ret = init_default_comm(handle); strcpy(comm->address, handle->address); comm->handle = (void*)handle; // Keep track of response comms responses_t *resp = client_new_responses(); if (resp == NULL) { ygglog_error("init_client_comm: Failed to malloc responses."); return -1; } comm->info = (void*)resp; strcpy(comm->direction, "send"); comm->flags = comm->flags | COMM_ALWAYS_SEND_HEADER; return ret; }; /*! @brief Perform deallocation for client communicator. @param[in] x comm_t* Pointer to communicator to deallocate. @returns int 1 if there is and error, 0 otherwise. */ static inline int free_client_comm(comm_t *x) { if (x->info != NULL) { responses_t *resp = (responses_t*)(x->info); if (resp != NULL) client_free_responses(&resp); x->info = NULL; } if (x->handle != NULL) { comm_t *handle = (comm_t*)(x->handle); free_default_comm(handle); free_comm_base(handle); free(x->handle); x->handle = NULL; } return 0; }; /*! 
@brief Get number of messages in the comm. @param[in] x comm_t* Communicator to check. @returns int Number of messages. -1 indicates an error. */ static inline int client_comm_nmsg(const comm_t* x) { comm_t *handle = (comm_t*)(x->handle); int ret = default_comm_nmsg(handle); return ret; }; /*! @brief Create response comm and add info to header. @param[in] x comm_t* structure that header will be sent to. @param[in] head comm_head_t Prexisting header structure. @returns comm_head_t Header structure that includes the additional information about the response comm. */ static inline comm_head_t client_response_header(const comm_t* x, comm_head_t head) { // Initialize new comm responses_t *resp = (responses_t*)(x->info); if (resp->comm == NULL) { dtype_t * dtype_copy = copy_dtype(x->datatype); resp->comm = new_comm_base(NULL, "recv", _default_comm, dtype_copy); resp->comm->flags = resp->comm->flags | COMM_FLAG_CLIENT_RESPONSE; int ret = new_default_address(resp->comm); if (ret < 0) { ygglog_error("client_response_header(%s): could not create response comm", x->name); head.flags = head.flags & ~HEAD_FLAG_VALID; return head; } resp->comm->const_flags[0] = resp->comm->const_flags[0] | COMM_EOF_SENT | COMM_EOF_RECV; ygglog_debug("client_response_header(%s): Created response comm", x->name); } // Add address & request ID to header strcpy(head.response_address, resp->comm->address); sprintf(head.request_id, "%d", rand()); if (client_add_request(resp, head.request_id) < 0) { ygglog_error("client_response_header(%s): Failed to add request", x->name); head.flags = head.flags & ~HEAD_FLAG_VALID; return head; } if (client_has_request(resp, head.request_id) < 0) { ygglog_error("client_response_header(%s): Failed to add request", x->name); head.flags = head.flags & ~HEAD_FLAG_VALID; return head; } ygglog_debug("client_response_header(%s): response_address = %s, request_id = %s", x->name, head.response_address, head.request_id); return head; }; /*! @brief Send a message to the comm. 
@param[in] x comm_t* structure that comm should be sent to. @param[in] data character pointer to message that should be sent. @param[in] len size_t length of message to be sent. @returns int 0 if send succesfull, -1 if send unsuccessful. */ static inline int client_comm_send(const comm_t* x, const char *data, const size_t len) { int ret; ygglog_debug("client_comm_send(%s): %d bytes", x->name, len); if (x->handle == NULL) { ygglog_error("client_comm_send(%s): no request comm registered", x->name); return -1; } comm_t *req_comm = (comm_t*)(x->handle); ret = default_comm_send(req_comm, data, len); if (is_eof(data)) { req_comm->const_flags[0] = req_comm->const_flags[0] | COMM_EOF_SENT; } return ret; }; /*! @brief Receive a message from an input comm. @param[in] x comm_t* structure that message should be sent to. @param[out] data char ** pointer to allocated buffer where the message should be saved. This should be a malloc'd buffer if allow_realloc is 1. @param[in] len const size_t length of the allocated message buffer in bytes. @param[in] allow_realloc const int If 1, the buffer will be realloced if it is not large enought. Otherwise an error will be returned. @returns int -1 if message could not be received. Length of the received message if message was received. 
*/ static inline int client_comm_recv(const comm_t* x, char **data, const size_t len, const int allow_realloc) { ygglog_debug("client_comm_recv(%s)", x->name); if (x->info == NULL) { ygglog_error("client_comm_recv(%s): no response struct set up", x->name); return -1; } responses_t *resp = (responses_t*)(x->info); if ((resp->comm == NULL) || (resp->nreq == 0)) { ygglog_error("client_comm_recv(%s): no response comm registered", x->name); return -1; } char* request_id = resp->request_id[0]; int ret = 0; while (client_has_response(resp, request_id) < 0) { ret = default_comm_recv(resp->comm, data, len, allow_realloc); if (ret < 0) { ygglog_error("client_comm_recv(%s): default_comm_recv returned %d", x->name, ret); return ret; } comm_head_t head = parse_comm_header(*data, len); if (!(head.flags & HEAD_FLAG_VALID)) { ygglog_error("client_comm_recv(%s): Invalid header.", x->name); return -1; } if (strcmp(head.request_id, request_id) == 0) { ygglog_debug("client_comm_recv(%s): default_comm_recv returned %d", x->name, ret); if (client_remove_request(resp, request_id) < 0) { ygglog_error("client_comm_recv(%s): Failed to remove request %s", x->name, request_id); return -1; } return ret; } if (client_add_response(resp, head.request_id, *data, ret) < 0) { ygglog_error("client_comm_recv(%s): Failed to add response %s", x->name, head.request_id); return -1; } } ret = client_pop_response(resp, request_id, data, len, allow_realloc); // Close response comm and decrement count of response comms ygglog_debug("client_comm_recv(%s): client_pop_response returned %d", x->name, ret); return ret; }; #ifdef __cplusplus /* If this is a C++ compiler, end C linkage */ } #endif #endif /*YGGCLIENTCOMM_H_*/
feature_group.h
/*!
 * Copyright (c) 2017 Microsoft Corporation. All rights reserved.
 * Licensed under the MIT License. See LICENSE file in the project root for license information.
 */
#ifndef LIGHTGBM_FEATURE_GROUP_H_
#define LIGHTGBM_FEATURE_GROUP_H_

#include <cstdio>
#include <memory>
#include <vector>

#include <LightGBM/bin.h>
#include <LightGBM/meta.h>
#include <LightGBM/utils/random.h>

namespace LightGBM {

class Dataset;
class DatasetLoader;

/*! \brief Using to store data and providing some operations on one feature group*/
class FeatureGroup {
 public:
  friend Dataset;
  friend DatasetLoader;
  /*!
   * \brief Constructor
   * \param num_feature number of features of this group
   * \param bin_mappers Bin mapper for features
   * \param num_data Total number of data
   * \param is_enable_sparse True if enable sparse feature
   */
  FeatureGroup(int num_feature, bool is_multi_val,
    std::vector<std::unique_ptr<BinMapper>>* bin_mappers,
    data_size_t num_data) : num_feature_(num_feature), is_multi_val_(is_multi_val), is_sparse_(false) {
    CHECK_EQ(static_cast<int>(bin_mappers->size()), num_feature);
    // use bin at zero to store most_freq_bin
    num_total_bin_ = 1;
    bin_offsets_.emplace_back(num_total_bin_);
    auto& ref_bin_mappers = *bin_mappers;
    // Take ownership of each bin mapper and lay out the per-feature bin
    // ranges back-to-back.  A feature whose most frequent bin is 0 gives
    // one bin back, since that bin is never stored explicitly.
    for (int i = 0; i < num_feature_; ++i) {
      bin_mappers_.emplace_back(ref_bin_mappers[i].release());
      auto num_bin = bin_mappers_[i]->num_bin();
      if (bin_mappers_[i]->GetMostFreqBin() == 0) {
        num_bin -= 1;
      }
      num_total_bin_ += num_bin;
      bin_offsets_.emplace_back(num_total_bin_);
    }
    // force_dense=true: a fresh multi-feature group always starts dense.
    CreateBinData(num_data, is_multi_val_, true, false);
  }

  // Copy the group's metadata (mappers, offsets, flags) from `other` but
  // allocate fresh, empty bin data sized for `num_data` rows.
  // NOTE(review): the bin contents themselves are not copied here —
  // presumably callers fill them via CopySubrow; confirm.
  FeatureGroup(const FeatureGroup& other, int num_data) {
    num_feature_ = other.num_feature_;
    is_multi_val_ = other.is_multi_val_;
    is_sparse_ = other.is_sparse_;
    num_total_bin_ = other.num_total_bin_;
    bin_offsets_ = other.bin_offsets_;
    bin_mappers_.reserve(other.bin_mappers_.size());
    for (auto& bin_mapper : other.bin_mappers_) {
      bin_mappers_.emplace_back(new BinMapper(*bin_mapper));
    }
    CreateBinData(num_data, is_multi_val_, !is_sparse_, is_sparse_);
  }

  // Single-feature group constructor; sparsity is decided automatically
  // inside CreateBinData (no forcing in either direction).
  FeatureGroup(std::vector<std::unique_ptr<BinMapper>>* bin_mappers,
               data_size_t num_data) : num_feature_(1), is_multi_val_(false) {
    CHECK_EQ(static_cast<int>(bin_mappers->size()), 1);
    // use bin at zero to store default_bin
    num_total_bin_ = 1;
    bin_offsets_.emplace_back(num_total_bin_);
    auto& ref_bin_mappers = *bin_mappers;
    for (int i = 0; i < num_feature_; ++i) {
      bin_mappers_.emplace_back(ref_bin_mappers[i].release());
      auto num_bin = bin_mappers_[i]->num_bin();
      if (bin_mappers_[i]->GetMostFreqBin() == 0) {
        num_bin -= 1;
      }
      num_total_bin_ += num_bin;
      bin_offsets_.emplace_back(num_total_bin_);
    }
    CreateBinData(num_data, false, false, false);
  }

  /*!
   * \brief Constructor from memory
   * \param memory Pointer of memory
   * \param num_all_data Number of global data
   * \param local_used_indices Local used indices, empty means using all data
   */
  FeatureGroup(const void* memory, data_size_t num_all_data,
               const std::vector<data_size_t>& local_used_indices) {
    // Deserializes the layout written by SaveBinaryToFile: two bool flags,
    // the feature count, then the serialized bin mappers, then the bin data.
    const char* memory_ptr = reinterpret_cast<const char*>(memory);
    // get is_sparse
    is_multi_val_ = *(reinterpret_cast<const bool*>(memory_ptr));
    memory_ptr += sizeof(is_multi_val_);
    is_sparse_ = *(reinterpret_cast<const bool*>(memory_ptr));
    memory_ptr += sizeof(is_sparse_);
    num_feature_ = *(reinterpret_cast<const int*>(memory_ptr));
    memory_ptr += sizeof(num_feature_);
    // get bin mapper
    bin_mappers_.clear();
    bin_offsets_.clear();
    // start from 1, due to need to store zero bin in this slot
    num_total_bin_ = 1;
    bin_offsets_.emplace_back(num_total_bin_);
    for (int i = 0; i < num_feature_; ++i) {
      bin_mappers_.emplace_back(new BinMapper(memory_ptr));
      auto num_bin = bin_mappers_[i]->num_bin();
      if (bin_mappers_[i]->GetMostFreqBin() == 0) {
        num_bin -= 1;
      }
      num_total_bin_ += num_bin;
      bin_offsets_.emplace_back(num_total_bin_);
      memory_ptr += bin_mappers_[i]->SizesInByte();
    }
    // If a row subset is given, size the bin data for the subset only.
    data_size_t num_data = num_all_data;
    if (!local_used_indices.empty()) {
      num_data = static_cast<data_size_t>(local_used_indices.size());
    }
    if (is_multi_val_) {
      for (int i = 0;
 i < num_feature_; ++i) {
        // One extra bin is needed when most_freq_bin != 0, because bin 0 is
        // reserved in the multi-val layout.
        int addi = bin_mappers_[i]->GetMostFreqBin() == 0 ? 0 : 1;
        if (bin_mappers_[i]->sparse_rate() >= kSparseThreshold) {
          multi_bin_data_.emplace_back(Bin::CreateSparseBin(
            num_data, bin_mappers_[i]->num_bin() + addi));
        } else {
          multi_bin_data_.emplace_back(
            Bin::CreateDenseBin(num_data, bin_mappers_[i]->num_bin() + addi));
        }
        multi_bin_data_.back()->LoadFromMemory(memory_ptr, local_used_indices);
        memory_ptr += multi_bin_data_.back()->SizesInByte();
      }
    } else {
      if (is_sparse_) {
        bin_data_.reset(Bin::CreateSparseBin(num_data, num_total_bin_));
      } else {
        bin_data_.reset(Bin::CreateDenseBin(num_data, num_total_bin_));
      }
      // get bin data
      bin_data_->LoadFromMemory(memory_ptr, local_used_indices);
    }
  }

  /*! \brief Destructor */
  ~FeatureGroup() {
  }

  /*!
   * \brief Push one record, will auto convert to bin and push to bin data
   * \param tid Thread id
   * \param idx Index of record
   * \param value feature value of record
   */
  inline void PushData(int tid, int sub_feature_idx, data_size_t line_idx, double value) {
    uint32_t bin = bin_mappers_[sub_feature_idx]->ValueToBin(value);
    // The most frequent bin is implicit (never stored).
    if (bin == bin_mappers_[sub_feature_idx]->GetMostFreqBin()) {
      return;
    }
    // When the most frequent bin is 0, stored bins are shifted down by one.
    if (bin_mappers_[sub_feature_idx]->GetMostFreqBin() == 0) {
      bin -= 1;
    }
    if (is_multi_val_) {
      // Multi-val bins reserve slot 0, hence the +1 shift.
      multi_bin_data_[sub_feature_idx]->Push(tid, line_idx, bin + 1);
    } else {
      // Single bin data: offset into this sub-feature's bin range.
      bin += bin_offsets_[sub_feature_idx];
      bin_data_->Push(tid, line_idx, bin);
    }
  }

  // Resize the underlying bin data to hold num_data rows.
  void ReSize(int num_data) {
    if (!is_multi_val_) {
      bin_data_->ReSize(num_data);
    } else {
      for (int i = 0; i < num_feature_; ++i) {
        multi_bin_data_[i]->ReSize(num_data);
      }
    }
  }

  // Copy the rows listed in used_indices from full_feature's bin data into
  // this group's bin data (structure of both groups must match).
  inline void CopySubrow(const FeatureGroup* full_feature,
                         const data_size_t* used_indices,
                         data_size_t num_used_indices) {
    if (!is_multi_val_) {
      bin_data_->CopySubrow(full_feature->bin_data_.get(), used_indices,
                            num_used_indices);
    } else {
      for (int i = 0; i < num_feature_; ++i) {
        multi_bin_data_[i]->CopySubrow(full_feature->multi_bin_data_[i].get(),
                                       used_indices, num_used_indices);
      }
    }
  }

  // Iterator over one sub-feature's bins (range taken from bin_offsets_ in
  // the single-bin layout, or the per-feature bin object in multi-val).
  inline BinIterator* SubFeatureIterator(int sub_feature) {
    uint32_t most_freq_bin = bin_mappers_[sub_feature]->GetMostFreqBin();
    if (!is_multi_val_) {
      uint32_t min_bin = bin_offsets_[sub_feature];
      uint32_t max_bin = bin_offsets_[sub_feature + 1] - 1;
      return bin_data_->GetIterator(min_bin, max_bin, most_freq_bin);
    } else {
      int addi = bin_mappers_[sub_feature]->GetMostFreqBin() == 0 ? 0 : 1;
      uint32_t min_bin = 1;
      uint32_t max_bin = bin_mappers_[sub_feature]->num_bin() - 1 + addi;
      return multi_bin_data_[sub_feature]->GetIterator(min_bin, max_bin,
                                                       most_freq_bin);
    }
  }

  inline void FinishLoad() {
    if (is_multi_val_) {
      // Finalize each per-feature bin in parallel, collecting exceptions.
      OMP_INIT_EX();
      #pragma omp parallel for schedule(guided)
      for (int i = 0; i < num_feature_; ++i) {
        OMP_LOOP_EX_BEGIN();
        multi_bin_data_[i]->FinishLoad();
        OMP_LOOP_EX_END();
      }
      OMP_THROW_EX();
    } else {
      bin_data_->FinishLoad();
    }
  }

  /*!
   * \brief Returns a BinIterator that can access the entire feature group's raw data.
   *        The RawGet() function of the iterator should be called for best efficiency.
   * \return A pointer to the BinIterator object
   */
  inline BinIterator* FeatureGroupIterator() {
    // Raw iteration is only defined for the single-bin layout.
    if (is_multi_val_) {
      return nullptr;
    }
    uint32_t min_bin = bin_offsets_[0];
    uint32_t max_bin = bin_offsets_.back() - 1;
    uint32_t most_freq_bin = 0;
    return bin_data_->GetIterator(min_bin, max_bin, most_freq_bin);
  }

  // Partition data_indices by the split on sub_feature; rows going left are
  // written to lte_indices, rows going right to gt_indices.  Returns the
  // number of rows in the lte partition.
  inline data_size_t Split(int sub_feature, const uint32_t* threshold,
                           int num_threshold, bool default_left,
                           const data_size_t* data_indices, data_size_t cnt,
                           data_size_t* lte_indices,
                           data_size_t* gt_indices) const {
    uint32_t default_bin = bin_mappers_[sub_feature]->GetDefaultBin();
    uint32_t most_freq_bin = bin_mappers_[sub_feature]->GetMostFreqBin();
    if (!is_multi_val_) {
      uint32_t min_bin = bin_offsets_[sub_feature];
      uint32_t max_bin = bin_offsets_[sub_feature + 1] - 1;
      if (bin_mappers_[sub_feature]->bin_type() == BinType::NumericalBin) {
        auto missing_type = bin_mappers_[sub_feature]->missing_type();
        // Single-feature groups use the overload without min_bin.
        if (num_feature_ == 1) {
          return bin_data_->Split(max_bin, default_bin, most_freq_bin,
missing_type, default_left, *threshold,
                                  data_indices, cnt, lte_indices, gt_indices);
        } else {
          return bin_data_->Split(min_bin, max_bin, default_bin, most_freq_bin,
                                  missing_type, default_left, *threshold,
                                  data_indices, cnt, lte_indices, gt_indices);
        }
      } else {
        // Categorical split: threshold is a bitset of num_threshold words.
        if (num_feature_ == 1) {
          return bin_data_->SplitCategorical(max_bin, most_freq_bin, threshold,
                                             num_threshold, data_indices, cnt,
                                             lte_indices, gt_indices);
        } else {
          return bin_data_->SplitCategorical(
              min_bin, max_bin, most_freq_bin, threshold, num_threshold,
              data_indices, cnt, lte_indices, gt_indices);
        }
      }
    } else {
      // Multi-val layout: bins start at 1 (slot 0 reserved), so the range
      // is widened by one when most_freq_bin != 0.
      int addi = bin_mappers_[sub_feature]->GetMostFreqBin() == 0 ? 0 : 1;
      uint32_t max_bin = bin_mappers_[sub_feature]->num_bin() - 1 + addi;
      if (bin_mappers_[sub_feature]->bin_type() == BinType::NumericalBin) {
        auto missing_type = bin_mappers_[sub_feature]->missing_type();
        return multi_bin_data_[sub_feature]->Split(
            max_bin, default_bin, most_freq_bin, missing_type, default_left,
            *threshold, data_indices, cnt, lte_indices, gt_indices);
      } else {
        return multi_bin_data_[sub_feature]->SplitCategorical(
            max_bin, most_freq_bin, threshold, num_threshold, data_indices,
            cnt, lte_indices, gt_indices);
      }
    }
  }

  /*!
   * \brief From bin to feature value
   * \param bin
   * \return FeatureGroup value of this bin
   */
  inline double BinToValue(int sub_feature_idx, uint32_t bin) const {
    return bin_mappers_[sub_feature_idx]->BinToValue(bin);
  }

  /*!
   * \brief Save binary data to file
   * \param file File want to write
   */
  void SaveBinaryToFile(const VirtualFileWriter* writer) const {
    // Layout must stay in sync with the from-memory constructor above:
    // flags, feature count, mappers, then bin data.
    writer->Write(&is_multi_val_, sizeof(is_multi_val_));
    writer->Write(&is_sparse_, sizeof(is_sparse_));
    writer->Write(&num_feature_, sizeof(num_feature_));
    for (int i = 0; i < num_feature_; ++i) {
      bin_mappers_[i]->SaveBinaryToFile(writer);
    }
    if (is_multi_val_) {
      for (int i = 0; i < num_feature_; ++i) {
        multi_bin_data_[i]->SaveBinaryToFile(writer);
      }
    } else {
      bin_data_->SaveBinaryToFile(writer);
    }
  }

  /*!
   * \brief Get sizes in byte of this object
   */
  size_t SizesInByte() const {
    size_t ret = sizeof(is_multi_val_) + sizeof(is_sparse_) + sizeof(num_feature_);
    for (int i = 0; i < num_feature_; ++i) {
      ret += bin_mappers_[i]->SizesInByte();
    }
    if (!is_multi_val_) {
      ret += bin_data_->SizesInByte();
    } else {
      for (int i = 0; i < num_feature_; ++i) {
        ret += multi_bin_data_[i]->SizesInByte();
      }
    }
    return ret;
  }

  /*! \brief Disable copy */
  FeatureGroup& operator=(const FeatureGroup&) = delete;

  /*! \brief Deep copy */
  FeatureGroup(const FeatureGroup& other) {
    num_feature_ = other.num_feature_;
    is_multi_val_ = other.is_multi_val_;
    is_sparse_ = other.is_sparse_;
    num_total_bin_ = other.num_total_bin_;
    bin_offsets_ = other.bin_offsets_;

    bin_mappers_.reserve(other.bin_mappers_.size());
    for (auto& bin_mapper : other.bin_mappers_) {
      bin_mappers_.emplace_back(new BinMapper(*bin_mapper));
    }

    // Unlike FeatureGroup(other, num_data), this clones the bin contents.
    if (!is_multi_val_) {
      bin_data_.reset(other.bin_data_->Clone());
    } else {
      multi_bin_data_.clear();
      for (int i = 0; i < num_feature_; ++i) {
        multi_bin_data_.emplace_back(other.multi_bin_data_[i]->Clone());
      }
    }
  }

 private:
  /*!
   * \brief Allocate the bin storage for this group.
   * \param num_data Number of rows to size the bins for
   * \param is_multi_val True to create one bin object per sub-feature
   * \param force_dense Force a dense single-bin layout
   * \param force_sparse Force a sparse single-bin layout
   */
  void CreateBinData(int num_data, bool is_multi_val, bool force_dense,
                     bool force_sparse) {
    if (is_multi_val) {
      multi_bin_data_.clear();
      for (int i = 0; i < num_feature_; ++i) {
        int addi = bin_mappers_[i]->GetMostFreqBin() == 0 ? 0 : 1;
        // Per-feature sparsity decision based on the mapper's sparse rate.
        if (bin_mappers_[i]->sparse_rate() >= kSparseThreshold) {
          multi_bin_data_.emplace_back(Bin::CreateSparseBin(
              num_data, bin_mappers_[i]->num_bin() + addi));
        } else {
          multi_bin_data_.emplace_back(
              Bin::CreateDenseBin(num_data, bin_mappers_[i]->num_bin() + addi));
        }
      }
      is_multi_val_ = true;
    } else {
      // Sparse only when forced, or for an unforced single-feature group
      // whose sparse rate crosses the threshold.
      if (force_sparse ||
          (!force_dense && num_feature_ == 1 &&
           bin_mappers_[0]->sparse_rate() >= kSparseThreshold)) {
        is_sparse_ = true;
        bin_data_.reset(Bin::CreateSparseBin(num_data, num_total_bin_));
      } else {
        is_sparse_ = false;
        bin_data_.reset(Bin::CreateDenseBin(num_data, num_total_bin_));
      }
      is_multi_val_ = false;
    }
  }

  /*! \brief Number of features */
  int num_feature_;
  /*! \brief Bin mapper for sub features */
  std::vector<std::unique_ptr<BinMapper>> bin_mappers_;
  /*! \brief Bin offsets for sub features */
  std::vector<uint32_t> bin_offsets_;
  /*! \brief Bin data of this feature */
  std::unique_ptr<Bin> bin_data_;
  // Per-sub-feature bin data, used when is_multi_val_ is true.
  std::vector<std::unique_ptr<Bin>> multi_bin_data_;
  /*! \brief True if this feature is sparse */
  bool is_multi_val_;
  bool is_sparse_;
  // Total number of bins across all sub-features (slot 0 reserved).
  int num_total_bin_;
};

}  // namespace LightGBM

#endif  // LIGHTGBM_FEATURE_GROUP_H_
loop-collapse-openmp3x.c
/**************************************************************************** OpenMP-3.0 Example Codes Beta-v1.0 File : loop-collapse-openmp3x.c Date : Aug 2011 Description : Simple example program to demonstrates the use of openmp new feature "collapse" clause. In this example the iteration space over the loop index i and j is collapsed into the single large iteration space which then executed by the team of threads. OpenMP pragma/ Directive used : #pragma omp parallel for - collapse clause Input : None Output : Value of count variable ***************************************************************************/ /* Header file inclusion */ #include <stdio.h> #include<omp.h> /* Defined Parameters */ #define N 100 #define M 100 #define NUMTHREADS 4 /* Function declaration */ void loopParCollapse(); /* main function */ int main(int argc, char *argv[]) { /* Function calling */ loopParCollapse(); } /* End of main() */ /* Description: Parallelize Nested loop using Collapse clause (openmp-3.0). Collapse clause reduce the iterations over i & j in sigle iteration space which is executed by the threads in the team. @param : None */ void loopParCollapse() { int i,j,count=0; double start_time, end_time; /* Set the number of threads */ omp_set_num_threads(NUMTHREADS); /* Create the parallel region and collapse clause reduce the iteration over i & j in single iteration space which is then executed by thread team */ #pragma omp parallel for collapse(2) for ( i = 1 ; i <=N ; i++ ) { for (j=1; j<=M ;j++ ) { #pragma omp atomic /* Mutual exclusion point */ count++; } } /* end of the parallel region */ printf(" Value of count( Collapse Clause) : %d \n", count); } /* End of the function */
coloring_jones_v2.h
#include "gms/third_party/gapbs/benchmark.h"
#include "gms/third_party/gapbs/builder.h"
#include "gms/third_party/gapbs/command_line.h"
#include "gms/third_party/gapbs/graph.h"
#include "coloring_common.h"

#include <vector>
#include <unordered_map>
#include <unordered_set>
#include <memory>
#include <random>

// We need to know the cache line size at compile-time for alignas, but we can
// only determine it at runtime.
// TODO: What do? Just leave this hack with CACHE_LINE_SIZE = 64?
#define CACHE_LINE_SIZE 64

// Parallel graph coloring in which each thread owns a contiguous vertex
// partition and threads communicate completed boundary vertices through
// per-thread lock-free queues (appears to follow the Jones/Plassmann
// scheme — priority by random hash decides who colors first; confirm
// against the accompanying paper/readme).
namespace GMS::Coloring::JonesV2 {

// Single-consumer, multi-producer queue used to hand "ready" vertices to
// their owning thread. alignas(CACHE_LINE_SIZE) keeps each queue instance on
// its own cache line so the per-thread queues stored contiguously in a
// std::vector do not suffer false sharing.
class alignas(CACHE_LINE_SIZE) ready_queue {
private:
  NodeId* data;      // shared
  size_t write_pos;  // shared
  size_t read_pos;   // private

public:
  ready_queue() : data(nullptr), write_pos(0), read_pos(0) {}

  ~ready_queue() {
    // A non-empty queue at destruction means some published vertex was never
    // consumed — treat as a fatal logic error.
    if (write_pos != read_pos) {
      std::cout << "Tried to destroy a non-empty ready_queue" << std::endl;
      exit(EXIT_FAILURE);
    }
    delete[] data;
  }

  // Allocates the backing buffer. Zero-initialization matters: a slot value
  // of 0 means "not yet written" (see the +1 encoding in enqueue/dequeue).
  void init(size_t max_size) {
    if (data != nullptr) {
      std::cout << "Tried to re-init a ready_queue" << std::endl;
      exit(EXIT_FAILURE);
    }
    data = new NodeId[max_size](); // Zero-initialized
  }

  // Used by multiple threads to notify a thread that one of its vertices is
  // now ready. The slot index is claimed atomically; the vertex id is stored
  // shifted by +1 so the reader can tell a written slot from a still-zero one.
  void enqueue(NodeId ready_vertex) {
    size_t my_write_pos;
#pragma omp atomic capture
    my_write_pos = write_pos++;
#pragma omp atomic write
    data[my_write_pos] = (ready_vertex + 1);
  }

  // Only called by the owner thread, spins until a ready vertex is available.
  // Drains every currently-published slot into color_queue (undoing the +1).
  void dequeue(std::vector<NodeId> &color_queue) {
    // Wait until at least one ready vertex is available
    size_t cur_write_pos;
    do {
#pragma omp atomic read
      cur_write_pos = write_pos;
    } while (cur_write_pos == read_pos);
    int64_t ready_vertex;
    for (; read_pos < cur_write_pos; ++read_pos) {
      // Wait until a thread writes to data[read_pos], i.e. data[read_pos] != 0.
      // (Claiming the slot and writing it are two separate atomics in
      // enqueue, so the payload may lag behind write_pos.)
      do {
#pragma omp atomic read
        ready_vertex = data[read_pos];
      } while (ready_vertex == 0);
      color_queue.push_back(ready_vertex - 1);
    }
  }
};

// Book-keeping of which vertices to notify if a vertex gets colored.
// A non-owning [begin, end) view into a node_queue_list's storage.
class node_queue {
private:
  const NodeId* beginPtr;
  const NodeId* endPtr;

public:
  node_queue(const NodeId* begin, const NodeId* end)
      : beginPtr(begin), endPtr(end) {}
  const NodeId* begin() const { return beginPtr; }
  const NodeId* end() const { return endPtr; }
};

// CSR-style storage: one flat `nodes` array plus per-vertex offsets into it.
// Filled during partitioning via insert()/next_node(), then read through
// operator[] which returns the notification list of a vertex.
class node_queue_list {
private:
  int64_t part_start;     // first vertex id of the owning partition
  size_t cur_node_idx;    // next free slot in `nodes`
  size_t cur_offset_idx;  // next offset entry to finalize
  NodeId *nodes;
  size_t *node_offsets;

public:
  node_queue_list(int64_t part_start, int64_t part_end, size_t max_size)
      : part_start(part_start), cur_node_idx(0), cur_offset_idx(1),
        nodes(new NodeId[max_size]),
        node_offsets(new size_t[part_end - part_start + 1]) {
    node_offsets[0] = 0;
  }

  ~node_queue_list() {
    delete[] nodes;
    delete[] node_offsets;
  }

  // Append one node to the list currently being built.
  void insert(NodeId node) { nodes[cur_node_idx++] = node; }

  // Close the list of the current vertex and start the next one.
  void next_node() { node_offsets[cur_offset_idx++] = cur_node_idx; }

  // Notification list of vertex v (v must belong to this partition).
  const node_queue operator[](const NodeId v) const {
    const int64_t v_idx = v - part_start;
    size_t begin = node_offsets[v_idx];
    size_t end = node_offsets[v_idx+1];
    return node_queue(&nodes[begin], &nodes[end]);
  }
};

// Decrement the wait counter of every vertex in send_queue; a vertex whose
// counter hits zero has all higher-priority neighbors colored, so it is
// published to its owner thread's ready_queue.
void notify_threads(const node_queue send_queue, std::vector<int32_t> &n_wait,
                    std::vector<ready_queue> &ready_queues,
                    const int64_t part_max_size) {
  for (NodeId w : send_queue) {
    int32_t num_waiting;
#pragma omp atomic capture
    num_waiting = --n_wait[w];
    if (num_waiting == 0) {
      // w isn't waiting for any other vertices anymore, so tell w's owner
      // thread that w is ready. Ownership is by contiguous blocks of size
      // part_max_size (must match the partitioning in graph_coloring_jones).
      size_t w_thread_id = w / part_max_size;
      ready_queues[w_thread_id].enqueue(w);
    }
  }
}

// Sequential coloring algorithms

// Greedy step: assign v the smallest color in [1, deg(v)+1] not used by any
// neighbor. color_palette is a reusable scratch buffer of size >= deg(v)+1,
// all false on entry, and is reset to all false before returning.
template <class CGraph>
int32_t pick_lowest_consistent_color(const CGraph& g,
                                     std::vector<int32_t> &coloring,
                                     const NodeId v,
                                     std::vector<bool> &color_palette) {
  // If all deg(v) neighbors have distinct colors 1..deg(v), then deg(v) + 1
  // will be a consistent color.
  // Else, there will be a color i with 1 <= i <= deg(v) which was not
  // selected for any neighbor.
  const int32_t deg = g.out_degree(v);
  for (NodeId w : g.out_neigh(v)) {
    int32_t w_color;
#pragma omp atomic read
    w_color = coloring[w];
    // w_color == 0 means "uncolored" and harmlessly marks palette slot 0.
    if (w_color <= deg) {
      color_palette[w_color] = true;
    }
  }
  int32_t color;
  for (color = 1; color <= deg; ++color) {
    if (!color_palette[color]) break;
  }
#pragma omp atomic write
  coloring[v] = color;
  // Reset the color palette to false - only [0..deg] were used
  std::fill(color_palette.begin(), color_palette.begin() + (deg + 1), false);
  return color;
}

// Signature shared by the sequential strategies below; all return the
// largest color they assigned.
template <class CGraph>
using seq_coloring_func = int32_t (*)(const CGraph& g,
                                      std::vector<int32_t> &coloring,
                                      const std::vector<NodeId> &color_queue,
                                      std::vector<int32_t> &n_wait,
                                      const node_queue_list &send_queues,
                                      std::vector<ready_queue> &ready_queues,
                                      const int64_t part_max_size,
                                      std::vector<bool> &color_palette);

// Colors the queued vertices in the order given (no reordering heuristic).
template <class CGraph>
int32_t sequential_coloring_unordered(const CGraph& g,
                                      std::vector<int32_t> &coloring,
                                      const std::vector<NodeId> &color_queue,
                                      std::vector<int32_t> &n_wait,
                                      const node_queue_list &send_queues,
                                      std::vector<ready_queue> &ready_queues,
                                      const int64_t part_max_size,
                                      std::vector<bool> &color_palette) {
  int32_t max_color = 0;
  for (NodeId v : color_queue) {
    int32_t color = pick_lowest_consistent_color(g, coloring, v, color_palette);
    max_color = std::max(max_color, color);
    notify_threads(send_queues[v], n_wait, ready_queues, part_max_size);
  }
  return max_color;
}

// LDO = Largest Degree Ordering: colors the queued vertices in descending
// order of (out-)degree.
template <class CGraph>
int32_t sequential_coloring_ldo(const CGraph& g,
                                std::vector<int32_t> &coloring,
                                const std::vector<NodeId> &color_queue,
                                std::vector<int32_t> &n_wait,
                                const node_queue_list &send_queues,
                                std::vector<ready_queue> &ready_queues,
                                const int64_t part_max_size,
                                std::vector<bool> &color_palette) {
  struct idx_and_degree {
    size_t index;
    int64_t degree;
    idx_and_degree(size_t i, int64_t d) : index(i), degree(d) {}
    bool operator <(const idx_and_degree &other) {
      return degree > other.degree; // Sort descending by degree
    }
  };
  int32_t max_color = 0;
  size_t n_to_color = color_queue.size();
  std::vector<idx_and_degree> work_list;
  work_list.reserve(n_to_color);
  for (size_t i = 0; i < n_to_color; ++i) {
    NodeId v = color_queue[i];
    work_list.emplace_back(i, g.out_degree(v));
  }
  std::sort(work_list.begin(), work_list.end());
  for (idx_and_degree item : work_list) {
    NodeId v = color_queue[item.index];
    int32_t color = pick_lowest_consistent_color(g, coloring, v, color_palette);
    max_color = std::max(max_color, color);
    notify_threads(send_queues[v], n_wait, ready_queues, part_max_size);
  }
  return max_color;
}

// IDO = Incidence Degree Ordering: repeatedly colors the vertex with the most
// already-colored neighbors, tracked with a lazily rebuilt max-heap.
template <class CGraph>
int32_t sequential_coloring_ido(const CGraph& g,
                                std::vector<int32_t> &coloring,
                                const std::vector<NodeId> &color_queue,
                                std::vector<int32_t> &n_wait,
                                const node_queue_list &send_queues,
                                std::vector<ready_queue> &ready_queues,
                                const int64_t part_max_size,
                                std::vector<bool> &color_palette) {
  int32_t max_color = 0;
  size_t n_to_color = color_queue.size();
  std::vector<int32_t> incidence_degree(n_to_color);
  std::vector<size_t> max_deg_heap(n_to_color);
  // Maps vertex id -> its index in color_queue / incidence_degree.
  std::unordered_map<NodeId, size_t> rev_map;
  for (size_t i = 0; i < n_to_color; ++i) {
    NodeId v = color_queue[i];
    int32_t num_colored_neighbors = 0;
    for (NodeId w : g.out_neigh(v)) {
      int32_t w_color;
#pragma omp atomic read
      w_color = coloring[w];
      if (w_color > 0) {
        ++num_colored_neighbors;
      }
    }
    incidence_degree[i] = num_colored_neighbors;
    rev_map[v] = i;
    max_deg_heap[i] = i;
  }
  bool modified = true;
  for (size_t i = 0; i < n_to_color; ++i) {
    // Re-heapify only when some incidence degree changed since the last pass.
    if (modified) {
      std::make_heap(max_deg_heap.begin(), max_deg_heap.end(),
                     [&](const size_t a, const size_t b) -> bool {
                       return incidence_degree[a] < incidence_degree[b];
                     });
      modified = false;
    }
    NodeId v = color_queue[max_deg_heap[0]];
    std::pop_heap(max_deg_heap.begin(), max_deg_heap.end());
    max_deg_heap.pop_back();
    int32_t color = pick_lowest_consistent_color(g, coloring, v, color_palette);
    max_color = std::max(max_color, color);
    notify_threads(send_queues[v], n_wait, ready_queues, part_max_size);
    for (NodeId w : g.out_neigh(v)) {
      auto map_it = rev_map.find(w);
      if (map_it == rev_map.end()) continue;
      // NOTE(review): this inner 'v' is an index into incidence_degree and
      // shadows the outer vertex 'v' — consider renaming for clarity.
      NodeId v = map_it->second;
      ++incidence_degree[v];
      modified = true;
    }
  }
  return max_color;
}

// SDO = Saturation Degree Ordering (DSATUR-style): repeatedly colors the
// vertex whose neighbors use the most distinct colors.
template <class CGraph>
int32_t sequential_coloring_sdo(const CGraph& g,
                                std::vector<int32_t> &coloring,
                                const std::vector<NodeId> &color_queue,
                                std::vector<int32_t> &n_wait,
                                const node_queue_list &send_queues,
                                std::vector<ready_queue> &ready_queues,
                                const int64_t part_max_size,
                                std::vector<bool> &color_palette) {
  // Sort the vertices by decreasing order of degrees
  struct idx_and_degree {
    size_t index1;
    int64_t degree;
    idx_and_degree(size_t idx, int64_t d) : index1(idx), degree(d) {}
    bool operator <(const idx_and_degree &other) {
      return degree > other.degree; // Sort descending by degree
    }
  };
  int32_t max_color = 0;
  size_t n_to_color = color_queue.size();
  if (n_to_color == 0) return 0;
  std::vector<idx_and_degree> degree_ordering;
  degree_ordering.reserve(n_to_color);
  for (size_t i = 0; i < n_to_color; ++i) {
    NodeId v = color_queue[i];
    degree_ordering.emplace_back(i, g.out_degree(v));
  }
  std::sort(degree_ordering.begin(), degree_ordering.end());
  // Color a vertex of maximal degree
  NodeId v = color_queue[degree_ordering[0].index1];
  int32_t color = pick_lowest_consistent_color(g, coloring, v, color_palette);
  max_color = std::max(max_color, color);
  notify_threads(send_queues[v], n_wait, ready_queues, part_max_size);
  // Order the rest of the vertices into a heap by decreasing saturation degree
  struct idx_and_saturation {
    size_t index2;
    int64_t saturation; // number of distinct colors among the neighbors
    idx_and_saturation(size_t idx) : index2(idx), saturation(0) {}
    bool operator<(const idx_and_saturation &other) {
      return saturation < other.saturation;
    }
    // Recompute saturation from scratch; it can only grow (monotone max).
    void update_saturation(const CGraph &g, const std::vector<int32_t> &coloring,
                           const std::vector<NodeId> &color_queue) {
      std::unordered_set<int32_t> distinct_colors;
      NodeId w = color_queue[index2];
      for (NodeId o : g.out_neigh(w)) {
        int32_t v_color = coloring[o];
        if (v_color > 0) distinct_colors.insert(v_color);
      }
      if (saturation < distinct_colors.size()) saturation = distinct_colors.size();
    }
  };
  // shared_ptr lets both the heap and rev_map refer to the same mutable entry.
  using idx_and_sat_ptr = std::shared_ptr<idx_and_saturation>;
  std::vector<idx_and_sat_ptr> saturation_ordering;
  saturation_ordering.reserve(n_to_color - 1); // Omit the vertex already colored
  std::unordered_map<NodeId, idx_and_sat_ptr> rev_map;
  for (size_t i = 0; i < n_to_color; ++i) {
    NodeId w = color_queue[i];
    if (w != v) {
      idx_and_sat_ptr idx {new idx_and_saturation(i)};
      saturation_ordering.emplace_back(idx);
      rev_map[w] = idx;
    }
  }
  // Seed saturations of the first vertex's neighbors.
  for (NodeId w : g.out_neigh(v)) {
    idx_and_sat_ptr idx = rev_map[w];
    if (idx) idx->update_saturation(g, coloring, color_queue);
  }
  // Color the rest of the vertices based on their saturation degrees
  bool modified = true;
  for (size_t i = 0; i < n_to_color - 1 && !saturation_ordering.empty(); ++i) {
    // Re-heapify lazily, only after some saturation actually increased.
    if (modified) {
      std::make_heap(saturation_ordering.begin(), saturation_ordering.end(),
                     [](const idx_and_sat_ptr &a, const idx_and_sat_ptr &b) {
                       return a->saturation < b->saturation;
                     });
      modified = false;
    }
    // Pick vertex with the largest saturation degree value
    NodeId w = color_queue[saturation_ordering[0]->index2];
    rev_map[w].reset();
    rev_map[w] = nullptr;
    // Remove the vertex from the heap
    std::pop_heap(saturation_ordering.begin(), saturation_ordering.end());
    saturation_ordering.pop_back();
    // Pick a color for the vertex
    int32_t color = pick_lowest_consistent_color(g, coloring, w, color_palette);
    max_color = std::max(max_color, color); // Possibly modify max_color if new is higher
    notify_threads(send_queues[w], n_wait, ready_queues, part_max_size);
    // For each neighbour, update saturation
    for (NodeId o : g.out_neigh(w)) {
      idx_and_sat_ptr it = rev_map[o];
      if (it) {
        int64_t old_saturation = it->saturation;
        it->update_saturation(g, coloring, color_queue);
        if (it->saturation > old_saturation) modified = true;
      }
    }
  }
  return max_color;
}

// Graph partitioning

// Deterministic 64-bit mixing hash used as the random vertex priority:
// multiply by a fixed odd constant, rotate right by 17 bits, mix again with
// the seed. The same (seed, v) always yields the same priority.
uint64_t rho(const uint32_t seed, uint64_t v) {
  const uint64_t rnd_prime_64 = 0xE57EACE69B044FE7ULL;
  v = (v * rnd_prime_64) + seed;
  v = (v >> 17) | (v << 47); // rotate right by 17 (17 + 47 = 64)
  v = (v + seed) * rnd_prime_64;
  return v;
}

// Scans this thread's vertex range [part_start, part_end) and classifies each
// vertex: purely local (no cross-partition edge), immediately colorable
// (shared but waits on nobody), or waiting on n_wait[v] higher-priority
// cross-partition neighbors. Also records, per vertex, which foreign
// neighbors must be notified once it is colored (send_queue), and returns
// the maximum out-degree seen (sizes the color palette).
// NOTE(review): returns size_t while accumulating an int64_t max_degree —
// implicit conversion; fine as long as degrees are non-negative.
template <class CGraph>
size_t partition_graph(const CGraph &g, const int64_t part_start,
                       const int64_t part_end, const uint32_t rho_seed,
                       std::vector<int32_t> &n_wait, node_queue_list &send_queue,
                       std::vector<NodeId> &color_queue,
                       std::vector<NodeId> &local_vertices) {
  int64_t max_degree = 0;
  for (NodeId v = part_start; v < part_end; ++v) {
    bool is_local = true; // A vertex is local iff none of its neighbors are part of another partition
    int32_t n_wait_v = 0;
    uint64_t rho_v = rho(rho_seed, (uint64_t) v);
    for (NodeId w : g.out_neigh(v)) {
      if (part_start <= w && w < part_end) {
        continue; // Skip local neighbors
      }
      is_local = false; // Has a shared edge, no longer a local vertex
      uint64_t rho_w = rho(rho_seed, (uint64_t) w);
      if (rho_w > rho_v) {
        // Neighbor has higher priority: v must wait for it.
        ++n_wait_v;
      } else {
        // v has higher priority: w must be notified once v is colored.
        send_queue.insert(w);
      }
    }
    if (is_local) {
      local_vertices.push_back(v);
    } else if (n_wait_v == 0) {
      // Shared vertex doesn't have to wait for any other vertices to be
      // colored, so we can immediately color it
      color_queue.push_back(v);
    }
    max_degree = std::max(max_degree, g.out_degree(v));
    n_wait[v] = n_wait_v;
    send_queue.next_node();
  }
  return max_degree;
}

// Actual parallel algorithm

// Entry point: partitions the vertices contiguously across the OpenMP team,
// colors shared (boundary) vertices in hash-priority order while exchanging
// readiness notifications through the per-thread ready_queues, then colors
// each thread's purely local vertices last. Returns the number of colors
// used (the largest color assigned). coloring must have one slot per vertex;
// 0 is treated as "uncolored" throughout.
template <class CGraph>
int32_t graph_coloring_jones(const CGraph &g, std::vector<int32_t> &coloring) {
  // DetailTimer timer;
  const seq_coloring_func<CGraph> seq_color = &sequential_coloring_sdo;
  const int64_t n = g.num_nodes();
  std::vector<ready_queue> ready_queues(omp_get_max_threads());
  std::vector<int32_t> n_wait(n);
  int32_t num_colors = 0;
  size_t shared_vertices_count = 0;
  std::random_device rd;
  const uint32_t rho_seed = rd();
  // NOTE(review): debug output — consider removing for production runs.
  std::cout << omp_get_max_threads() << " " << omp_get_num_threads() << std::endl;
  // timer.endPhase("init");
#pragma omp parallel shared(g, coloring, ready_queues, n_wait) reduction(max: num_colors) reduction(+: shared_vertices_count)
  {
    const int tcount = omp_get_num_threads();
    const int tid = omp_get_thread_num();
    const int64_t part_max_size = (n + (tcount - 1)) / tcount; // part_max_size = ceil(n / tcount);
    const int64_t part_start = std::min(n, tid * part_max_size);
    const int64_t part_end = std::min(n, part_start + part_max_size);
    const int64_t part_size = part_end - part_start;
    std::vector<NodeId> color_queue;
    color_queue.reserve(part_size);
    std::vector<NodeId> local_vertices;
    local_vertices.reserve(part_size);
    // Total edge endpoints of this partition — upper bound for the CSR
    // storage of the per-vertex notification lists.
    const size_t num_partition_neighbors =
        std::distance(g.out_neigh(part_start).begin(), g.out_neigh(part_end - 1).end());
    node_queue_list send_queues(part_start, part_end, num_partition_neighbors);
    size_t max_degree = partition_graph(g, part_start, part_end, rho_seed,
                                        n_wait, send_queues, color_queue, local_vertices);
    shared_vertices_count = part_size - local_vertices.size();
    std::vector<bool> color_palette(max_degree + 1, false);
    ready_queue &in_queue = ready_queues[tid];
    in_queue.init(shared_vertices_count);
#pragma omp barrier
    // All n_wait and ready_queues are now initialized
#pragma omp master
    {
      // timer.endPhase("par_partition");
    }
    // First color the shared vertices that wait on nobody, ...
    num_colors = seq_color(g, coloring, color_queue, n_wait, send_queues,
                           ready_queues, part_max_size, color_palette);
    size_t n_colored = color_queue.size();
    color_queue.clear();
    // ... then keep draining our ready_queue until every shared vertex of
    // this partition has been colored.
    while (n_colored < shared_vertices_count) {
      in_queue.dequeue(color_queue);
      int32_t cur_max_color = seq_color(g, coloring, color_queue, n_wait,
                                        send_queues, ready_queues, part_max_size, color_palette);
      num_colors = std::max(num_colors, cur_max_color);
      n_colored += color_queue.size();
      color_queue.clear();
    }
    // Color local vertices last
    int32_t cur_max_color = seq_color(g, coloring, local_vertices, n_wait,
                                      send_queues, ready_queues, part_max_size, color_palette);
    num_colors = std::max(num_colors, cur_max_color);
  }
  // timer.endPhase("par_coloring");
  // timer.print();
  double local_ratio = 1.0 - (double) shared_vertices_count / n;
  std::cout << "Local vertex ratio: " << local_ratio << std::endl;
  return num_colors;
}

} // namespace GMS::Coloring::JonesV2
omp_report_mask.c
/* Routine reports OpenMP process affinity information. Get thread number and cpus (cpu_ids) Create static space (proc_mask) to hold all masks (done in a single region) Determine the mask for each thread (insert it in proc_mask) print mask header (one thread in single region) print mask (one thread in single region) free spaces return */ #include <stdio.h> #include <omp.h> #include <unistd.h> #include <stdlib.h> #include <ctype.h> #include "opts.h" void print_mask(int hd_prnt, char* name, int multi_node, int rank, int thrd, int ncpus, int nranks, int nthrds, int *proc_mask, int tpc, char l); int boundto(int* nelements_set, int* int_mask); int get_threads_per_node(); void omp_report_mask(){ int nthrds, thrd; //Thread info int ncpus, nel_set; static int ** proc_mask; int i,j, ierr; char * dummy; char l,p; int tpc; // hwthreads/core Maskopts opts; // get print_speed fast or slow (f|c); listing cores or SMT (c|s) p = opts.get_p(); l = opts.get_l(); tpc=get_threads_per_node(); thrd = omp_get_thread_num(); nthrds = omp_get_num_threads(); ncpus = (int) sysconf(_SC_NPROCESSORS_ONLN); if(omp_get_num_procs() != ncpus){ printf("ERROR: ncpus_by_omp=%d, ncpus_sched=%d\n",omp_get_num_procs(),ncpus); exit(1); } #pragma omp single { proc_mask = (int **) malloc(sizeof(int*)*nthrds); for(i=0;i<nthrds;i++) proc_mask[i] = (int * ) malloc(sizeof(int)*ncpus ); for(i=0;i<nthrds;i++) for(j=0;j<ncpus;j++) proc_mask[i][j] =0; } ierr = boundto(&nel_set,proc_mask[thrd]); #pragma omp barrier #pragma omp single { print_mask(1, dummy, 0, 0,0, ncpus, 1,nthrds, proc_mask[thrd],tpc,l); //print header for(thrd=0;thrd<nthrds;thrd++){ print_mask(0, dummy, 0, 0,thrd, ncpus, 1,nthrds, proc_mask[thrd],tpc,l); if(p == 's') ierr=usleep(300000); } if(nthrds>50) print_mask(2, dummy, 0, 0,0, ncpus, 1,nthrds, proc_mask[thrd],tpc,l); //print header for(i=0;i<nthrds;i++) free( proc_mask[i]); free( proc_mask); } } void omp_report_mask_(){ omp_report_mask(); }
CGOpenMPRuntime.h
//===----- CGOpenMPRuntime.h - Interface to OpenMP Runtimes -----*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This provides a class for OpenMP runtime code generation. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_LIB_CODEGEN_CGOPENMPRUNTIME_H #define LLVM_CLANG_LIB_CODEGEN_CGOPENMPRUNTIME_H #include "CGValue.h" #include "clang/AST/DeclOpenMP.h" #include "clang/AST/GlobalDecl.h" #include "clang/AST/Type.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/SourceLocation.h" #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/PointerIntPair.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/StringMap.h" #include "llvm/ADT/StringSet.h" #include "llvm/Frontend/OpenMP/OMPConstants.h" #include "llvm/Frontend/OpenMP/OMPIRBuilder.h" #include "llvm/IR/Function.h" #include "llvm/IR/ValueHandle.h" #include "llvm/Support/AtomicOrdering.h" namespace llvm { class ArrayType; class Constant; class FunctionType; class GlobalVariable; class StructType; class Type; class Value; class OpenMPIRBuilder; } // namespace llvm namespace clang { class Expr; class OMPDependClause; class OMPExecutableDirective; class OMPLoopDirective; class VarDecl; class OMPDeclareReductionDecl; class IdentifierInfo; namespace CodeGen { class Address; class CodeGenFunction; class CodeGenModule; /// A basic class for pre|post-action for advanced codegen sequence for OpenMP /// region. 
class PrePostActionTy {
public:
  explicit PrePostActionTy() {}
  // Called before/after the region body is emitted; default is a no-op.
  virtual void Enter(CodeGenFunction &CGF) {}
  virtual void Exit(CodeGenFunction &CGF) {}
  virtual ~PrePostActionTy() {}
};

/// Class provides a way to call simple version of codegen for OpenMP region,
/// or an advanced with possible pre|post-actions in codegen.
class RegionCodeGenTy final {
  intptr_t CodeGen;
  typedef void (*CodeGenTy)(intptr_t, CodeGenFunction &, PrePostActionTy &);
  CodeGenTy Callback;
  mutable PrePostActionTy *PrePostAction;
  RegionCodeGenTy() = delete;
  // Type-erasure trampoline: recovers the concrete callable from the stored
  // address and invokes it.
  template <typename Callable>
  static void CallbackFn(intptr_t CodeGen, CodeGenFunction &CGF,
                         PrePostActionTy &Action) {
    return (*reinterpret_cast<Callable *>(CodeGen))(CGF, Action);
  }

public:
  // Stores the address of the callable (no copy); the enable_if keeps this
  // constructor from hijacking the copy constructor.
  // NOTE(review): the callable is captured by pointer, so it must outlive
  // this RegionCodeGenTy.
  template <typename Callable>
  RegionCodeGenTy(
      Callable &&CodeGen,
      std::enable_if_t<!std::is_same<std::remove_reference_t<Callable>,
                                     RegionCodeGenTy>::value> * = nullptr)
      : CodeGen(reinterpret_cast<intptr_t>(&CodeGen)),
        Callback(CallbackFn<std::remove_reference_t<Callable>>),
        PrePostAction(nullptr) {}

  void setAction(PrePostActionTy &Action) const { PrePostAction = &Action; }
  void operator()(CodeGenFunction &CGF) const;
};

/// Clause data captured for a task-generating directive: per-clause variable
/// lists plus scalar clause values (final, schedule, priority, ...).
struct OMPTaskDataTy final {
  SmallVector<const Expr *, 4> PrivateVars;
  SmallVector<const Expr *, 4> PrivateCopies;
  SmallVector<const Expr *, 4> FirstprivateVars;
  SmallVector<const Expr *, 4> FirstprivateCopies;
  SmallVector<const Expr *, 4> FirstprivateInits;
  SmallVector<const Expr *, 4> LastprivateVars;
  SmallVector<const Expr *, 4> LastprivateCopies;
  SmallVector<const Expr *, 4> ReductionVars;
  SmallVector<const Expr *, 4> ReductionOrigs;
  SmallVector<const Expr *, 4> ReductionCopies;
  SmallVector<const Expr *, 4> ReductionOps;
  SmallVector<CanonicalDeclPtr<const VarDecl>, 4> PrivateLocals;
  /// One 'depend' clause entry: dependence kind, optional iterator, and the
  /// dependence expressions.
  struct DependData {
    OpenMPDependClauseKind DepKind = OMPC_DEPEND_unknown;
    const Expr *IteratorExpr = nullptr;
    SmallVector<const Expr *, 4> DepExprs;
    explicit DependData() = default;
    DependData(OpenMPDependClauseKind DepKind, const Expr *IteratorExpr)
        : DepKind(DepKind), IteratorExpr(IteratorExpr) {}
  };
  SmallVector<DependData, 4> Dependences;
  // Clause value + flag packed into one word (presumably the flag records
  // whether the clause was present — confirm against CGOpenMPRuntime.cpp).
  llvm::PointerIntPair<llvm::Value *, 1, bool> Final;
  llvm::PointerIntPair<llvm::Value *, 1, bool> Schedule;
  llvm::PointerIntPair<llvm::Value *, 1, bool> Priority;
  llvm::Value *Reductions = nullptr;
  unsigned NumberOfParts = 0;
  bool Tied = true;
  bool Nogroup = false;
  bool IsReductionWithTaskMod = false;
  bool IsWorksharingReduction = false;
};

/// Class intended to support codegen of all kinds of the reduction clauses.
class ReductionCodeGen {
private:
  /// Data required for codegen of reduction clauses.
  struct ReductionData {
    /// Reference to the item shared between tasks to reduce into.
    const Expr *Shared = nullptr;
    /// Reference to the original item.
    const Expr *Ref = nullptr;
    /// Helper expression for generation of private copy.
    const Expr *Private = nullptr;
    /// Helper expression for generation reduction operation.
    const Expr *ReductionOp = nullptr;
    ReductionData(const Expr *Shared, const Expr *Ref, const Expr *Private,
                  const Expr *ReductionOp)
        : Shared(Shared), Ref(Ref), Private(Private), ReductionOp(ReductionOp) {
    }
  };
  /// List of reduction-based clauses.
  SmallVector<ReductionData, 4> ClausesData;

  /// List of addresses of shared variables/expressions.
  SmallVector<std::pair<LValue, LValue>, 4> SharedAddresses;
  /// List of addresses of original variables/expressions.
  SmallVector<std::pair<LValue, LValue>, 4> OrigAddresses;
  /// Sizes of the reduction items in chars.
  SmallVector<std::pair<llvm::Value *, llvm::Value *>, 4> Sizes;
  /// Base declarations for the reduction items.
  SmallVector<const VarDecl *, 4> BaseDecls;

  /// Emits lvalue for shared expression.
  LValue emitSharedLValue(CodeGenFunction &CGF, const Expr *E);
  /// Emits upper bound for shared expression (if array section).
  LValue emitSharedLValueUB(CodeGenFunction &CGF, const Expr *E);
  /// Performs aggregate initialization.
  /// \param N Number of reduction item in the common list.
  /// \param PrivateAddr Address of the corresponding private item.
  /// \param SharedLVal Address of the original shared variable.
  /// \param DRD Declare reduction construct used for reduction item.
  void emitAggregateInitialization(CodeGenFunction &CGF, unsigned N,
                                   Address PrivateAddr, LValue SharedLVal,
                                   const OMPDeclareReductionDecl *DRD);

public:
  ReductionCodeGen(ArrayRef<const Expr *> Shareds, ArrayRef<const Expr *> Origs,
                   ArrayRef<const Expr *> Privates,
                   ArrayRef<const Expr *> ReductionOps);
  /// Emits lvalue for the shared and original reduction item.
  /// \param N Number of the reduction item.
  void emitSharedOrigLValue(CodeGenFunction &CGF, unsigned N);
  /// Emits the code for the variable-modified type, if required.
  /// \param N Number of the reduction item.
  void emitAggregateType(CodeGenFunction &CGF, unsigned N);
  /// Emits the code for the variable-modified type, if required.
  /// \param N Number of the reduction item.
  /// \param Size Size of the type in chars.
  void emitAggregateType(CodeGenFunction &CGF, unsigned N, llvm::Value *Size);
  /// Performs initialization of the private copy for the reduction item.
  /// \param N Number of the reduction item.
  /// \param PrivateAddr Address of the corresponding private item.
  /// \param DefaultInit Default initialization sequence that should be
  /// performed if no reduction specific initialization is found.
  /// \param SharedLVal Address of the original shared variable.
  void emitInitialization(CodeGenFunction &CGF, unsigned N, Address PrivateAddr,
                          LValue SharedLVal,
                          llvm::function_ref<bool(CodeGenFunction &)> DefaultInit);
  /// Returns true if the private copy requires cleanups.
  bool needCleanups(unsigned N);
  /// Emits cleanup code for the reduction item.
  /// \param N Number of the reduction item.
  /// \param PrivateAddr Address of the corresponding private item.
void emitCleanups(CodeGenFunction &CGF, unsigned N, Address PrivateAddr); /// Adjusts \p PrivatedAddr for using instead of the original variable /// address in normal operations. /// \param N Number of the reduction item. /// \param PrivateAddr Address of the corresponding private item. Address adjustPrivateAddress(CodeGenFunction &CGF, unsigned N, Address PrivateAddr); /// Returns LValue for the reduction item. LValue getSharedLValue(unsigned N) const { return SharedAddresses[N].first; } /// Returns LValue for the original reduction item. LValue getOrigLValue(unsigned N) const { return OrigAddresses[N].first; } /// Returns the size of the reduction item (in chars and total number of /// elements in the item), or nullptr, if the size is a constant. std::pair<llvm::Value *, llvm::Value *> getSizes(unsigned N) const { return Sizes[N]; } /// Returns the base declaration of the reduction item. const VarDecl *getBaseDecl(unsigned N) const { return BaseDecls[N]; } /// Returns the base declaration of the reduction item. const Expr *getRefExpr(unsigned N) const { return ClausesData[N].Ref; } /// Returns true if the initialization of the reduction item uses initializer /// from declare reduction construct. bool usesReductionInitializer(unsigned N) const; }; class CGOpenMPRuntime { public: /// Allows to disable automatic handling of functions used in target regions /// as those marked as `omp declare target`. class DisableAutoDeclareTargetRAII { CodeGenModule &CGM; bool SavedShouldMarkAsGlobal; public: DisableAutoDeclareTargetRAII(CodeGenModule &CGM); ~DisableAutoDeclareTargetRAII(); }; /// Manages list of nontemporal decls for the specified directive. class NontemporalDeclsRAII { CodeGenModule &CGM; const bool NeedToPush; public: NontemporalDeclsRAII(CodeGenModule &CGM, const OMPLoopDirective &S); ~NontemporalDeclsRAII(); }; /// Manages list of nontemporal decls for the specified directive. 
class UntiedTaskLocalDeclsRAII { CodeGenModule &CGM; const bool NeedToPush; public: UntiedTaskLocalDeclsRAII( CodeGenFunction &CGF, const llvm::MapVector<CanonicalDeclPtr<const VarDecl>, std::pair<Address, Address>> &LocalVars); ~UntiedTaskLocalDeclsRAII(); }; /// Maps the expression for the lastprivate variable to the global copy used /// to store new value because original variables are not mapped in inner /// parallel regions. Only private copies are captured but we need also to /// store private copy in shared address. /// Also, stores the expression for the private loop counter and it /// threaprivate name. struct LastprivateConditionalData { llvm::MapVector<CanonicalDeclPtr<const Decl>, SmallString<16>> DeclToUniqueName; LValue IVLVal; llvm::Function *Fn = nullptr; bool Disabled = false; }; /// Manages list of lastprivate conditional decls for the specified directive. class LastprivateConditionalRAII { enum class ActionToDo { DoNotPush, PushAsLastprivateConditional, DisableLastprivateConditional, }; CodeGenModule &CGM; ActionToDo Action = ActionToDo::DoNotPush; /// Check and try to disable analysis of inner regions for changes in /// lastprivate conditional. void tryToDisableInnerAnalysis(const OMPExecutableDirective &S, llvm::DenseSet<CanonicalDeclPtr<const Decl>> &NeedToAddForLPCsAsDisabled) const; LastprivateConditionalRAII(CodeGenFunction &CGF, const OMPExecutableDirective &S); public: explicit LastprivateConditionalRAII(CodeGenFunction &CGF, const OMPExecutableDirective &S, LValue IVLVal); static LastprivateConditionalRAII disable(CodeGenFunction &CGF, const OMPExecutableDirective &S); ~LastprivateConditionalRAII(); }; llvm::OpenMPIRBuilder &getOMPBuilder() { return OMPBuilder; } protected: CodeGenModule &CGM; StringRef FirstSeparator, Separator; /// An OpenMP-IR-Builder instance. llvm::OpenMPIRBuilder OMPBuilder; /// Constructor allowing to redefine the name separator for the variables. 
explicit CGOpenMPRuntime(CodeGenModule &CGM, StringRef FirstSeparator, StringRef Separator); /// Creates offloading entry for the provided entry ID \a ID, /// address \a Addr, size \a Size, and flags \a Flags. virtual void createOffloadEntry(llvm::Constant *ID, llvm::Constant *Addr, uint64_t Size, int32_t Flags, llvm::GlobalValue::LinkageTypes Linkage); /// Helper to emit outlined function for 'target' directive. /// \param D Directive to emit. /// \param ParentName Name of the function that encloses the target region. /// \param OutlinedFn Outlined function value to be defined by this call. /// \param OutlinedFnID Outlined function ID value to be defined by this call. /// \param IsOffloadEntry True if the outlined function is an offload entry. /// \param CodeGen Lambda codegen specific to an accelerator device. /// An outlined function may not be an entry if, e.g. the if clause always /// evaluates to false. virtual void emitTargetOutlinedFunctionHelper(const OMPExecutableDirective &D, StringRef ParentName, llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID, bool IsOffloadEntry, const RegionCodeGenTy &CodeGen); /// Emits object of ident_t type with info for source location. /// \param Flags Flags for OpenMP location. /// llvm::Value *emitUpdateLocation(CodeGenFunction &CGF, SourceLocation Loc, unsigned Flags = 0); /// Emit the number of teams for a target directive. Inspect the num_teams /// clause associated with a teams construct combined or closely nested /// with the target directive. /// /// Emit a team of size one for directives such as 'target parallel' that /// have no associated teams construct. /// /// Otherwise, return nullptr. const Expr *getNumTeamsExprForTargetDirective(CodeGenFunction &CGF, const OMPExecutableDirective &D, int32_t &DefaultVal); llvm::Value *emitNumTeamsForTargetDirective(CodeGenFunction &CGF, const OMPExecutableDirective &D); /// Emit the number of threads for a target directive. 
Inspect the /// thread_limit clause associated with a teams construct combined or closely /// nested with the target directive. /// /// Emit the num_threads clause for directives such as 'target parallel' that /// have no associated teams construct. /// /// Otherwise, return nullptr. const Expr * getNumThreadsExprForTargetDirective(CodeGenFunction &CGF, const OMPExecutableDirective &D, int32_t &DefaultVal); llvm::Value * emitNumThreadsForTargetDirective(CodeGenFunction &CGF, const OMPExecutableDirective &D); /// Returns pointer to ident_t type. llvm::Type *getIdentTyPointerTy(); /// Gets thread id value for the current thread. /// llvm::Value *getThreadID(CodeGenFunction &CGF, SourceLocation Loc); /// Get the function name of an outlined region. // The name can be customized depending on the target. // virtual StringRef getOutlinedHelperName() const { return ".omp_outlined."; } /// Emits \p Callee function call with arguments \p Args with location \p Loc. void emitCall(CodeGenFunction &CGF, SourceLocation Loc, llvm::FunctionCallee Callee, ArrayRef<llvm::Value *> Args = llvm::None) const; /// Emits address of the word in a memory where current thread id is /// stored. virtual Address emitThreadIDAddress(CodeGenFunction &CGF, SourceLocation Loc); void setLocThreadIdInsertPt(CodeGenFunction &CGF, bool AtCurrentPoint = false); void clearLocThreadIdInsertPt(CodeGenFunction &CGF); /// Check if the default location must be constant. /// Default is false to support OMPT/OMPD. virtual bool isDefaultLocationConstant() const { return false; } /// Returns additional flags that can be stored in reserved_2 field of the /// default location. virtual unsigned getDefaultLocationReserved2Flags() const { return 0; } /// Returns default flags for the barriers depending on the directive, for /// which this barier is going to be emitted. static unsigned getDefaultFlagsForBarriers(OpenMPDirectiveKind Kind); /// Get the LLVM type for the critical name. 
llvm::ArrayType *getKmpCriticalNameTy() const {return KmpCriticalNameTy;} /// Returns corresponding lock object for the specified critical region /// name. If the lock object does not exist it is created, otherwise the /// reference to the existing copy is returned. /// \param CriticalName Name of the critical region. /// llvm::Value *getCriticalRegionLock(StringRef CriticalName); private: /// Map for SourceLocation and OpenMP runtime library debug locations. typedef llvm::DenseMap<SourceLocation, llvm::Value *> OpenMPDebugLocMapTy; OpenMPDebugLocMapTy OpenMPDebugLocMap; /// The type for a microtask which gets passed to __kmpc_fork_call(). /// Original representation is: /// typedef void (kmpc_micro)(kmp_int32 global_tid, kmp_int32 bound_tid,...); llvm::FunctionType *Kmpc_MicroTy = nullptr; /// Stores debug location and ThreadID for the function. struct DebugLocThreadIdTy { llvm::Value *DebugLoc; llvm::Value *ThreadID; /// Insert point for the service instructions. llvm::AssertingVH<llvm::Instruction> ServiceInsertPt = nullptr; }; /// Map of local debug location, ThreadId and functions. typedef llvm::DenseMap<llvm::Function *, DebugLocThreadIdTy> OpenMPLocThreadIDMapTy; OpenMPLocThreadIDMapTy OpenMPLocThreadIDMap; /// Map of UDRs and corresponding combiner/initializer. typedef llvm::DenseMap<const OMPDeclareReductionDecl *, std::pair<llvm::Function *, llvm::Function *>> UDRMapTy; UDRMapTy UDRMap; /// Map of functions and locally defined UDRs. typedef llvm::DenseMap<llvm::Function *, SmallVector<const OMPDeclareReductionDecl *, 4>> FunctionUDRMapTy; FunctionUDRMapTy FunctionUDRMap; /// Map from the user-defined mapper declaration to its corresponding /// functions. llvm::DenseMap<const OMPDeclareMapperDecl *, llvm::Function *> UDMMap; /// Map of functions and their local user-defined mappers. 
using FunctionUDMMapTy = llvm::DenseMap<llvm::Function *, SmallVector<const OMPDeclareMapperDecl *, 4>>; FunctionUDMMapTy FunctionUDMMap; /// Maps local variables marked as lastprivate conditional to their internal /// types. llvm::DenseMap<llvm::Function *, llvm::DenseMap<CanonicalDeclPtr<const Decl>, std::tuple<QualType, const FieldDecl *, const FieldDecl *, LValue>>> LastprivateConditionalToTypes; /// Maps function to the position of the untied task locals stack. llvm::DenseMap<llvm::Function *, unsigned> FunctionToUntiedTaskStackMap; /// Type kmp_critical_name, originally defined as typedef kmp_int32 /// kmp_critical_name[8]; llvm::ArrayType *KmpCriticalNameTy; /// An ordered map of auto-generated variables to their unique names. /// It stores variables with the following names: 1) ".gomp_critical_user_" + /// <critical_section_name> + ".var" for "omp critical" directives; 2) /// <mangled_name_for_global_var> + ".cache." for cache for threadprivate /// variables. llvm::StringMap<llvm::AssertingVH<llvm::Constant>, llvm::BumpPtrAllocator> InternalVars; /// Type typedef kmp_int32 (* kmp_routine_entry_t)(kmp_int32, void *); llvm::Type *KmpRoutineEntryPtrTy = nullptr; QualType KmpRoutineEntryPtrQTy; /// Type typedef struct kmp_task { /// void * shareds; /**< pointer to block of pointers to /// shared vars */ /// kmp_routine_entry_t routine; /**< pointer to routine to call for /// executing task */ /// kmp_int32 part_id; /**< part id for the task */ /// kmp_routine_entry_t destructors; /* pointer to function to invoke /// deconstructors of firstprivate C++ objects */ /// } kmp_task_t; QualType KmpTaskTQTy; /// Saved kmp_task_t for task directive. QualType SavedKmpTaskTQTy; /// Saved kmp_task_t for taskloop-based directive. 
QualType SavedKmpTaskloopTQTy; /// Type typedef struct kmp_depend_info { /// kmp_intptr_t base_addr; /// size_t len; /// struct { /// bool in:1; /// bool out:1; /// } flags; /// } kmp_depend_info_t; QualType KmpDependInfoTy; /// Type typedef struct kmp_task_affinity_info { /// kmp_intptr_t base_addr; /// size_t len; /// struct { /// bool flag1 : 1; /// bool flag2 : 1; /// kmp_int32 reserved : 30; /// } flags; /// } kmp_task_affinity_info_t; QualType KmpTaskAffinityInfoTy; /// struct kmp_dim { // loop bounds info casted to kmp_int64 /// kmp_int64 lo; // lower /// kmp_int64 up; // upper /// kmp_int64 st; // stride /// }; QualType KmpDimTy; /// Type struct __tgt_offload_entry{ /// void *addr; // Pointer to the offload entry info. /// // (function or global) /// char *name; // Name of the function or global. /// size_t size; // Size of the entry info (0 if it a function). /// int32_t flags; /// int32_t reserved; /// }; QualType TgtOffloadEntryQTy; /// Entity that registers the offloading constants that were emitted so /// far. class OffloadEntriesInfoManagerTy { CodeGenModule &CGM; /// Number of entries registered so far. unsigned OffloadingEntriesNum = 0; public: /// Base class of the entries info. class OffloadEntryInfo { public: /// Kind of a given entry. enum OffloadingEntryInfoKinds : unsigned { /// Entry is a target region. OffloadingEntryInfoTargetRegion = 0, /// Entry is a declare target variable. OffloadingEntryInfoDeviceGlobalVar = 1, /// Invalid entry info. 
OffloadingEntryInfoInvalid = ~0u }; protected: OffloadEntryInfo() = delete; explicit OffloadEntryInfo(OffloadingEntryInfoKinds Kind) : Kind(Kind) {} explicit OffloadEntryInfo(OffloadingEntryInfoKinds Kind, unsigned Order, uint32_t Flags) : Flags(Flags), Order(Order), Kind(Kind) {} ~OffloadEntryInfo() = default; public: bool isValid() const { return Order != ~0u; } unsigned getOrder() const { return Order; } OffloadingEntryInfoKinds getKind() const { return Kind; } uint32_t getFlags() const { return Flags; } void setFlags(uint32_t NewFlags) { Flags = NewFlags; } llvm::Constant *getAddress() const { return cast_or_null<llvm::Constant>(Addr); } void setAddress(llvm::Constant *V) { assert(!Addr.pointsToAliveValue() && "Address has been set before!"); Addr = V; } static bool classof(const OffloadEntryInfo *Info) { return true; } private: /// Address of the entity that has to be mapped for offloading. llvm::WeakTrackingVH Addr; /// Flags associated with the device global. uint32_t Flags = 0u; /// Order this entry was emitted. unsigned Order = ~0u; OffloadingEntryInfoKinds Kind = OffloadingEntryInfoInvalid; }; /// Return true if a there are no entries defined. bool empty() const; /// Return number of entries defined so far. unsigned size() const { return OffloadingEntriesNum; } OffloadEntriesInfoManagerTy(CodeGenModule &CGM) : CGM(CGM) {} // // Target region entries related. // /// Kind of the target registry entry. enum OMPTargetRegionEntryKind : uint32_t { /// Mark the entry as target region. OMPTargetRegionEntryTargetRegion = 0x0, /// Mark the entry as a global constructor. OMPTargetRegionEntryCtor = 0x02, /// Mark the entry as a global destructor. OMPTargetRegionEntryDtor = 0x04, }; /// Target region entries info. class OffloadEntryInfoTargetRegion final : public OffloadEntryInfo { /// Address that can be used as the ID of the entry. 
llvm::Constant *ID = nullptr; public: OffloadEntryInfoTargetRegion() : OffloadEntryInfo(OffloadingEntryInfoTargetRegion) {} explicit OffloadEntryInfoTargetRegion(unsigned Order, llvm::Constant *Addr, llvm::Constant *ID, OMPTargetRegionEntryKind Flags) : OffloadEntryInfo(OffloadingEntryInfoTargetRegion, Order, Flags), ID(ID) { setAddress(Addr); } llvm::Constant *getID() const { return ID; } void setID(llvm::Constant *V) { assert(!ID && "ID has been set before!"); ID = V; } static bool classof(const OffloadEntryInfo *Info) { return Info->getKind() == OffloadingEntryInfoTargetRegion; } }; /// Initialize target region entry. void initializeTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID, StringRef ParentName, unsigned LineNum, unsigned Order); /// Register target region entry. void registerTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID, StringRef ParentName, unsigned LineNum, llvm::Constant *Addr, llvm::Constant *ID, OMPTargetRegionEntryKind Flags); /// Return true if a target region entry with the provided information /// exists. bool hasTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID, StringRef ParentName, unsigned LineNum, bool IgnoreAddressId = false) const; /// brief Applies action \a Action on all registered entries. typedef llvm::function_ref<void(unsigned, unsigned, StringRef, unsigned, const OffloadEntryInfoTargetRegion &)> OffloadTargetRegionEntryInfoActTy; void actOnTargetRegionEntriesInfo( const OffloadTargetRegionEntryInfoActTy &Action); // // Device global variable entries related. // /// Kind of the global variable entry.. enum OMPTargetGlobalVarEntryKind : uint32_t { /// Mark the entry as a to declare target. OMPTargetGlobalVarEntryTo = 0x0, /// Mark the entry as a to declare target link. OMPTargetGlobalVarEntryLink = 0x1, }; /// Device global variable entries info. class OffloadEntryInfoDeviceGlobalVar final : public OffloadEntryInfo { /// Type of the global variable. 
CharUnits VarSize; llvm::GlobalValue::LinkageTypes Linkage; public: OffloadEntryInfoDeviceGlobalVar() : OffloadEntryInfo(OffloadingEntryInfoDeviceGlobalVar) {} explicit OffloadEntryInfoDeviceGlobalVar(unsigned Order, OMPTargetGlobalVarEntryKind Flags) : OffloadEntryInfo(OffloadingEntryInfoDeviceGlobalVar, Order, Flags) {} explicit OffloadEntryInfoDeviceGlobalVar( unsigned Order, llvm::Constant *Addr, CharUnits VarSize, OMPTargetGlobalVarEntryKind Flags, llvm::GlobalValue::LinkageTypes Linkage) : OffloadEntryInfo(OffloadingEntryInfoDeviceGlobalVar, Order, Flags), VarSize(VarSize), Linkage(Linkage) { setAddress(Addr); } CharUnits getVarSize() const { return VarSize; } void setVarSize(CharUnits Size) { VarSize = Size; } llvm::GlobalValue::LinkageTypes getLinkage() const { return Linkage; } void setLinkage(llvm::GlobalValue::LinkageTypes LT) { Linkage = LT; } static bool classof(const OffloadEntryInfo *Info) { return Info->getKind() == OffloadingEntryInfoDeviceGlobalVar; } }; /// Initialize device global variable entry. void initializeDeviceGlobalVarEntryInfo(StringRef Name, OMPTargetGlobalVarEntryKind Flags, unsigned Order); /// Register device global variable entry. void registerDeviceGlobalVarEntryInfo(StringRef VarName, llvm::Constant *Addr, CharUnits VarSize, OMPTargetGlobalVarEntryKind Flags, llvm::GlobalValue::LinkageTypes Linkage); /// Checks if the variable with the given name has been registered already. bool hasDeviceGlobalVarEntryInfo(StringRef VarName) const { return OffloadEntriesDeviceGlobalVar.count(VarName) > 0; } /// Applies action \a Action on all registered entries. typedef llvm::function_ref<void(StringRef, const OffloadEntryInfoDeviceGlobalVar &)> OffloadDeviceGlobalVarEntryInfoActTy; void actOnDeviceGlobalVarEntriesInfo( const OffloadDeviceGlobalVarEntryInfoActTy &Action); private: // Storage for target region entries kind. The storage is to be indexed by // file ID, device ID, parent function name and line number. 
typedef llvm::DenseMap<unsigned, OffloadEntryInfoTargetRegion> OffloadEntriesTargetRegionPerLine; typedef llvm::StringMap<OffloadEntriesTargetRegionPerLine> OffloadEntriesTargetRegionPerParentName; typedef llvm::DenseMap<unsigned, OffloadEntriesTargetRegionPerParentName> OffloadEntriesTargetRegionPerFile; typedef llvm::DenseMap<unsigned, OffloadEntriesTargetRegionPerFile> OffloadEntriesTargetRegionPerDevice; typedef OffloadEntriesTargetRegionPerDevice OffloadEntriesTargetRegionTy; OffloadEntriesTargetRegionTy OffloadEntriesTargetRegion; /// Storage for device global variable entries kind. The storage is to be /// indexed by mangled name. typedef llvm::StringMap<OffloadEntryInfoDeviceGlobalVar> OffloadEntriesDeviceGlobalVarTy; OffloadEntriesDeviceGlobalVarTy OffloadEntriesDeviceGlobalVar; }; OffloadEntriesInfoManagerTy OffloadEntriesInfoManager; bool ShouldMarkAsGlobal = true; /// List of the emitted declarations. llvm::DenseSet<CanonicalDeclPtr<const Decl>> AlreadyEmittedTargetDecls; /// List of the global variables with their addresses that should not be /// emitted for the target. llvm::StringMap<llvm::WeakTrackingVH> EmittedNonTargetVariables; /// List of variables that can become declare target implicitly and, thus, /// must be emitted. llvm::SmallDenseSet<const VarDecl *> DeferredGlobalVariables; using NontemporalDeclsSet = llvm::SmallDenseSet<CanonicalDeclPtr<const Decl>>; /// Stack for list of declarations in current context marked as nontemporal. /// The set is the union of all current stack elements. llvm::SmallVector<NontemporalDeclsSet, 4> NontemporalDeclsStack; using UntiedLocalVarsAddressesMap = llvm::MapVector<CanonicalDeclPtr<const VarDecl>, std::pair<Address, Address>>; llvm::SmallVector<UntiedLocalVarsAddressesMap, 4> UntiedLocalVarsStack; /// Stack for list of addresses of declarations in current context marked as /// lastprivate conditional. The set is the union of all current stack /// elements. 
llvm::SmallVector<LastprivateConditionalData, 4> LastprivateConditionalStack;

/// Flag for keeping track of whether a requires unified_shared_memory
/// directive is present.
bool HasRequiresUnifiedSharedMemory = false;

/// Atomic ordering from the omp requires directive.
llvm::AtomicOrdering RequiresAtomicOrdering = llvm::AtomicOrdering::Monotonic;

/// Flag for keeping track of whether a target region has been emitted.
bool HasEmittedTargetRegion = false;

/// Flag for keeping track of whether a device routine has been emitted.
/// Device routines are specific to the
/// NOTE(review): the sentence above is truncated in the original —
/// presumably "specific to the device"; confirm intended wording.
bool HasEmittedDeclareTargetRegion = false;

/// Loads all the offload entries information from the host IR
/// metadata.
void loadOffloadInfoMetadata();

/// Returns __tgt_offload_entry type.
QualType getTgtOffloadEntryQTy();

/// Start scanning from statement \a S and emit all target regions
/// found along the way.
/// \param S Starting statement.
/// \param ParentName Name of the function declaration that is being scanned.
void scanForTargetRegionsFunctions(const Stmt *S, StringRef ParentName);

/// Build type kmp_routine_entry_t (if not built yet).
void emitKmpRoutineEntryT(QualType KmpInt32Ty);

/// Returns pointer to kmpc_micro type.
llvm::Type *getKmpc_MicroPointerTy();

/// Returns __kmpc_for_static_init_* runtime function for the specified
/// size \a IVSize and sign \a IVSigned. Will create a distribute call
/// __kmpc_distribute_static_init* if \a IsGPUDistribute is set.
llvm::FunctionCallee createForStaticInitFunction(unsigned IVSize,
                                                 bool IVSigned,
                                                 bool IsGPUDistribute);

/// Returns __kmpc_dispatch_init_* runtime function for the specified
/// size \a IVSize and sign \a IVSigned.
llvm::FunctionCallee createDispatchInitFunction(unsigned IVSize, bool IVSigned);

/// Returns __kmpc_dispatch_next_* runtime function for the specified
/// size \a IVSize and sign \a IVSigned.
llvm::FunctionCallee createDispatchNextFunction(unsigned IVSize, bool IVSigned); /// Returns __kmpc_dispatch_fini_* runtime function for the specified /// size \a IVSize and sign \a IVSigned. llvm::FunctionCallee createDispatchFiniFunction(unsigned IVSize, bool IVSigned); /// If the specified mangled name is not in the module, create and /// return threadprivate cache object. This object is a pointer's worth of /// storage that's reserved for use by the OpenMP runtime. /// \param VD Threadprivate variable. /// \return Cache variable for the specified threadprivate. llvm::Constant *getOrCreateThreadPrivateCache(const VarDecl *VD); /// Gets (if variable with the given name already exist) or creates /// internal global variable with the specified Name. The created variable has /// linkage CommonLinkage by default and is initialized by null value. /// \param Ty Type of the global variable. If it is exist already the type /// must be the same. /// \param Name Name of the variable. llvm::Constant *getOrCreateInternalVariable(llvm::Type *Ty, const llvm::Twine &Name, unsigned AddressSpace = 0); /// Set of threadprivate variables with the generated initializer. llvm::StringSet<> ThreadPrivateWithDefinition; /// Set of declare target variables with the generated initializer. llvm::StringSet<> DeclareTargetWithDefinition; /// Emits initialization code for the threadprivate variables. /// \param VDAddr Address of the global variable \a VD. /// \param Ctor Pointer to a global init function for \a VD. /// \param CopyCtor Pointer to a global copy function for \a VD. /// \param Dtor Pointer to a global destructor function for \a VD. /// \param Loc Location of threadprivate declaration. void emitThreadPrivateVarInit(CodeGenFunction &CGF, Address VDAddr, llvm::Value *Ctor, llvm::Value *CopyCtor, llvm::Value *Dtor, SourceLocation Loc); /// Emit the array initialization or deletion portion for user-defined mapper /// code generation. 
void emitUDMapperArrayInitOrDel(CodeGenFunction &MapperCGF, llvm::Value *Handle, llvm::Value *BasePtr, llvm::Value *Ptr, llvm::Value *Size, llvm::Value *MapType, llvm::Value *MapName, CharUnits ElementSize, llvm::BasicBlock *ExitBB, bool IsInit); struct TaskResultTy { llvm::Value *NewTask = nullptr; llvm::Function *TaskEntry = nullptr; llvm::Value *NewTaskNewTaskTTy = nullptr; LValue TDBase; const RecordDecl *KmpTaskTQTyRD = nullptr; llvm::Value *TaskDupFn = nullptr; }; /// Emit task region for the task directive. The task region is emitted in /// several steps: /// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32 /// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds, /// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the /// function: /// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) { /// TaskFunction(gtid, tt->part_id, tt->shareds); /// return 0; /// } /// 2. Copy a list of shared variables to field shareds of the resulting /// structure kmp_task_t returned by the previous call (if any). /// 3. Copy a pointer to destructions function to field destructions of the /// resulting structure kmp_task_t. /// \param D Current task directive. /// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32 /// /*part_id*/, captured_struct */*__context*/); /// \param SharedsTy A type which contains references the shared variables. /// \param Shareds Context with the list of shared variables from the \p /// TaskFunction. /// \param Data Additional data for task generation like tiednsee, final /// state, list of privates etc. TaskResultTy emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc, const OMPExecutableDirective &D, llvm::Function *TaskFunction, QualType SharedsTy, Address Shareds, const OMPTaskDataTy &Data); /// Emit code that pushes the trip count of loops associated with constructs /// 'target teams distribute' and 'teams distribute parallel for'. 
/// \param SizeEmitter Emits the int64 value for the number of iterations of /// the associated loop. void emitTargetNumIterationsCall( CodeGenFunction &CGF, const OMPExecutableDirective &D, llvm::Value *DeviceID, llvm::function_ref<llvm::Value *(CodeGenFunction &CGF, const OMPLoopDirective &D)> SizeEmitter); /// Emit update for lastprivate conditional data. void emitLastprivateConditionalUpdate(CodeGenFunction &CGF, LValue IVLVal, StringRef UniqueDeclName, LValue LVal, SourceLocation Loc); /// Returns the number of the elements and the address of the depobj /// dependency array. /// \return Number of elements in depobj array and the pointer to the array of /// dependencies. std::pair<llvm::Value *, LValue> getDepobjElements(CodeGenFunction &CGF, LValue DepobjLVal, SourceLocation Loc); public: explicit CGOpenMPRuntime(CodeGenModule &CGM) : CGOpenMPRuntime(CGM, ".", ".") {} virtual ~CGOpenMPRuntime() {} virtual void clear(); /// Emits code for OpenMP 'if' clause using specified \a CodeGen /// function. Here is the logic: /// if (Cond) { /// ThenGen(); /// } else { /// ElseGen(); /// } void emitIfClause(CodeGenFunction &CGF, const Expr *Cond, const RegionCodeGenTy &ThenGen, const RegionCodeGenTy &ElseGen); /// Checks if the \p Body is the \a CompoundStmt and returns its child /// statement iff there is only one that is not evaluatable at the compile /// time. static const Stmt *getSingleCompoundChild(ASTContext &Ctx, const Stmt *Body); /// Get the platform-specific name separator. std::string getName(ArrayRef<StringRef> Parts) const; /// Emit code for the specified user defined reduction construct. virtual void emitUserDefinedReduction(CodeGenFunction *CGF, const OMPDeclareReductionDecl *D); /// Get combiner/initializer for the specified user-defined reduction, if any. virtual std::pair<llvm::Function *, llvm::Function *> getUserDefinedReduction(const OMPDeclareReductionDecl *D); /// Emit the function for the user defined mapper construct. 
void emitUserDefinedMapper(const OMPDeclareMapperDecl *D, CodeGenFunction *CGF = nullptr); /// Get the function for the specified user-defined mapper. If it does not /// exist, create one. llvm::Function * getOrCreateUserDefinedMapperFunc(const OMPDeclareMapperDecl *D); /// Emits outlined function for the specified OpenMP parallel directive /// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID, /// kmp_int32 BoundID, struct context_vars*). /// \param D OpenMP directive. /// \param ThreadIDVar Variable for thread id in the current OpenMP region. /// \param InnermostKind Kind of innermost directive (for simple directives it /// is a directive itself, for combined - its innermost directive). /// \param CodeGen Code generation sequence for the \a D directive. virtual llvm::Function *emitParallelOutlinedFunction( const OMPExecutableDirective &D, const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen); /// Emits outlined function for the specified OpenMP teams directive /// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID, /// kmp_int32 BoundID, struct context_vars*). /// \param D OpenMP directive. /// \param ThreadIDVar Variable for thread id in the current OpenMP region. /// \param InnermostKind Kind of innermost directive (for simple directives it /// is a directive itself, for combined - its innermost directive). /// \param CodeGen Code generation sequence for the \a D directive. virtual llvm::Function *emitTeamsOutlinedFunction( const OMPExecutableDirective &D, const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen); /// Emits outlined function for the OpenMP task directive \a D. This /// outlined function has type void(*)(kmp_int32 ThreadID, struct task_t* /// TaskT). /// \param D OpenMP directive. /// \param ThreadIDVar Variable for thread id in the current OpenMP region. 
/// \param PartIDVar Variable for partition id in the current OpenMP untied /// task region. /// \param TaskTVar Variable for task_t argument. /// \param InnermostKind Kind of innermost directive (for simple directives it /// is a directive itself, for combined - its innermost directive). /// \param CodeGen Code generation sequence for the \a D directive. /// \param Tied true if task is generated for tied task, false otherwise. /// \param NumberOfParts Number of parts in untied task. Ignored for tied /// tasks. /// virtual llvm::Function *emitTaskOutlinedFunction( const OMPExecutableDirective &D, const VarDecl *ThreadIDVar, const VarDecl *PartIDVar, const VarDecl *TaskTVar, OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen, bool Tied, unsigned &NumberOfParts); /// Cleans up references to the objects in finished function. /// virtual void functionFinished(CodeGenFunction &CGF); /// Emits code for parallel or serial call of the \a OutlinedFn with /// variables captured in a record which address is stored in \a /// CapturedStruct. /// \param OutlinedFn Outlined function to be run in parallel threads. Type of /// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*). /// \param CapturedVars A pointer to the record with the references to /// variables used in \a OutlinedFn function. /// \param IfCond Condition in the associated 'if' clause, if it was /// specified, nullptr otherwise. /// virtual void emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc, llvm::Function *OutlinedFn, ArrayRef<llvm::Value *> CapturedVars, const Expr *IfCond); /// Emits a critical region. /// \param CriticalName Name of the critical region. /// \param CriticalOpGen Generator for the statement associated with the given /// critical region. /// \param Hint Value of the 'hint' clause (optional). 
virtual void emitCriticalRegion(CodeGenFunction &CGF, StringRef CriticalName, const RegionCodeGenTy &CriticalOpGen, SourceLocation Loc, const Expr *Hint = nullptr); /// Emits a master region. /// \param MasterOpGen Generator for the statement associated with the given /// master region. virtual void emitMasterRegion(CodeGenFunction &CGF, const RegionCodeGenTy &MasterOpGen, SourceLocation Loc); /// Emits a masked region. /// \param MaskedOpGen Generator for the statement associated with the given /// masked region. virtual void emitMaskedRegion(CodeGenFunction &CGF, const RegionCodeGenTy &MaskedOpGen, SourceLocation Loc, const Expr *Filter = nullptr); /// Emits code for a taskyield directive. virtual void emitTaskyieldCall(CodeGenFunction &CGF, SourceLocation Loc); /// Emit a taskgroup region. /// \param TaskgroupOpGen Generator for the statement associated with the /// given taskgroup region. virtual void emitTaskgroupRegion(CodeGenFunction &CGF, const RegionCodeGenTy &TaskgroupOpGen, SourceLocation Loc); /// Emits a single region. /// \param SingleOpGen Generator for the statement associated with the given /// single region. virtual void emitSingleRegion(CodeGenFunction &CGF, const RegionCodeGenTy &SingleOpGen, SourceLocation Loc, ArrayRef<const Expr *> CopyprivateVars, ArrayRef<const Expr *> DestExprs, ArrayRef<const Expr *> SrcExprs, ArrayRef<const Expr *> AssignmentOps); /// Emit an ordered region. /// \param OrderedOpGen Generator for the statement associated with the given /// ordered region. virtual void emitOrderedRegion(CodeGenFunction &CGF, const RegionCodeGenTy &OrderedOpGen, SourceLocation Loc, bool IsThreads); /// Emit an implicit/explicit barrier for OpenMP threads. /// \param Kind Directive for which this implicit barrier call must be /// generated. Must be OMPD_barrier for explicit barrier generation. /// \param EmitChecks true if need to emit checks for cancellation barriers. 
/// \param ForceSimpleCall true simple barrier call must be emitted, false if /// runtime class decides which one to emit (simple or with cancellation /// checks). /// virtual void emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind Kind, bool EmitChecks = true, bool ForceSimpleCall = false); /// Check if the specified \a ScheduleKind is static non-chunked. /// This kind of worksharing directive is emitted without outer loop. /// \param ScheduleKind Schedule kind specified in the 'schedule' clause. /// \param Chunked True if chunk is specified in the clause. /// virtual bool isStaticNonchunked(OpenMPScheduleClauseKind ScheduleKind, bool Chunked) const; /// Check if the specified \a ScheduleKind is static non-chunked. /// This kind of distribute directive is emitted without outer loop. /// \param ScheduleKind Schedule kind specified in the 'dist_schedule' clause. /// \param Chunked True if chunk is specified in the clause. /// virtual bool isStaticNonchunked(OpenMPDistScheduleClauseKind ScheduleKind, bool Chunked) const; /// Check if the specified \a ScheduleKind is static chunked. /// \param ScheduleKind Schedule kind specified in the 'schedule' clause. /// \param Chunked True if chunk is specified in the clause. /// virtual bool isStaticChunked(OpenMPScheduleClauseKind ScheduleKind, bool Chunked) const; /// Check if the specified \a ScheduleKind is static non-chunked. /// \param ScheduleKind Schedule kind specified in the 'dist_schedule' clause. /// \param Chunked True if chunk is specified in the clause. /// virtual bool isStaticChunked(OpenMPDistScheduleClauseKind ScheduleKind, bool Chunked) const; /// Check if the specified \a ScheduleKind is dynamic. /// This kind of worksharing directive is emitted without outer loop. /// \param ScheduleKind Schedule Kind specified in the 'schedule' clause. 
/// virtual bool isDynamic(OpenMPScheduleClauseKind ScheduleKind) const; /// struct with the values to be passed to the dispatch runtime function struct DispatchRTInput { /// Loop lower bound llvm::Value *LB = nullptr; /// Loop upper bound llvm::Value *UB = nullptr; /// Chunk size specified using 'schedule' clause (nullptr if chunk /// was not specified) llvm::Value *Chunk = nullptr; DispatchRTInput() = default; DispatchRTInput(llvm::Value *LB, llvm::Value *UB, llvm::Value *Chunk) : LB(LB), UB(UB), Chunk(Chunk) {} }; /// Call the appropriate runtime routine to initialize it before start /// of loop. /// This is used for non static scheduled types and when the ordered /// clause is present on the loop construct. /// Depending on the loop schedule, it is necessary to call some runtime /// routine before start of the OpenMP loop to get the loop upper / lower /// bounds \a LB and \a UB and stride \a ST. /// /// \param CGF Reference to current CodeGenFunction. /// \param Loc Clang source location. /// \param ScheduleKind Schedule kind, specified by the 'schedule' clause. /// \param IVSize Size of the iteration variable in bits. /// \param IVSigned Sign of the iteration variable. /// \param Ordered true if loop is ordered, false otherwise. /// \param DispatchValues struct containing llvm values for lower bound, upper /// bound, and chunk expression. /// For the default (nullptr) value, the chunk 1 will be used. /// virtual void emitForDispatchInit(CodeGenFunction &CGF, SourceLocation Loc, const OpenMPScheduleTy &ScheduleKind, unsigned IVSize, bool IVSigned, bool Ordered, const DispatchRTInput &DispatchValues); /// Struct with the values to be passed to the static runtime function struct StaticRTInput { /// Size of the iteration variable in bits. unsigned IVSize = 0; /// Sign of the iteration variable. bool IVSigned = false; /// true if loop is ordered, false otherwise. 
bool Ordered = false; /// Address of the output variable in which the flag of the last iteration /// is returned. Address IL = Address::invalid(); /// Address of the output variable in which the lower iteration number is /// returned. Address LB = Address::invalid(); /// Address of the output variable in which the upper iteration number is /// returned. Address UB = Address::invalid(); /// Address of the output variable in which the stride value is returned /// necessary to generated the static_chunked scheduled loop. Address ST = Address::invalid(); /// Value of the chunk for the static_chunked scheduled loop. For the /// default (nullptr) value, the chunk 1 will be used. llvm::Value *Chunk = nullptr; StaticRTInput(unsigned IVSize, bool IVSigned, bool Ordered, Address IL, Address LB, Address UB, Address ST, llvm::Value *Chunk = nullptr) : IVSize(IVSize), IVSigned(IVSigned), Ordered(Ordered), IL(IL), LB(LB), UB(UB), ST(ST), Chunk(Chunk) {} }; /// Call the appropriate runtime routine to initialize it before start /// of loop. /// /// This is used only in case of static schedule, when the user did not /// specify a ordered clause on the loop construct. /// Depending on the loop schedule, it is necessary to call some runtime /// routine before start of the OpenMP loop to get the loop upper / lower /// bounds LB and UB and stride ST. /// /// \param CGF Reference to current CodeGenFunction. /// \param Loc Clang source location. /// \param DKind Kind of the directive. /// \param ScheduleKind Schedule kind, specified by the 'schedule' clause. /// \param Values Input arguments for the construct. /// virtual void emitForStaticInit(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind DKind, const OpenMPScheduleTy &ScheduleKind, const StaticRTInput &Values); /// /// \param CGF Reference to current CodeGenFunction. /// \param Loc Clang source location. /// \param SchedKind Schedule kind, specified by the 'dist_schedule' clause. 
/// \param Values Input arguments for the construct. /// virtual void emitDistributeStaticInit(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDistScheduleClauseKind SchedKind, const StaticRTInput &Values); /// Call the appropriate runtime routine to notify that we finished /// iteration of the ordered loop with the dynamic scheduling. /// /// \param CGF Reference to current CodeGenFunction. /// \param Loc Clang source location. /// \param IVSize Size of the iteration variable in bits. /// \param IVSigned Sign of the iteration variable. /// virtual void emitForOrderedIterationEnd(CodeGenFunction &CGF, SourceLocation Loc, unsigned IVSize, bool IVSigned); /// Call the appropriate runtime routine to notify that we finished /// all the work with current loop. /// /// \param CGF Reference to current CodeGenFunction. /// \param Loc Clang source location. /// \param DKind Kind of the directive for which the static finish is emitted. /// virtual void emitForStaticFinish(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind DKind); /// Call __kmpc_dispatch_next( /// ident_t *loc, kmp_int32 tid, kmp_int32 *p_lastiter, /// kmp_int[32|64] *p_lower, kmp_int[32|64] *p_upper, /// kmp_int[32|64] *p_stride); /// \param IVSize Size of the iteration variable in bits. /// \param IVSigned Sign of the iteration variable. /// \param IL Address of the output variable in which the flag of the /// last iteration is returned. /// \param LB Address of the output variable in which the lower iteration /// number is returned. /// \param UB Address of the output variable in which the upper iteration /// number is returned. /// \param ST Address of the output variable in which the stride value is /// returned. 
virtual llvm::Value *emitForNext(CodeGenFunction &CGF, SourceLocation Loc, unsigned IVSize, bool IVSigned, Address IL, Address LB, Address UB, Address ST); /// Emits call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32 /// global_tid, kmp_int32 num_threads) to generate code for 'num_threads' /// clause. /// \param NumThreads An integer value of threads. virtual void emitNumThreadsClause(CodeGenFunction &CGF, llvm::Value *NumThreads, SourceLocation Loc); /// Emit call to void __kmpc_push_proc_bind(ident_t *loc, kmp_int32 /// global_tid, int proc_bind) to generate code for 'proc_bind' clause. virtual void emitProcBindClause(CodeGenFunction &CGF, llvm::omp::ProcBindKind ProcBind, SourceLocation Loc); /// Returns address of the threadprivate variable for the current /// thread. /// \param VD Threadprivate variable. /// \param VDAddr Address of the global variable \a VD. /// \param Loc Location of the reference to threadprivate var. /// \return Address of the threadprivate variable for the current thread. virtual Address getAddrOfThreadPrivate(CodeGenFunction &CGF, const VarDecl *VD, Address VDAddr, SourceLocation Loc); /// Returns the address of the variable marked as declare target with link /// clause OR as declare target with to clause and unified memory. virtual Address getAddrOfDeclareTargetVar(const VarDecl *VD); /// Emit a code for initialization of threadprivate variable. It emits /// a call to runtime library which adds initial value to the newly created /// threadprivate variable (if it is not constant) and registers destructor /// for the variable (if any). /// \param VD Threadprivate variable. /// \param VDAddr Address of the global variable \a VD. /// \param Loc Location of threadprivate declaration. /// \param PerformInit true if initialization expression is not constant. 
virtual llvm::Function * emitThreadPrivateVarDefinition(const VarDecl *VD, Address VDAddr, SourceLocation Loc, bool PerformInit, CodeGenFunction *CGF = nullptr); /// Emit a code for initialization of declare target variable. /// \param VD Declare target variable. /// \param Addr Address of the global variable \a VD. /// \param PerformInit true if initialization expression is not constant. virtual bool emitDeclareTargetVarDefinition(const VarDecl *VD, llvm::GlobalVariable *Addr, bool PerformInit); /// Creates artificial threadprivate variable with name \p Name and type \p /// VarType. /// \param VarType Type of the artificial threadprivate variable. /// \param Name Name of the artificial threadprivate variable. virtual Address getAddrOfArtificialThreadPrivate(CodeGenFunction &CGF, QualType VarType, StringRef Name); /// Emit flush of the variables specified in 'omp flush' directive. /// \param Vars List of variables to flush. virtual void emitFlush(CodeGenFunction &CGF, ArrayRef<const Expr *> Vars, SourceLocation Loc, llvm::AtomicOrdering AO); /// Emit task region for the task directive. The task region is /// emitted in several steps: /// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32 /// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds, /// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the /// function: /// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) { /// TaskFunction(gtid, tt->part_id, tt->shareds); /// return 0; /// } /// 2. Copy a list of shared variables to field shareds of the resulting /// structure kmp_task_t returned by the previous call (if any). /// 3. Copy a pointer to destructions function to field destructions of the /// resulting structure kmp_task_t. /// 4. Emit a call to kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid, /// kmp_task_t *new_task), where new_task is a resulting structure from /// previous items. /// \param D Current task directive. 
/// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32 /// /*part_id*/, captured_struct */*__context*/); /// \param SharedsTy A type which contains references the shared variables. /// \param Shareds Context with the list of shared variables from the \p /// TaskFunction. /// \param IfCond Not a nullptr if 'if' clause was specified, nullptr /// otherwise. /// \param Data Additional data for task generation like tiednsee, final /// state, list of privates etc. virtual void emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc, const OMPExecutableDirective &D, llvm::Function *TaskFunction, QualType SharedsTy, Address Shareds, const Expr *IfCond, const OMPTaskDataTy &Data); /// Emit task region for the taskloop directive. The taskloop region is /// emitted in several steps: /// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32 /// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds, /// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the /// function: /// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) { /// TaskFunction(gtid, tt->part_id, tt->shareds); /// return 0; /// } /// 2. Copy a list of shared variables to field shareds of the resulting /// structure kmp_task_t returned by the previous call (if any). /// 3. Copy a pointer to destructions function to field destructions of the /// resulting structure kmp_task_t. /// 4. Emit a call to void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t /// *task, int if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int /// nogroup, int sched, kmp_uint64 grainsize, void *task_dup ), where new_task /// is a resulting structure from /// previous items. /// \param D Current task directive. /// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32 /// /*part_id*/, captured_struct */*__context*/); /// \param SharedsTy A type which contains references the shared variables. 
/// \param Shareds Context with the list of shared variables from the \p /// TaskFunction. /// \param IfCond Not a nullptr if 'if' clause was specified, nullptr /// otherwise. /// \param Data Additional data for task generation like tiednsee, final /// state, list of privates etc. virtual void emitTaskLoopCall(CodeGenFunction &CGF, SourceLocation Loc, const OMPLoopDirective &D, llvm::Function *TaskFunction, QualType SharedsTy, Address Shareds, const Expr *IfCond, const OMPTaskDataTy &Data); /// Emit code for the directive that does not require outlining. /// /// \param InnermostKind Kind of innermost directive (for simple directives it /// is a directive itself, for combined - its innermost directive). /// \param CodeGen Code generation sequence for the \a D directive. /// \param HasCancel true if region has inner cancel directive, false /// otherwise. virtual void emitInlinedDirective(CodeGenFunction &CGF, OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen, bool HasCancel = false); /// Emits reduction function. /// \param ArgsType Array type containing pointers to reduction variables. /// \param Privates List of private copies for original reduction arguments. /// \param LHSExprs List of LHS in \a ReductionOps reduction operations. /// \param RHSExprs List of RHS in \a ReductionOps reduction operations. /// \param ReductionOps List of reduction operations in form 'LHS binop RHS' /// or 'operator binop(LHS, RHS)'. 
  llvm::Function *emitReductionFunction(SourceLocation Loc,
                                        llvm::Type *ArgsType,
                                        ArrayRef<const Expr *> Privates,
                                        ArrayRef<const Expr *> LHSExprs,
                                        ArrayRef<const Expr *> RHSExprs,
                                        ArrayRef<const Expr *> ReductionOps);

  /// Emits single reduction combiner
  void emitSingleReductionCombiner(CodeGenFunction &CGF,
                                   const Expr *ReductionOp,
                                   const Expr *PrivateRef,
                                   const DeclRefExpr *LHS,
                                   const DeclRefExpr *RHS);

  struct ReductionOptionsTy {
    bool WithNowait;
    bool SimpleReduction;
    OpenMPDirectiveKind ReductionKind;
  };

  /// Emit a code for reduction clause. Next code should be emitted for
  /// reduction:
  /// \code
  ///
  /// static kmp_critical_name lock = { 0 };
  ///
  /// void reduce_func(void *lhs[<n>], void *rhs[<n>]) {
  ///   ...
  ///   *(Type<i>*)lhs[i] = RedOp<i>(*(Type<i>*)lhs[i], *(Type<i>*)rhs[i]);
  ///   ...
  /// }
  ///
  /// ...
  /// void *RedList[<n>] = {&<RHSExprs>[0], ..., &<RHSExprs>[<n>-1]};
  /// switch (__kmpc_reduce{_nowait}(<loc>, <gtid>, <n>, sizeof(RedList),
  /// RedList, reduce_func, &<lock>)) {
  /// case 1:
  ///   ...
  ///   <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]);
  ///   ...
  ///   __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>);
  ///   break;
  /// case 2:
  ///   ...
  ///   Atomic(<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]));
  ///   ...
  ///   break;
  /// default:;
  /// }
  /// \endcode
  ///
  /// \param Privates List of private copies for original reduction arguments.
  /// \param LHSExprs List of LHS in \a ReductionOps reduction operations.
  /// \param RHSExprs List of RHS in \a ReductionOps reduction operations.
  /// \param ReductionOps List of reduction operations in form 'LHS binop RHS'
  /// or 'operator binop(LHS, RHS)'.
  /// \param Options List of options for reduction codegen:
  ///     WithNowait true if parent directive has also nowait clause, false
  ///     otherwise.
  ///     SimpleReduction Emit reduction operation only. Used for omp simd
  ///     directive on the host.
  ///     ReductionKind The kind of reduction to perform.
  virtual void emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
                             ArrayRef<const Expr *> Privates,
                             ArrayRef<const Expr *> LHSExprs,
                             ArrayRef<const Expr *> RHSExprs,
                             ArrayRef<const Expr *> ReductionOps,
                             ReductionOptionsTy Options);

  /// Emit a code for initialization of task reduction clause. Next code
  /// should be emitted for reduction:
  /// \code
  ///
  /// _taskred_item_t red_data[n];
  /// ...
  /// red_data[i].shar = &shareds[i];
  /// red_data[i].orig = &origs[i];
  /// red_data[i].size = sizeof(origs[i]);
  /// red_data[i].f_init = (void*)RedInit<i>;
  /// red_data[i].f_fini = (void*)RedDest<i>;
  /// red_data[i].f_comb = (void*)RedOp<i>;
  /// red_data[i].flags = <Flag_i>;
  /// ...
  /// void* tg1 = __kmpc_taskred_init(gtid, n, red_data);
  /// \endcode
  /// For reduction clause with task modifier it emits the next call:
  /// \code
  ///
  /// _taskred_item_t red_data[n];
  /// ...
  /// red_data[i].shar = &shareds[i];
  /// red_data[i].orig = &origs[i];
  /// red_data[i].size = sizeof(origs[i]);
  /// red_data[i].f_init = (void*)RedInit<i>;
  /// red_data[i].f_fini = (void*)RedDest<i>;
  /// red_data[i].f_comb = (void*)RedOp<i>;
  /// red_data[i].flags = <Flag_i>;
  /// ...
  /// void* tg1 = __kmpc_taskred_modifier_init(loc, gtid, is_worksharing, n,
  /// red_data);
  /// \endcode
  /// \param LHSExprs List of LHS in \a Data.ReductionOps reduction operations.
  /// \param RHSExprs List of RHS in \a Data.ReductionOps reduction operations.
  /// \param Data Additional data for task generation like tiedness, final
  /// state, list of privates, reductions etc.
  virtual llvm::Value *emitTaskReductionInit(CodeGenFunction &CGF,
                                             SourceLocation Loc,
                                             ArrayRef<const Expr *> LHSExprs,
                                             ArrayRef<const Expr *> RHSExprs,
                                             const OMPTaskDataTy &Data);

  /// Emits the following code for reduction clause with task modifier:
  /// \code
  /// __kmpc_task_reduction_modifier_fini(loc, gtid, is_worksharing);
  /// \endcode
  virtual void emitTaskReductionFini(CodeGenFunction &CGF, SourceLocation Loc,
                                     bool IsWorksharingReduction);

  /// Required to resolve existing problems in the runtime. Emits threadprivate
  /// variables to store the size of the VLAs/array sections for
  /// initializer/combiner/finalizer functions.
  /// \param RCG Allows to reuse an existing data for the reductions.
  /// \param N Reduction item for which fixups must be emitted.
  virtual void emitTaskReductionFixups(CodeGenFunction &CGF, SourceLocation Loc,
                                       ReductionCodeGen &RCG, unsigned N);

  /// Get the address of `void *` type of the private copy of the reduction
  /// item specified by the \p SharedLVal.
  /// \param ReductionsPtr Pointer to the reduction data returned by the
  /// emitTaskReductionInit function.
  /// \param SharedLVal Address of the original reduction item.
  virtual Address getTaskReductionItem(CodeGenFunction &CGF, SourceLocation Loc,
                                       llvm::Value *ReductionsPtr,
                                       LValue SharedLVal);

  /// Emit code for 'taskwait' directive.
  virtual void emitTaskwaitCall(CodeGenFunction &CGF, SourceLocation Loc);

  /// Emit code for 'cancellation point' construct.
  /// \param CancelRegion Region kind for which the cancellation point must be
  /// emitted.
  ///
  virtual void emitCancellationPointCall(CodeGenFunction &CGF,
                                         SourceLocation Loc,
                                         OpenMPDirectiveKind CancelRegion);

  /// Emit code for 'cancel' construct.
  /// \param IfCond Condition in the associated 'if' clause, if it was
  /// specified, nullptr otherwise.
  /// \param CancelRegion Region kind for which the cancel must be emitted.
  ///
  virtual void emitCancelCall(CodeGenFunction &CGF, SourceLocation Loc,
                              const Expr *IfCond,
                              OpenMPDirectiveKind CancelRegion);

  /// Emit outlined function for 'target' directive.
  /// \param D Directive to emit.
  /// \param ParentName Name of the function that encloses the target region.
  /// \param OutlinedFn Outlined function value to be defined by this call.
  /// \param OutlinedFnID Outlined function ID value to be defined by this call.
  /// \param IsOffloadEntry True if the outlined function is an offload entry.
  /// \param CodeGen Code generation sequence for the \a D directive.
  /// An outlined function may not be an entry if, e.g. the if clause always
  /// evaluates to false.
  virtual void emitTargetOutlinedFunction(const OMPExecutableDirective &D,
                                          StringRef ParentName,
                                          llvm::Function *&OutlinedFn,
                                          llvm::Constant *&OutlinedFnID,
                                          bool IsOffloadEntry,
                                          const RegionCodeGenTy &CodeGen);

  /// Emit the target offloading code associated with \a D. The emitted
  /// code attempts offloading the execution to the device, in the event of
  /// a failure it executes the host version outlined in \a OutlinedFn.
  /// \param D Directive to emit.
  /// \param OutlinedFn Host version of the code to be offloaded.
  /// \param OutlinedFnID ID of host version of the code to be offloaded.
  /// \param IfCond Expression evaluated in if clause associated with the target
  /// directive, or null if no if clause is used.
  /// \param Device Expression evaluated in device clause associated with the
  /// target directive, or null if no device clause is used and device modifier.
  /// \param SizeEmitter Callback to emit number of iterations for loop-based
  /// directives.
  virtual void emitTargetCall(
      CodeGenFunction &CGF, const OMPExecutableDirective &D,
      llvm::Function *OutlinedFn, llvm::Value *OutlinedFnID, const Expr *IfCond,
      llvm::PointerIntPair<const Expr *, 2, OpenMPDeviceClauseModifier> Device,
      llvm::function_ref<llvm::Value *(CodeGenFunction &CGF,
                                       const OMPLoopDirective &D)>
          SizeEmitter);

  /// Emit the target regions enclosed in \a GD function definition or
  /// the function itself in case it is a valid device function. Returns true if
  /// \a GD was dealt with successfully.
  /// \param GD Function to scan.
  virtual bool emitTargetFunctions(GlobalDecl GD);

  /// Emit the global variable if it is a valid device global variable.
  /// Returns true if \a GD was dealt with successfully.
  /// \param GD Variable declaration to emit.
  virtual bool emitTargetGlobalVariable(GlobalDecl GD);

  /// Checks if the provided global decl \a GD is a declare target variable and
  /// registers it when emitting code for the host.
  virtual void registerTargetGlobalVariable(const VarDecl *VD,
                                            llvm::Constant *Addr);

  /// Emit the global \a GD if it is meaningful for the target. Returns
  /// if it was emitted successfully.
  /// \param GD Global to scan.
  virtual bool emitTargetGlobal(GlobalDecl GD);

  /// Creates and returns a registration function for when at least one
  /// requires directives was used in the current module.
  llvm::Function *emitRequiresDirectiveRegFun();

  /// Creates all the offload entries in the current compilation unit
  /// along with the associated metadata.
  void createOffloadEntriesAndInfoMetadata();

  /// Emits code for teams call of the \a OutlinedFn with
  /// variables captured in a record which address is stored in \a
  /// CapturedStruct.
  /// \param OutlinedFn Outlined function to be run by team masters. Type of
  /// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*).
  /// \param CapturedVars A pointer to the record with the references to
  /// variables used in \a OutlinedFn function.
/// virtual void emitTeamsCall(CodeGenFunction &CGF, const OMPExecutableDirective &D, SourceLocation Loc, llvm::Function *OutlinedFn, ArrayRef<llvm::Value *> CapturedVars); /// Emits call to void __kmpc_push_num_teams(ident_t *loc, kmp_int32 /// global_tid, kmp_int32 num_teams, kmp_int32 thread_limit) to generate code /// for num_teams clause. /// \param NumTeams An integer expression of teams. /// \param ThreadLimit An integer expression of threads. virtual void emitNumTeamsClause(CodeGenFunction &CGF, const Expr *NumTeams, const Expr *ThreadLimit, SourceLocation Loc); /// Struct that keeps all the relevant information that should be kept /// throughout a 'target data' region. class TargetDataInfo { /// Set to true if device pointer information have to be obtained. bool RequiresDevicePointerInfo = false; /// Set to true if Clang emits separate runtime calls for the beginning and /// end of the region. These calls might have separate map type arrays. bool SeparateBeginEndCalls = false; public: /// The array of base pointer passed to the runtime library. llvm::Value *BasePointersArray = nullptr; /// The array of section pointers passed to the runtime library. llvm::Value *PointersArray = nullptr; /// The array of sizes passed to the runtime library. llvm::Value *SizesArray = nullptr; /// The array of map types passed to the runtime library for the beginning /// of the region or for the entire region if there are no separate map /// types for the region end. llvm::Value *MapTypesArray = nullptr; /// The array of map types passed to the runtime library for the end of the /// region, or nullptr if there are no separate map types for the region /// end. llvm::Value *MapTypesArrayEnd = nullptr; /// The array of user-defined mappers passed to the runtime library. 
llvm::Value *MappersArray = nullptr; /// The array of original declaration names of mapped pointers sent to the /// runtime library for debugging llvm::Value *MapNamesArray = nullptr; /// Indicate whether any user-defined mapper exists. bool HasMapper = false; /// The total number of pointers passed to the runtime library. unsigned NumberOfPtrs = 0u; /// Map between the a declaration of a capture and the corresponding base /// pointer address where the runtime returns the device pointers. llvm::DenseMap<const ValueDecl *, Address> CaptureDeviceAddrMap; explicit TargetDataInfo() {} explicit TargetDataInfo(bool RequiresDevicePointerInfo, bool SeparateBeginEndCalls) : RequiresDevicePointerInfo(RequiresDevicePointerInfo), SeparateBeginEndCalls(SeparateBeginEndCalls) {} /// Clear information about the data arrays. void clearArrayInfo() { BasePointersArray = nullptr; PointersArray = nullptr; SizesArray = nullptr; MapTypesArray = nullptr; MapTypesArrayEnd = nullptr; MapNamesArray = nullptr; MappersArray = nullptr; HasMapper = false; NumberOfPtrs = 0u; } /// Return true if the current target data information has valid arrays. bool isValid() { return BasePointersArray && PointersArray && SizesArray && MapTypesArray && (!HasMapper || MappersArray) && NumberOfPtrs; } bool requiresDevicePointerInfo() { return RequiresDevicePointerInfo; } bool separateBeginEndCalls() { return SeparateBeginEndCalls; } }; /// Emit the target data mapping code associated with \a D. /// \param D Directive to emit. /// \param IfCond Expression evaluated in if clause associated with the /// target directive, or null if no device clause is used. /// \param Device Expression evaluated in device clause associated with the /// target directive, or null if no device clause is used. /// \param Info A record used to store information that needs to be preserved /// until the region is closed. 
  virtual void emitTargetDataCalls(CodeGenFunction &CGF,
                                   const OMPExecutableDirective &D,
                                   const Expr *IfCond, const Expr *Device,
                                   const RegionCodeGenTy &CodeGen,
                                   TargetDataInfo &Info);

  /// Emit the data mapping/movement code associated with the directive
  /// \a D that should be of the form 'target [{enter|exit} data | update]'.
  /// \param D Directive to emit.
  /// \param IfCond Expression evaluated in if clause associated with the target
  /// directive, or null if no if clause is used.
  /// \param Device Expression evaluated in device clause associated with the
  /// target directive, or null if no device clause is used.
  virtual void emitTargetDataStandAloneCall(CodeGenFunction &CGF,
                                            const OMPExecutableDirective &D,
                                            const Expr *IfCond,
                                            const Expr *Device);

  /// Marks function \a Fn with properly mangled versions of vector functions.
  /// \param FD Function marked as 'declare simd'.
  /// \param Fn LLVM function that must be marked with 'declare simd'
  /// attributes.
  virtual void emitDeclareSimdFunction(const FunctionDecl *FD,
                                       llvm::Function *Fn);

  /// Emit initialization for doacross loop nesting support.
  /// \param D Loop-based construct used in doacross nesting construct.
  virtual void emitDoacrossInit(CodeGenFunction &CGF, const OMPLoopDirective &D,
                                ArrayRef<Expr *> NumIterations);

  /// Emit code for doacross ordered directive with 'depend' clause.
  /// \param C 'depend' clause with 'sink|source' dependency kind.
  virtual void emitDoacrossOrdered(CodeGenFunction &CGF,
                                   const OMPDependClause *C);

  /// Translates the native parameter of outlined function if this is required
  /// for target.
  /// \param FD Field decl from captured record for the parameter.
  /// \param NativeParam Parameter itself.
  virtual const VarDecl *translateParameter(const FieldDecl *FD,
                                            const VarDecl *NativeParam) const {
    return NativeParam;
  }

  /// Gets the address of the native argument based on the address of the
  /// target-specific parameter.
  /// \param NativeParam Parameter itself.
  /// \param TargetParam Corresponding target-specific parameter.
  virtual Address getParameterAddress(CodeGenFunction &CGF,
                                      const VarDecl *NativeParam,
                                      const VarDecl *TargetParam) const;

  /// Choose default schedule type and chunk value for the
  /// dist_schedule clause.
  virtual void getDefaultDistScheduleAndChunk(CodeGenFunction &CGF,
      const OMPLoopDirective &S, OpenMPDistScheduleClauseKind &ScheduleKind,
      llvm::Value *&Chunk) const {}

  /// Choose default schedule type and chunk value for the
  /// schedule clause.
  virtual void getDefaultScheduleAndChunk(CodeGenFunction &CGF,
      const OMPLoopDirective &S, OpenMPScheduleClauseKind &ScheduleKind,
      const Expr *&ChunkExpr) const;

  /// Emits call of the outlined function with the provided arguments,
  /// translating these arguments to correct target-specific arguments.
  virtual void
  emitOutlinedFunctionCall(CodeGenFunction &CGF, SourceLocation Loc,
                           llvm::FunctionCallee OutlinedFn,
                           ArrayRef<llvm::Value *> Args = llvm::None) const;

  /// Emits OpenMP-specific function prolog.
  /// Required for device constructs.
  virtual void emitFunctionProlog(CodeGenFunction &CGF, const Decl *D);

  /// Gets the OpenMP-specific address of the local variable.
  virtual Address getAddressOfLocalVariable(CodeGenFunction &CGF,
                                            const VarDecl *VD);

  /// Marks the declaration as already emitted for the device code and returns
  /// true, if it was marked already, and false, otherwise.
  bool markAsGlobalTarget(GlobalDecl GD);

  /// Emit deferred declare target variables marked for deferred emission.
  void emitDeferredTargetDecls() const;

  /// Adjust some parameters for the target-based directives, like addresses of
  /// the variables captured by reference in lambdas.
  virtual void
  adjustTargetSpecificDataForLambdas(CodeGenFunction &CGF,
                                     const OMPExecutableDirective &D) const;

  /// Perform check on requires decl to ensure that target architecture
  /// supports unified addressing
  virtual void processRequiresDirective(const OMPRequiresDecl *D);

  /// Gets default memory ordering as specified in requires directive.
  llvm::AtomicOrdering getDefaultMemoryOrdering() const;

  /// Checks if the variable has associated OMPAllocateDeclAttr attribute with
  /// the predefined allocator and translates it into the corresponding address
  /// space.
  virtual bool hasAllocateAttributeForGlobalVar(const VarDecl *VD, LangAS &AS);

  /// Return whether the unified_shared_memory has been specified.
  bool hasRequiresUnifiedSharedMemory() const;

  /// Checks if the \p VD variable is marked as nontemporal declaration in
  /// current context.
  bool isNontemporalDecl(const ValueDecl *VD) const;

  /// Create specialized alloca to handle lastprivate conditionals.
  Address emitLastprivateConditionalInit(CodeGenFunction &CGF,
                                         const VarDecl *VD);

  /// Checks if the provided \p LVal is lastprivate conditional and emits the
  /// code to update the value of the original variable.
  /// \code
  /// lastprivate(conditional: a)
  /// ...
  /// <type> a;
  /// lp_a = ...;
  /// #pragma omp critical(a)
  /// if (last_iv_a <= iv) {
  ///   last_iv_a = iv;
  ///   global_a = lp_a;
  /// }
  /// \endcode
  virtual void checkAndEmitLastprivateConditional(CodeGenFunction &CGF,
                                                  const Expr *LHS);

  /// Checks if the lastprivate conditional was updated in inner region and
  /// writes the value.
  /// \code
  /// lastprivate(conditional: a)
  /// ...
  /// <type> a; bool Fired = false;
  /// #pragma omp ... shared(a)
  /// {
  ///   lp_a = ...;
  ///   Fired = true;
  /// }
  /// if (Fired) {
  ///   #pragma omp critical(a)
  ///   if (last_iv_a <= iv) {
  ///     last_iv_a = iv;
  ///     global_a = lp_a;
  ///   }
  ///   Fired = false;
  /// }
  /// \endcode
  virtual void checkAndEmitSharedLastprivateConditional(
      CodeGenFunction &CGF, const OMPExecutableDirective &D,
      const llvm::DenseSet<CanonicalDeclPtr<const VarDecl>> &IgnoredDecls);

  /// Gets the address of the global copy used for lastprivate conditional
  /// update, if any.
  /// \param PrivLVal LValue for the private copy.
  /// \param VD Original lastprivate declaration.
  virtual void emitLastprivateConditionalFinalUpdate(CodeGenFunction &CGF,
                                                     LValue PrivLVal,
                                                     const VarDecl *VD,
                                                     SourceLocation Loc);

  /// Emits list of dependencies based on the provided data (array of
  /// dependence/expression pairs).
  /// \returns Pointer to the first element of the array casted to VoidPtr type.
  std::pair<llvm::Value *, Address>
  emitDependClause(CodeGenFunction &CGF,
                   ArrayRef<OMPTaskDataTy::DependData> Dependencies,
                   SourceLocation Loc);

  /// Emits list of dependencies based on the provided data (array of
  /// dependence/expression pairs) for depobj construct. In this case, the
  /// variable is allocated dynamically. \returns Pointer to the first
  /// element of the array casted to VoidPtr type.
  Address emitDepobjDependClause(CodeGenFunction &CGF,
                                 const OMPTaskDataTy::DependData &Dependencies,
                                 SourceLocation Loc);

  /// Emits the code to destroy the dependency object provided in depobj
  /// directive.
  void emitDestroyClause(CodeGenFunction &CGF, LValue DepobjLVal,
                         SourceLocation Loc);

  /// Updates the dependency kind in the specified depobj object.
  /// \param DepobjLVal LValue for the main depobj object.
  /// \param NewDepKind New dependency kind.
  void emitUpdateClause(CodeGenFunction &CGF, LValue DepobjLVal,
                        OpenMPDependClauseKind NewDepKind, SourceLocation Loc);

  /// Initializes user defined allocators specified in the uses_allocators
  /// clauses.
void emitUsesAllocatorsInit(CodeGenFunction &CGF, const Expr *Allocator, const Expr *AllocatorTraits); /// Destroys user defined allocators specified in the uses_allocators clause. void emitUsesAllocatorsFini(CodeGenFunction &CGF, const Expr *Allocator); /// Returns true if the variable is a local variable in untied task. bool isLocalVarInUntiedTask(CodeGenFunction &CGF, const VarDecl *VD) const; }; /// Class supports emissionof SIMD-only code. class CGOpenMPSIMDRuntime final : public CGOpenMPRuntime { public: explicit CGOpenMPSIMDRuntime(CodeGenModule &CGM) : CGOpenMPRuntime(CGM) {} ~CGOpenMPSIMDRuntime() override {} /// Emits outlined function for the specified OpenMP parallel directive /// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID, /// kmp_int32 BoundID, struct context_vars*). /// \param D OpenMP directive. /// \param ThreadIDVar Variable for thread id in the current OpenMP region. /// \param InnermostKind Kind of innermost directive (for simple directives it /// is a directive itself, for combined - its innermost directive). /// \param CodeGen Code generation sequence for the \a D directive. llvm::Function * emitParallelOutlinedFunction(const OMPExecutableDirective &D, const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) override; /// Emits outlined function for the specified OpenMP teams directive /// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID, /// kmp_int32 BoundID, struct context_vars*). /// \param D OpenMP directive. /// \param ThreadIDVar Variable for thread id in the current OpenMP region. /// \param InnermostKind Kind of innermost directive (for simple directives it /// is a directive itself, for combined - its innermost directive). /// \param CodeGen Code generation sequence for the \a D directive. 
  llvm::Function *
  emitTeamsOutlinedFunction(const OMPExecutableDirective &D,
                            const VarDecl *ThreadIDVar,
                            OpenMPDirectiveKind InnermostKind,
                            const RegionCodeGenTy &CodeGen) override;

  /// Emits outlined function for the OpenMP task directive \a D. This
  /// outlined function has type void(*)(kmp_int32 ThreadID, struct task_t*
  /// TaskT).
  /// \param D OpenMP directive.
  /// \param ThreadIDVar Variable for thread id in the current OpenMP region.
  /// \param PartIDVar Variable for partition id in the current OpenMP untied
  /// task region.
  /// \param TaskTVar Variable for task_t argument.
  /// \param InnermostKind Kind of innermost directive (for simple directives it
  /// is a directive itself, for combined - its innermost directive).
  /// \param CodeGen Code generation sequence for the \a D directive.
  /// \param Tied true if task is generated for tied task, false otherwise.
  /// \param NumberOfParts Number of parts in untied task. Ignored for tied
  /// tasks.
  ///
  llvm::Function *emitTaskOutlinedFunction(
      const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
      const VarDecl *PartIDVar, const VarDecl *TaskTVar,
      OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen,
      bool Tied, unsigned &NumberOfParts) override;

  /// Emits code for parallel or serial call of the \a OutlinedFn with
  /// variables captured in a record which address is stored in \a
  /// CapturedStruct.
  /// \param OutlinedFn Outlined function to be run in parallel threads. Type of
  /// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*).
  /// \param CapturedVars A pointer to the record with the references to
  /// variables used in \a OutlinedFn function.
  /// \param IfCond Condition in the associated 'if' clause, if it was
  /// specified, nullptr otherwise.
  ///
  void emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
                        llvm::Function *OutlinedFn,
                        ArrayRef<llvm::Value *> CapturedVars,
                        const Expr *IfCond) override;

  /// Emits a critical region.
  /// \param CriticalName Name of the critical region.
  /// \param CriticalOpGen Generator for the statement associated with the given
  /// critical region.
  /// \param Hint Value of the 'hint' clause (optional).
  void emitCriticalRegion(CodeGenFunction &CGF, StringRef CriticalName,
                          const RegionCodeGenTy &CriticalOpGen,
                          SourceLocation Loc,
                          const Expr *Hint = nullptr) override;

  /// Emits a master region.
  /// \param MasterOpGen Generator for the statement associated with the given
  /// master region.
  void emitMasterRegion(CodeGenFunction &CGF,
                        const RegionCodeGenTy &MasterOpGen,
                        SourceLocation Loc) override;

  /// Emits a masked region.
  /// \param MaskedOpGen Generator for the statement associated with the given
  /// masked region.
  void emitMaskedRegion(CodeGenFunction &CGF,
                        const RegionCodeGenTy &MaskedOpGen, SourceLocation Loc,
                        const Expr *Filter = nullptr) override;

  /// Emits code for a taskyield directive.
  void emitTaskyieldCall(CodeGenFunction &CGF, SourceLocation Loc) override;

  /// Emit a taskgroup region.
  /// \param TaskgroupOpGen Generator for the statement associated with the
  /// given taskgroup region.
  void emitTaskgroupRegion(CodeGenFunction &CGF,
                           const RegionCodeGenTy &TaskgroupOpGen,
                           SourceLocation Loc) override;

  /// Emits a single region.
  /// \param SingleOpGen Generator for the statement associated with the given
  /// single region.
  void emitSingleRegion(CodeGenFunction &CGF,
                        const RegionCodeGenTy &SingleOpGen, SourceLocation Loc,
                        ArrayRef<const Expr *> CopyprivateVars,
                        ArrayRef<const Expr *> DestExprs,
                        ArrayRef<const Expr *> SrcExprs,
                        ArrayRef<const Expr *> AssignmentOps) override;

  /// Emit an ordered region.
  /// \param OrderedOpGen Generator for the statement associated with the given
  /// ordered region.
  void emitOrderedRegion(CodeGenFunction &CGF,
                         const RegionCodeGenTy &OrderedOpGen,
                         SourceLocation Loc, bool IsThreads) override;

  /// Emit an implicit/explicit barrier for OpenMP threads.
  /// \param Kind Directive for which this implicit barrier call must be
  /// generated. Must be OMPD_barrier for explicit barrier generation.
  /// \param EmitChecks true if need to emit checks for cancellation barriers.
  /// \param ForceSimpleCall true simple barrier call must be emitted, false if
  /// runtime class decides which one to emit (simple or with cancellation
  /// checks).
  ///
  void emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc,
                       OpenMPDirectiveKind Kind, bool EmitChecks = true,
                       bool ForceSimpleCall = false) override;

  /// This is used for non static scheduled types and when the ordered
  /// clause is present on the loop construct.
  /// Depending on the loop schedule, it is necessary to call some runtime
  /// routine before start of the OpenMP loop to get the loop upper / lower
  /// bounds \a LB and \a UB and stride \a ST.
  ///
  /// \param CGF Reference to current CodeGenFunction.
  /// \param Loc Clang source location.
  /// \param ScheduleKind Schedule kind, specified by the 'schedule' clause.
  /// \param IVSize Size of the iteration variable in bits.
  /// \param IVSigned Sign of the iteration variable.
  /// \param Ordered true if loop is ordered, false otherwise.
  /// \param DispatchValues struct containing llvm values for lower bound, upper
  /// bound, and chunk expression.
  /// For the default (nullptr) value, the chunk 1 will be used.
  ///
  void emitForDispatchInit(CodeGenFunction &CGF, SourceLocation Loc,
                           const OpenMPScheduleTy &ScheduleKind,
                           unsigned IVSize, bool IVSigned, bool Ordered,
                           const DispatchRTInput &DispatchValues) override;

  /// Call the appropriate runtime routine to initialize it before start
  /// of loop.
  ///
  /// This is used only in case of static schedule, when the user did not
  /// specify an ordered clause on the loop construct.
  /// Depending on the loop schedule, it is necessary to call some runtime
  /// routine before start of the OpenMP loop to get the loop upper / lower
  /// bounds LB and UB and stride ST.
  ///
  /// \param CGF Reference to current CodeGenFunction.
  /// \param Loc Clang source location.
  /// \param DKind Kind of the directive.
  /// \param ScheduleKind Schedule kind, specified by the 'schedule' clause.
  /// \param Values Input arguments for the construct.
  ///
  void emitForStaticInit(CodeGenFunction &CGF, SourceLocation Loc,
                         OpenMPDirectiveKind DKind,
                         const OpenMPScheduleTy &ScheduleKind,
                         const StaticRTInput &Values) override;

  /// Call the appropriate runtime routine to initialize it before start
  /// of the distribute loop.
  ///
  /// \param CGF Reference to current CodeGenFunction.
  /// \param Loc Clang source location.
  /// \param SchedKind Schedule kind, specified by the 'dist_schedule' clause.
  /// \param Values Input arguments for the construct.
  ///
  void emitDistributeStaticInit(CodeGenFunction &CGF, SourceLocation Loc,
                                OpenMPDistScheduleClauseKind SchedKind,
                                const StaticRTInput &Values) override;

  /// Call the appropriate runtime routine to notify that we finished
  /// iteration of the ordered loop with the dynamic scheduling.
  ///
  /// \param CGF Reference to current CodeGenFunction.
  /// \param Loc Clang source location.
  /// \param IVSize Size of the iteration variable in bits.
  /// \param IVSigned Sign of the iteration variable.
  ///
  void emitForOrderedIterationEnd(CodeGenFunction &CGF, SourceLocation Loc,
                                  unsigned IVSize, bool IVSigned) override;

  /// Call the appropriate runtime routine to notify that we finished
  /// all the work with current loop.
  ///
  /// \param CGF Reference to current CodeGenFunction.
  /// \param Loc Clang source location.
  /// \param DKind Kind of the directive for which the static finish is emitted.
/// void emitForStaticFinish(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind DKind) override; /// Call __kmpc_dispatch_next( /// ident_t *loc, kmp_int32 tid, kmp_int32 *p_lastiter, /// kmp_int[32|64] *p_lower, kmp_int[32|64] *p_upper, /// kmp_int[32|64] *p_stride); /// \param IVSize Size of the iteration variable in bits. /// \param IVSigned Sign of the iteration variable. /// \param IL Address of the output variable in which the flag of the /// last iteration is returned. /// \param LB Address of the output variable in which the lower iteration /// number is returned. /// \param UB Address of the output variable in which the upper iteration /// number is returned. /// \param ST Address of the output variable in which the stride value is /// returned. llvm::Value *emitForNext(CodeGenFunction &CGF, SourceLocation Loc, unsigned IVSize, bool IVSigned, Address IL, Address LB, Address UB, Address ST) override; /// Emits call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32 /// global_tid, kmp_int32 num_threads) to generate code for 'num_threads' /// clause. /// \param NumThreads An integer value of threads. void emitNumThreadsClause(CodeGenFunction &CGF, llvm::Value *NumThreads, SourceLocation Loc) override; /// Emit call to void __kmpc_push_proc_bind(ident_t *loc, kmp_int32 /// global_tid, int proc_bind) to generate code for 'proc_bind' clause. void emitProcBindClause(CodeGenFunction &CGF, llvm::omp::ProcBindKind ProcBind, SourceLocation Loc) override; /// Returns address of the threadprivate variable for the current /// thread. /// \param VD Threadprivate variable. /// \param VDAddr Address of the global variable \a VD. /// \param Loc Location of the reference to threadprivate var. /// \return Address of the threadprivate variable for the current thread. Address getAddrOfThreadPrivate(CodeGenFunction &CGF, const VarDecl *VD, Address VDAddr, SourceLocation Loc) override; /// Emit a code for initialization of threadprivate variable. 
It emits /// a call to runtime library which adds initial value to the newly created /// threadprivate variable (if it is not constant) and registers destructor /// for the variable (if any). /// \param VD Threadprivate variable. /// \param VDAddr Address of the global variable \a VD. /// \param Loc Location of threadprivate declaration. /// \param PerformInit true if initialization expression is not constant. llvm::Function * emitThreadPrivateVarDefinition(const VarDecl *VD, Address VDAddr, SourceLocation Loc, bool PerformInit, CodeGenFunction *CGF = nullptr) override; /// Creates artificial threadprivate variable with name \p Name and type \p /// VarType. /// \param VarType Type of the artificial threadprivate variable. /// \param Name Name of the artificial threadprivate variable. Address getAddrOfArtificialThreadPrivate(CodeGenFunction &CGF, QualType VarType, StringRef Name) override; /// Emit flush of the variables specified in 'omp flush' directive. /// \param Vars List of variables to flush. void emitFlush(CodeGenFunction &CGF, ArrayRef<const Expr *> Vars, SourceLocation Loc, llvm::AtomicOrdering AO) override; /// Emit task region for the task directive. The task region is /// emitted in several steps: /// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32 /// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds, /// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the /// function: /// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) { /// TaskFunction(gtid, tt->part_id, tt->shareds); /// return 0; /// } /// 2. Copy a list of shared variables to field shareds of the resulting /// structure kmp_task_t returned by the previous call (if any). /// 3. Copy a pointer to destructions function to field destructions of the /// resulting structure kmp_task_t. /// 4. 
Emit a call to kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid, /// kmp_task_t *new_task), where new_task is a resulting structure from /// previous items. /// \param D Current task directive. /// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32 /// /*part_id*/, captured_struct */*__context*/); /// \param SharedsTy A type which contains references the shared variables. /// \param Shareds Context with the list of shared variables from the \p /// TaskFunction. /// \param IfCond Not a nullptr if 'if' clause was specified, nullptr /// otherwise. /// \param Data Additional data for task generation like tiednsee, final /// state, list of privates etc. void emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc, const OMPExecutableDirective &D, llvm::Function *TaskFunction, QualType SharedsTy, Address Shareds, const Expr *IfCond, const OMPTaskDataTy &Data) override; /// Emit task region for the taskloop directive. The taskloop region is /// emitted in several steps: /// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32 /// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds, /// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the /// function: /// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) { /// TaskFunction(gtid, tt->part_id, tt->shareds); /// return 0; /// } /// 2. Copy a list of shared variables to field shareds of the resulting /// structure kmp_task_t returned by the previous call (if any). /// 3. Copy a pointer to destructions function to field destructions of the /// resulting structure kmp_task_t. /// 4. Emit a call to void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t /// *task, int if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int /// nogroup, int sched, kmp_uint64 grainsize, void *task_dup ), where new_task /// is a resulting structure from /// previous items. /// \param D Current task directive. 
/// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32 /// /*part_id*/, captured_struct */*__context*/); /// \param SharedsTy A type which contains references the shared variables. /// \param Shareds Context with the list of shared variables from the \p /// TaskFunction. /// \param IfCond Not a nullptr if 'if' clause was specified, nullptr /// otherwise. /// \param Data Additional data for task generation like tiednsee, final /// state, list of privates etc. void emitTaskLoopCall(CodeGenFunction &CGF, SourceLocation Loc, const OMPLoopDirective &D, llvm::Function *TaskFunction, QualType SharedsTy, Address Shareds, const Expr *IfCond, const OMPTaskDataTy &Data) override; /// Emit a code for reduction clause. Next code should be emitted for /// reduction: /// \code /// /// static kmp_critical_name lock = { 0 }; /// /// void reduce_func(void *lhs[<n>], void *rhs[<n>]) { /// ... /// *(Type<i>*)lhs[i] = RedOp<i>(*(Type<i>*)lhs[i], *(Type<i>*)rhs[i]); /// ... /// } /// /// ... /// void *RedList[<n>] = {&<RHSExprs>[0], ..., &<RHSExprs>[<n>-1]}; /// switch (__kmpc_reduce{_nowait}(<loc>, <gtid>, <n>, sizeof(RedList), /// RedList, reduce_func, &<lock>)) { /// case 1: /// ... /// <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]); /// ... /// __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>); /// break; /// case 2: /// ... /// Atomic(<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i])); /// ... /// break; /// default:; /// } /// \endcode /// /// \param Privates List of private copies for original reduction arguments. /// \param LHSExprs List of LHS in \a ReductionOps reduction operations. /// \param RHSExprs List of RHS in \a ReductionOps reduction operations. /// \param ReductionOps List of reduction operations in form 'LHS binop RHS' /// or 'operator binop(LHS, RHS)'. /// \param Options List of options for reduction codegen: /// WithNowait true if parent directive has also nowait clause, false /// otherwise. 
/// SimpleReduction Emit reduction operation only. Used for omp simd /// directive on the host. /// ReductionKind The kind of reduction to perform. void emitReduction(CodeGenFunction &CGF, SourceLocation Loc, ArrayRef<const Expr *> Privates, ArrayRef<const Expr *> LHSExprs, ArrayRef<const Expr *> RHSExprs, ArrayRef<const Expr *> ReductionOps, ReductionOptionsTy Options) override; /// Emit a code for initialization of task reduction clause. Next code /// should be emitted for reduction: /// \code /// /// _taskred_item_t red_data[n]; /// ... /// red_data[i].shar = &shareds[i]; /// red_data[i].orig = &origs[i]; /// red_data[i].size = sizeof(origs[i]); /// red_data[i].f_init = (void*)RedInit<i>; /// red_data[i].f_fini = (void*)RedDest<i>; /// red_data[i].f_comb = (void*)RedOp<i>; /// red_data[i].flags = <Flag_i>; /// ... /// void* tg1 = __kmpc_taskred_init(gtid, n, red_data); /// \endcode /// For reduction clause with task modifier it emits the next call: /// \code /// /// _taskred_item_t red_data[n]; /// ... /// red_data[i].shar = &shareds[i]; /// red_data[i].orig = &origs[i]; /// red_data[i].size = sizeof(origs[i]); /// red_data[i].f_init = (void*)RedInit<i>; /// red_data[i].f_fini = (void*)RedDest<i>; /// red_data[i].f_comb = (void*)RedOp<i>; /// red_data[i].flags = <Flag_i>; /// ... /// void* tg1 = __kmpc_taskred_modifier_init(loc, gtid, is_worksharing, n, /// red_data); /// \endcode /// \param LHSExprs List of LHS in \a Data.ReductionOps reduction operations. /// \param RHSExprs List of RHS in \a Data.ReductionOps reduction operations. /// \param Data Additional data for task generation like tiedness, final /// state, list of privates, reductions etc. 
llvm::Value *emitTaskReductionInit(CodeGenFunction &CGF, SourceLocation Loc, ArrayRef<const Expr *> LHSExprs, ArrayRef<const Expr *> RHSExprs, const OMPTaskDataTy &Data) override; /// Emits the following code for reduction clause with task modifier: /// \code /// __kmpc_task_reduction_modifier_fini(loc, gtid, is_worksharing); /// \endcode void emitTaskReductionFini(CodeGenFunction &CGF, SourceLocation Loc, bool IsWorksharingReduction) override; /// Required to resolve existing problems in the runtime. Emits threadprivate /// variables to store the size of the VLAs/array sections for /// initializer/combiner/finalizer functions + emits threadprivate variable to /// store the pointer to the original reduction item for the custom /// initializer defined by declare reduction construct. /// \param RCG Allows to reuse an existing data for the reductions. /// \param N Reduction item for which fixups must be emitted. void emitTaskReductionFixups(CodeGenFunction &CGF, SourceLocation Loc, ReductionCodeGen &RCG, unsigned N) override; /// Get the address of `void *` type of the privatue copy of the reduction /// item specified by the \p SharedLVal. /// \param ReductionsPtr Pointer to the reduction data returned by the /// emitTaskReductionInit function. /// \param SharedLVal Address of the original reduction item. Address getTaskReductionItem(CodeGenFunction &CGF, SourceLocation Loc, llvm::Value *ReductionsPtr, LValue SharedLVal) override; /// Emit code for 'taskwait' directive. void emitTaskwaitCall(CodeGenFunction &CGF, SourceLocation Loc) override; /// Emit code for 'cancellation point' construct. /// \param CancelRegion Region kind for which the cancellation point must be /// emitted. /// void emitCancellationPointCall(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind CancelRegion) override; /// Emit code for 'cancel' construct. /// \param IfCond Condition in the associated 'if' clause, if it was /// specified, nullptr otherwise. 
/// \param CancelRegion Region kind for which the cancel must be emitted. /// void emitCancelCall(CodeGenFunction &CGF, SourceLocation Loc, const Expr *IfCond, OpenMPDirectiveKind CancelRegion) override; /// Emit outilined function for 'target' directive. /// \param D Directive to emit. /// \param ParentName Name of the function that encloses the target region. /// \param OutlinedFn Outlined function value to be defined by this call. /// \param OutlinedFnID Outlined function ID value to be defined by this call. /// \param IsOffloadEntry True if the outlined function is an offload entry. /// \param CodeGen Code generation sequence for the \a D directive. /// An outlined function may not be an entry if, e.g. the if clause always /// evaluates to false. void emitTargetOutlinedFunction(const OMPExecutableDirective &D, StringRef ParentName, llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID, bool IsOffloadEntry, const RegionCodeGenTy &CodeGen) override; /// Emit the target offloading code associated with \a D. The emitted /// code attempts offloading the execution to the device, an the event of /// a failure it executes the host version outlined in \a OutlinedFn. /// \param D Directive to emit. /// \param OutlinedFn Host version of the code to be offloaded. /// \param OutlinedFnID ID of host version of the code to be offloaded. /// \param IfCond Expression evaluated in if clause associated with the target /// directive, or null if no if clause is used. /// \param Device Expression evaluated in device clause associated with the /// target directive, or null if no device clause is used and device modifier. 
void emitTargetCall( CodeGenFunction &CGF, const OMPExecutableDirective &D, llvm::Function *OutlinedFn, llvm::Value *OutlinedFnID, const Expr *IfCond, llvm::PointerIntPair<const Expr *, 2, OpenMPDeviceClauseModifier> Device, llvm::function_ref<llvm::Value *(CodeGenFunction &CGF, const OMPLoopDirective &D)> SizeEmitter) override; /// Emit the target regions enclosed in \a GD function definition or /// the function itself in case it is a valid device function. Returns true if /// \a GD was dealt with successfully. /// \param GD Function to scan. bool emitTargetFunctions(GlobalDecl GD) override; /// Emit the global variable if it is a valid device global variable. /// Returns true if \a GD was dealt with successfully. /// \param GD Variable declaration to emit. bool emitTargetGlobalVariable(GlobalDecl GD) override; /// Emit the global \a GD if it is meaningful for the target. Returns /// if it was emitted successfully. /// \param GD Global to scan. bool emitTargetGlobal(GlobalDecl GD) override; /// Emits code for teams call of the \a OutlinedFn with /// variables captured in a record which address is stored in \a /// CapturedStruct. /// \param OutlinedFn Outlined function to be run by team masters. Type of /// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*). /// \param CapturedVars A pointer to the record with the references to /// variables used in \a OutlinedFn function. /// void emitTeamsCall(CodeGenFunction &CGF, const OMPExecutableDirective &D, SourceLocation Loc, llvm::Function *OutlinedFn, ArrayRef<llvm::Value *> CapturedVars) override; /// Emits call to void __kmpc_push_num_teams(ident_t *loc, kmp_int32 /// global_tid, kmp_int32 num_teams, kmp_int32 thread_limit) to generate code /// for num_teams clause. /// \param NumTeams An integer expression of teams. /// \param ThreadLimit An integer expression of threads. 
void emitNumTeamsClause(CodeGenFunction &CGF, const Expr *NumTeams, const Expr *ThreadLimit, SourceLocation Loc) override; /// Emit the target data mapping code associated with \a D. /// \param D Directive to emit. /// \param IfCond Expression evaluated in if clause associated with the /// target directive, or null if no device clause is used. /// \param Device Expression evaluated in device clause associated with the /// target directive, or null if no device clause is used. /// \param Info A record used to store information that needs to be preserved /// until the region is closed. void emitTargetDataCalls(CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond, const Expr *Device, const RegionCodeGenTy &CodeGen, TargetDataInfo &Info) override; /// Emit the data mapping/movement code associated with the directive /// \a D that should be of the form 'target [{enter|exit} data | update]'. /// \param D Directive to emit. /// \param IfCond Expression evaluated in if clause associated with the target /// directive, or null if no if clause is used. /// \param Device Expression evaluated in device clause associated with the /// target directive, or null if no device clause is used. void emitTargetDataStandAloneCall(CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond, const Expr *Device) override; /// Emit initialization for doacross loop nesting support. /// \param D Loop-based construct used in doacross nesting construct. void emitDoacrossInit(CodeGenFunction &CGF, const OMPLoopDirective &D, ArrayRef<Expr *> NumIterations) override; /// Emit code for doacross ordered directive with 'depend' clause. /// \param C 'depend' clause with 'sink|source' dependency kind. void emitDoacrossOrdered(CodeGenFunction &CGF, const OMPDependClause *C) override; /// Translates the native parameter of outlined function if this is required /// for target. /// \param FD Field decl from captured record for the parameter. 
/// \param NativeParam Parameter itself. const VarDecl *translateParameter(const FieldDecl *FD, const VarDecl *NativeParam) const override; /// Gets the address of the native argument basing on the address of the /// target-specific parameter. /// \param NativeParam Parameter itself. /// \param TargetParam Corresponding target-specific parameter. Address getParameterAddress(CodeGenFunction &CGF, const VarDecl *NativeParam, const VarDecl *TargetParam) const override; /// Gets the OpenMP-specific address of the local variable. Address getAddressOfLocalVariable(CodeGenFunction &CGF, const VarDecl *VD) override { return Address::invalid(); } }; } // namespace CodeGen } // namespace clang #endif
schedbench.c
/**************************************************************************** * * * OpenMP MicroBenchmark Suite - Version 3.1 * * * * produced by * * * * Mark Bull, Fiona Reid and Nix Mc Donnell * * * * at * * * * Edinburgh Parallel Computing Centre * * * * email: markb@epcc.ed.ac.uk or fiona@epcc.ed.ac.uk * * * * * * This version copyright (c) The University of Edinburgh, 2015. * * * * * * Licensed under the Apache License, Version 2.0 (the "License"); * * you may not use this file except in compliance with the License. * * You may obtain a copy of the License at * * * * http://www.apache.org/licenses/LICENSE-2.0 * * * * Unless required by applicable law or agreed to in writing, software * * distributed under the License is distributed on an "AS IS" BASIS, * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * * See the License for the specific language governing permissions and * * limitations under the License. * * * ****************************************************************************/ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <omp.h> #include "common.h" #include "schedbench.h" int cksz, itersperthr = 8192; char testName[32]; int main(int argc, char **argv) { init(argc, argv); // Also print itersperthr printf("\t%d iterations per threads\n", itersperthr); /* GENERATE REFERENCE TIME */ reference("reference time", &refer); /* TEST STATIC */ benchmark("STATIC", &teststatic); /* TEST STATIC,n */ cksz = 1; while (cksz <= itersperthr) { sprintf(testName, "STATIC %d", cksz); benchmark(testName, &teststaticn); cksz *= 2; } /* TEST DYNAMIC,n */ cksz = 1; while (cksz <= itersperthr) { sprintf(testName, "DYNAMIC %d", cksz); benchmark(testName, &testdynamicn); cksz *= 2; } /* TEST GUIDED,n */ cksz = 1; while (cksz <= itersperthr / nthreads) { sprintf(testName, "GUIDED %d", cksz); benchmark(testName, &testguidedn); cksz *= 2; } finalise(); return EXIT_SUCCESS; } void refer() { int i, j; for (j = 0; j < innerreps; j++) { 
for (i = 0; i < itersperthr; i++) { delay(delaylength); } } } void teststatic() { int i, j; #pragma omp parallel private(j) { for (j = 0; j < innerreps; j++) { #pragma omp for schedule(static) for (i = 0; i < itersperthr * nthreads; i++) { delay(delaylength); } } } } void teststaticn() { int i, j; #pragma omp parallel private(j) { for (j = 0; j < innerreps; j++) { #pragma omp for schedule(static,cksz) for (i = 0; i < itersperthr * nthreads; i++) { delay(delaylength); } } } } void testdynamicn() { int i, j; #pragma omp parallel private(j) { for (j = 0; j < innerreps; j++) { #pragma omp for schedule(dynamic,cksz) for (i = 0; i < itersperthr * nthreads; i++) { delay(delaylength); } } } } void testguidedn() { int i, j; #pragma omp parallel private(j) { for (j = 0; j < innerreps; j++) { #pragma omp for schedule(guided,cksz) for (i = 0; i < itersperthr * nthreads; i++) { delay(delaylength); } } } }
GB_unaryop__minv_fp32_int32.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop__minv_fp32_int32
// op(A') function: GB_tran__minv_fp32_int32

// C type: float
// A type: int32_t
// cast: float cij = (float) aij
// unaryop: cij = (1.0F)/aij

// type of the entries of the input matrix A
#define GB_ATYPE \
    int32_t

// type of the entries of the output matrix C
#define GB_CTYPE \
    float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int32_t aij = Ax [pA]

// access the output entry at position p
#define GB_CX(p) Cx [p]

// unary operator (multiplicative inverse; an aij of zero yields +/-Inf
// under IEEE float division)
#define GB_OP(z, x) \
    z = (1.0F)/x ;

// casting
#define GB_CASTING(z, aij) \
    float z = (float) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ; \
    GB_OP (GB_CX (pC), z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINV || GxB_NO_FP32 || GxB_NO_INT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__minv_fp32_int32
(
    float *Cx,              // Cx and Ax may be aliased
    int32_t *Ax,
    int64_t anz,            // number of entries to process
    int nthreads            // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // every entry is independent, so a static schedule splits the work evenly
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__minv_fp32_int32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,    // per-slice row counters
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice, // slice boundaries, one per task
    int naslice                         // number of slices/tasks
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose kernel body is a shared template; GB_PHASE_2_OF_2 selects
    // the phase that writes C->i and C->x (phase 1 only counts entries)
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_unop_transpose.c
//------------------------------------------------------------------------------ // GB_unop_transpose: C=op(cast(A')), transpose, typecast, and apply op //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // This method is parallel, but not highly scalable. It uses only naslice = // nnz(A)/(A->vlen) threads. Each thread requires O(vlen) workspace. { // Ax unused for some uses of this template #include "GB_unused.h" //-------------------------------------------------------------------------- // get A and C //-------------------------------------------------------------------------- const int64_t *GB_RESTRICT Ai = A->i ; #if defined ( GB_PHASE_2_OF_2 ) const GB_ATYPE *GB_RESTRICT Ax = (GB_ATYPE *) A->x ; int64_t *GB_RESTRICT Ci = C->i ; GB_CTYPE *GB_RESTRICT Cx = (GB_CTYPE *) C->x ; #endif //-------------------------------------------------------------------------- // C = op (cast (A')) //-------------------------------------------------------------------------- int taskid ; #pragma omp parallel for num_threads(naslice) schedule(static) for (taskid = 0 ; taskid < naslice ; taskid++) { // get the rowcount for this slice, of size A->vlen int64_t *GB_RESTRICT rowcount = Rowcounts [taskid] ; for (int64_t Iter_k = A_slice [taskid] ; Iter_k < A_slice [taskid+1] ; Iter_k++) { GBI_jth_iteration_with_iter (Iter, j, pA, pA_end) ; for ( ; pA < pA_end ; pA++) { #if defined ( GB_PHASE_1_OF_2) // count one more entry in C(i,:) for this slice rowcount [Ai [pA]]++ ; #else // insert the entry into C(i,:) for this slice int64_t pC = rowcount [Ai [pA]]++ ; Ci [pC] = j ; // Cx [pC] = op (Ax [pA]) GB_CAST_OP (pC, pA) ; #endif } } } }
edge_data_c2c.h
/* ============================================================================== KratosPFEMApplication A library based on: Kratos A General Purpose Software for Multi-Physics Finite Element Analysis Version 1.0 (Released on march 05, 2007). Copyright 2007 Pooyan Dadvand, Riccardo Rossi pooyan@cimne.upc.edu rrossi@cimne.upc.edu - CIMNE (International Center for Numerical Methods in Engineering), Gran Capita' s/n, 08034 Barcelona, Spain Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following condition: Distribution of this code for any commercial purpose is permissible ONLY BY DIRECT ARRANGEMENT WITH THE COPYRIGHT OWNERS. The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
============================================================================== */ // // Project Name: Kratos // Last Modified by: $Author: antonia $ // Date: $Date: 2009-01-14 08:26:51 $ // Revision: $Revision: 1.11 $ // // #if !defined(KRATOS_EDGE_DATA_C2C_H_INCLUDED ) #define KRATOS_EDGE_DATA_C2C_H_INCLUDED //we suggest defining the following macro #define USE_CONSERVATIVE_FORM_FOR_SCALAR_CONVECTION //we suggest defining the following macro*/*/ // #define USE_CONSERVATIVE_FORM_FOR_VECTOR_CONVECTION // System includes #include <string> #include <iostream> #include <algorithm> // External includes // Project includes #include "includes/define.h" #include "includes/model_part.h" #include "includes/node.h" //#include "geometries/geometry.h" #include "utilities/geometry_utilities.h" #include "free_surface_application.h" #include "utilities/openmp_utils.h" namespace Kratos { // template<unsigned int TDim> // class EdgeConstructionScratch // { // public: // array_1d<double, TDim+1> N; // boost::numeric::ublas::bounded_matrix <double, TDim+1,TDim> dN_dx; // double volume; // double weighting_factor = 1.0 / static_cast<double>(TDim+1); // boost::numeric::ublas::bounded_matrix <double, TDim+1,TDim+1> mass_consistent; // array_1d<double, TDim+1> mass_lumped; // array_1d<unsigned int, TDim+1> nodal_indices; // array_1d<double, TDim+1> heights; // // } //structure definition for fast access to edge data using CSR format template<unsigned int TDim> class EdgesStructureTypeC2C { public: //component ij of the consistent mass matrix (M = Ni * Nj * dOmega) double Mass; //components kl of the laplacian matrix of edge ij (L = dNi/dxk * dNj/dxl * dOmega) //double Laplacian; boost::numeric::ublas::bounded_matrix<double, TDim, TDim> LaplacianIJ; //components k of the gradient matrix of edge ij (G = Ni * dNj/dxl * dOmega) array_1d<double, TDim> Ni_DNj; //components k of the transposed gradient matrix of edge ij (GT = dNi/dxl * Nj * dOmega) //TRANSPOSED GRADIENT array_1d<double, TDim> 
DNi_Nj; // closes the member declaration begun above: transposed edge gradient term (integral of DN_i * N_j)

//*************************************************************************************
//*************************************************************************************
// Pressure gradient integrated by parts:
//   RHSi += DNi_Nj p_j + Aboundary * pext  ==>  RHS += Ni_DNj p_j - DNi_Nj p_i
// ATTENTION: + Aboundary * pext is NOT included!! it should be included "manually".
// NOTE(review): Add_Gp applies "-=" and Sub_Gp applies "+=". The Add_/Sub_ prefix
// refers to the sign of the G*p term in the formula above, not to the in-place
// operator used on `destination` - confirm against callers before changing.
inline void Add_Gp(array_1d<double, TDim>& destination, const double& p_i, const double& p_j)
{
    for (unsigned int comp = 0; comp < TDim; comp++)
        destination[comp] -= Ni_DNj[comp] * p_j - DNi_Nj[comp] * p_i;
}

inline void Sub_Gp(array_1d<double, TDim>& destination, const double& p_i, const double& p_j)
{
    for (unsigned int comp = 0; comp < TDim; comp++)
        destination[comp] += Ni_DNj[comp] * p_j - DNi_Nj[comp] * p_i;
}

//*************************************************************************************
//*************************************************************************************
// Divergence-like edge operator in difference form: RHSi += Ni_DNj[k] * (v_j - v_i)[k]
inline void Add_D_v(double& destination, const array_1d<double, TDim>& v_i, const array_1d<double, TDim>& v_j)
{
    for (unsigned int comp = 0; comp < TDim; comp++)
        destination += Ni_DNj[comp] * (v_j[comp] - v_i[comp]);
}

inline void Sub_D_v(double& destination, const array_1d<double, TDim>& v_i, const array_1d<double, TDim>& v_j)
{
    for (unsigned int comp = 0; comp < TDim; comp++)
        destination -= Ni_DNj[comp] * (v_j[comp] - v_i[comp]);
}

//*************************************************************************************
//*************************************************************************************
// Pressure-gradient edge operator in difference form: RHSi += Ni_DNj * (p_j - p_i)
inline void Add_grad_p(array_1d<double, TDim>& destination, const double& p_i, const double& p_j)
{
    for (unsigned int comp = 0; comp < TDim; comp++)
        destination[comp] += Ni_DNj[comp] * (p_j - p_i);
}

inline void Sub_grad_p(array_1d<double, TDim>& destination, const double& p_i, const double& p_j)
{
    for (unsigned int comp = 0; comp < TDim; comp++)
        destination[comp] -= Ni_DNj[comp] * (p_j - p_i);
}

//*************************************************************************************
//*************************************************************************************
// Divergence edge operator: RHSi += DNi_Nj[k]*v[k]
// NOTE(review): intentionally non-symmetric - Ni_DNj weights the j-side value and
// DNi_Nj the i-side value (integrated-by-parts form); confirm before "simplifying".
inline void Add_div_v(double& destination, const array_1d<double, TDim>& v_i, const array_1d<double, TDim>& v_j)
{
    for (unsigned int comp = 0; comp < TDim; comp++)
        destination -= Ni_DNj[comp] * v_j[comp] - DNi_Nj[comp] * v_i[comp];
}

inline void Sub_div_v(double& destination, const array_1d<double, TDim>& v_i, const array_1d<double, TDim>& v_j)
{
    for (unsigned int comp = 0; comp < TDim; comp++)
        destination += Ni_DNj[comp] * v_j[comp] - DNi_Nj[comp] * v_i[comp];
}

//*************************************************************************************
//*************************************************************************************
// Gets the trace of the edge Laplacian matrix (scalar Laplacian l_ij).
inline void CalculateScalarLaplacian(double& l_ij)
{
    l_ij = LaplacianIJ(0, 0);
    for (unsigned int comp = 1; comp < TDim; comp++)
        l_ij += LaplacianIJ(comp, comp);
}

// Convective edge contribution for a vector unknown U with convecting velocity a:
//   destination += (a_j . Ni_DNj) * U_j - (a_i . DNi_Nj) * U_i
// (The original keeps two commented-out alternatives here - a variant guarded by
//  USE_CONSERVATIVE_FORM_FOR_VECTOR_CONVECTION and a two-sided Ni_DNj variant -
//  condensed out of this listing for readability.)
inline void Add_ConvectiveContribution(array_1d<double, TDim>& destination,
        const array_1d<double, TDim>& a_i, const array_1d<double, TDim>& U_i,
        const array_1d<double, TDim>& a_j, const array_1d<double, TDim>& U_j)
{
    // "first" weights the j-side (gradient), "second" the i-side (transposed gradient)
    double second = a_i[0] * DNi_Nj[0];
    double first = a_j[0] * Ni_DNj[0];
    for (unsigned int k_comp = 1; k_comp < TDim; k_comp++)
    {
        second += a_i[k_comp] * DNi_Nj[k_comp];
        first += a_j[k_comp] * Ni_DNj[k_comp];
    }
    for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
        destination[l_comp] += first * U_j[l_comp] - second * U_i[l_comp];
}

// Same as Add_ConvectiveContribution but subtracting the contribution.
// (Commented-out alternatives from the original condensed out, as above.)
inline void Sub_ConvectiveContribution(array_1d<double, TDim>& destination,
        const array_1d<double, TDim>& a_i, const array_1d<double, TDim>& U_i,
        const array_1d<double, TDim>& a_j, const array_1d<double, TDim>& U_j)
{
    double second = a_i[0] * DNi_Nj[0];
    double first = a_j[0] * Ni_DNj[0];
    for (unsigned int k_comp = 1; k_comp < TDim; k_comp++)
    {
        second += a_i[k_comp] * DNi_Nj[k_comp];
        first += a_j[k_comp] * Ni_DNj[k_comp];
    }
    for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
        destination[l_comp] -= first * U_j[l_comp] - second * U_i[l_comp];
}

// Scalar convective contribution; the compile-time macro selects between the
// one-sided (a_i . Ni_DNj)*(phi_j - phi_i) form and the two-sided aux_j/aux_i form.
inline void Sub_ConvectiveContribution(double& destination,
        const array_1d<double, TDim>& a_i, const double& phi_i,
        const array_1d<double, TDim>& a_j, const double& phi_j)
{
#ifdef USE_CONSERVATIVE_FORM_FOR_SCALAR_CONVECTION
    double temp = a_i[0] * Ni_DNj[0];
    for (unsigned int k_comp = 1; k_comp < TDim; k_comp++)
        temp += a_i[k_comp] * Ni_DNj[k_comp];
    destination -= temp * (phi_j - phi_i);
#else
    double aux_i = a_i[0] * Ni_DNj[0];
    double aux_j = a_j[0] * Ni_DNj[0];
    for (unsigned int k_comp = 1; k_comp < TDim; k_comp++)
    {
        aux_i += a_i[k_comp] * Ni_DNj[k_comp];
        aux_j += a_j[k_comp] * Ni_DNj[k_comp];
    }
    destination -= aux_j * phi_j - aux_i * phi_i;
#endif
    // (a commented-out DNi_Nj-based variant from the original is condensed out here)
}

// Additive counterpart of the scalar convective contribution above.
inline void Add_ConvectiveContribution(double& destination,
        const array_1d<double, TDim>& a_i, const double& phi_i,
        const array_1d<double, TDim>& a_j, const double& phi_j)
{
#ifdef USE_CONSERVATIVE_FORM_FOR_SCALAR_CONVECTION
    double temp = a_i[0] * Ni_DNj[0];
    for (unsigned int k_comp = 1; k_comp < TDim; k_comp++)
        temp += a_i[k_comp] * Ni_DNj[k_comp];
    destination += temp * (phi_j - phi_i);
#else
    double aux_i = a_i[0] * Ni_DNj[0];
    double aux_j = a_j[0] * Ni_DNj[0];
    for (unsigned int k_comp = 1; k_comp < TDim; k_comp++)
    {
        aux_i += a_i[k_comp] * Ni_DNj[k_comp];
        aux_j += a_j[k_comp] * Ni_DNj[k_comp];
    }
    destination += aux_j * phi_j - aux_i * phi_i;
#endif
    // (a commented-out DNi_Nj-based variant from the original is condensed out here)
}

//*************************************************************************************
//*************************************************************************************
// Low-order convection stabilization for a vector unknown:
//   stab_low = (a_i^T * LaplacianIJ * a_i) * (U_j - U_i)
inline void CalculateConvectionStabilization_LOW(array_1d<double, TDim>& stab_low,
        const array_1d<double, TDim>& a_i, const array_1d<double, TDim>& U_i,
        const array_1d<double, TDim>& a_j, const array_1d<double, TDim>& U_j)
{
    double conv_stab = 0.0;
    for (unsigned int k_comp = 0; k_comp < TDim; k_comp++)
        for (unsigned int m_comp = 0; m_comp < TDim; m_comp++)
            conv_stab += a_i[k_comp] * a_i[m_comp] * LaplacianIJ(k_comp, m_comp);
    for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
        stab_low[l_comp] = conv_stab * (U_j[l_comp] - U_i[l_comp]);
    // (a diagonal-only variant and a pressure-aware overload remain commented out in
    //  the original; condensed out here)
}

// Scalar counterpart of the low-order convection stabilization.
inline void CalculateConvectionStabilization_LOW(double& stab_low,
        const array_1d<double, TDim>& a_i, const double& phi_i,
        const array_1d<double, TDim>& a_j, const double& phi_j)
{
    double conv_stab = 0.0;
    for (unsigned int k_comp = 0; k_comp < TDim; k_comp++)
        for (unsigned int m_comp = 0; m_comp < TDim; m_comp++)
            conv_stab += a_i[k_comp] * a_i[m_comp] * LaplacianIJ(k_comp, m_comp);
    stab_low = conv_stab * (phi_j - phi_i);
}
//*************************************************************************************
//*************************************************************************************
// High-order convection stabilization for a vector unknown (pi = projected residual).
// Branch selected at compile time by USE_CONSERVATIVE_FORM_FOR_VECTOR_CONVECTION.
inline void CalculateConvectionStabilization_HIGH(array_1d<double, TDim>& stab_high,
        const array_1d<double, TDim>& a_i, const array_1d<double, TDim>& pi_i,
        const array_1d<double, TDim>& a_j, const array_1d<double, TDim>& pi_j)
{
#ifdef USE_CONSERVATIVE_FORM_FOR_VECTOR_CONVECTION
    double temp = 0.0;
    for (unsigned int k_comp = 0; k_comp < TDim; k_comp++)
        temp += a_i[k_comp] * Ni_DNj[k_comp];
    for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
        stab_high[l_comp] = -temp * (pi_j[l_comp] - pi_i[l_comp]); //check if the minus sign is correct
    // (two further commented-out variants mixing Ni_DNj / DNi_Nj exist in the
    //  original; condensed out here)
#else
    double aux_i = a_i[0] * Ni_DNj[0];
    double aux_j = a_j[0] * Ni_DNj[0];
    for (unsigned int k_comp = 1; k_comp < TDim; k_comp++)
    {
        aux_i += a_i[k_comp] * Ni_DNj[k_comp];
        aux_j += a_j[k_comp] * Ni_DNj[k_comp];
    }
    for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
        stab_high[l_comp] = -(aux_j * pi_j[l_comp] - aux_i * pi_i[l_comp]);
#endif
}

// Scalar counterpart of the high-order stabilization; branch selected by
// USE_CONSERVATIVE_FORM_FOR_SCALAR_CONVECTION.
inline void CalculateConvectionStabilization_HIGH(double& stab_high,
        const array_1d<double, TDim>& a_i, const double& pi_i,
        const array_1d<double, TDim>& a_j, const double& pi_j)
{
#ifdef USE_CONSERVATIVE_FORM_FOR_SCALAR_CONVECTION
    double temp = 0.0;
    for (unsigned int k_comp = 0; k_comp < TDim; k_comp++)
        temp += a_i[k_comp] * Ni_DNj[k_comp];
    stab_high = -temp * (pi_j - pi_i); //check if the minus sign is correct
#else
    double aux_i = a_i[0] * Ni_DNj[0];
    double aux_j = a_j[0] * Ni_DNj[0];
    for (unsigned int k_comp = 1; k_comp < TDim; k_comp++)
    {
        aux_i += a_i[k_comp] * Ni_DNj[k_comp];
        aux_j += a_j[k_comp] * Ni_DNj[k_comp];
    }
    stab_high = -(aux_j * pi_j - aux_i * pi_i);
#endif
}

//*************************************************************************************
//*************************************************************************************
// destination += tau * (stab_low - beta * stab_high), componentwise.
inline void Add_StabContribution(array_1d<double, TDim>& destination,
        const double tau, const double beta,
        const array_1d<double, TDim>& stab_low, const array_1d<double, TDim>& stab_high)
{
    for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
        destination[l_comp] += tau * (stab_low[l_comp] - beta * stab_high[l_comp]);
}

// Scalar overload of Add_StabContribution.
inline void Add_StabContribution(double& destination,
        const double tau, const double beta,
        const double& stab_low, const double& stab_high)
{
    destination += tau * (stab_low - beta * stab_high);
}

// destination -= tau * (stab_low - beta * stab_high), componentwise.
inline void Sub_StabContribution(array_1d<double, TDim>& destination,
        const double tau, const double beta,
        const array_1d<double, TDim>& stab_low, const array_1d<double, TDim>& stab_high)
{
    for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
        destination[l_comp] -= tau * (stab_low[l_comp] - beta * stab_high[l_comp]);
}

// Scalar overload of Sub_StabContribution.
inline void Sub_StabContribution(double& destination,
        const double tau, const double beta,
        const double& stab_low, const double& stab_high)
{
    destination -= tau * (stab_low - beta * stab_high);
}

//*************************************************************************************
//*************************************************************************************
// Viscous edge contribution: destination += nu_i * trace(LaplacianIJ) * (U_j - U_i).
// NOTE(review): only nu_i is used; nu_j is accepted but ignored (an averaged
// viscosity line is commented out) - confirm this is intended.
inline void Add_ViscousContribution(array_1d<double, TDim>& destination,
        const array_1d<double, TDim>& U_i, const double& nu_i,
        const array_1d<double, TDim>& U_j, const double& nu_j)
{
    //calculate scalar laplacian
    double L = 0.0;
    for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
        L += LaplacianIJ(l_comp, l_comp);
    //double nu_avg = 0.5*(nu_i+nu_j);
    for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
        destination[l_comp] += nu_i * L * (U_j[l_comp] - U_i[l_comp]);
}

// Subtractive counterpart of Add_ViscousContribution (same nu_i-only convention).
inline void Sub_ViscousContribution(array_1d<double, TDim>& destination,
        const array_1d<double, TDim>& U_i, const double& nu_i,
        const array_1d<double, TDim>& U_j, const double& nu_j)
{
    //calculate scalar laplacian
    double L = 0.0;
    for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
        L += LaplacianIJ(l_comp, l_comp);
    //double nu_avg = 0.5*(nu_i+nu_j);
    for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
        destination[l_comp] -= nu_i * L * (U_j[l_comp] - U_i[l_comp]);
}
};

//class definition of matrices using CSR format
// Edge-based matrix container: stores, per edge ij, the consistent-mass, Laplacian
// and gradient contributions (CSR_Tuple) in CSR layout, plus node-based vectors
// (lumped/inverted mass, diagonal gradient, minimal height).
template<unsigned int TDim, class TSparseSpace>
class MatrixContainerC2C
{
public:
    //name for the self defined structure holding all per-edge data
    typedef EdgesStructureTypeC2C<TDim> CSR_Tuple;
    typedef vector<CSR_Tuple> EdgesVectorType;
    //name for row start and column index vectors
    typedef vector<unsigned int> IndicesVectorType;
    //names for separately stored node based values
    typedef vector<double> ValuesVectorType;
    // typedef std::vector< array_1d<double,TDim> > CalcVectorType;
    typedef vector< array_1d<double, TDim> > CalcVectorType;

    //constructor and destructor
    MatrixContainerC2C()
    {
    };
    ~MatrixContainerC2C()
    {
    };

    //functions to return private values
    inline unsigned int GetNumberEdges()
    {
        return mNumberEdges;
    }
    inline EdgesVectorType& GetEdgeValues()
    {
        return mNonzeroEdgeValues;
    }
    inline IndicesVectorType& GetColumnIndex()
    {
        return mColumnIndex;
    }
    inline IndicesVectorType& GetRowStartIndex()
    {
        return mRowStartIndex;
    }
    inline ValuesVectorType& GetLumpedMass()
    {
        return mLumpedMassMatrix;
    }
    inline ValuesVectorType& GetInvertedMass()
    {
        return mInvertedMassMatrix;
    }
    inline CalcVectorType& GetDiagGradient()
    {
        return mDiagGradientMatrix;
    }
    inline ValuesVectorType& GetHmin()
    {
        return mHmin;
    }

    //********************************************************
    //function to size and initialize the vector of CSR tuples
    // Builds the CSR sparsity pattern from each node's NEIGHBOUR_NODES list,
    // assigns a global index to every node via AUX_INDEX, and zero-initializes
    // all edge and node based storage.
    void ConstructCSRVector(ModelPart& model_part)
    {
        KRATOS_TRY

        //SIZE OF CSR VECTOR
        //defining the number of nodes and edges
        int n_nodes = model_part.Nodes().size();
        //remark: no colouring algorithm is used here (symmetry is neglected)
        //        respectively edge ij is considered different from edge ji
        mNumberEdges = 0;
        //counter to assign and get global nodal index
        int i_node = 0;

        //counting the edges connecting the nodes
        for (typename ModelPart::NodesContainerType::iterator node_it = model_part.NodesBegin(); node_it != model_part.NodesEnd(); node_it++)
        {
            //counting neighbours of each node
            mNumberEdges += (node_it->GetValue(NEIGHBOUR_NODES)).size();
            //DIAGONAL TERMS
            //mNumberEdges++;

            //assigning global index to each node
            node_it->FastGetSolutionStepValue(AUX_INDEX) = static_cast<double> (i_node++);
        }
        //error message in case number of nodes does not coincide with number of indices
        if (i_node != n_nodes)
            KRATOS_WATCH("ERROR - Highest nodal index doesn't coincide with number of nodes!");

        //allocating memory for block of CSR data - setting to zero for first-touch OpenMP allocation
        mNonzeroEdgeValues.resize(mNumberEdges);
        //SetToZero(mNonzeroEdgeValues);
        mColumnIndex.resize(mNumberEdges);
        //SetToZero(mColumnIndex);
        mRowStartIndex.resize(n_nodes + 1);
        //SetToZero(mRowStartIndex);
        mLumpedMassMatrix.resize(n_nodes);
        SetToZero(mLumpedMassMatrix);
        mInvertedMassMatrix.resize(n_nodes);
        SetToZero(mInvertedMassMatrix);
        mDiagGradientMatrix.resize(n_nodes);
        SetToZero(mDiagGradientMatrix);
        mHmin.resize(n_nodes);
        SetToZero(mHmin);

        //INITIALIZING OF THE CSR VECTOR
        //temporary variable as the row start index of a node depends on the number of neighbours of the previous one
        unsigned int row_start_temp = 0;

        int number_of_threads = OpenMPUtils::GetNumThreads();
        std::vector<int> row_partition(number_of_threads);
        OpenMPUtils::DivideInPartitions(model_part.Nodes().size(), number_of_threads, row_partition);

        for (int k = 0; k < number_of_threads; k++)
        {
            #pragma omp parallel
            // each thread processes only its own partition of the rows
            if (OpenMPUtils::ThisThread() == k)
            {
                for (unsigned int aux_i = static_cast<unsigned int> (row_partition[k]); aux_i < static_cast<unsigned int> (row_partition[k + 1]); aux_i++)
                {
                    typename ModelPart::NodesContainerType::iterator node_it = model_part.NodesBegin() + aux_i;

                    //getting the global index of the node
                    i_node = static_cast<unsigned int> (node_it->FastGetSolutionStepValue(AUX_INDEX));
                    //determining its neighbours
                    WeakPointerVector< Node < 3 > >& neighb_nodes = node_it->GetValue(NEIGHBOUR_NODES);
                    //number of neighbours of node i determines row start index for the following node
                    unsigned int n_neighbours = neighb_nodes.size();
                    //DIAGONAL TERMS
                    //n_neighbours++;

                    //reserving memory for work array
                    std::vector<unsigned int> work_array;
                    work_array.reserve(n_neighbours);
                    //DIAGONAL TERMS
                    //work_array.push_back(i_node);

                    //nested loop over the neighbouring nodes
                    for (WeakPointerVector< Node < 3 > >::iterator neighb_it = neighb_nodes.begin(); neighb_it != neighb_nodes.end(); neighb_it++)
                    {
                        //getting global index of the neighbouring node
                        work_array.push_back(static_cast<unsigned int> (neighb_it->FastGetSolutionStepValue(AUX_INDEX)));
                    }
                    //reordering neighbours following their global indices
                    std::sort(work_array.begin(), work_array.end());

                    //setting current row start index
                    mRowStartIndex[i_node] = row_start_temp;
                    //nested loop over the by now ordered neighbours
                    for (unsigned int counter = 0; counter < n_neighbours; counter++)
                    {
                        //getting global index of the neighbouring node
                        unsigned int j_neighbour = work_array[counter];
                        //calculating CSR index
                        unsigned int csr_index = mRowStartIndex[i_node] + counter;

                        //saving column index j of the original matrix
                        mColumnIndex[csr_index] = j_neighbour;
                        //initializing the CSR vector entries with zero
                        mNonzeroEdgeValues[csr_index].Mass = 0.0;
                        //mNonzeroEdgeValues[csr_index].Laplacian = 0.0;
                        noalias(mNonzeroEdgeValues[csr_index].LaplacianIJ) = ZeroMatrix(TDim, TDim);
                        noalias(mNonzeroEdgeValues[csr_index].Ni_DNj) = ZeroVector(TDim);
                        //TRANSPOSED GRADIENT
                        noalias(mNonzeroEdgeValues[csr_index].DNi_Nj) = ZeroVector(TDim);
                    }

                    //preparing row start index for next node
                    row_start_temp += n_neighbours;
                }
            }
        }
        //adding last entry (necessary for abort criterion of loops)
        mRowStartIndex[n_nodes] = mNumberEdges;

        //INITIALIZING NODE BASED VALUES
        //lumped mass matrix (elements Mi)
        /* #pragma omp parallel for
        for (int i_node=0; i_node<n_nodes; i_node++)
            mLumpedMassMatrix[i_node] = 0.0;*/

        #pragma omp parallel for
        //set the heights to a huge number
        for (int i_node = 0; i_node < n_nodes; i_node++)
            mHmin[i_node] = 1e10;

        //diagonal of gradient matrix (elements Gii)
        // #pragma omp parallel for
        // for (int i_node=0; i_node<n_nodes; i_node++)
        //     noalias(mDiagGradientMatrix[i_node]) = ZeroVector(TDim);

        KRATOS_CATCH("")
    }

    //*********************************
    //function to precalculate CSR data
    // Loops over all elements, computes elemental mass/Laplacian/gradient
    // contributions and assembles them edge-wise into the CSR structure;
    // also fills mHmin (minimal element height per node) and the lumped /
    // inverted mass vectors.
    void BuildCSRData(ModelPart& model_part)
    {
        KRATOS_TRY

        //PRECALCULATING CSR DATA
        //defining temporary local variables for elementwise addition
        //shape functions
        array_1d<double, TDim + 1 > N;
        //shape function derivatives
        boost::numeric::ublas::bounded_matrix <double, TDim + 1, TDim> dN_dx;
        //volume
        double volume;
        //weighting factor
        double weighting_factor = 1.0 / static_cast<double> (TDim + 1);
        //elemental matrices
        boost::numeric::ublas::bounded_matrix <double, TDim + 1, TDim + 1 > mass_consistent;
        //boost::numeric::ublas::bounded_matrix <double, TDim+1,TDim+1> laplacian;
        array_1d<double, TDim + 1 > mass_lumped;
        //global indices of elemental nodes
        array_1d<unsigned int, TDim + 1 > nodal_indices;

        array_1d<double, TDim + 1 > heights;

        //loop over all elements
        for (typename ModelPart::ElementsContainerType::iterator elem_it = model_part.ElementsBegin(); elem_it != model_part.ElementsEnd(); elem_it++)
        {
            //LOCAL ELEMENTWISE CALCULATIONS
            //getting geometry data of the element
            GeometryUtils::CalculateGeometryData(elem_it->GetGeometry(), dN_dx, N, volume);

            //calculate lenght of the heights of the element
            // (height_i = 1 / |grad N_i|, from the shape-function gradients)
            for (unsigned int ie_node = 0; ie_node <= TDim; ie_node++)
            {
                heights[ie_node] = dN_dx(ie_node, 0) * dN_dx(ie_node, 0);
                for (unsigned int comp = 1; comp < TDim; comp++)
                {
                    heights[ie_node] += dN_dx(ie_node, comp) * dN_dx(ie_node, comp);
                }
                heights[ie_node] = 1.0 / sqrt(heights[ie_node]);
                // KRATOS_WATCH(heights);
            }

            //setting up elemental mass matrices
            CalculateMassMatrix(mass_consistent, volume);
            noalias(mass_lumped) = ZeroVector(TDim + 1);
            for (unsigned int ie_node = 0; ie_node <= TDim; ie_node++)
            {
                for (unsigned int je_node = 0; je_node <= TDim; je_node++)
                {
                    //mass_consistent(ie_node,je_node) = N(ie_node) * N(je_node) * volume;
                    mass_lumped[ie_node] += mass_consistent(ie_node, je_node);
                }
                //mass_lumped[ie_node] = volume * N[ie_node];
            }
            // (an older elemental-Laplacian code path remains as a block comment in
            //  the original; condensed out here)

            //(corresponding to Ni * dOmega respectively Nj * dOmega)
            double weighted_volume = volume * weighting_factor;

            //ASSEMBLING GLOBAL DATA STRUCTURE
            //loop over the nodes of the element to determine their global indices
            for (unsigned int ie_node = 0; ie_node <= TDim; ie_node++)
                nodal_indices[ie_node] = static_cast<unsigned int> (elem_it->GetGeometry()[ie_node].FastGetSolutionStepValue(AUX_INDEX));

            //assembling global "edge matrices" by adding local contributions
            for (unsigned int ie_node = 0; ie_node <= TDim; ie_node++)
            {
                //check the heights and change the value if minimal is found
                if (mHmin[ nodal_indices[ie_node] ] > heights[ie_node])
                    mHmin[ nodal_indices[ie_node] ] = heights[ie_node];

                for (unsigned int je_node = 0; je_node <= TDim; je_node++)
                {
                    //remark: there is no edge linking node i with itself!
                    //DIAGONAL TERMS
                    if (ie_node != je_node)
                    {
                        //calculating CSR index from global index
                        unsigned int csr_index = GetCSRIndex(nodal_indices[ie_node], nodal_indices[je_node]);

                        //assigning precalculated element data to the referring edges
                        //contribution to edge mass
                        mNonzeroEdgeValues[csr_index].Mass += mass_consistent(ie_node, je_node);

                        //contribution to edge laplacian
                        boost::numeric::ublas::bounded_matrix <double, TDim, TDim>& laplacian = mNonzeroEdgeValues[csr_index].LaplacianIJ;
                        for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
                            for (unsigned int k_comp = 0; k_comp < TDim; k_comp++)
                                laplacian(l_comp, k_comp) += dN_dx(ie_node, l_comp) * dN_dx(je_node, k_comp) * volume;

                        //contribution to edge gradient
                        array_1d<double, TDim>& gradient = mNonzeroEdgeValues[csr_index].Ni_DNj;
                        for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
                            gradient[l_comp] += dN_dx(je_node, l_comp) * weighted_volume;

                        //TRANSPOSED GRADIENT
                        //contribution to transposed edge gradient
                        array_1d<double, TDim>& transp_gradient = mNonzeroEdgeValues[csr_index].DNi_Nj;
                        for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
                            transp_gradient[l_comp] += dN_dx(ie_node, l_comp) * weighted_volume;
                    }
                }
            }

            //assembling node based vectors
            for (unsigned int ie_node = 0; ie_node <= TDim; ie_node++)
                //diagonal of the global lumped mass matrix
                mLumpedMassMatrix[nodal_indices[ie_node]] += mass_lumped[ie_node];
            for (unsigned int ie_node = 0; ie_node <= TDim; ie_node++)
            {
                //diagonal of the global gradient matrix
                array_1d<double, TDim>& gradient = mDiagGradientMatrix[nodal_indices[ie_node]];
                for (unsigned int component = 0; component < TDim; component++)
                    gradient[component] += dN_dx(ie_node, component) * weighted_volume;
            }
        }

        //copy mass matrix to inverted mass matrix
        for (unsigned int inode = 0; inode < mLumpedMassMatrix.size(); inode++)
        {
            mInvertedMassMatrix[inode] = mLumpedMassMatrix[inode];
        }

        //perform MPI syncronization between the domains
        //calculating inverted mass matrix (this requires syncronization for MPI paraellelism
        for (unsigned int inode = 0; inode < mInvertedMassMatrix.size(); inode++)
        {
            mInvertedMassMatrix[inode] = 1.0 / mInvertedMassMatrix[inode];
        }

        KRATOS_CATCH("")
    }

    //******************************************
    //function to calculate CSR index of edge ij
    // NOTE(review): if j is not a neighbour of i the linear search falls through and
    // the one-past-the-row index is returned unchecked - callers must pass valid edges.
    unsigned int GetCSRIndex(unsigned int NodeI, unsigned int NeighbourJ)
    {
        KRATOS_TRY

        //index indicating data position of edge ij
        unsigned int csr_index;
        //searching for coincidence of stored column index and neighbour index j
        for (csr_index = mRowStartIndex[NodeI]; csr_index != mRowStartIndex[NodeI + 1]; csr_index++)
            if (mColumnIndex[csr_index] == NeighbourJ)
                break;

        //returning CSR index of edge ij
        return csr_index;

        KRATOS_CATCH("")
    }

    //***********************************************
    //function to get pointer to CSR tuple of edge ij
    // (same unchecked linear search as GetCSRIndex - see note there)
    CSR_Tuple* GetTuplePointer(unsigned int NodeI, unsigned int NeighbourJ)
    {
        KRATOS_TRY

        //index indicating data position of edge ij
        unsigned int csr_index;
        //searching for coincidence of stored column index and neighbour index j
        for (csr_index = mRowStartIndex[NodeI]; csr_index != mRowStartIndex[NodeI + 1]; csr_index++)
            if (mColumnIndex[csr_index] == NeighbourJ)
                break;

        //returning pointer to CSR tuple of edge ij
        return &mNonzeroEdgeValues[csr_index];

        KRATOS_CATCH("")
    }

    //*******************************
    //function to free dynamic memory
    void Clear()
    {
        KRATOS_TRY
        mNonzeroEdgeValues.clear();
        mColumnIndex.clear();
        mRowStartIndex.clear();
        mInvertedMassMatrix.clear();
        mLumpedMassMatrix.clear();
        mDiagGradientMatrix.clear();
        mHmin.clear();
        KRATOS_CATCH("")
    }

    //****************************
    //functions to access database
    //(note that this is already thought for parallel;
    // for a single processor this could be done in a faster way)
    // Copies nodal coordinates into rDestination, indexed by position i
    // (NOT by AUX_INDEX - the AUX_INDEX lookup is commented out).
    void FillCoordinatesFromDatabase(CalcVectorType& rDestination, ModelPart::NodesContainerType& rNodes)
    {
        KRATOS_TRY

        //loop over all nodes
        int n_nodes = rNodes.size();
        ModelPart::NodesContainerType::iterator it_begin = rNodes.begin();

        #pragma omp parallel for firstprivate(n_nodes, it_begin)
        for (int i = 0; i < n_nodes; i++)
        {
            ModelPart::NodesContainerType::iterator node_it = it_begin + i;

            //get the global index of node i
            // unsigned int i_node = static_cast<unsigned int>(node_it->FastGetSolutionStepValue(AUX_INDEX));
            unsigned int i_node = i;

            //save value in the destination vector
            for (unsigned int component = 0; component < TDim; component++)
                (rDestination[i_node])[component] = (*node_it)[component];
        }

        KRATOS_CATCH("");
    }

    //****************************
    //functions to access database
    // Copies the current step value of a vector variable into rDestination.
    void FillVectorFromDatabase(Variable<array_1d<double, 3 > >& rVariable, CalcVectorType& rDestination, ModelPart::NodesContainerType& rNodes)
    {
        KRATOS_TRY

        //loop over all nodes
        int n_nodes = rNodes.size();
        ModelPart::NodesContainerType::iterator it_begin = rNodes.begin();
        //variable position is resolved once, outside the parallel loop
        unsigned int var_pos = it_begin->pGetVariablesList()->Index(rVariable);

        #pragma omp parallel for firstprivate(n_nodes, it_begin,var_pos)
        for (int i = 0; i < n_nodes; i++)
        {
            ModelPart::NodesContainerType::iterator node_it = it_begin + i;

            //get the global index of node i
            // unsigned int i_node = static_cast<unsigned int>(node_it->FastGetSolutionStepValue(AUX_INDEX));
            unsigned int i_node = i;

            //get the requested value in vector form
            array_1d<double, 3 > & vector = node_it-> FastGetCurrentSolutionStepValue(rVariable, var_pos);
            //save value in the destination vector
            for (unsigned int component = 0; component < TDim; component++)
                (rDestination[i_node])[component] = vector[component];
        }

        KRATOS_CATCH("");
    }

    // Same as FillVectorFromDatabase but reads the previous time step (buffer index 1).
    void FillOldVectorFromDatabase(Variable<array_1d<double, 3 > >& rVariable, CalcVectorType& rDestination, ModelPart::NodesContainerType& rNodes)
    {
        KRATOS_TRY

        //loop over all nodes
        int n_nodes = rNodes.size();
        ModelPart::NodesContainerType::iterator it_begin = rNodes.begin();
        unsigned int var_pos = it_begin->pGetVariablesList()->Index(rVariable);

        #pragma omp parallel for firstprivate(n_nodes, it_begin,var_pos)
        for (int i = 0; i < n_nodes; i++)
        {
            ModelPart::NodesContainerType::iterator node_it = it_begin + i;

            //get the global index of node i
            // unsigned int i_node = static_cast<unsigned int>(node_it->FastGetSolutionStepValue(AUX_INDEX));
            unsigned int i_node = i;

            //get the requested value in vector form
            array_1d<double, 3 > & vector = node_it->FastGetSolutionStepValue(rVariable, 1, var_pos);
            //save value in the destination vector
            for (unsigned int component = 0; component < TDim; component++)
                (rDestination[i_node])[component] = vector[component];
        }

        KRATOS_CATCH("");
    }

    // Copies the current step value of a scalar variable into rDestination.
    void FillScalarFromDatabase(Variable<double>& rVariable, ValuesVectorType& rDestination, ModelPart::NodesContainerType& rNodes)
    {
        KRATOS_TRY

        //loop over all nodes
        int n_nodes = rNodes.size();
        ModelPart::NodesContainerType::iterator it_begin = rNodes.begin();
        unsigned int var_pos = it_begin->pGetVariablesList()->Index(rVariable);

        #pragma omp parallel for firstprivate(n_nodes, it_begin,var_pos)
        for (int i = 0; i < n_nodes; i++)
        {
            ModelPart::NodesContainerType::iterator node_it = it_begin + i;

            //get the global index of node i
            // unsigned int i_node = static_cast<unsigned int>(node_it->FastGetSolutionStepValue(AUX_INDEX));
            unsigned int i_node = i;

            //get the requested scalar value
            double& scalar = node_it-> FastGetCurrentSolutionStepValue(rVariable, var_pos);
            //save value in the destination vector
            rDestination[i_node] = scalar;
        }

        KRATOS_CATCH("");
    }

    // Same as FillScalarFromDatabase but reads the previous time step (buffer index 1).
    void FillOldScalarFromDatabase(Variable<double>& rVariable, ValuesVectorType& rDestination, ModelPart::NodesContainerType& rNodes)
    {
        KRATOS_TRY

        int n_nodes = rNodes.size();
        ModelPart::NodesContainerType::iterator it_begin = rNodes.begin();
        unsigned int var_pos = it_begin->pGetVariablesList()->Index(rVariable);

        #pragma omp parallel for firstprivate(n_nodes, it_begin,var_pos)
        for (int i = 0; i < n_nodes; i++)
        {
            ModelPart::NodesContainerType::iterator node_it = it_begin + i;

            //get the global index of node i
            // unsigned int i_node = static_cast<unsigned int>(node_it->FastGetSolutionStepValue(AUX_INDEX));
            unsigned int i_node = i;

            //get the requested scalar value
            double& scalar = node_it->FastGetSolutionStepValue(rVariable, 1, var_pos);
            //save value in the destination vector
            rDestination[i_node] = scalar;
        }

        KRATOS_CATCH("");
    }

    // Writes rOrigin back into the nodal database (current step, vector variable).
    void WriteVectorToDatabase(Variable<array_1d<double, 3 > >& rVariable, CalcVectorType& rOrigin, ModelPart::NodesContainerType& rNodes)
    {
        KRATOS_TRY

        //loop over all nodes
        int n_nodes = rNodes.size();
        ModelPart::NodesContainerType::iterator it_begin = rNodes.begin();
        unsigned int var_pos = it_begin->pGetVariablesList()->Index(rVariable);

        #pragma omp parallel for firstprivate(n_nodes, it_begin,var_pos)
        for (int i = 0; i < n_nodes; i++)
        {
            ModelPart::NodesContainerType::iterator node_it = it_begin + i;

            //get the global index of node i
            // unsigned int i_node = static_cast<unsigned int>(node_it->FastGetSolutionStepValue(AUX_INDEX));
            unsigned int i_node = i;

            //get reference of destination
            array_1d<double, 3 > & vector = node_it->FastGetCurrentSolutionStepValue(rVariable, var_pos);
            //save vector in database
            for (unsigned int component = 0; component < TDim; component++)
                vector[component] = (rOrigin[i_node])[component];
        }

        KRATOS_CATCH("");
    }

    // Writes rOrigin back into the nodal database (current step, scalar variable).
    void WriteScalarToDatabase(Variable<double>& rVariable, ValuesVectorType& rOrigin, ModelPart::NodesContainerType& rNodes)
    {
        KRATOS_TRY

        //loop over all nodes
        int n_nodes = rNodes.size();
        ModelPart::NodesContainerType::iterator it_begin = rNodes.begin();
        unsigned int var_pos = it_begin->pGetVariablesList()->Index(rVariable);

        #pragma omp parallel for firstprivate(n_nodes, it_begin,var_pos)
        for (int i = 0; i < n_nodes; i++)
        {
            ModelPart::NodesContainerType::iterator node_it = it_begin + i;

            //get the global index of node i
            // unsigned int i_node = static_cast<unsigned int>(node_it->FastGetSolutionStepValue(AUX_INDEX));
            int i_node = i; // NOTE(review): plain int here, unsigned elsewhere - harmless but inconsistent

            //get reference of destination
            double& scalar = node_it-> FastGetCurrentSolutionStepValue(rVariable, var_pos);
            //save scalar in database
            scalar = rOrigin[i_node];
        }

        KRATOS_CATCH("");
    }

    //*********************************************************************
    //destination = origin1 + value * Minv*origin  (componentwise, vector version)
    void Add_Minv_value(
        CalcVectorType& destination,
        const CalcVectorType& origin1,
        const double value,
        const ValuesVectorType& Minv_vec,
        const CalcVectorType& origin
    )
    {
        KRATOS_TRY

        int loop_size = destination.size();
        #pragma omp parallel for
        for (int i_node = 0; i_node < loop_size; i_node++)
        {
            array_1d<double, TDim>& dest = destination[i_node];
            const double m_inv = Minv_vec[i_node];
            const array_1d<double, TDim>& origin_vec1 = origin1[i_node];
            const array_1d<double, TDim>& origin_value = origin[i_node];

            double temp = value * m_inv;
            for (unsigned int comp = 0; comp < TDim; comp++)
                dest[comp] = origin_vec1[comp] + temp * origin_value[comp];
        }

        KRATOS_CATCH("")
    }

    //destination = origin1 + value * Minv*origin  (scalar version)
    void Add_Minv_value(
        ValuesVectorType& destination,
        const ValuesVectorType& origin1,
        const double value,
        const ValuesVectorType& Minv_vec,
        const ValuesVectorType& origin
    )
    {
        KRATOS_TRY

        int loop_size = destination.size();
        #pragma omp parallel for
        for (int i_node = 0; i_node < loop_size; i_node++)
        {
            double& dest = destination[i_node];
            const double m_inv = Minv_vec[i_node];
            const double& origin_vec1 = origin1[i_node];
            const double& origin_value = origin[i_node];

            double temp = value * m_inv;
            dest = origin_vec1 + temp * origin_value;
        }

        KRATOS_CATCH("")
    }

    //**********************************************************************
    // Resizes and zero-fills a vector-valued work array (parallel first touch).
    void AllocateAndSetToZero(CalcVectorType& data_vector, int size)
    {
        data_vector.resize(size);
        int loop_size = size;
        #pragma omp parallel for
        for (int i_node = 0; i_node < loop_size; i_node++)
        {
            array_1d<double, TDim>& aaa = data_vector[i_node];
            for (unsigned int comp = 0; comp < TDim; comp++)
                aaa[comp] = 0.0;
        }
    }

    // Resizes and zero-fills a scalar work array (parallel first touch).
    void AllocateAndSetToZero(ValuesVectorType& data_vector, int size)
    {
        data_vector.resize(size);
        int loop_size = size;
        #pragma omp parallel for
        for (int i_node = 0; i_node < loop_size; i_node++)
        {
            data_vector[i_node] = 0.0;
            ;
        }
    }

    //**********************************************************************
    // Zero-fills an already allocated vector-valued array.
    void SetToZero(CalcVectorType& data_vector)
    {
        int loop_size = data_vector.size();
        #pragma omp parallel for
        for (int i_node = 0; i_node < loop_size; i_node++)
        {
            array_1d<double, TDim>& aaa = data_vector[i_node];
            for (unsigned int comp = 0; comp < TDim; comp++)
                aaa[comp] = 0.0;
        }
    }

    // Zero-fills an already allocated scalar array.
    void SetToZero(ValuesVectorType& data_vector)
    {
        int loop_size = data_vector.size();
        #pragma omp parallel for
        for (int i_node = 0; i_node < loop_size; i_node++)
        {
            data_vector[i_node] = 0.0;
            ;
        }
    }

    //**********************************************************************
    // Parallel element-wise copy (vector version); destination must be pre-sized.
    void AssignVectorToVector(const CalcVectorType& origin,
                              CalcVectorType& destination
                             )
    {
        int loop_size = origin.size();
        #pragma omp parallel for
        for (int i_node = 0; i_node < loop_size; i_node++)
        {
            const array_1d<double, TDim>& orig = origin[i_node];
            array_1d<double, TDim>& dest = destination[i_node];
            for (unsigned int comp = 0; comp < TDim; comp++)
                dest[comp] = orig[comp];
        }
    }

    // Parallel element-wise copy (scalar version); destination must be pre-sized.
    void AssignVectorToVector(const ValuesVectorType& origin,
                              ValuesVectorType& destination
                             )
    {
        int loop_size = origin.size();
        #pragma omp parallel for
        for (int i_node = 0; i_node < loop_size; i_node++)
        {
            destination[i_node] = origin[i_node];
        }
    }

private:
    //number of edges
    unsigned int mNumberEdges;
    //CSR data vector for storage of the G, L and consistent M components of edge ij
    EdgesVectorType mNonzeroEdgeValues;
    //vector to store column indices of nonzero matrix elements for each row
    IndicesVectorType mColumnIndex;
    //index vector to access the start of matrix row i in the column vector
    IndicesVectorType mRowStartIndex;
    //inverse of the mass matrix ... for parallel calculation each subdomain should contain this correctly calculated (including contributions of the neighbours)
    ValuesVectorType mInvertedMassMatrix;
    //minimum height around one node
    ValuesVectorType mHmin;
    //lumped mass matrix (separately stored due to lack of diagonal elements of the consistent mass matrix)
    ValuesVectorType mLumpedMassMatrix;
    //diagonal of the gradient matrix (separately stored due to special calculations)
    CalcVectorType mDiagGradientMatrix;

    //*******************************************
    //functions to set up elemental mass matrices
    // Triangle (2D) consistent mass: V/6 on the diagonal, V/12 off-diagonal.
    void CalculateMassMatrix(boost::numeric::ublas::bounded_matrix<double, 3, 3 > & mass_consistent, double volume)
    {
        for (unsigned int i_node = 0; i_node <= TDim; i_node++)
        {
            //diagonal terms
            mass_consistent(i_node, i_node) = 0.16666666666666666667 * volume; //1/6
            //non-diagonal terms
            double temp = 0.08333333333333333333 * volume; // 1/12
            for (unsigned int j_neighbour = i_node + 1; j_neighbour <= TDim; j_neighbour++)
            {
                //taking advantage of symmetry
                mass_consistent(i_node, j_neighbour) = temp;
                mass_consistent(j_neighbour, i_node) = temp;
            }
        }
    }

    // Tetrahedron (3D) consistent mass: V/10 on the diagonal, V/20 off-diagonal.
    void CalculateMassMatrix(boost::numeric::ublas::bounded_matrix<double, 4, 4 > & mass_consistent, double volume)
    {
        for (unsigned int i_node = 0; i_node <= TDim; i_node++)
        {
            //diagonal terms
            mass_consistent(i_node, i_node) = 0.1 * volume;
            //non-diagonal terms
            double temp = 0.05 * volume;
            for (unsigned int j_neighbour = i_node + 1; j_neighbour <= TDim; j_neighbour++)
            {
                //taking advantage of symmetry
                mass_consistent(i_node, j_neighbour) = temp;
                mass_consistent(j_neighbour, i_node) = temp;
            }
        }
    }

};

} //namespace Kratos

#endif //KRATOS_EDGE_DATA_C2C_H_INCLUDED defined
convolution_3x3.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved. // Copyright (C) 2019 BUG1989. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv3x3s1_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const float* kernel = _kernel; const float* bias = _bias; #pragma omp parallel for num_threads(opt.num_threads) for (int p=0; p<outch; p++) { Mat out = top_blob.channel(p); const float bias0 = bias ? 
bias[p] : 0.f; out.fill(bias0); for (int q=0; q<inch; q++) { float* outptr = out; float* outptr2 = outptr + outw; const float* img0 = bottom_blob.channel(q); const float* kernel0 = kernel + p*inch*9 + q*9; const float* r0 = img0; const float* r1 = img0 + w; const float* r2 = img0 + w*2; const float* r3 = img0 + w*3; const float* k0 = kernel0; const float* k1 = kernel0 + 3; const float* k2 = kernel0 + 6; int i = 0; for (; i+1 < outh; i+=2) { int remain = outw; for (; remain>0; remain--) { float sum = 0; float sum2 = 0; sum += r0[0] * k0[0]; sum += r0[1] * k0[1]; sum += r0[2] * k0[2]; sum += r1[0] * k1[0]; sum += r1[1] * k1[1]; sum += r1[2] * k1[2]; sum += r2[0] * k2[0]; sum += r2[1] * k2[1]; sum += r2[2] * k2[2]; sum2 += r1[0] * k0[0]; sum2 += r1[1] * k0[1]; sum2 += r1[2] * k0[2]; sum2 += r2[0] * k1[0]; sum2 += r2[1] * k1[1]; sum2 += r2[2] * k1[2]; sum2 += r3[0] * k2[0]; sum2 += r3[1] * k2[1]; sum2 += r3[2] * k2[2]; *outptr += sum; *outptr2 += sum2; r0++; r1++; r2++; r3++; outptr++; outptr2++; } r0 += 2 + w; r1 += 2 + w; r2 += 2 + w; r3 += 2 + w; outptr += outw; outptr2 += outw; } for (; i < outh; i++) { int remain = outw; for (; remain>0; remain--) { float sum = 0; sum += r0[0] * k0[0]; sum += r0[1] * k0[1]; sum += r0[2] * k0[2]; sum += r1[0] * k1[0]; sum += r1[1] * k1[1]; sum += r1[2] * k1[2]; sum += r2[0] * k2[0]; sum += r2[1] * k2[1]; sum += r2[2] * k2[2]; *outptr += sum; r0++; r1++; r2++; outptr++; } r0 += 2; r1 += 2; r2 += 2; } } } } static void conv3x3s1_winograd23_transform_kernel_sse(const Mat& kernel, Mat& kernel_tm, int inch, int outch) { kernel_tm.create(4*4, inch, outch); // G const float ktm[4][3] = { { 1.0f, 0.0f, 0.0f}, { 1.0f/2, 1.0f/2, 1.0f/2}, { 1.0f/2, -1.0f/2, 1.0f/2}, { 0.0f, 0.0f, 1.0f} }; #pragma omp parallel for for (int p = 0; p<outch; p++) { for (int q = 0; q<inch; q++) { const float* kernel0 = (const float*)kernel + p*inch * 9 + q * 9; float* kernel_tm0 = kernel_tm.channel(p).row(q); // transform kernel const float* k0 = kernel0; const 
float* k1 = kernel0 + 3; const float* k2 = kernel0 + 6; // h float tmp[4][3]; for (int i=0; i<4; i++) { tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2]; tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2]; tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2]; } // U for (int j=0; j<4; j++) { float* tmpp = &tmp[j][0]; for (int i=0; i<4; i++) { kernel_tm0[j*4 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2]; } } } } } static void conv3x3s1_winograd23_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; // pad to 2n+2, winograd F(2,3) Mat bottom_blob_bordered = bottom_blob; outw = (outw + 1) / 2 * 2; outh = (outh + 1) / 2 * 2; w = outw + 2; h = outh + 2; copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f, opt.workspace_allocator, opt.num_threads); const float* bias = _bias; // BEGIN transform input Mat bottom_blob_tm; { int w_tm = outw / 2 * 4; int h_tm = outh / 2 * 4; int nColBlocks = h_tm/4; // may be the block num in Feathercnn int nRowBlocks = w_tm/4; const int tiles = nColBlocks * nRowBlocks; bottom_blob_tm.create(4*4, tiles, inch, 4u, opt.workspace_allocator); // BT // const float itm[4][4] = { // {1.0f, 0.0f, -1.0f, 0.0f}, // {0.0f, 1.0f, 1.00f, 0.0f}, // {0.0f, -1.0f, 1.00f, 0.0f}, // {0.0f, -1.0f, 0.00f, 1.0f} // }; #pragma omp parallel for num_threads(opt.num_threads) for (int q=0; q<inch; q++) { const float* img = bottom_blob_bordered.channel(q); float* out_tm0 = bottom_blob_tm.channel(q); for (int j = 0; j < nColBlocks; j++) { const float* r0 = img + w * j * 2; const float* r1 = r0 + w; const float* r2 = r1 + w; const float* r3 = r2 + w; for (int i = 0; i < nRowBlocks; i++) { #if __AVX__ __m128 _d0, _d1, _d2, _d3; __m128 _w0, _w1, _w2, 
_w3; // load _d0 = _mm_loadu_ps(r0); _d1 = _mm_loadu_ps(r1); _d2 = _mm_loadu_ps(r2); _d3 = _mm_loadu_ps(r3); // w = B_t * d _w0 = _mm_sub_ps(_d0, _d2); _w1 = _mm_add_ps(_d1, _d2); _w2 = _mm_sub_ps(_d2, _d1); _w3 = _mm_sub_ps(_d3, _d1); // transpose d to d_t _MM_TRANSPOSE4_PS(_w0, _w1, _w2, _w3); // d = B_t * d_t _d0 = _mm_sub_ps(_w0, _w2); _d1 = _mm_add_ps(_w1, _w2); _d2 = _mm_sub_ps(_w2, _w1); _d3 = _mm_sub_ps(_w3, _w1); // save to out_tm _mm_storeu_ps(out_tm0, _d0); _mm_storeu_ps(out_tm0+4, _d1); _mm_storeu_ps(out_tm0+8, _d2); _mm_storeu_ps(out_tm0+12, _d3); #else float d0[4],d1[4],d2[4],d3[4]; float w0[4],w1[4],w2[4],w3[4]; float t0[4],t1[4],t2[4],t3[4]; // load for (int n = 0; n < 4; n++) { d0[n] = r0[n]; d1[n] = r1[n]; d2[n] = r2[n]; d3[n] = r3[n]; } // w = B_t * d for (int n = 0; n < 4; n++) { w0[n] = d0[n] - d2[n]; w1[n] = d1[n] + d2[n]; w2[n] = d2[n] - d1[n]; w3[n] = d3[n] - d1[n]; } // transpose d to d_t { t0[0]=w0[0]; t1[0]=w0[1]; t2[0]=w0[2]; t3[0]=w0[3]; t0[1]=w1[0]; t1[1]=w1[1]; t2[1]=w1[2]; t3[1]=w1[3]; t0[2]=w2[0]; t1[2]=w2[1]; t2[2]=w2[2]; t3[2]=w2[3]; t0[3]=w3[0]; t1[3]=w3[1]; t2[3]=w3[2]; t3[3]=w3[3]; } // d = B_t * d_t for (int n = 0; n < 4; n++) { d0[n] = t0[n] - t2[n]; d1[n] = t1[n] + t2[n]; d2[n] = t2[n] - t1[n]; d3[n] = t3[n] - t1[n]; } // save to out_tm for (int n = 0; n < 4; n++) { out_tm0[n ] = d0[n]; out_tm0[n+ 4] = d1[n]; out_tm0[n+ 8] = d2[n]; out_tm0[n+12] = d3[n]; } #endif r0 += 2; r1 += 2; r2 += 2; r3 += 2; out_tm0 += 16; } } } } bottom_blob_bordered = Mat(); // BEGIN dot Mat top_blob_tm; { int w_tm = outw / 2 * 4; int h_tm = outh / 2 * 4; int nColBlocks = h_tm/4; // may be the block num in Feathercnn int nRowBlocks = w_tm/4; const int tiles = nColBlocks * nRowBlocks; top_blob_tm.create(16, tiles, outch, 4u, opt.workspace_allocator); int nn_outch = outch >> 2; int remain_outch_start = nn_outch << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int pp=0; pp<nn_outch; pp++) { int p = pp * 4; Mat out0_tm = 
top_blob_tm.channel(p); Mat out1_tm = top_blob_tm.channel(p+1); Mat out2_tm = top_blob_tm.channel(p+2); Mat out3_tm = top_blob_tm.channel(p+3); const Mat kernel0_tm = kernel_tm.channel(p); const Mat kernel1_tm = kernel_tm.channel(p+1); const Mat kernel2_tm = kernel_tm.channel(p+2); const Mat kernel3_tm = kernel_tm.channel(p+3); for (int i=0; i<tiles; i++) { float* output0_tm = out0_tm.row(i); float* output1_tm = out1_tm.row(i); float* output2_tm = out2_tm.row(i); float* output3_tm = out3_tm.row(i); #if __AVX__ float zero_val = 0.f; __m256 _sum0 = _mm256_broadcast_ss(&zero_val); __m256 _sum0n = _mm256_broadcast_ss(&zero_val); __m256 _sum1 = _mm256_broadcast_ss(&zero_val); __m256 _sum1n = _mm256_broadcast_ss(&zero_val); __m256 _sum2 = _mm256_broadcast_ss(&zero_val); __m256 _sum2n = _mm256_broadcast_ss(&zero_val); __m256 _sum3 = _mm256_broadcast_ss(&zero_val); __m256 _sum3n = _mm256_broadcast_ss(&zero_val); int q = 0; for (; q+3<inch; q+=4) { const float* r0 = bottom_blob_tm.channel(q).row(i); const float* r1 = bottom_blob_tm.channel(q+1).row(i); const float* r2 = bottom_blob_tm.channel(q+2).row(i); const float* r3 = bottom_blob_tm.channel(q+3).row(i); const float* k0 = kernel0_tm.row(q); const float* k1 = kernel1_tm.row(q); const float* k2 = kernel2_tm.row(q); const float* k3 = kernel3_tm.row(q); __m256 _r0 = _mm256_loadu_ps(r0); __m256 _r0n = _mm256_loadu_ps(r0+8); // k0 __m256 _k0 = _mm256_loadu_ps(k0); __m256 _k0n = _mm256_loadu_ps(k0+8); __m256 _k1 = _mm256_loadu_ps(k1); __m256 _k1n = _mm256_loadu_ps(k1+8); __m256 _k2 = _mm256_loadu_ps(k2); __m256 _k2n = _mm256_loadu_ps(k2+8); __m256 _k3 = _mm256_loadu_ps(k3); __m256 _k3n = _mm256_loadu_ps(k3+8); _sum0 = _mm256_fmadd_ps(_r0, _k0, _sum0); _sum0n = _mm256_fmadd_ps(_r0n, _k0n, _sum0n); _sum1 = _mm256_fmadd_ps(_r0, _k1, _sum1); _sum1n = _mm256_fmadd_ps(_r0n, _k1n, _sum1n); _sum2 = _mm256_fmadd_ps(_r0, _k2, _sum2); _sum2n = _mm256_fmadd_ps(_r0n, _k2n, _sum2n); _sum3 = _mm256_fmadd_ps(_r0, _k3, _sum3); _sum3n = 
_mm256_fmadd_ps(_r0n, _k3n, _sum3n); // k1 _r0 = _mm256_loadu_ps(r1); _r0n = _mm256_loadu_ps(r1+8); _k0 = _mm256_loadu_ps(k0+16); _k0n = _mm256_loadu_ps(k0+24); _k1 = _mm256_loadu_ps(k1+16); _k1n = _mm256_loadu_ps(k1+24); _k2 = _mm256_loadu_ps(k2+16); _k2n = _mm256_loadu_ps(k2+24); _k3 = _mm256_loadu_ps(k3+16); _k3n = _mm256_loadu_ps(k3+24); _sum0 = _mm256_fmadd_ps(_r0, _k0, _sum0); _sum0n = _mm256_fmadd_ps(_r0n, _k0n, _sum0n); _sum1 = _mm256_fmadd_ps(_r0, _k1, _sum1); _sum1n = _mm256_fmadd_ps(_r0n, _k1n, _sum1n); _sum2 = _mm256_fmadd_ps(_r0, _k2, _sum2); _sum2n = _mm256_fmadd_ps(_r0n, _k2n, _sum2n); _sum3 = _mm256_fmadd_ps(_r0, _k3, _sum3); _sum3n = _mm256_fmadd_ps(_r0n, _k3n, _sum3n); // k2 _r0 = _mm256_loadu_ps(r2); _r0n = _mm256_loadu_ps(r2+8); _k0 = _mm256_loadu_ps(k0+32); _k0n = _mm256_loadu_ps(k0+40); _k1 = _mm256_loadu_ps(k1+32); _k1n = _mm256_loadu_ps(k1+40); _k2 = _mm256_loadu_ps(k2+32); _k2n = _mm256_loadu_ps(k2+40); _k3 = _mm256_loadu_ps(k3+32); _k3n = _mm256_loadu_ps(k3+40); _sum0 = _mm256_fmadd_ps(_r0, _k0, _sum0); _sum0n = _mm256_fmadd_ps(_r0n, _k0n, _sum0n); _sum1 = _mm256_fmadd_ps(_r0, _k1, _sum1); _sum1n = _mm256_fmadd_ps(_r0n, _k1n, _sum1n); _sum2 = _mm256_fmadd_ps(_r0, _k2, _sum2); _sum2n = _mm256_fmadd_ps(_r0n, _k2n, _sum2n); _sum3 = _mm256_fmadd_ps(_r0, _k3, _sum3); _sum3n = _mm256_fmadd_ps(_r0n, _k3n, _sum3n); // k3 _r0 = _mm256_loadu_ps(r3); _r0n = _mm256_loadu_ps(r3+8); _k0 = _mm256_loadu_ps(k0+48); _k0n = _mm256_loadu_ps(k0+56); _k1 = _mm256_loadu_ps(k1+48); _k1n = _mm256_loadu_ps(k1+56); _k2 = _mm256_loadu_ps(k2+48); _k2n = _mm256_loadu_ps(k2+56); _k3 = _mm256_loadu_ps(k3+48); _k3n = _mm256_loadu_ps(k3+56); _sum0 = _mm256_fmadd_ps(_r0, _k0, _sum0); _sum0n = _mm256_fmadd_ps(_r0n, _k0n, _sum0n); _sum1 = _mm256_fmadd_ps(_r0, _k1, _sum1); _sum1n = _mm256_fmadd_ps(_r0n, _k1n, _sum1n); _sum2 = _mm256_fmadd_ps(_r0, _k2, _sum2); _sum2n = _mm256_fmadd_ps(_r0n, _k2n, _sum2n); _sum3 = _mm256_fmadd_ps(_r0, _k3, _sum3); _sum3n = _mm256_fmadd_ps(_r0n, 
_k3n, _sum3n); } for (; q<inch; q++) { const float* r0 = bottom_blob_tm.channel(q).row(i); const float* k0 = kernel0_tm.row(q); const float* k1 = kernel1_tm.row(q); const float* k2 = kernel2_tm.row(q); const float* k3 = kernel3_tm.row(q); __m256 _r0 = _mm256_loadu_ps(r0); __m256 _r0n = _mm256_loadu_ps(r0+8); __m256 _k0 = _mm256_loadu_ps(k0); __m256 _k0n = _mm256_loadu_ps(k0+8); __m256 _k1 = _mm256_loadu_ps(k1); __m256 _k1n = _mm256_loadu_ps(k1+8); __m256 _k2 = _mm256_loadu_ps(k2); __m256 _k2n = _mm256_loadu_ps(k2+8); __m256 _k3 = _mm256_loadu_ps(k3); __m256 _k3n = _mm256_loadu_ps(k3+8); _sum0 = _mm256_fmadd_ps(_r0, _k0, _sum0); _sum0n = _mm256_fmadd_ps(_r0n, _k0n, _sum0n); _sum1 = _mm256_fmadd_ps(_r0, _k1, _sum1); _sum1n = _mm256_fmadd_ps(_r0n, _k1n, _sum1n); _sum2 = _mm256_fmadd_ps(_r0, _k2, _sum2); _sum2n = _mm256_fmadd_ps(_r0n, _k2n, _sum2n); _sum3 = _mm256_fmadd_ps(_r0, _k3, _sum3); _sum3n = _mm256_fmadd_ps(_r0n, _k3n, _sum3n); } _mm256_storeu_ps(output0_tm, _sum0); _mm256_storeu_ps(output0_tm+8, _sum0n); _mm256_storeu_ps(output1_tm, _sum1); _mm256_storeu_ps(output1_tm+8, _sum1n); _mm256_storeu_ps(output2_tm, _sum2); _mm256_storeu_ps(output2_tm+8, _sum2n); _mm256_storeu_ps(output3_tm, _sum3); _mm256_storeu_ps(output3_tm+8, _sum3n); #else float sum0[16] = {0.0f}; float sum1[16] = {0.0f}; float sum2[16] = {0.0f}; float sum3[16] = {0.0f}; int q = 0; for (; q+3<inch; q+=4) { const float* r0 = bottom_blob_tm.channel(q).row(i); const float* r1 = bottom_blob_tm.channel(q+1).row(i); const float* r2 = bottom_blob_tm.channel(q+2).row(i); const float* r3 = bottom_blob_tm.channel(q+3).row(i); const float* k0 = kernel0_tm.row(q); const float* k1 = kernel1_tm.row(q); const float* k2 = kernel2_tm.row(q); const float* k3 = kernel3_tm.row(q); for (int n=0; n<16; n++) { sum0[n] += r0[n] * k0[n]; k0 += 16; sum0[n] += r1[n] * k0[n]; k0 += 16; sum0[n] += r2[n] * k0[n]; k0 += 16; sum0[n] += r3[n] * k0[n]; k0 -= 16 * 3; sum1[n] += r0[n] * k1[n]; k1 += 16; sum1[n] += r1[n] * k1[n]; k1 
+= 16; sum1[n] += r2[n] * k1[n]; k1 += 16; sum1[n] += r3[n] * k1[n]; k1 -= 16 * 3; sum2[n] += r0[n] * k2[n]; k2 += 16; sum2[n] += r1[n] * k2[n]; k2 += 16; sum2[n] += r2[n] * k2[n]; k2 += 16; sum2[n] += r3[n] * k2[n]; k2 -= 16 * 3; sum3[n] += r0[n] * k3[n]; k3 += 16; sum3[n] += r1[n] * k3[n]; k3 += 16; sum3[n] += r2[n] * k3[n]; k3 += 16; sum3[n] += r3[n] * k3[n]; k3 -= 16 * 3; } } for (; q<inch; q++) { const float* r0 = bottom_blob_tm.channel(q).row(i); const float* k0 = kernel0_tm.row(q); const float* k1 = kernel1_tm.row(q); const float* k2 = kernel2_tm.row(q); const float* k3 = kernel3_tm.row(q); for (int n=0; n<16; n++) { sum0[n] += r0[n] * k0[n]; sum1[n] += r0[n] * k1[n]; sum2[n] += r0[n] * k2[n]; sum3[n] += r0[n] * k3[n]; } } for (int n=0; n<16; n++) { output0_tm[n] = sum0[n]; output1_tm[n] = sum1[n]; output2_tm[n] = sum2[n]; output3_tm[n] = sum3[n]; } #endif } } #pragma omp parallel for num_threads(opt.num_threads) for (int p=remain_outch_start; p<outch; p++) { Mat out0_tm = top_blob_tm.channel(p); const Mat kernel0_tm = kernel_tm.channel(p); for (int i=0; i<tiles; i++) { float* output0_tm = out0_tm.row(i); float sum0[16] = {0.0f}; int q = 0; for (; q+3<inch; q+=4) { const float* r0 = bottom_blob_tm.channel(q).row(i); const float* r1 = bottom_blob_tm.channel(q+1).row(i); const float* r2 = bottom_blob_tm.channel(q+2).row(i); const float* r3 = bottom_blob_tm.channel(q+3).row(i); const float* k0 = kernel0_tm.row(q); const float* k1 = kernel0_tm.row(q+1); const float* k2 = kernel0_tm.row(q+2); const float* k3 = kernel0_tm.row(q+3); for (int n=0; n<16; n++) { sum0[n] += r0[n] * k0[n]; sum0[n] += r1[n] * k1[n]; sum0[n] += r2[n] * k2[n]; sum0[n] += r3[n] * k3[n]; } } for (; q<inch; q++) { const float* r0 = bottom_blob_tm.channel(q).row(i); const float* k0 = kernel0_tm.row(q); for (int n=0; n<16; n++) { sum0[n] += r0[n] * k0[n]; } } for (int n=0; n<16; n++) { output0_tm[n] = sum0[n]; } } } } bottom_blob_tm = Mat(); // END dot // BEGIN transform output Mat 
top_blob_bordered; top_blob_bordered.create(outw, outh, outch, 4u, opt.workspace_allocator); { // AT // const float itm[2][4] = { // {1.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 1.0f} // }; int w_tm = outw / 2 * 4; int h_tm = outh / 2 * 4; int nColBlocks = h_tm/4; // may be the block num in Feathercnn int nRowBlocks = w_tm/4; #pragma omp parallel for num_threads(opt.num_threads) for (int p=0; p<outch; p++) { Mat out_tm = top_blob_tm.channel(p); Mat out = top_blob_bordered.channel(p); const float bias0 = bias ? bias[p] : 0.f; for (int j=0; j<nColBlocks; j++) { float* outRow0 = out.row(j*2); float* outRow1 = out.row(j*2+1); for(int i=0; i<nRowBlocks; i++) { float* out_tile = out_tm.row(j*nRowBlocks + i); float s0[4],s1[4],s2[4],s3[4]; float w0[4],w1[4]; float d0[2],d1[2],d2[2],d3[2]; float o0[2],o1[2]; // load for (int n = 0; n < 4; n++) { s0[n] = out_tile[n]; s1[n] = out_tile[n+ 4]; s2[n] = out_tile[n+ 8]; s3[n] = out_tile[n+12]; } // w = A_T * W for (int n = 0; n < 4; n++) { w0[n] = s0[n] + s1[n] + s2[n]; w1[n] = s1[n] - s2[n] + s3[n]; } // transpose w to w_t { d0[0] = w0[0]; d0[1] = w1[0]; d1[0] = w0[1]; d1[1] = w1[1]; d2[0] = w0[2]; d2[1] = w1[2]; d3[0] = w0[3]; d3[1] = w1[3]; } // Y = A_T * w_t for (int n = 0; n < 2; n++) { o0[n] = d0[n] + d1[n] + d2[n] + bias0; o1[n] = d1[n] - d2[n] + d3[n] + bias0; } // save to top blob tm outRow0[0] = o0[0]; outRow0[1] = o0[1]; outRow1[0] = o1[0]; outRow1[1] = o1[1]; outRow0 += 2; outRow1 += 2; } } } } // END transform output // cut result pad copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt.blob_allocator, opt.num_threads); } static void conv3x3s1_winograd43_transform_kernel_sse(const Mat& kernel, std::vector<Mat> &kernel_tm2, int inch, int outch) { Mat kernel_tm(6*6, inch, outch); // G const float ktm[6][3] = { { 1.0f/4, 0.0f, 0.0f}, { -1.0f/6, -1.0f/6, -1.0f/6}, { -1.0f/6, 1.0f/6, -1.0f/6}, { 1.0f/24, 1.0f/12, 1.0f/6}, { 1.0f/24, -1.0f/12, 
1.0f/6}, { 0.0f, 0.0f, 1.0f} }; #pragma omp parallel for for (int p = 0; p<outch; p++) { for (int q = 0; q<inch; q++) { const float* kernel0 = (const float*)kernel + p*inch * 9 + q * 9; float* kernel_tm0 = kernel_tm.channel(p).row(q); // transform kernel const float* k0 = kernel0; const float* k1 = kernel0 + 3; const float* k2 = kernel0 + 6; // h float tmp[6][3]; for (int i=0; i<6; i++) { tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2]; tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2]; tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2]; } // U for (int j=0; j<6; j++) { float* tmpp = &tmp[j][0]; for (int i=0; i<6; i++) { kernel_tm0[j*6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2]; } } } } for (int r=0; r<9; r++) { Mat kernel_tm_test(4*8, inch, outch/8 + (outch%8)/4 + outch%4); int p = 0; for (; p+7<outch; p+=8) { const float* kernel0 = (const float*)kernel_tm.channel(p); const float* kernel1 = (const float*)kernel_tm.channel(p+1); const float* kernel2 = (const float*)kernel_tm.channel(p+2); const float* kernel3 = (const float*)kernel_tm.channel(p+3); const float* kernel4 = (const float*)kernel_tm.channel(p+4); const float* kernel5 = (const float*)kernel_tm.channel(p+5); const float* kernel6 = (const float*)kernel_tm.channel(p+6); const float* kernel7 = (const float*)kernel_tm.channel(p+7); float* ktmp = kernel_tm_test.channel(p/8); for (int q=0; q<inch; q++) { ktmp[0] = kernel0[r*4+0]; ktmp[1] = kernel0[r*4+1]; ktmp[2] = kernel0[r*4+2]; ktmp[3] = kernel0[r*4+3]; ktmp[4] = kernel1[r*4+0]; ktmp[5] = kernel1[r*4+1]; ktmp[6] = kernel1[r*4+2]; ktmp[7] = kernel1[r*4+3]; ktmp[8] = kernel2[r*4+0]; ktmp[9] = kernel2[r*4+1]; ktmp[10] = kernel2[r*4+2]; ktmp[11] = kernel2[r*4+3]; ktmp[12] = kernel3[r*4+0]; ktmp[13] = kernel3[r*4+1]; ktmp[14] = kernel3[r*4+2]; ktmp[15] = kernel3[r*4+3]; ktmp[16] = kernel4[r*4+0]; ktmp[17] = kernel4[r*4+1]; ktmp[18] = kernel4[r*4+2]; ktmp[19] = kernel4[r*4+3]; 
ktmp[20] = kernel5[r*4+0]; ktmp[21] = kernel5[r*4+1]; ktmp[22] = kernel5[r*4+2]; ktmp[23] = kernel5[r*4+3]; ktmp[24] = kernel6[r*4+0]; ktmp[25] = kernel6[r*4+1]; ktmp[26] = kernel6[r*4+2]; ktmp[27] = kernel6[r*4+3]; ktmp[28] = kernel7[r*4+0]; ktmp[29] = kernel7[r*4+1]; ktmp[30] = kernel7[r*4+2]; ktmp[31] = kernel7[r*4+3]; ktmp += 32; kernel0 += 36; kernel1 += 36; kernel2 += 36; kernel3 += 36; kernel4 += 36; kernel5 += 36; kernel6 += 36; kernel7 += 36; } } for (; p+3<outch; p+=4) { const float* kernel0 = (const float*)kernel_tm.channel(p); const float* kernel1 = (const float*)kernel_tm.channel(p+1); const float* kernel2 = (const float*)kernel_tm.channel(p+2); const float* kernel3 = (const float*)kernel_tm.channel(p+3); float* ktmp = kernel_tm_test.channel(p/8 + (p%8)/4); for (int q=0; q<inch; q++) { ktmp[0] = kernel0[r*4+0]; ktmp[1] = kernel0[r*4+1]; ktmp[2] = kernel0[r*4+2]; ktmp[3] = kernel0[r*4+3]; ktmp[4] = kernel1[r*4+0]; ktmp[5] = kernel1[r*4+1]; ktmp[6] = kernel1[r*4+2]; ktmp[7] = kernel1[r*4+3]; ktmp[8] = kernel2[r*4+0]; ktmp[9] = kernel2[r*4+1]; ktmp[10] = kernel2[r*4+2]; ktmp[11] = kernel2[r*4+3]; ktmp[12] = kernel3[r*4+0]; ktmp[13] = kernel3[r*4+1]; ktmp[14] = kernel3[r*4+2]; ktmp[15] = kernel3[r*4+3]; ktmp += 16; kernel0 += 36; kernel1 += 36; kernel2 += 36; kernel3 += 36; } } for (; p<outch; p++) { const float* kernel0 = (const float*)kernel_tm.channel(p); float* ktmp = kernel_tm_test.channel(p/8 + (p%8)/4 + p%4); for (int q=0; q<inch; q++) { ktmp[0] = kernel0[r*4+0]; ktmp[1] = kernel0[r*4+1]; ktmp[2] = kernel0[r*4+2]; ktmp[3] = kernel0[r*4+3]; ktmp += 4; kernel0 += 36; } } kernel_tm2.push_back(kernel_tm_test); } } static void conv3x3s1_winograd43_sse(const Mat& bottom_blob, Mat& top_blob, const std::vector<Mat> &kernel_tm_test, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; size_t elemsize = bottom_blob.elemsize; const 
float* bias = _bias; // pad to 4n+2, winograd F(4,3) Mat bottom_blob_bordered = bottom_blob; outw = (outw + 3) / 4 * 4; outh = (outh + 3) / 4 * 4; w = outw + 2; h = outh + 2; copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f, opt.workspace_allocator, opt.num_threads); // BEGIN transform input Mat bottom_blob_tm; { int w_tm = outw / 4 * 6; int h_tm = outh / 4 * 6; int nColBlocks = h_tm/6; // may be the block num in Feathercnn int nRowBlocks = w_tm/6; const int tiles = nColBlocks * nRowBlocks; bottom_blob_tm.create(4, inch, tiles*9, elemsize, opt.workspace_allocator); // BT // const float itm[4][4] = { // {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f}, // {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f}, // {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f}, // {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f}, // {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f} // }; // 0 = 4 * r00 - 5 * r02 + r04 // 1 = -4 * (r01 + r02) + r03 + r04 // 2 = 4 * (r01 - r02) - r03 + r04 // 3 = -2 * r01 - r02 + 2 * r03 + r04 // 4 = 2 * r01 - r02 - 2 * r03 + r04 // 5 = 4 * r01 - 5 * r03 + r05 // 0 = 4 * r00 - 5 * r02 + r04 // 1 = -4 * (r01 + r02) + r03 + r04 // 2 = 4 * (r01 - r02) - r03 + r04 // 3 = -2 * r01 - r02 + 2 * r03 + r04 // 4 = 2 * r01 - r02 - 2 * r03 + r04 // 5 = 4 * r01 - 5 * r03 + r05 #if __AVX__ __m256 _1_n = _mm256_set1_ps(-1); __m256 _2_p = _mm256_set1_ps(2); __m256 _2_n = _mm256_set1_ps(-2); __m256 _4_p = _mm256_set1_ps(4); __m256 _4_n = _mm256_set1_ps(-4); __m256 _5_n = _mm256_set1_ps(-5); #endif #pragma omp parallel for num_threads(opt.num_threads) for (int q=0; q<inch; q++) { const float* img = bottom_blob_bordered.channel(q); for (int j = 0; j < nColBlocks; j++) { const float* r0 = img + w * j * 4; const float* r1 = r0 + w; const float* r2 = r1 + w; const float* r3 = r2 + w; const float* r4 = r3 + w; const float* r5 = r4 + w; for (int i = 0; i < nRowBlocks; i++) { float* out_tm0 = 
bottom_blob_tm.channel(tiles*0+j*nRowBlocks+i).row(q); float* out_tm1 = bottom_blob_tm.channel(tiles*1+j*nRowBlocks+i).row(q); float* out_tm2 = bottom_blob_tm.channel(tiles*2+j*nRowBlocks+i).row(q); float* out_tm3 = bottom_blob_tm.channel(tiles*3+j*nRowBlocks+i).row(q); float* out_tm4 = bottom_blob_tm.channel(tiles*4+j*nRowBlocks+i).row(q); float* out_tm5 = bottom_blob_tm.channel(tiles*5+j*nRowBlocks+i).row(q); float* out_tm6 = bottom_blob_tm.channel(tiles*6+j*nRowBlocks+i).row(q); float* out_tm7 = bottom_blob_tm.channel(tiles*7+j*nRowBlocks+i).row(q); float* out_tm8 = bottom_blob_tm.channel(tiles*8+j*nRowBlocks+i).row(q); #if __AVX__ __m256 _d0, _d1, _d2, _d3, _d4, _d5; __m256 _w0, _w1, _w2, _w3, _w4, _w5; __m256 _t0, _t1, _t2, _t3, _t4, _t5; __m256 _n0, _n1, _n2, _n3, _n4, _n5; // load _d0 = _mm256_loadu_ps(r0); _d1 = _mm256_loadu_ps(r1); _d2 = _mm256_loadu_ps(r2); _d3 = _mm256_loadu_ps(r3); _d4 = _mm256_loadu_ps(r4); _d5 = _mm256_loadu_ps(r5); // w = B_t * d _w0 = _mm256_mul_ps(_d0, _4_p); _w0 = _mm256_fmadd_ps(_d2, _5_n, _w0); _w0 = _mm256_add_ps(_w0, _d4); _w1 = _mm256_mul_ps(_d1, _4_n); _w1 = _mm256_fmadd_ps(_d2, _4_n, _w1); _w1 = _mm256_add_ps(_w1, _d3); _w1 = _mm256_add_ps(_w1, _d4); _w2 = _mm256_mul_ps(_d1, _4_p); _w2 = _mm256_fmadd_ps(_d2, _4_n, _w2); _w2 = _mm256_fmadd_ps(_d3, _1_n, _w2); _w2 = _mm256_add_ps(_w2, _d4); _w3 = _mm256_mul_ps(_d1, _2_n); _w3 = _mm256_fmadd_ps(_d2, _1_n, _w3); _w3 = _mm256_fmadd_ps(_d3, _2_p, _w3); _w3 = _mm256_add_ps(_w3, _d4); _w4 = _mm256_mul_ps(_d1, _2_p); _w4 = _mm256_fmadd_ps(_d2, _1_n, _w4); _w4 = _mm256_fmadd_ps(_d3, _2_n, _w4); _w4 = _mm256_add_ps(_w4, _d4); _w5 = _mm256_mul_ps(_d1, _4_p); _w5 = _mm256_fmadd_ps(_d3, _5_n, _w5); _w5 = _mm256_add_ps(_w5, _d5); // transpose d to d_t #ifdef _WIN32 { _t0.m256_f32[0]=_w0.m256_f32[0]; _t1.m256_f32[0]=_w0.m256_f32[1]; _t2.m256_f32[0]=_w0.m256_f32[2]; _t3.m256_f32[0]=_w0.m256_f32[3]; _t4.m256_f32[0]=_w0.m256_f32[4]; _t5.m256_f32[0]=_w0.m256_f32[5]; 
_t0.m256_f32[1]=_w1.m256_f32[0]; _t1.m256_f32[1]=_w1.m256_f32[1]; _t2.m256_f32[1]=_w1.m256_f32[2]; _t3.m256_f32[1]=_w1.m256_f32[3]; _t4.m256_f32[1]=_w1.m256_f32[4]; _t5.m256_f32[1]=_w1.m256_f32[5]; _t0.m256_f32[2]=_w2.m256_f32[0]; _t1.m256_f32[2]=_w2.m256_f32[1]; _t2.m256_f32[2]=_w2.m256_f32[2]; _t3.m256_f32[2]=_w2.m256_f32[3]; _t4.m256_f32[2]=_w2.m256_f32[4]; _t5.m256_f32[2]=_w2.m256_f32[5]; _t0.m256_f32[3]=_w3.m256_f32[0]; _t1.m256_f32[3]=_w3.m256_f32[1]; _t2.m256_f32[3]=_w3.m256_f32[2]; _t3.m256_f32[3]=_w3.m256_f32[3]; _t4.m256_f32[3]=_w3.m256_f32[4]; _t5.m256_f32[3]=_w3.m256_f32[5]; _t0.m256_f32[4]=_w4.m256_f32[0]; _t1.m256_f32[4]=_w4.m256_f32[1]; _t2.m256_f32[4]=_w4.m256_f32[2]; _t3.m256_f32[4]=_w4.m256_f32[3]; _t4.m256_f32[4]=_w4.m256_f32[4]; _t5.m256_f32[4]=_w4.m256_f32[5]; _t0.m256_f32[5]=_w5.m256_f32[0]; _t1.m256_f32[5]=_w5.m256_f32[1]; _t2.m256_f32[5]=_w5.m256_f32[2]; _t3.m256_f32[5]=_w5.m256_f32[3]; _t4.m256_f32[5]=_w5.m256_f32[4]; _t5.m256_f32[5]=_w5.m256_f32[5]; } #else { _t0[0]=_w0[0]; _t1[0]=_w0[1]; _t2[0]=_w0[2]; _t3[0]=_w0[3]; _t4[0]=_w0[4]; _t5[0]=_w0[5]; _t0[1]=_w1[0]; _t1[1]=_w1[1]; _t2[1]=_w1[2]; _t3[1]=_w1[3]; _t4[1]=_w1[4]; _t5[1]=_w1[5]; _t0[2]=_w2[0]; _t1[2]=_w2[1]; _t2[2]=_w2[2]; _t3[2]=_w2[3]; _t4[2]=_w2[4]; _t5[2]=_w2[5]; _t0[3]=_w3[0]; _t1[3]=_w3[1]; _t2[3]=_w3[2]; _t3[3]=_w3[3]; _t4[3]=_w3[4]; _t5[3]=_w3[5]; _t0[4]=_w4[0]; _t1[4]=_w4[1]; _t2[4]=_w4[2]; _t3[4]=_w4[3]; _t4[4]=_w4[4]; _t5[4]=_w4[5]; _t0[5]=_w5[0]; _t1[5]=_w5[1]; _t2[5]=_w5[2]; _t3[5]=_w5[3]; _t4[5]=_w5[4]; _t5[5]=_w5[5]; } #endif // d = B_t * d_t _n0 = _mm256_mul_ps(_t0, _4_p); _n0 = _mm256_fmadd_ps(_t2, _5_n, _n0); _n0 = _mm256_add_ps(_n0, _t4); _n1 = _mm256_mul_ps(_t1, _4_n); _n1 = _mm256_fmadd_ps(_t2, _4_n, _n1); _n1 = _mm256_add_ps(_n1, _t3); _n1 = _mm256_add_ps(_n1, _t4); _n2 = _mm256_mul_ps(_t1, _4_p); _n2 = _mm256_fmadd_ps(_t2, _4_n, _n2); _n2 = _mm256_fmadd_ps(_t3, _1_n, _n2); _n2 = _mm256_add_ps(_n2, _t4); _n3 = _mm256_mul_ps(_t1, _2_n); _n3 = 
_mm256_fmadd_ps(_t2, _1_n, _n3); _n3 = _mm256_fmadd_ps(_t3, _2_p, _n3); _n3 = _mm256_add_ps(_n3, _t4); _n4 = _mm256_mul_ps(_t1, _2_p); _n4 = _mm256_fmadd_ps(_t2, _1_n, _n4); _n4 = _mm256_fmadd_ps(_t3, _2_n, _n4); _n4 = _mm256_add_ps(_n4, _t4); _n5 = _mm256_mul_ps(_t1, _4_p); _n5 = _mm256_fmadd_ps(_t3, _5_n, _n5); _n5 = _mm256_add_ps(_n5, _t5); // save to out_tm float output_n0[8] = {0.f};_mm256_storeu_ps(output_n0, _n0); float output_n1[8] = {0.f};_mm256_storeu_ps(output_n1, _n1); float output_n2[8] = {0.f};_mm256_storeu_ps(output_n2, _n2); float output_n3[8] = {0.f};_mm256_storeu_ps(output_n3, _n3); float output_n4[8] = {0.f};_mm256_storeu_ps(output_n4, _n4); float output_n5[8] = {0.f};_mm256_storeu_ps(output_n5, _n5); out_tm0[0]=output_n0[0];out_tm0[1]=output_n0[1];out_tm0[2]=output_n0[2];out_tm0[3]=output_n0[3]; out_tm1[0]=output_n0[4];out_tm1[1]=output_n0[5];out_tm1[2]=output_n1[0];out_tm1[3]=output_n1[1]; out_tm2[0]=output_n1[2];out_tm2[1]=output_n1[3];out_tm2[2]=output_n1[4];out_tm2[3]=output_n1[5]; out_tm3[0]=output_n2[0];out_tm3[1]=output_n2[1];out_tm3[2]=output_n2[2];out_tm3[3]=output_n2[3]; out_tm4[0]=output_n2[4];out_tm4[1]=output_n2[5];out_tm4[2]=output_n3[0];out_tm4[3]=output_n3[1]; out_tm5[0]=output_n3[2];out_tm5[1]=output_n3[3];out_tm5[2]=output_n3[4];out_tm5[3]=output_n3[5]; out_tm6[0]=output_n4[0];out_tm6[1]=output_n4[1];out_tm6[2]=output_n4[2];out_tm6[3]=output_n4[3]; out_tm7[0]=output_n4[4];out_tm7[1]=output_n4[5];out_tm7[2]=output_n5[0];out_tm7[3]=output_n5[1]; out_tm8[0]=output_n5[2];out_tm8[1]=output_n5[3];out_tm8[2]=output_n5[4];out_tm8[3]=output_n5[5]; #else float d0[6],d1[6],d2[6],d3[6],d4[6],d5[6]; float w0[6],w1[6],w2[6],w3[6],w4[6],w5[6]; float t0[6],t1[6],t2[6],t3[6],t4[6],t5[6]; // load for (int n = 0; n < 6; n++) { d0[n] = r0[n]; d1[n] = r1[n]; d2[n] = r2[n]; d3[n] = r3[n]; d4[n] = r4[n]; d5[n] = r5[n]; } // w = B_t * d for (int n = 0; n < 6; n++) { w0[n] = 4*d0[n] - 5*d2[n] + d4[n]; w1[n] = -4*d1[n] - 4*d2[n] + d3[n] + d4[n]; w2[n] = 
4*d1[n] - 4*d2[n] - d3[n] + d4[n]; w3[n] = -2*d1[n] - d2[n] + 2*d3[n] + d4[n]; w4[n] = 2*d1[n] - d2[n] - 2*d3[n] + d4[n]; w5[n] = 4*d1[n] - 5*d3[n] + d5[n]; } // transpose d to d_t { t0[0]=w0[0]; t1[0]=w0[1]; t2[0]=w0[2]; t3[0]=w0[3]; t4[0]=w0[4]; t5[0]=w0[5]; t0[1]=w1[0]; t1[1]=w1[1]; t2[1]=w1[2]; t3[1]=w1[3]; t4[1]=w1[4]; t5[1]=w1[5]; t0[2]=w2[0]; t1[2]=w2[1]; t2[2]=w2[2]; t3[2]=w2[3]; t4[2]=w2[4]; t5[2]=w2[5]; t0[3]=w3[0]; t1[3]=w3[1]; t2[3]=w3[2]; t3[3]=w3[3]; t4[3]=w3[4]; t5[3]=w3[5]; t0[4]=w4[0]; t1[4]=w4[1]; t2[4]=w4[2]; t3[4]=w4[3]; t4[4]=w4[4]; t5[4]=w4[5]; t0[5]=w5[0]; t1[5]=w5[1]; t2[5]=w5[2]; t3[5]=w5[3]; t4[5]=w5[4]; t5[5]=w5[5]; } // d = B_t * d_t for (int n = 0; n < 6; n++) { d0[n] = 4*t0[n] - 5*t2[n] + t4[n]; d1[n] = - 4*t1[n] - 4*t2[n] + t3[n] + t4[n]; d2[n] = 4*t1[n] - 4*t2[n] - t3[n] + t4[n]; d3[n] = - 2*t1[n] - t2[n] + 2*t3[n] + t4[n]; d4[n] = 2*t1[n] - t2[n] - 2*t3[n] + t4[n]; d5[n] = 4*t1[n] - 5*t3[n] + t5[n]; } // save to out_tm { out_tm0[0]=d0[0];out_tm0[1]=d0[1];out_tm0[2]=d0[2];out_tm0[3]=d0[3]; out_tm1[0]=d0[4];out_tm1[1]=d0[5];out_tm1[2]=d1[0];out_tm1[3]=d1[1]; out_tm2[0]=d1[2];out_tm2[1]=d1[3];out_tm2[2]=d1[4];out_tm2[3]=d1[5]; out_tm3[0]=d2[0];out_tm3[1]=d2[1];out_tm3[2]=d2[2];out_tm3[3]=d2[3]; out_tm4[0]=d2[4];out_tm4[1]=d2[5];out_tm4[2]=d3[0];out_tm4[3]=d3[1]; out_tm5[0]=d3[2];out_tm5[1]=d3[3];out_tm5[2]=d3[4];out_tm5[3]=d3[5]; out_tm6[0]=d4[0];out_tm6[1]=d4[1];out_tm6[2]=d4[2];out_tm6[3]=d4[3]; out_tm7[0]=d4[4];out_tm7[1]=d4[5];out_tm7[2]=d5[0];out_tm7[3]=d5[1]; out_tm8[0]=d5[2];out_tm8[1]=d5[3];out_tm8[2]=d5[4];out_tm8[3]=d5[5]; } #endif // __AVX__ r0 += 4; r1 += 4; r2 += 4; r3 += 4; r4 += 4; r5 += 4; } } } } bottom_blob_bordered = Mat(); // BEGIN dot Mat top_blob_tm; { int w_tm = outw / 4 * 6; int h_tm = outh / 4 * 6; int nColBlocks = h_tm/6; // may be the block num in Feathercnn int nRowBlocks = w_tm/6; const int tiles = nColBlocks * nRowBlocks; top_blob_tm.create(36, tiles, outch, elemsize, opt.workspace_allocator); #pragma omp 
parallel for num_threads(opt.num_threads) for (int r=0; r<9; r++) { int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 3; remain_outch_start = nn_outch << 3; for (int pp=0; pp<nn_outch; pp++) { int p = pp * 8; float* output0_tm = top_blob_tm.channel(p); float* output1_tm = top_blob_tm.channel(p+1); float* output2_tm = top_blob_tm.channel(p+2); float* output3_tm = top_blob_tm.channel(p+3); float* output4_tm = top_blob_tm.channel(p+4); float* output5_tm = top_blob_tm.channel(p+5); float* output6_tm = top_blob_tm.channel(p+6); float* output7_tm = top_blob_tm.channel(p+7); output0_tm = output0_tm + r*4; output1_tm = output1_tm + r*4; output2_tm = output2_tm + r*4; output3_tm = output3_tm + r*4; output4_tm = output4_tm + r*4; output5_tm = output5_tm + r*4; output6_tm = output6_tm + r*4; output7_tm = output7_tm + r*4; for (int i=0; i<tiles; i++) { const float* kptr = kernel_tm_test[r].channel(p/8); const float* r0 = bottom_blob_tm.channel(tiles*r+i); #if __AVX__ || __SSE__ #if __AVX__ float zero_val = 0.f; __m128 _sum0 = _mm_broadcast_ss(&zero_val); __m128 _sum1 = _mm_broadcast_ss(&zero_val); __m128 _sum2 = _mm_broadcast_ss(&zero_val); __m128 _sum3 = _mm_broadcast_ss(&zero_val); __m128 _sum4 = _mm_broadcast_ss(&zero_val); __m128 _sum5 = _mm_broadcast_ss(&zero_val); __m128 _sum6 = _mm_broadcast_ss(&zero_val); __m128 _sum7 = _mm_broadcast_ss(&zero_val); #else __m128 _sum0 = _mm_set1_ps(0.f); __m128 _sum1 = _mm_set1_ps(0.f); __m128 _sum2 = _mm_set1_ps(0.f); __m128 _sum3 = _mm_set1_ps(0.f); __m128 _sum4 = _mm_set1_ps(0.f); __m128 _sum5 = _mm_set1_ps(0.f); __m128 _sum6 = _mm_set1_ps(0.f); __m128 _sum7 = _mm_set1_ps(0.f); #endif int q=0; for (; q+3<inch; q=q+4) { __m128 _r0 = _mm_loadu_ps(r0); __m128 _r1 = _mm_loadu_ps(r0+4); __m128 _r2 = _mm_loadu_ps(r0+8); __m128 _r3 = _mm_loadu_ps(r0+12); __m128 _k0 = _mm_loadu_ps(kptr); __m128 _k1 = _mm_loadu_ps(kptr+4); __m128 _k2 = _mm_loadu_ps(kptr+8); __m128 _k3 = _mm_loadu_ps(kptr+12); __m128 _k4 = 
_mm_loadu_ps(kptr+16); __m128 _k5 = _mm_loadu_ps(kptr+20); __m128 _k6 = _mm_loadu_ps(kptr+24); __m128 _k7 = _mm_loadu_ps(kptr+28); #if __AVX__ _sum0 = _mm_fmadd_ps(_r0, _k0, _sum0); _sum1 = _mm_fmadd_ps(_r0, _k1, _sum1); _sum2 = _mm_fmadd_ps(_r0, _k2, _sum2); _sum3 = _mm_fmadd_ps(_r0, _k3, _sum3); _sum4 = _mm_fmadd_ps(_r0, _k4, _sum4); _sum5 = _mm_fmadd_ps(_r0, _k5, _sum5); _sum6 = _mm_fmadd_ps(_r0, _k6, _sum6); _sum7 = _mm_fmadd_ps(_r0, _k7, _sum7); #else _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r0, _k0)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r0, _k1)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r0, _k2)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r0, _k3)); _sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r0, _k4)); _sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r0, _k5)); _sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r0, _k6)); _sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r0, _k7)); #endif kptr += 32; _k0 = _mm_loadu_ps(kptr); _k1 = _mm_loadu_ps(kptr+4); _k2 = _mm_loadu_ps(kptr+8); _k3 = _mm_loadu_ps(kptr+12); _k4 = _mm_loadu_ps(kptr+16); _k5 = _mm_loadu_ps(kptr+20); _k6 = _mm_loadu_ps(kptr+24); _k7 = _mm_loadu_ps(kptr+28); #if __AVX__ _sum0 = _mm_fmadd_ps(_r1, _k0, _sum0); _sum1 = _mm_fmadd_ps(_r1, _k1, _sum1); _sum2 = _mm_fmadd_ps(_r1, _k2, _sum2); _sum3 = _mm_fmadd_ps(_r1, _k3, _sum3); _sum4 = _mm_fmadd_ps(_r1, _k4, _sum4); _sum5 = _mm_fmadd_ps(_r1, _k5, _sum5); _sum6 = _mm_fmadd_ps(_r1, _k6, _sum6); _sum7 = _mm_fmadd_ps(_r1, _k7, _sum7); #else _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r1, _k0)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r1, _k1)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r1, _k2)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r1, _k3)); _sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r1, _k4)); _sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r1, _k5)); _sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r1, _k6)); _sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r1, _k7)); #endif kptr += 32; _k0 = _mm_loadu_ps(kptr); _k1 = _mm_loadu_ps(kptr+4); _k2 = _mm_loadu_ps(kptr+8); _k3 = _mm_loadu_ps(kptr+12); _k4 = _mm_loadu_ps(kptr+16); _k5 = 
_mm_loadu_ps(kptr+20); _k6 = _mm_loadu_ps(kptr+24); _k7 = _mm_loadu_ps(kptr+28); #if __AVX__ _sum0 = _mm_fmadd_ps(_r2, _k0, _sum0); _sum1 = _mm_fmadd_ps(_r2, _k1, _sum1); _sum2 = _mm_fmadd_ps(_r2, _k2, _sum2); _sum3 = _mm_fmadd_ps(_r2, _k3, _sum3); _sum4 = _mm_fmadd_ps(_r2, _k4, _sum4); _sum5 = _mm_fmadd_ps(_r2, _k5, _sum5); _sum6 = _mm_fmadd_ps(_r2, _k6, _sum6); _sum7 = _mm_fmadd_ps(_r2, _k7, _sum7); #else _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r2, _k0)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r2, _k1)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r2, _k2)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r2, _k3)); _sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r2, _k4)); _sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r2, _k5)); _sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r2, _k6)); _sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r2, _k7)); #endif kptr += 32; _k0 = _mm_loadu_ps(kptr); _k1 = _mm_loadu_ps(kptr+4); _k2 = _mm_loadu_ps(kptr+8); _k3 = _mm_loadu_ps(kptr+12); _k4 = _mm_loadu_ps(kptr+16); _k5 = _mm_loadu_ps(kptr+20); _k6 = _mm_loadu_ps(kptr+24); _k7 = _mm_loadu_ps(kptr+28); #if __AVX__ _sum0 = _mm_fmadd_ps(_r3, _k0, _sum0); _sum1 = _mm_fmadd_ps(_r3, _k1, _sum1); _sum2 = _mm_fmadd_ps(_r3, _k2, _sum2); _sum3 = _mm_fmadd_ps(_r3, _k3, _sum3); _sum4 = _mm_fmadd_ps(_r3, _k4, _sum4); _sum5 = _mm_fmadd_ps(_r3, _k5, _sum5); _sum6 = _mm_fmadd_ps(_r3, _k6, _sum6); _sum7 = _mm_fmadd_ps(_r3, _k7, _sum7); #else _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r3, _k0)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r3, _k1)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r3, _k2)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r3, _k3)); _sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r3, _k4)); _sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r3, _k5)); _sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r3, _k6)); _sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r3, _k7)); #endif kptr += 32; r0 += 16; } for (; q<inch; q++) { __m128 _r0 = _mm_loadu_ps(r0); __m128 _k0 = _mm_loadu_ps(kptr); __m128 _k1 = _mm_loadu_ps(kptr+4); __m128 _k2 = _mm_loadu_ps(kptr+8); __m128 _k3 = _mm_loadu_ps(kptr+12); 
__m128 _k4 = _mm_loadu_ps(kptr+16); __m128 _k5 = _mm_loadu_ps(kptr+20); __m128 _k6 = _mm_loadu_ps(kptr+24); __m128 _k7 = _mm_loadu_ps(kptr+28); #if __AVX__ _sum0 = _mm_fmadd_ps(_r0, _k0, _sum0); _sum1 = _mm_fmadd_ps(_r0, _k1, _sum1); _sum2 = _mm_fmadd_ps(_r0, _k2, _sum2); _sum3 = _mm_fmadd_ps(_r0, _k3, _sum3); _sum4 = _mm_fmadd_ps(_r0, _k4, _sum4); _sum5 = _mm_fmadd_ps(_r0, _k5, _sum5); _sum6 = _mm_fmadd_ps(_r0, _k6, _sum6); _sum7 = _mm_fmadd_ps(_r0, _k7, _sum7); #else _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r0, _k0)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r0, _k1)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r0, _k2)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r0, _k3)); _sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r0, _k4)); _sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r0, _k5)); _sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r0, _k6)); _sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r0, _k7)); #endif kptr += 32; r0 += 4; } _mm_storeu_ps(output0_tm, _sum0); _mm_storeu_ps(output1_tm, _sum1); _mm_storeu_ps(output2_tm, _sum2); _mm_storeu_ps(output3_tm, _sum3); _mm_storeu_ps(output4_tm, _sum4); _mm_storeu_ps(output5_tm, _sum5); _mm_storeu_ps(output6_tm, _sum6); _mm_storeu_ps(output7_tm, _sum7); #else float sum0[4] = {0}; float sum1[4] = {0}; float sum2[4] = {0}; float sum3[4] = {0}; float sum4[4] = {0}; float sum5[4] = {0}; float sum6[4] = {0}; float sum7[4] = {0}; for (int q=0; q<inch; q++) { for (int n=0; n<4; n++) { sum0[n] += r0[n] * kptr[n]; sum1[n] += r0[n] * kptr[n+4]; sum2[n] += r0[n] * kptr[n+8]; sum3[n] += r0[n] * kptr[n+12]; sum4[n] += r0[n] * kptr[n+16]; sum5[n] += r0[n] * kptr[n+20]; sum6[n] += r0[n] * kptr[n+24]; sum7[n] += r0[n] * kptr[n+28]; } kptr += 32; r0 += 4; } for (int n=0; n<4; n++) { output0_tm[n] = sum0[n]; output1_tm[n] = sum1[n]; output2_tm[n] = sum2[n]; output3_tm[n] = sum3[n]; output4_tm[n] = sum4[n]; output5_tm[n] = sum5[n]; output6_tm[n] = sum6[n]; output7_tm[n] = sum7[n]; } #endif // __AVX__ output0_tm += 36; output1_tm += 36; output2_tm += 36; output3_tm += 36; output4_tm += 
36; output5_tm += 36; output6_tm += 36; output7_tm += 36; } } nn_outch = (outch - remain_outch_start) >> 2; for (int pp=0; pp<nn_outch; pp++) { int p = remain_outch_start + pp * 4; float* output0_tm = top_blob_tm.channel(p); float* output1_tm = top_blob_tm.channel(p+1); float* output2_tm = top_blob_tm.channel(p+2); float* output3_tm = top_blob_tm.channel(p+3); output0_tm = output0_tm + r*4; output1_tm = output1_tm + r*4; output2_tm = output2_tm + r*4; output3_tm = output3_tm + r*4; for (int i=0; i<tiles; i++) { const float* kptr = kernel_tm_test[r].channel(p/8 + (p%8)/4); const float* r0 = bottom_blob_tm.channel(tiles*r+i); #if __AVX__ || __SSE__ #if __AVX__ float zero_val = 0.f; __m128 _sum0 = _mm_broadcast_ss(&zero_val); __m128 _sum1 = _mm_broadcast_ss(&zero_val); __m128 _sum2 = _mm_broadcast_ss(&zero_val); __m128 _sum3 = _mm_broadcast_ss(&zero_val); #else __m128 _sum0 = _mm_set1_ps(0.f); __m128 _sum1 = _mm_set1_ps(0.f); __m128 _sum2 = _mm_set1_ps(0.f); __m128 _sum3 = _mm_set1_ps(0.f); #endif for (int q=0; q<inch; q++) { __m128 _r0 = _mm_loadu_ps(r0); __m128 _k0 = _mm_loadu_ps(kptr); __m128 _k1 = _mm_loadu_ps(kptr+4); __m128 _k2 = _mm_loadu_ps(kptr+8); __m128 _k3 = _mm_loadu_ps(kptr+12); #if __AVX__ _sum0 = _mm_fmadd_ps(_r0, _k0, _sum0); _sum1 = _mm_fmadd_ps(_r0, _k1, _sum1); _sum2 = _mm_fmadd_ps(_r0, _k2, _sum2); _sum3 = _mm_fmadd_ps(_r0, _k3, _sum3); #else _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r0, _k0)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r0, _k1)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r0, _k2)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r0, _k3)); #endif kptr += 16; r0 += 4; } _mm_storeu_ps(output0_tm, _sum0); _mm_storeu_ps(output1_tm, _sum1); _mm_storeu_ps(output2_tm, _sum2); _mm_storeu_ps(output3_tm, _sum3); #else float sum0[4] = {0}; float sum1[4] = {0}; float sum2[4] = {0}; float sum3[4] = {0}; for (int q=0; q<inch; q++) { for (int n=0; n<4; n++) { sum0[n] += r0[n] * kptr[n]; sum1[n] += r0[n] * kptr[n+4]; sum2[n] += r0[n] * kptr[n+8]; sum3[n] += r0[n] * 
kptr[n+12]; } kptr += 16; r0 += 4; } for (int n=0; n<4; n++) { output0_tm[n] = sum0[n]; output1_tm[n] = sum1[n]; output2_tm[n] = sum2[n]; output3_tm[n] = sum3[n]; } #endif // __AVX__ output0_tm += 36; output1_tm += 36; output2_tm += 36; output3_tm += 36; } } remain_outch_start += nn_outch << 2; for (int p=remain_outch_start; p<outch; p++) { float* output0_tm = top_blob_tm.channel(p); output0_tm = output0_tm + r*4; for (int i=0; i<tiles; i++) { const float* kptr = kernel_tm_test[r].channel(p/8 + (p%8)/4 + p%4); const float* r0 = bottom_blob_tm.channel(tiles*r+i); #if __AVX__ || __SSE__ #if __AVX__ float zero_val = 0.f; __m128 _sum0 = _mm_broadcast_ss(&zero_val); #else __m128 _sum0 = _mm_set1_ps(0.f); #endif for (int q=0; q<inch; q++) { __m128 _r0 = _mm_loadu_ps(r0); __m128 _k0 = _mm_loadu_ps(kptr); #if __AVX__ _sum0 = _mm_fmadd_ps(_r0, _k0, _sum0); #else _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r0, _k0)); #endif kptr += 16; r0 += 4; } _mm_storeu_ps(output0_tm, _sum0); #else float sum0[4] = {0}; for (int q=0; q<inch; q++) { for (int n=0; n<4; n++) { sum0[n] += (int)r0[n] * kptr[n]; } kptr += 4; r0 += 4; } for (int n=0; n<4; n++) { output0_tm[n] = sum0[n]; } #endif // __AVX__ || __SSE__ output0_tm += 36; } } // for (int p=0; p<outch; p++) // { // Mat out0_tm = top_blob_tm.channel(p); // const Mat kernel0_tm = kernel_tm.channel(p); // for (int i=0; i<tiles; i++) // { // float* output0_tm = out0_tm.row<int>(i); // int sum0[36] = {0}; // for (int q=0; q<inch; q++) // { // const float* r0 = bottom_blob_tm.channel(q).row<float>(i); // const float* k0 = kernel0_tm.row<float>(q); // for (int n=0; n<36; n++) // { // sum0[n] += (int)r0[n] * k0[n]; // } // } // for (int n=0; n<36; n++) // { // output0_tm[n] = sum0[n]; // } // } // } } } bottom_blob_tm = Mat(); // END dot // BEGIN transform output Mat top_blob_bordered; top_blob_bordered.create(outw, outh, outch, elemsize, opt.workspace_allocator); { // AT // const float itm[4][6] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f}, // 
{0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f} // }; // 0 = r00 + r01 + r02 + r03 + r04 // 1 = r01 - r02 + 2 * (r03 - r04) // 2 = r01 + r02 + 4 * (r03 + r04) // 3 = r01 - r02 + 8 * (r03 - r04) + r05 int w_tm = outw / 4 * 6; int h_tm = outh / 4 * 6; int nColBlocks = h_tm/6; // may be the block num in Feathercnn int nRowBlocks = w_tm/6; #pragma omp parallel for num_threads(opt.num_threads) for (int p=0; p<outch; p++) { float* out_tile = top_blob_tm.channel(p); float* outRow0 = top_blob_bordered.channel(p); float* outRow1 = outRow0 + outw; float* outRow2 = outRow0 + outw * 2; float* outRow3 = outRow0 + outw * 3; const float bias0 = bias ? bias[p] : 0.f; for (int j=0; j<nColBlocks; j++) { for(int i=0; i<nRowBlocks; i++) { // TODO AVX2 float s0[6],s1[6],s2[6],s3[6],s4[6],s5[6]; float w0[6],w1[6],w2[6],w3[6]; float d0[4],d1[4],d2[4],d3[4],d4[4],d5[4]; float o0[4],o1[4],o2[4],o3[4]; // load for (int n = 0; n < 6; n++) { s0[n] = out_tile[n]; s1[n] = out_tile[n+ 6]; s2[n] = out_tile[n+12]; s3[n] = out_tile[n+18]; s4[n] = out_tile[n+24]; s5[n] = out_tile[n+30]; } // w = A_T * W for (int n = 0; n < 6; n++) { w0[n] = s0[n] + s1[n] + s2[n] + s3[n] + s4[n]; w1[n] = s1[n] - s2[n] + 2*s3[n] - 2*s4[n]; w2[n] = s1[n] + s2[n] + 4*s3[n] + 4*s4[n]; w3[n] = s1[n] - s2[n] + 8*s3[n] - 8*s4[n] + s5[n]; } // transpose w to w_t { d0[0] = w0[0]; d0[1] = w1[0]; d0[2] = w2[0]; d0[3] = w3[0]; d1[0] = w0[1]; d1[1] = w1[1]; d1[2] = w2[1]; d1[3] = w3[1]; d2[0] = w0[2]; d2[1] = w1[2]; d2[2] = w2[2]; d2[3] = w3[2]; d3[0] = w0[3]; d3[1] = w1[3]; d3[2] = w2[3]; d3[3] = w3[3]; d4[0] = w0[4]; d4[1] = w1[4]; d4[2] = w2[4]; d4[3] = w3[4]; d5[0] = w0[5]; d5[1] = w1[5]; d5[2] = w2[5]; d5[3] = w3[5]; } // Y = A_T * w_t for (int n = 0; n < 4; n++) { o0[n] = d0[n] + d1[n] + d2[n] + d3[n] + d4[n]; o1[n] = d1[n] - d2[n] + 2*d3[n] - 2*d4[n]; o2[n] = d1[n] + d2[n] + 4*d3[n] + 4*d4[n]; o3[n] = d1[n] - d2[n] + 8*d3[n] - 8*d4[n] + d5[n]; } // 
save to top blob tm
                // Tail of the preceding output-transform loop (that function
                // begins earlier in the file): add the per-channel bias and
                // store the 4x4 output tile produced from this 6x6 input tile.
                for (int n = 0; n < 4; n++)
                {
                    outRow0[n] = o0[n] + bias0;
                    outRow1[n] = o1[n] + bias0;
                    outRow2[n] = o2[n] + bias0;
                    outRow3[n] = o3[n] + bias0;
                }

                // advance to the next 6x6 tile (36 floats) in the transformed
                // blob; each tile yields 4 output columns
                out_tile += 36;

                outRow0 += 4;
                outRow1 += 4;
                outRow2 += 4;
                outRow3 += 4;
            }

            // a row of tiles produced 4 output rows (outRow0..outRow3), so
            // skip 3 extra rows to land on the next tile row's first output row
            outRow0 += outw * 3;
            outRow1 += outw * 3;
            outRow2 += outw * 3;
            outRow3 += outw * 3;
        }
    }
    }
    // END transform output

    // cut result pad
    copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt.blob_allocator, opt.num_threads);
}

// conv3x3s2_sse: direct 3x3 convolution with stride 2.
// NOTE(review): despite the _sse suffix this path is plain scalar C++ with no
// intrinsics -- presumably the portable fallback; confirm against the
// intrinsics variants elsewhere in this file.
//   bottom_blob : input feature maps, one image plane per Mat channel
//   top_blob    : pre-sized output (outw x outh x outch), written here
//   _kernel     : weights laid out as [outch][inch][3*3]
//   _bias       : optional per-output-channel bias (empty Mat -> bias == NULL)
//   opt         : threading options (opt.num_threads drives the OpenMP loop)
static void conv3x3s2_sse(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // The inner loop advances the input row pointers by 2*outw columns per
    // output row (stride 2); the next output row starts two input rows down,
    // hence the remaining step is (w - 2*outw) + w.
    const int tailstep = w - 2 * outw + w;

    const float* kernel = _kernel;
    const float* bias = _bias;

    // output channels are independent -> one per OpenMP task
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        Mat out = top_blob.channel(p);

        // seed the whole plane with the bias, then accumulate over input channels
        const float bias0 = bias ? bias[p] : 0.f;
        out.fill(bias0);

        for (int q = 0; q < inch; q++)
        {
            float *outptr = out;

            const float *img = bottom_blob.channel(q);
            // 3x3 kernel for (output p, input q)
            const float* kernel0 = kernel + p*inch*9 + q*9;

            // three consecutive input rows feeding one output row
            const float *r0 = img;
            const float *r1 = img + w;
            const float *r2 = img + w * 2;

            // the three kernel rows
            const float* k0 = kernel0;
            const float* k1 = kernel0 + 3;
            const float* k2 = kernel0 + 6;

            for (int i = 0; i < outh; i++)
            {
                int remain = outw;

                for (; remain > 0; remain--)
                {
                    // 3x3 dot product at the current window
                    float sum = 0;

                    sum += r0[0] * k0[0];
                    sum += r0[1] * k0[1];
                    sum += r0[2] * k0[2];
                    sum += r1[0] * k1[0];
                    sum += r1[1] * k1[1];
                    sum += r1[2] * k1[2];
                    sum += r2[0] * k2[0];
                    sum += r2[1] * k2[1];
                    sum += r2[2] * k2[2];

                    *outptr += sum;

                    // horizontal stride 2
                    r0 += 2;
                    r1 += 2;
                    r2 += 2;
                    outptr++;
                }

                // jump to the start of the next stride-2 output row
                r0 += tailstep;
                r1 += tailstep;
                r2 += tailstep;
            }
        }
    }
}
DRB004-antidep2-var-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

/* Two nested loops with loop-carried anti-dependence on the outer level. This is a variable-length array version in C99. Data race pair: a[i][j]@70:7 vs. a[i+1][j]@70:18 */

#include <stdlib.h>
#include <stdio.h>

/* DataRaceBench kernel DRB004 ("-yes" suffix: a data race is present BY
   DESIGN -- race detectors are validated against this program, so the race
   must not be "fixed"). Sets up a len x len VLA, runs an update that
   carries an anti-dependence between consecutive rows, then prints the
   array in deterministic order. */
int main(int argc,char *argv[])
{
  int i, j;
  int len = 20;

  /* optional override of the problem size from the command line */
  if (argc>1)
    len = atoi(argv[1]);

  double a[len][len]; /* C99 variable-length array */

  /* initialize every element to 0.5; note the nested parallel regions
     (outer parallel over rows, inner parallel simd over columns) */
#pragma omp parallel for private(j)
  for (i=0; i< len; i++)
#pragma omp parallel for simd
    for (j=0; j<len; j++)
      a[i][j] = 0.5;

  /* update phase: a[i][j] += a[i+1][j] reads row i+1 while row i is
     written -- the file header names this pair as the intended race.
     NOTE(review): upstream DRB004 parallelizes the i loop; in this variant
     only the j loop carries a pragma -- confirm which loop is meant to
     carry the race here. */
  for (i = 0; i < len - 1; i += 1)
  {
#pragma omp parallel for simd
    for (j = 0; j < len ; j += 1)
    {
      a[i][j] += a[i + 1][j];
    }
  }

  /* ordered printout: output appears in sequential (i, j) order even
     though the loops are parallel */
#pragma omp parallel for private(j) ordered
  for (i=0; i< len; i++)
#pragma omp parallel for simd ordered
    for (j=0; j<len; j++)
#pragma omp ordered simd
      printf("%lf\n",a[i][j]);

  return 0;
}
single_misc_messages.c
// clang '-verify' regression test for '#pragma omp single' diagnostics.
// The expected-error/expected-warning/expected-note annotations below ARE the
// test oracle, and their @+N offsets are line-relative: do not reflow,
// reorder, or insert lines inside the test bodies, or the test will report
// mismatched diagnostic locations.
// RUN: %clang_cc1 -fsyntax-only -fopenmp -verify %s -Wuninitialized // RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -verify %s -Wuninitialized void xxx(int argc) { int x; // expected-note {{initialize the variable 'x' to silence this warning}} #pragma omp single argc = x; // expected-warning {{variable 'x' is uninitialized when used here}} } void foo(); // expected-error@+1 {{unexpected OpenMP directive '#pragma omp single'}} #pragma omp single // expected-error@+1 {{unexpected OpenMP directive '#pragma omp single'}} #pragma omp single foo void test_no_clause() { int i; #pragma omp single foo(); #pragma omp single ++i; } void test_branch_protected_scope() { int i = 0; L1: ++i; int x[24]; #pragma omp parallel #pragma omp single { if (i == 5) goto L1; // expected-error {{use of undeclared label 'L1'}} else if (i == 6) return; // expected-error {{cannot return from OpenMP region}} else if (i == 7) goto L2; else if (i == 8) { L2: x[i]++; } } if (x[0] == 0) goto L2; // expected-error {{use of undeclared label 'L2'}} else if (x[1] == 1) goto L1; } void test_invalid_clause() { int i; #pragma omp parallel // expected-warning@+1 {{extra tokens at the end of '#pragma omp single' are ignored}} #pragma omp single foo bar foo(); } void test_non_identifiers() { int i, x; #pragma omp parallel // expected-warning@+1 {{extra tokens at the end of '#pragma omp single' are ignored}} #pragma omp single; foo(); #pragma omp parallel // expected-error@+2 {{unexpected OpenMP clause 'linear' in directive '#pragma omp single'}} // expected-warning@+1 {{extra tokens at the end of '#pragma omp single' are ignored}} #pragma omp single linear(x); foo(); #pragma omp parallel // expected-warning@+1 {{extra tokens at the end of '#pragma omp single' are ignored}} #pragma omp single private(x); foo(); #pragma omp parallel // expected-warning@+1 {{extra tokens at the end of '#pragma omp single' are ignored}} #pragma omp single, private(x); foo(); } void test_private() { int i; #pragma omp parallel // 
expected-error@+2 {{expected expression}} // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp single private( foo(); #pragma omp parallel // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 2 {{expected expression}} #pragma omp single private(, foo(); #pragma omp parallel // expected-error@+1 2 {{expected expression}} #pragma omp single private(, ) foo(); #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp single private() foo(); #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp single private(int) foo(); #pragma omp parallel // expected-error@+1 {{expected variable name}} #pragma omp single private(0) foo(); int x, y, z; #pragma omp parallel #pragma omp single private(x) foo(); #pragma omp parallel #pragma omp single private(x, y) foo(); #pragma omp parallel #pragma omp single private(x, y, z) foo(); } void test_firstprivate() { int i; #pragma omp parallel // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 {{expected expression}} #pragma omp single firstprivate( foo(); #pragma omp parallel // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 2 {{expected expression}} #pragma omp single firstprivate(, foo(); #pragma omp parallel // expected-error@+1 2 {{expected expression}} #pragma omp single firstprivate(, ) foo(); #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp single firstprivate() foo(); #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp single firstprivate(int) foo(); #pragma omp parallel // expected-error@+1 {{expected variable name}} #pragma omp single firstprivate(0) foo(); } void test_nowait() { #pragma omp single nowait nowait // expected-error {{directive '#pragma omp single' cannot contain more than one 'nowait' clause}} for (int i = 0; i < 16; ++i) ; }
GB_binop__isle_fp32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__isle_fp32) // A.*B function (eWiseMult): GB (_AemultB_08__isle_fp32) // A.*B function (eWiseMult): GB (_AemultB_02__isle_fp32) // A.*B function (eWiseMult): GB (_AemultB_04__isle_fp32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__isle_fp32) // A*D function (colscale): GB (_AxD__isle_fp32) // D*A function (rowscale): GB (_DxB__isle_fp32) // C+=B function (dense accum): GB (_Cdense_accumB__isle_fp32) // C+=b function (dense accum): GB (_Cdense_accumb__isle_fp32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isle_fp32) // C=scalar+B GB (_bind1st__isle_fp32) // C=scalar+B' GB (_bind1st_tran__isle_fp32) // C=A+scalar GB (_bind2nd__isle_fp32) // C=A'+scalar GB (_bind2nd_tran__isle_fp32) // C type: float // A type: float // A pattern? 0 // B type: float // B pattern? 
0 // BinaryOp: cij = (aij <= bij) #define GB_ATYPE \ float #define GB_BTYPE \ float #define GB_CTYPE \ float // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ float aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ float bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ float t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x <= y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISLE || GxB_NO_FP32 || GxB_NO_ISLE_FP32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__isle_fp32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__isle_fp32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__isle_fp32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type float float bwork = (*((float *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__isle_fp32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *restrict Cx = 
(float *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__isle_fp32) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *restrict Cx = (float *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__isle_fp32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; float alpha_scalar ; float beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((float *) alpha_scalar_in)) ; beta_scalar = (*((float *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__isle_fp32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix 
M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__isle_fp32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
// NOTE(review): this span is machine-generated SuiteSparse:GraphBLAS kernel
// code for the ISLE (x <= y) operator on fp32; it should normally be
// regenerated from the Generator templates rather than hand-edited.
// The lines below close a kernel whose opening lines precede this chunk.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__isle_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    // The whole body lives in the shared template; GB_DISABLE compiles this
    // kernel out when this operator/type combination is disabled at build time.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__isle_fp32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__isle_fp32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *Cx = (float *) Cx_output ;
    float x = (*((float *) x_input)) ;
    float *Bx = (float *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // GBB skips entries not present in the bitmap (presumably Bb == NULL
        // means a full matrix and every entry is present — TODO confirm
        // against GB_bitmap.h)
        if (!GBB (Bb, p)) continue ;
        float bij = GBX (Bx, p, false) ;
        // cij = (x <= bij), the ISLE operator with the scalar bound first
        Cx [p] = (x <= bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__isle_fp32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    float *Cx = (float *) Cx_output ;
    float *Ax = (float *) Ax_input ;
    float y = (*((float *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        float aij = GBX (Ax, p, false) ;
        // cij = (aij <= y), the ISLE operator with the scalar bound second
        Cx [p] = (aij <= y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    float aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (x <= aij) ; \
}

GrB_Info GB (_bind1st_tran__isle_fp32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    float
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float x = (*((const float *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for subsequent kernels (same type here, kept for the
    // generator's symmetry)
    #undef GB_ATYPE
    #define GB_ATYPE \
    float
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    float aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (aij <= y) ; \
}

GrB_Info GB (_bind2nd_tran__isle_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float y = (*((const float *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
channel.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC H H AAA N N N N EEEEE L % % C H H A A NN N NN N E L % % C HHHHH AAAAA N N N N N N EEE L % % C H H A A N NN N NN E L % % CCCC H H A A N N N N EEEEE LLLLL % % % % % % MagickCore Image Channel Methods % % % % Software Design % % Cristy % % December 2003 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/cache-private.h" #include "MagickCore/channel.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite-private.h" #include "MagickCore/enhance.h" #include "MagickCore/image.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/resource_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/token.h" #include "MagickCore/utility.h" #include "MagickCore/version.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C h a n n e l F x I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ChannelFxImage() applies a channel expression to the specified image. The % expression consists of one or more channels, either mnemonic or numeric (e.g. % red, 1), separated by actions as follows: % % <=> exchange two channels (e.g. red<=>blue) % => copy one channel to another channel (e.g. red=>green) % = assign a constant value to a channel (e.g. red=50%) % , write new image channels in the specified order (e.g. red, green) % | add a new output image for the next set of channel operations % ; move to the next input image for the source of channel data % % For example, to create 3 grayscale images from the red, green, and blue % channels of an image, use: % % -channel-fx "red; green; blue" % % A channel without an operation symbol implies separate (i.e, semicolon). % % The format of the ChannelFxImage method is: % % Image *ChannelFxImage(const Image *image,const char *expression, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o expression: A channel expression. 
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Channel operations recognized by the -channel-fx expression parser.
*/
typedef enum
{
  ExtractChannelOp,
  AssignChannelOp,
  ExchangeChannelOp,
  TransferChannelOp
} ChannelFx;

/*
  ChannelImage() copies one channel of source_image into one channel of
  destination_image (TransferChannelOp/ExchangeChannelOp/ExtractChannelOp),
  or fills the destination channel with the constant `pixel`
  (AssignChannelOp).  Only the overlapping region of the two images is
  processed.  Returns MagickTrue on success.
*/
static MagickBooleanType ChannelImage(Image *destination_image,
  const PixelChannel destination_channel,const ChannelFx channel_op,
  const Image *source_image,const PixelChannel source_channel,
  const Quantum pixel,ExceptionInfo *exception)
{
  CacheView
    *source_view,
    *destination_view;

  MagickBooleanType
    status;

  size_t
    height,
    width;

  ssize_t
    y;

  status=MagickTrue;
  source_view=AcquireVirtualCacheView(source_image,exception);
  destination_view=AcquireAuthenticCacheView(destination_image,exception);
  /* clamp to the smaller of the two geometries */
  height=MagickMin(source_image->rows,destination_image->rows);
  width=MagickMin(source_image->columns,destination_image->columns);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(source_image,source_image,height,1)
#endif
  for (y=0; y < (ssize_t) height; y++)
  {
    PixelTrait
      destination_traits,
      source_traits;

    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,1,
      exception);
    q=GetCacheViewAuthenticPixels(destination_view,0,y,
      destination_image->columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    destination_traits=GetPixelChannelTraits(destination_image,
      destination_channel);
    source_traits=GetPixelChannelTraits(source_image,source_channel);
    /* silently skip rows when either channel is not active in its image */
    if ((destination_traits == UndefinedPixelTrait) ||
        (source_traits == UndefinedPixelTrait))
      continue;
    for (x=0; x < (ssize_t) width; x++)
    {
      if (channel_op == AssignChannelOp)
        SetPixelChannel(destination_image,destination_channel,pixel,q);
      else
        SetPixelChannel(destination_image,destination_channel,
          GetPixelChannel(source_image,source_channel,p),q);
      p+=GetPixelChannels(source_image);
      q+=GetPixelChannels(destination_image);
    }
    if (SyncCacheViewAuthenticPixels(destination_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  destination_view=DestroyCacheView(destination_view);
  source_view=DestroyCacheView(source_view);
  return(status);
}

MagickExport Image *ChannelFxImage(const Image *image,const char *expression,
  ExceptionInfo *exception)
{
#define ChannelFxImageTag  "ChannelFx/Image"

  ChannelFx
    channel_op;

  ChannelType
    channel_mask;

  char
    token[MagickPathExtent];

  const char
    *p;

  const Image
    *source_image;

  double
    pixel;

  Image
    *destination_image;

  MagickBooleanType
    status;

  PixelChannel
    source_channel,
    destination_channel;

  ssize_t
    channels;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  source_image=image;
  destination_image=CloneImage(source_image,0,0,MagickTrue,exception);
  if (destination_image == (Image *) NULL)
    return((Image *) NULL);
  /* a NULL expression is a no-op: return the clone unchanged */
  if (expression == (const char *) NULL)
    return(destination_image);
  status=SetImageStorageClass(destination_image,DirectClass,exception);
  if (status == MagickFalse)
    {
      /* NOTE(review): the list tail is fetched but then leaked by returning
         NULL without destroying it — verify against upstream before changing */
      destination_image=GetLastImageInList(destination_image);
      return((Image *) NULL);
    }
  destination_channel=RedPixelChannel;
  channel_mask=UndefinedChannel;
  pixel=0.0;
  p=(char *) expression;
  GetNextToken(p,&p,MagickPathExtent,token);
  channel_op=ExtractChannelOp;
  for (channels=0; *token != '\0'; )
  {
    ssize_t
      i;

    /*
      Interpret channel expression.
    */
    switch (*token)
    {
      case ',':
      {
        /* ',' simply separates channel specs; keep parsing */
        GetNextToken(p,&p,MagickPathExtent,token);
        break;
      }
      case '|':
      {
        /* '|' advances to the next source image (wrapping to the first) */
        if (GetNextImageInList(source_image) != (Image *) NULL)
          source_image=GetNextImageInList(source_image);
        else
          source_image=GetFirstImageInList(source_image);
        GetNextToken(p,&p,MagickPathExtent,token);
        break;
      }
      case ';':
      {
        /* ';' finishes the current output image and starts a new one */
        Image
          *canvas;

        (void) SetPixelChannelMask(destination_image,channel_mask);
        if ((channel_op == ExtractChannelOp) && (channels == 1))
          {
            /* a single extracted channel yields a grayscale image */
            (void) SetPixelMetaChannels(destination_image,0,exception);
            (void) SetImageColorspace(destination_image,GRAYColorspace,
              exception);
          }
        canvas=CloneImage(source_image,0,0,MagickTrue,exception);
        if (canvas == (Image *) NULL)
          {
            destination_image=DestroyImageList(destination_image);
            return(destination_image);
          }
        AppendImageToList(&destination_image,canvas);
        destination_image=GetLastImageInList(destination_image);
        status=SetImageStorageClass(destination_image,DirectClass,exception);
        if (status == MagickFalse)
          {
            destination_image=GetLastImageInList(destination_image);
            return((Image *) NULL);
          }
        GetNextToken(p,&p,MagickPathExtent,token);
        /* reset per-output-image parser state */
        channels=0;
        destination_channel=RedPixelChannel;
        channel_mask=UndefinedChannel;
        break;
      }
      default:
        break;
    }
    i=ParsePixelChannelOption(token);
    if (i < 0)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "UnrecognizedChannelType","`%s'",token);
        destination_image=DestroyImageList(destination_image);
        return(destination_image);
      }
    source_channel=(PixelChannel) i;
    channel_op=ExtractChannelOp;
    GetNextToken(p,&p,MagickPathExtent,token);
    /* '<' begins "<=>": exchange; '=' alone: assign; "=>": transfer */
    if (*token == '<')
      {
        channel_op=ExchangeChannelOp;
        GetNextToken(p,&p,MagickPathExtent,token);
      }
    if (*token == '=')
      {
        if (channel_op != ExchangeChannelOp)
          channel_op=AssignChannelOp;
        GetNextToken(p,&p,MagickPathExtent,token);
      }
    if (*token == '>')
      {
        if (channel_op != ExchangeChannelOp)
          channel_op=TransferChannelOp;
        GetNextToken(p,&p,MagickPathExtent,token);
      }
    switch (channel_op)
    {
      case AssignChannelOp:
      case ExchangeChannelOp:
      case TransferChannelOp:
      {
        if (channel_op == AssignChannelOp)
          /* e.g. red=50%: parse the constant; `i` keeps the channel parsed
             before '=', which becomes the destination below */
          pixel=StringToDoubleInterval(token,(double) QuantumRange+1.0);
        else
          {
            i=ParsePixelChannelOption(token);
            if (i < 0)
              {
                (void) ThrowMagickException(exception,GetMagickModule(),
                  OptionError,"UnrecognizedChannelType","`%s'",token);
                destination_image=DestroyImageList(destination_image);
                return(destination_image);
              }
          }
        destination_channel=(PixelChannel) i;
        /* grow the meta-channel set if the destination is past the last
           currently-allocated channel */
        if (i >= (ssize_t) GetPixelChannels(destination_image))
          (void) SetPixelMetaChannels(destination_image,(size_t) (
            destination_channel-GetPixelChannels(destination_image)+1),
            exception);
        if (image->colorspace != UndefinedColorspace)
          switch (destination_channel)
          {
            case RedPixelChannel:
            case GreenPixelChannel:
            case BluePixelChannel:
            case BlackPixelChannel:
            case IndexPixelChannel:
              break;
            case AlphaPixelChannel:
            {
              destination_image->alpha_trait=BlendPixelTrait;
              break;
            }
            case ReadMaskPixelChannel:
            {
              destination_image->read_mask=MagickTrue;
              break;
            }
            case WriteMaskPixelChannel:
            {
              destination_image->write_mask=MagickTrue;
              break;
            }
            case MetaPixelChannel:
            default:
            {
              (void) SetPixelMetaChannels(destination_image,(size_t) (
                destination_channel-GetPixelChannels(destination_image)+1),
                exception);
              break;
            }
          }
        channel_mask=(ChannelType) (channel_mask | ParseChannelOption(token));
        /* writing a second (or non-gray) channel forces sRGB */
        if (((channels >= 1) || (destination_channel >= 1)) &&
            (IsGrayColorspace(destination_image->colorspace) != MagickFalse))
          (void) SetImageColorspace(destination_image,sRGBColorspace,exception);
        GetNextToken(p,&p,MagickPathExtent,token);
        break;
      }
      default:
        break;
    }
    status=ChannelImage(destination_image,destination_channel,channel_op,
      source_image,source_channel,ClampToQuantum(pixel),exception);
    if (status == MagickFalse)
      {
        destination_image=DestroyImageList(destination_image);
        break;
      }
    channels++;
    if (channel_op == ExchangeChannelOp)
      {
        /* exchange: perform the reverse copy as well */
        status=ChannelImage(destination_image,source_channel,channel_op,
          source_image,destination_channel,ClampToQuantum(pixel),exception);
        if (status == MagickFalse)
          {
            destination_image=DestroyImageList(destination_image);
            break;
          }
        channels++;
      }
    switch (channel_op)
    {
      case ExtractChannelOp:
      {
        /* bare channel name: extract into consecutive destination channels */
        channel_mask=(ChannelType) (channel_mask | (1 << destination_channel));
        destination_channel=(PixelChannel) (destination_channel+1);
        break;
      }
      default:
        break;
    }
    status=SetImageProgress(source_image,ChannelFxImageTag,p-expression,
      strlen(expression));
    if (status == MagickFalse)
      break;
  }
  (void) SetPixelChannelMask(destination_image,channel_mask);
  if ((channel_op == ExtractChannelOp) && (channels == 1))
    {
      (void) SetPixelMetaChannels(destination_image,0,exception);
      (void) SetImageColorspace(destination_image,GRAYColorspace,exception);
    }
  return(GetFirstImageInList(destination_image));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     C o m b i n e I m a g e s                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CombineImages() combines one or more images into a single image.  The
%  grayscale value of the pixels of each image in the sequence is assigned in
%  order to the specified channels of the combined image.  The typical
%  ordering would be image 1 => Red, 2 => Green, 3 => Blue, etc.
%
%  The format of the CombineImages method is:
%
%      Image *CombineImages(const Image *images,const ColorspaceType colorspace,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o images: the image sequence.
%
%    o colorspace: the image colorspace.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *CombineImages(const Image *image,
  const ColorspaceType colorspace,ExceptionInfo *exception)
{
#define CombineImageTag  "Combine/Image"

  CacheView
    *combine_view;

  Image
    *combine_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Ensure the image are the same size.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  combine_image=CloneImage(image,0,0,MagickTrue,exception);
  if (combine_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(combine_image,DirectClass,exception) == MagickFalse)
    {
      combine_image=DestroyImage(combine_image);
      return((Image *) NULL);
    }
  /* choose target colorspace: explicit request wins; otherwise linear gamma
     implies RGB, anything else implies sRGB */
  if (colorspace != UndefinedColorspace)
    (void) SetImageColorspace(combine_image,colorspace,exception);
  else
    if (fabs(image->gamma-1.0) <= MagickEpsilon)
      (void) SetImageColorspace(combine_image,RGBColorspace,exception);
    else
      (void) SetImageColorspace(combine_image,sRGBColorspace,exception);
  /* extra images in the list beyond the colorspace's channel count become
     the alpha channel */
  switch (combine_image->colorspace)
  {
    case UndefinedColorspace:
    case sRGBColorspace:
    {
      if (GetImageListLength(image) > 3)
        combine_image->alpha_trait=BlendPixelTrait;
      break;
    }
    case LinearGRAYColorspace:
    case GRAYColorspace:
    {
      if (GetImageListLength(image) > 1)
        combine_image->alpha_trait=BlendPixelTrait;
      break;
    }
    case CMYKColorspace:
    {
      if (GetImageListLength(image) > 4)
        combine_image->alpha_trait=BlendPixelTrait;
      break;
    }
    default:
      break;
  }
  /*
    Combine images.
  */
  status=MagickTrue;
  progress=0;
  combine_view=AcquireAuthenticCacheView(combine_image,exception);
  for (y=0; y < (ssize_t) combine_image->rows; y++)
  {
    CacheView
      *image_view;

    const Image
      *next;

    Quantum
      *pixels;

    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      i;

    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewAuthenticPixels(combine_view,0,y,combine_image->columns,
      1,exception);
    if (pixels == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    next=image;
    /* each active output channel consumes the gray value of the next image
       in the sequence */
    for (i=0; i < (ssize_t) GetPixelChannels(combine_image); i++)
    {
      register ssize_t
        x;

      PixelChannel channel = GetPixelChannelChannel(combine_image,i);
      PixelTrait traits = GetPixelChannelTraits(combine_image,channel);
      if (traits == UndefinedPixelTrait)
        continue;
      if (next == (Image *) NULL)
        continue;
      image_view=AcquireVirtualCacheView(next,exception);
      p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception);
      if (p == (const Quantum *) NULL)
        continue;
      q=pixels;
      for (x=0; x < (ssize_t) combine_image->columns; x++)
      {
        if (x < (ssize_t) next->columns)
          {
            q[i]=GetPixelGray(next,p);
            p+=GetPixelChannels(next);
          }
        q+=GetPixelChannels(combine_image);
      }
      image_view=DestroyCacheView(image_view);
      next=GetNextImageInList(next);
    }
    if (SyncCacheViewAuthenticPixels(combine_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,CombineImageTag,progress++,
          combine_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  combine_view=DestroyCacheView(combine_view);
  if (status == MagickFalse)
    combine_image=DestroyImage(combine_image);
  return(combine_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     G e t I m a g e A l p h a C h a n n e l                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageAlphaChannel() returns MagickFalse if the image alpha
channel is
%  not activated.  That is, the image is RGB rather than RGBA or CMYK rather
%  than CMYKA.
%
%  The format of the GetImageAlphaChannel method is:
%
%      MagickBooleanType GetImageAlphaChannel(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport MagickBooleanType GetImageAlphaChannel(const Image *image)
{
  assert(image != (const Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  /* alpha is "active" whenever the trait is anything but undefined */
  return(image->alpha_trait != UndefinedPixelTrait ? MagickTrue : MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S e p a r a t e I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SeparateImage() separates a channel from the image and returns it as a
%  grayscale image.
%
%  The format of the SeparateImage method is:
%
%      Image *SeparateImage(const Image *image,const ChannelType channel,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the image channel.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SeparateImage(const Image *image,
  const ChannelType channel_type,ExceptionInfo *exception)
{
#define GetChannelBit(mask,bit)  (((size_t) (mask) >> (size_t) (bit)) & 0x01)
#define SeparateImageTag  "Separate/Image"

  CacheView
    *image_view,
    *separate_view;

  Image
    *separate_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Initialize separate image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  separate_image=CloneImage(image,image->columns,image->rows,MagickTrue,
    exception);
  if (separate_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(separate_image,DirectClass,exception) == MagickFalse)
    {
      separate_image=DestroyImage(separate_image);
      return((Image *) NULL);
    }
  /* result is a plain grayscale image: no alpha, source gamma preserved */
  separate_image->alpha_trait=UndefinedPixelTrait;
  (void) SetImageColorspace(separate_image,GRAYColorspace,exception);
  separate_image->gamma=image->gamma;
  /*
    Separate image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  separate_view=AcquireAuthenticCacheView(separate_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(separate_view,0,y,separate_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      /* masked-out pixels receive the background color instead
         (SetPixelBackgoundColor is the API's historical spelling) */
      if (GetPixelWriteMask(image,p) <= (QuantumRange/2))
        {
          SetPixelBackgoundColor(separate_image,q);
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(separate_image);
          continue;
        }
      SetPixelChannel(separate_image,GrayPixelChannel,0,q);
      /* last channel selected by channel_type wins if several are set */
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (GetChannelBit(channel_type,channel) == 0))
          continue;
        SetPixelChannel(separate_image,GrayPixelChannel,p[i],q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(separate_image);
    }
    if (SyncCacheViewAuthenticPixels(separate_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_SeparateImage)
#endif
        proceed=SetImageProgress(image,SeparateImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  separate_view=DestroyCacheView(separate_view);
  image_view=DestroyCacheView(image_view);
  (void) SetImageChannelMask(separate_image,DefaultChannels);
  if (status == MagickFalse)
    separate_image=DestroyImage(separate_image);
  return(separate_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S e p a r a t e I m a g e s                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SeparateImages() returns a separate grayscale image for each channel
%  specified.
%
%  The format of the SeparateImages method is:
%
%      Image *SeparateImages(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SeparateImages(const Image *image,ExceptionInfo *exception)
{
  Image
    *images,
    *separate_image;

  register ssize_t
    i;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  images=NewImageList();
  /* one grayscale image per updatable channel */
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    PixelChannel channel = GetPixelChannelChannel(image,i);
    PixelTrait traits = GetPixelChannelTraits(image,channel);
    if ((traits == UndefinedPixelTrait) ||
        ((traits & UpdatePixelTrait) == 0))
      continue;
    separate_image=SeparateImage(image,(ChannelType) (1 << channel),exception);
    if (separate_image != (Image *) NULL)
      AppendImageToList(&images,separate_image);
  }
  if (images == (Image *) NULL)
    images=SeparateImage(image,UndefinedChannel,exception);
  return(images);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S e t I m a g e A l p h a C h a n n e l                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageAlphaChannel() activates, deactivates, resets, or sets the alpha
%  channel.
%
%  The format of the SetImageAlphaChannel method is:
%
%      MagickBooleanType SetImageAlphaChannel(Image *image,
%        const AlphaChannelOption alpha_type,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o alpha_type:  The alpha channel type: ActivateAlphaChannel,
%      AssociateAlphaChannel, CopyAlphaChannel, DeactivateAlphaChannel,
%      DisassociateAlphaChannel, ExtractAlphaChannel, OffAlphaChannel,
%      OnAlphaChannel, OpaqueAlphaChannel, SetAlphaChannel, ShapeAlphaChannel,
%      and TransparentAlphaChannel.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  FlattenPixelInfo() composites pixel p (color, with `alpha`) over pixel q
  (with `beta`) using the "over" operator and stores the result in
  `composite`.  `q` and `composite` may alias (RemoveAlphaChannel composites
  in place).
*/
static inline void FlattenPixelInfo(const Image *image,const PixelInfo *p,
  const double alpha,const Quantum *q,const double beta,
  Quantum *composite)
{
  double
    Da,
    gamma,
    Sa;

  register ssize_t
    i;

  /*
    Compose pixel p over pixel q with the given alpha.
  */
  Sa=QuantumScale*alpha;
  Da=QuantumScale*beta,
  gamma=Sa*(-Da)+Sa+Da;
  /* PerceptibleReciprocal avoids division by a vanishing alpha sum */
  gamma=PerceptibleReciprocal(gamma);
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    PixelChannel channel = GetPixelChannelChannel(image,i);
    PixelTrait traits = GetPixelChannelTraits(image,channel);
    if (traits == UndefinedPixelTrait)
      continue;
    switch (channel)
    {
      case RedPixelChannel:
      {
        composite[i]=ClampToQuantum(gamma*MagickOver_((double) q[i],beta,
          (double) p->red,alpha));
        break;
      }
      case GreenPixelChannel:
      {
        composite[i]=ClampToQuantum(gamma*MagickOver_((double) q[i],beta,
          (double) p->green,alpha));
        break;
      }
      case BluePixelChannel:
      {
        composite[i]=ClampToQuantum(gamma*MagickOver_((double) q[i],beta,
          (double) p->blue,alpha));
        break;
      }
      case BlackPixelChannel:
      {
        composite[i]=ClampToQuantum(gamma*MagickOver_((double) q[i],beta,
          (double) p->black,alpha));
        break;
      }
      case AlphaPixelChannel:
      {
        /* resulting alpha is the un-normalized over-operator alpha sum */
        composite[i]=ClampToQuantum(QuantumRange*(Sa*(-Da)+Sa+Da));
        break;
      }
      default:
        break;
    }
  }
}

MagickExport MagickBooleanType SetImageAlphaChannel(Image *image,
  const AlphaChannelOption alpha_type,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  status=MagickTrue;
  /* NOTE(review): some cases `break` to the shared channel-mask/sync epilog
     below, while the pixel-rewriting cases `return` directly — preserve this
     distinction when modifying */
  switch (alpha_type)
  {
    case ActivateAlphaChannel:
    {
      image->alpha_trait=BlendPixelTrait;
      break;
    }
    case AssociateAlphaChannel:
    {
      /*
        Associate alpha.
      */
      status=SetImageStorageClass(image,DirectClass,exception);
      if (status == MagickFalse)
        break;
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        register Quantum
          *magick_restrict q;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          double
            gamma;

          register ssize_t
            i;

          if (GetPixelWriteMask(image,q) <= (QuantumRange/2))
            {
              q+=GetPixelChannels(image);
              continue;
            }
          /* premultiply: scale each updatable color channel by alpha */
          gamma=QuantumScale*GetPixelAlpha(image,q);
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if (channel == AlphaPixelChannel)
              continue;
            if ((traits & UpdatePixelTrait) == 0)
              continue;
            q[i]=ClampToQuantum(gamma*q[i]);
          }
          q+=GetPixelChannels(image);
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      image->alpha_trait=CopyPixelTrait;
      return(status);
    }
    case BackgroundAlphaChannel:
    {
      /*
        Set transparent pixels to background color.
      */
      if (image->alpha_trait == UndefinedPixelTrait)
        break;
      status=SetImageStorageClass(image,DirectClass,exception);
      if (status == MagickFalse)
        break;
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        register Quantum
          *magick_restrict q;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          if (GetPixelAlpha(image,q) == TransparentAlpha)
            {
              SetPixelViaPixelInfo(image,&image->background_color,q);
              /* keep the pixel transparent after recoloring it */
              SetPixelChannel(image,AlphaPixelChannel,TransparentAlpha,q);
            }
          q+=GetPixelChannels(image);
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      return(status);
    }
    case CopyAlphaChannel:
    {
      /* derive alpha from pixel intensity */
      image->alpha_trait=UpdatePixelTrait;
      status=CompositeImage(image,image,IntensityCompositeOp,MagickTrue,0,0,
        exception);
      break;
    }
    case DeactivateAlphaChannel:
    {
      if (image->alpha_trait == UndefinedPixelTrait)
        status=SetImageAlpha(image,OpaqueAlpha,exception);
      image->alpha_trait=CopyPixelTrait;
      break;
    }
    case DisassociateAlphaChannel:
    {
      /*
        Disassociate alpha.
      */
      status=SetImageStorageClass(image,DirectClass,exception);
      if (status == MagickFalse)
        break;
      image->alpha_trait=BlendPixelTrait;
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        register Quantum
          *magick_restrict q;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          double
            gamma,
            Sa;

          register ssize_t
            i;

          if (GetPixelWriteMask(image,q) <= (QuantumRange/2))
            {
              q+=GetPixelChannels(image);
              continue;
            }
          /* un-premultiply: divide each updatable color channel by alpha */
          Sa=QuantumScale*GetPixelAlpha(image,q);
          gamma=PerceptibleReciprocal(Sa);
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if (channel == AlphaPixelChannel)
              continue;
            if ((traits & UpdatePixelTrait) == 0)
              continue;
            q[i]=ClampToQuantum(gamma*q[i]);
          }
          q+=GetPixelChannels(image);
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      image->alpha_trait=UndefinedPixelTrait;
      return(status);
    }
    case DiscreteAlphaChannel:
    {
      if (image->alpha_trait == UndefinedPixelTrait)
        status=SetImageAlpha(image,OpaqueAlpha,exception);
      image->alpha_trait=UpdatePixelTrait;
      break;
    }
    case ExtractAlphaChannel:
    {
      status=CompositeImage(image,image,AlphaCompositeOp,MagickTrue,0,0,
        exception);
      image->alpha_trait=UndefinedPixelTrait;
      break;
    }
    case OffAlphaChannel:
    {
      image->alpha_trait=UndefinedPixelTrait;
      break;
    }
    case OnAlphaChannel:
    {
      if (image->alpha_trait == UndefinedPixelTrait)
        status=SetImageAlpha(image,OpaqueAlpha,exception);
      image->alpha_trait=BlendPixelTrait;
      break;
    }
    case OpaqueAlphaChannel:
    {
      status=SetImageAlpha(image,OpaqueAlpha,exception);
      break;
    }
    case RemoveAlphaChannel:
    {
      /*
        Remove transparency.
      */
      if (image->alpha_trait == UndefinedPixelTrait)
        break;
      status=SetImageStorageClass(image,DirectClass,exception);
      if (status == MagickFalse)
        break;
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        register Quantum
          *magick_restrict q;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          /* composite the background over each pixel, in place */
          FlattenPixelInfo(image,&image->background_color,
            image->background_color.alpha,q,(double) GetPixelAlpha(image,q),q);
          q+=GetPixelChannels(image);
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      image->alpha_trait=image->background_color.alpha_trait;
      break;
    }
    case SetAlphaChannel:
    {
      if (image->alpha_trait == UndefinedPixelTrait)
        status=SetImageAlpha(image,OpaqueAlpha,exception);
      break;
    }
    case ShapeAlphaChannel:
    {
      /*
        Set alpha channel by shape.
      */
      status=SetImageStorageClass(image,DirectClass,exception);
      if (status == MagickFalse)
        break;
      image->alpha_trait=UpdatePixelTrait;
      (void) SetImageMask(image,WritePixelMask,image,exception);
      (void) LevelImageColors(image,&image->background_color,
        &image->background_color,MagickTrue,exception);
      (void) SetImageMask(image,WritePixelMask,(Image *) NULL,exception);
      break;
    }
    case TransparentAlphaChannel:
    {
      status=SetImageAlpha(image,TransparentAlpha,exception);
      break;
    }
    case UndefinedAlphaChannel:
      break;
  }
  if (status == MagickFalse)
    return(status);
  (void) SetPixelChannelMask(image,image->channel_mask);
  return(SyncImagePixelCache(image,exception));
}
local_estimator.h
/* * estimator.h * * Created on: Mar 22, 2012 * Author: aitor */ #ifndef REC_FRAMEWORK_LOCAL_ESTIMATOR_H_ #define REC_FRAMEWORK_LOCAL_ESTIMATOR_H_ #include <pcl/apps/3d_rec_framework/feature_wrapper/normal_estimator.h> #include <pcl/keypoints/uniform_sampling.h> #include <pcl/surface/mls.h> #include <pcl/keypoints/harris_3d.h> #include <pcl/keypoints/sift_keypoint.h> #include <pcl/keypoints/susan.h> namespace pcl { template <> struct SIFTKeypointFieldSelector<PointXYZ> { inline float operator()(const PointXYZ &p) const { return p.z; } }; } // namespace pcl namespace pcl { namespace rec_3d_framework { template <typename PointInT> class KeypointExtractor { protected: typedef typename pcl::PointCloud<PointInT>::Ptr PointInTPtr; typedef typename pcl::PointCloud<PointInT>::Ptr PointOutTPtr; typename pcl::PointCloud<PointInT>::Ptr input_; float radius_; public: void setInputCloud(PointInTPtr &input) { input_ = input; } void setSupportRadius(float f) { radius_ = f; } virtual void compute(PointOutTPtr &keypoints) = 0; virtual void setNormals(const pcl::PointCloud<pcl::Normal>::Ptr & /*normals*/) {} virtual bool needNormals() { return false; } }; template <typename PointInT> class UniformSamplingExtractor : public KeypointExtractor<PointInT> { private: typedef typename pcl::PointCloud<PointInT>::Ptr PointInTPtr; bool filter_planar_; using KeypointExtractor<PointInT>::input_; using KeypointExtractor<PointInT>::radius_; float sampling_density_; boost::shared_ptr<std::vector<std::vector<int>>> neighborhood_indices_; boost::shared_ptr<std::vector<std::vector<float>>> neighborhood_dist_; void filterPlanar(PointInTPtr &input, pcl::PointCloud<int> &keypoints_cloud) { pcl::PointCloud<int> filtered_keypoints; // create a search object typename pcl::search::Search<PointInT>::Ptr tree; if (input->isOrganized()) tree.reset(new pcl::search::OrganizedNeighbor<PointInT>()); else tree.reset(new pcl::search::KdTree<PointInT>(false)); tree->setInputCloud(input); 
neighborhood_indices_.reset(new std::vector<std::vector<int>>); neighborhood_indices_->resize(keypoints_cloud.points.size()); neighborhood_dist_.reset(new std::vector<std::vector<float>>); neighborhood_dist_->resize(keypoints_cloud.points.size()); filtered_keypoints.points.resize(keypoints_cloud.points.size()); int good = 0; for (size_t i = 0; i < keypoints_cloud.points.size(); i++) { if (tree->radiusSearch(keypoints_cloud[i], radius_, (*neighborhood_indices_)[good], (*neighborhood_dist_)[good])) { EIGEN_ALIGN16 Eigen::Matrix3f covariance_matrix; Eigen::Vector4f xyz_centroid; EIGEN_ALIGN16 Eigen::Vector3f eigenValues; EIGEN_ALIGN16 Eigen::Matrix3f eigenVectors; // compute planarity of the region computeMeanAndCovarianceMatrix(*input, (*neighborhood_indices_)[good], covariance_matrix, xyz_centroid); pcl::eigen33(covariance_matrix, eigenVectors, eigenValues); float eigsum = eigenValues.sum(); if (!pcl_isfinite(eigsum)) { PCL_ERROR("Eigen sum is not finite\n"); } if ((fabs(eigenValues[0] - eigenValues[1]) < 1.5e-4) || (eigsum != 0 && fabs(eigenValues[0] / eigsum) > 1.e-2)) { // region is not planar, add to filtered keypoint keypoints_cloud.points[good] = keypoints_cloud.points[i]; good++; } } } neighborhood_indices_->resize(good); neighborhood_dist_->resize(good); keypoints_cloud.points.resize(good); neighborhood_indices_->clear(); neighborhood_dist_->clear(); } public: void setFilterPlanar(bool b) { filter_planar_ = b; } void setSamplingDensity(float f) { sampling_density_ = f; } void compute(PointInTPtr &keypoints) { keypoints.reset(new pcl::PointCloud<PointInT>); pcl::UniformSampling<PointInT> keypoint_extractor; keypoint_extractor.setRadiusSearch(sampling_density_); keypoint_extractor.setInputCloud(input_); pcl::PointCloud<int> keypoints_idxes; keypoint_extractor.compute(keypoints_idxes); if (filter_planar_) filterPlanar(input_, keypoints_idxes); std::vector<int> indices; indices.resize(keypoints_idxes.points.size()); for (size_t i = 0; i < indices.size(); i++) 
indices[i] = keypoints_idxes.points[i]; pcl::copyPointCloud(*input_, indices, *keypoints); } }; template <typename PointInT> class SIFTKeypointExtractor : public KeypointExtractor<PointInT> { typedef typename pcl::PointCloud<PointInT>::Ptr PointInTPtr; using KeypointExtractor<PointInT>::input_; using KeypointExtractor<PointInT>::radius_; public: void compute(PointInTPtr &keypoints) { keypoints.reset(new pcl::PointCloud<PointInT>); typename pcl::PointCloud<pcl::PointXYZI>::Ptr intensity_keypoints( new pcl::PointCloud<pcl::PointXYZI>); pcl::SIFTKeypoint<PointInT, pcl::PointXYZI> sift3D; sift3D.setScales(0.003f, 3, 2); sift3D.setMinimumContrast(0.1f); sift3D.setInputCloud(input_); sift3D.setSearchSurface(input_); sift3D.compute(*intensity_keypoints); pcl::copyPointCloud(*intensity_keypoints, *keypoints); } }; template <typename PointInT> class SIFTSurfaceKeypointExtractor : public KeypointExtractor<PointInT> { typedef typename pcl::PointCloud<PointInT>::Ptr PointInTPtr; pcl::PointCloud<pcl::Normal>::Ptr normals_; using KeypointExtractor<PointInT>::input_; using KeypointExtractor<PointInT>::radius_; bool needNormals() { return true; } void setNormals(const pcl::PointCloud<pcl::Normal>::Ptr &normals) { normals_ = normals; } public: void compute(PointInTPtr &keypoints) { if (normals_ == 0 || (normals_->points.size() != input_->points.size())) PCL_WARN("SIFTSurfaceKeypointExtractor -- Normals are not valid\n"); keypoints.reset(new pcl::PointCloud<PointInT>); typename pcl::PointCloud<pcl::PointNormal>::Ptr input_cloud( new pcl::PointCloud<pcl::PointNormal>); input_cloud->width = input_->width; input_cloud->height = input_->height; input_cloud->points.resize(input_->width * input_->height); for (size_t i = 0; i < input_->points.size(); i++) { input_cloud->points[i].getVector3fMap() = input_->points[i].getVector3fMap(); input_cloud->points[i].getNormalVector3fMap() = normals_->points[i].getNormalVector3fMap(); } typename pcl::PointCloud<pcl::PointXYZI>::Ptr 
intensity_keypoints( new pcl::PointCloud<pcl::PointXYZI>); pcl::SIFTKeypoint<pcl::PointNormal, pcl::PointXYZI> sift3D; sift3D.setScales(0.003f, 3, 2); sift3D.setMinimumContrast(0.0); sift3D.setInputCloud(input_cloud); sift3D.setSearchSurface(input_cloud); sift3D.compute(*intensity_keypoints); pcl::copyPointCloud(*intensity_keypoints, *keypoints); } }; template <typename PointInT, typename NormalT = pcl::Normal> class HarrisKeypointExtractor : public KeypointExtractor<PointInT> { pcl::PointCloud<pcl::Normal>::Ptr normals_; typedef typename pcl::PointCloud<PointInT>::Ptr PointInTPtr; using KeypointExtractor<PointInT>::input_; using KeypointExtractor<PointInT>::radius_; typename pcl::HarrisKeypoint3D<PointInT, pcl::PointXYZI>::ResponseMethod m_; float non_max_radius_; float threshold_; public: HarrisKeypointExtractor() { m_ = pcl::HarrisKeypoint3D<PointInT, pcl::PointXYZI>::HARRIS; non_max_radius_ = 0.01f; threshold_ = 0.f; } bool needNormals() { return true; } void setNormals(const pcl::PointCloud<pcl::Normal>::Ptr &normals) { normals_ = normals; } void setThreshold(float t) { threshold_ = t; } void setResponseMethod( typename pcl::HarrisKeypoint3D<PointInT, pcl::PointXYZI>::ResponseMethod m) { m_ = m; } void setNonMaximaRadius(float r) { non_max_radius_ = r; } void compute(PointInTPtr &keypoints) { keypoints.reset(new pcl::PointCloud<PointInT>); if (normals_ == 0 || (normals_->points.size() != input_->points.size())) PCL_WARN("HarrisKeypointExtractor -- Normals are not valid\n"); typename pcl::PointCloud<pcl::PointXYZI>::Ptr intensity_keypoints( new pcl::PointCloud<pcl::PointXYZI>); pcl::HarrisKeypoint3D<PointInT, pcl::PointXYZI> harris; harris.setNonMaxSupression(true); harris.setRefine(false); harris.setThreshold(threshold_); harris.setInputCloud(input_); harris.setNormals(normals_); harris.setRadius(non_max_radius_); harris.setRadiusSearch(non_max_radius_); harris.setMethod(m_); harris.compute(*intensity_keypoints); pcl::copyPointCloud(*intensity_keypoints, 
*keypoints); } }; template <typename PointInT, typename NormalT = pcl::Normal> class SUSANKeypointExtractor : public KeypointExtractor<PointInT> { pcl::PointCloud<pcl::Normal>::Ptr normals_; typedef typename pcl::PointCloud<PointInT>::Ptr PointInTPtr; using KeypointExtractor<PointInT>::input_; using KeypointExtractor<PointInT>::radius_; public: SUSANKeypointExtractor() {} bool needNormals() { return true; } void setNormals(const pcl::PointCloud<pcl::Normal>::Ptr &normals) { normals_ = normals; } void compute(PointInTPtr &keypoints) { keypoints.reset(new pcl::PointCloud<PointInT>); if (normals_ == 0 || (normals_->points.size() != input_->points.size())) PCL_WARN("SUSANKeypointExtractor -- Normals are not valid\n"); typename pcl::PointCloud<pcl::PointXYZI>::Ptr intensity_keypoints( new pcl::PointCloud<pcl::PointXYZI>); pcl::SUSAN<PointInT, pcl::PointXYZI> susan; susan.setNonMaxSupression(true); susan.setInputCloud(input_); susan.setNormals(normals_); susan.setRadius(0.01f); susan.setRadiusSearch(0.01f); susan.compute(*intensity_keypoints); pcl::copyPointCloud(*intensity_keypoints, *keypoints); } }; template <typename PointInT, typename FeatureT> class LocalEstimator { protected: typedef typename pcl::PointCloud<PointInT>::Ptr PointInTPtr; typedef typename pcl::PointCloud<FeatureT>::Ptr FeatureTPtr; typename boost::shared_ptr< PreProcessorAndNormalEstimator<PointInT, pcl::Normal>> normal_estimator_; // typename boost::shared_ptr<UniformSampling<PointInT> > // keypoint_extractor_; std::vector<typename boost::shared_ptr<KeypointExtractor<PointInT>>> keypoint_extractor_; // this should be a vector float support_radius_; // bool filter_planar_; bool adaptative_MLS_; boost::shared_ptr<std::vector<std::vector<int>>> neighborhood_indices_; boost::shared_ptr<std::vector<std::vector<float>>> neighborhood_dist_; // std::vector< std::vector<int> > neighborhood_indices_; // std::vector< std::vector<float> > neighborhood_dist_; void computeKeypoints(PointInTPtr &cloud, PointInTPtr 
&keypoints, pcl::PointCloud<pcl::Normal>::Ptr &normals) { keypoints.reset(new pcl::PointCloud<PointInT>); for (size_t i = 0; i < keypoint_extractor_.size(); i++) { keypoint_extractor_[i]->setInputCloud(cloud); if (keypoint_extractor_[i]->needNormals()) keypoint_extractor_[i]->setNormals(normals); keypoint_extractor_[i]->setSupportRadius(support_radius_); PointInTPtr detected_keypoints; keypoint_extractor_[i]->compute(detected_keypoints); *keypoints += *detected_keypoints; } } public: LocalEstimator() { adaptative_MLS_ = false; keypoint_extractor_.clear(); } void setAdaptativeMLS(bool b) { adaptative_MLS_ = b; } virtual bool estimate(PointInTPtr &in, PointInTPtr &processed, PointInTPtr &keypoints, FeatureTPtr &signatures) = 0; void setNormalEstimator( boost::shared_ptr<PreProcessorAndNormalEstimator<PointInT, pcl::Normal>> &ne) { normal_estimator_ = ne; } /** * \brief Right now only uniformSampling keypoint extractor is allowed */ void addKeypointExtractor(boost::shared_ptr<KeypointExtractor<PointInT>> &ke) { keypoint_extractor_.push_back(ke); } void setKeypointExtractors( std::vector<typename boost::shared_ptr<KeypointExtractor<PointInT>>> &ke) { keypoint_extractor_ = ke; } void setSupportRadius(float r) { support_radius_ = r; } /*void setFilterPlanar (bool b) { filter_planar_ = b; } void filterPlanar (PointInTPtr & input, KeypointCloud & keypoints_cloud) { pcl::PointCloud<int> filtered_keypoints; //create a search object typename pcl::search::Search<PointInT>::Ptr tree; if (input->isOrganized ()) tree.reset (new pcl::search::OrganizedNeighbor<PointInT> ()); else tree.reset (new pcl::search::KdTree<PointInT> (false)); tree->setInputCloud (input); //std::vector<int> nn_indices; //std::vector<float> nn_distances; neighborhood_indices_.reset (new std::vector<std::vector<int> >); neighborhood_indices_->resize (keypoints_cloud.points.size ()); neighborhood_dist_.reset (new std::vector<std::vector<float> >); neighborhood_dist_->resize (keypoints_cloud.points.size ()); 
filtered_keypoints.points.resize (keypoints_cloud.points.size ()); int good = 0; //#pragma omp parallel for num_threads(8) for (size_t i = 0; i < keypoints_cloud.points.size (); i++) { if (tree->radiusSearch (keypoints_cloud[i], support_radius_, (*neighborhood_indices_)[good], (*neighborhood_dist_)[good])) { EIGEN_ALIGN16 Eigen::Matrix3f covariance_matrix; Eigen::Vector4f xyz_centroid; EIGEN_ALIGN16 Eigen::Vector3f eigenValues; EIGEN_ALIGN16 Eigen::Matrix3f eigenVectors; //compute planarity of the region computeMeanAndCovarianceMatrix (*input, (*neighborhood_indices_)[good], covariance_matrix, xyz_centroid); pcl::eigen33 (covariance_matrix, eigenVectors, eigenValues); float eigsum = eigenValues.sum (); if (!pcl_isfinite(eigsum)) { PCL_ERROR("Eigen sum is not finite\n"); } if ((fabs (eigenValues[0] - eigenValues[1]) < 1.5e-4) || (eigsum != 0 && fabs (eigenValues[0] / eigsum) > 1.e-2)) { //region is not planar, add to filtered keypoint keypoints_cloud.points[good] = keypoints_cloud.points[i]; good++; } } } neighborhood_indices_->resize (good); neighborhood_dist_->resize (good); keypoints_cloud.points.resize (good); }*/ }; } // namespace rec_3d_framework } // namespace pcl #endif /* REC_FRAMEWORK_LOCAL_ESTIMATOR_H_ */
atomic-14.c
/* PR middle-end/45423 */
/* { dg-do compile } */
/* { dg-options "-fopenmp -Wno-deprecated" } */
/* { dg-skip-if "invalid in C++17" { c++17 } } */

/* Compile-only test: '#pragma omp atomic' must accept pre/post
   increment and decrement of an lvalue obtained by dereferencing the
   result of a function call.  The bool/_Bool decrement cases are
   guarded with #ifndef __cplusplus because the code compiles them only
   for C (bool ++/-- is deprecated in C++, hence -Wno-deprecated, and
   the whole test is skipped for C++17 where bool ++ is invalid).  */

#ifdef __cplusplus
bool *baz ();
#else
_Bool *baz ();
#endif
int *bar ();

int
foo (void)
{
  #pragma omp barrier
  #pragma omp atomic
  (*bar ())++;
  #pragma omp barrier
  #pragma omp atomic
  ++(*bar ());
  #pragma omp barrier
  #pragma omp atomic
  (*bar ())--;
  #pragma omp barrier
  #pragma omp atomic
  --(*bar ());
  #pragma omp barrier
  #pragma omp atomic
  (*baz ())++;
  #pragma omp barrier
  #pragma omp atomic
  ++(*baz ());
#ifndef __cplusplus
  #pragma omp barrier
  #pragma omp atomic
  (*baz ())--;
  #pragma omp barrier
  #pragma omp atomic
  --(*baz ());
  #pragma omp barrier
#endif
  return 0;
}
linemax-cc.h
/**
 * @author Aaron Tikuisis, modified by Matthew Arnold
 * @file linemax-cc.h Implementation of line max
 *
 * $Id$
 *
 * K-Best Rescoring Module
 *
 * Technologies langagieres interactives / Interactive Language Technologies
 * Inst. de technologie de l'information / Institute for Information Technology
 * Conseil national de recherches Canada / National Research Council Canada
 * Copyright 2005, Sa Majeste la Reine du Chef du Canada /
 * Copyright 2005, Her Majesty in Right of Canada
 */

#include "rescoring_general.h"
#include "bleu.h"
#include <algorithm>
#include <cassert>
#include "portage_defs.h"

namespace Portage
{
   /*Comment on fix:
     The bug fixed was in findSentenceIntervals.  In some occasions it was
     returning values out of range for gammas (inf, nan) so a fix was added in
     to prevent that and the second problem was that on some occasions it
     picked values that were already chosen (ie. ie said that the "line" for
     sentence 2 crossed the target line more than once, which is impossible,
     so a fix was added in to prevent a k value from being selected more than
     once, and not just prevent the selection of the previous k-value.
   */

   /**
    * Check if -inf < x < inf => x is finite
    * @param x operand
    * @return Returns true if x is finite
    */
   inline bool finite(double x)
   {
      return (x != INFINITY && x != -INFINITY);
   }

   /**
    * Determine, for one source sentence, the gamma values at which the argmax
    * hypothesis changes along the search line p + gamma * dir, together with
    * the corresponding changes in score statistics.
    * @param numchanges  [out] number of change points found
    * @param gamma       [out] change points, ascending (caller-allocated)
    * @param dBLEU       [out] per-change delta of the score statistics
    * @param curScore    [in/out] accumulates stats of the leftmost interval
    *                    (shared across threads; updated under an omp critical)
    * @param myHeappoint [out] heap entry for the first change point, or NULL
    *                    when there are no change points
    * @param s           source-sentence index
    * @param p           starting parameter vector
    * @param dir         search direction
    * @param H           K x M matrix of feature values for the K hypotheses
    * @param scoreStats  per-hypothesis score statistics
    */
   template <class ScoreStats>
   void LineMax<ScoreStats>::findSentenceIntervals(Uint &numchanges,
         double *& gamma, ScoreStats *& dBLEU, ScoreStats & curScore,
         linemaxpt *& myHeappoint, int s, const uVector& p, const uVector& dir,
         const uMatrix& H, const vector<ScoreStats>& scoreStats)
   // If a heap-point is not added, myHeappoint will be NULL.
   // All arrays indexed by s are replaced here by their s-th entry, ie..
   //   numChanges = numChanges[s],
   //   gamma = gamma[s],
   //   dBLEU = dBLEU[s],
   //   H = H[s],
   //   scoreStats = scoreStats[s]
   {
      using namespace boost::numeric;

      /* For the given source sentence (f_s):
         The estimation function, \hat{e}(f_s, \lambda) is defined as follows:
         \hat{e}(f_s, \lambda) = argmax_{0 <= k < K} ( \sum_{m=1}^M \lambda_m
         h_m(e_k, f_s) ) = max_index (H * \lambda)
         (H is the K x M matrix whose (k,m)-th entry is h_m(e_k, f_s).)
         We consider the values of \hat{e} as we vary \lambda along the line:
         p + \gamma * dir.  As a function of \gamma (abusing notation), we have
         \hat{e}(\gamma) = max_index (H * p + \gamma * H * dir).
         Denote A = \gamma * H * dir, B = H * p.  Let L(\gamma) = \gamma * A + B,
         and let l_k(\gamma) be the k-th entry in L(\gamma).  Each l_k(\gamma) is
         clearly a line, called the k-th line.  Clearly, the value of
         \hat{e}(\gamma) can only change at points where two different lines,
         l_i(\gamma) and l_j(\gamma) intersect.
         We do the following to completely determine the function \hat{e}(\gamma):
         i) Determine some point \gamma_0 such that no lines intersect for any
            \gamma <= \gamma_0
         ii) Determine oldk = max_index L(\gamma_0).  Let oldgamma = \gamma_0.
         iii) Determine
            newgamma = min (\gamma coordinate of intersection between l_k and
                            l_oldk),
            newk = argmin (\gamma coordinate of intersection between l_k and
                           l_oldk),
            where min, argmin range over all k which are different from newk and
            for which the intersection happens after oldgamma.  If there is no
            such newgamma, go to step (v).
         iv) It has been determined that \hat{e} takes the value oldk on
            (oldgamma, newgamma), (or (-\infty, newgamma) if oldgamma =
            \gamma_0).  Set oldk = newk, oldgamma = newgamma, and go to step (iii)
         v) It has been determined that \hat{e} takes the value oldk on
            (oldgamma, \infty), (or (-\infty, \infty) if oldgamma = \gamma_0).
            \hat{e} has been determined piecewise, so done.
         We store each newgamma (in ascending order) in gamma (an array), and the
         number of newgamma's in numchanges.
         Since there are K different lines, we know a priori that there are at
         most (K-1) newgamma's to store.
         In practice here, we can forget the specific values of \hat{e} but
         remember their contribution to the BLEU score.  Thus, we tally up the
         total statistics for the BLEU score at \gamma_0 in curScore, and for the
         change in \hat{e} at gamma[i], we store the change in BLEU statistics in
         dBLEU[i].
       */

      assert(H.size1() == scoreStats.size());
      const Uint K(H.size1());
      const Uint M(H.size2());

      const uVector A(ublas::prec_prod(H, dir));
      const uVector B(ublas::prec_prod(H, p));
      uVector C(K);
      uVector pt(M);

      // NOTE(review): C-style VLAs (sortA, found) are a compiler extension in
      // C++ — consider std::vector if portability becomes an issue.
      double sortA[K];
      numchanges = 0;

      bool found[K]; //array to track sentences we've seen
      fill(found, found+K, false);

      // Find all the cusps along the curve max_{k} (A[k]*x + B[k])
      // First, find an x-coordinate that occurs before any cusp
      for (Uint k(0); k<K; ++k) {
         sortA[k] = A(k);
         if (isnan(sortA[k])) sortA[k] = -INFINITY;
      } // for
      sort(sortA, sortA + K);

      // Smallest non-zero gap between consecutive finite slopes.
      double minDA = INFINITY;
      for (Uint k(0); k<K-1; ++k) {
         if (finite(sortA[k]) && finite(sortA[k+1]) && sortA[k+1] != sortA[k]) {
            minDA = min(minDA, sortA[k+1] - sortA[k]);
         } // if
      } // for

      double oldgamma(0.0f);
      if (minDA == INFINITY) {
         // All slopes equal (or non-finite): no intersections anywhere.
         oldgamma = INFINITY;
      } else {
         double minB = INFINITY;
         double maxB = -INFINITY;
         for (Uint k(0); k<K; ++k) {
            const double x(B(k));
            if (finite(x)) {
               minB = min(minB, x);
               maxB = max(maxB, x);
            } // if
         } // for
         oldgamma = (minB - maxB) / minDA;
         // oldgamma = min_{i1,j1,i2,j2} (B[i1] - B[j1]) / (A[j2] - A[i2])
         //         <= min_{i,j} (B[i] - B[j]) / (A[j] - A[i])
         //         <= min_{i,j} (B[i] - B[j]) / (A[j] - A[i])
         // oldgamma - 1 would be our \gamma_0 (for step (i)), if we don't
         // have any infinite values in A.
      }

      // If any entry in A is +/-INFINITY, then the "line" for that candidate
      // sentence will take values from {+INFINITY, -INFINITY, NaN}.  When
      // the "line" changes from one of these values to another, consider
      // that to be an intersection point of that "line" with every other
      // line.
      // The value can only change at gamma where the vector p + gamma * dir
      // contains an entry of 0.
      // Here, we find the minimum gamma such that p + gamma * dir has a zero
      // entry, and the final \gamma_0 will be less than the minimum of this
      // and the previous oldgamma.

      // pt_m = -p_m / dir_m, for all m
      // ie. pt_m is the gamma s.t. p + gamma * dir = 0
      pt = p;
      for (Uint m(0); m<M; ++m)
         pt(m) /= dir(m);
      pt *= -1.0f;

      // In case any dir_m = 0, for some m, replace all non-finite values in
      // pt with 0
      for (Uint m(0); m<M; ++m) {
         if (!finite(pt(m))) {
            pt(m) = 0;
         }
      }

      // Subtract one here so that it's strictly less than any intersection point.
      oldgamma = min(oldgamma, *std::min_element(pt.begin(), pt.end())) - 1;

      // Determine argmax_{k} a[k]*oldgamma + b[k]:
      // C = B + oldgamma * A = H * (p + oldgamma * dir)
      // Calculate pt = p + oldgamma * dir, then calculate C
      // This should avoid inconsistency with infinity, which pops up with
      // the other way of computing C.
      pt = oldgamma * dir + p;
      C = ublas::prec_prod(H, pt);

      // Initially, oldk = index of maximum element in C
      Uint oldk(my_vector_max_index(C));
#pragma omp critical (findSentenceIntervals_curBLEU)
      {
         // curScore is shared across the parallel-for in operator().
         curScore += scoreStats[oldk];
      } // ends omp critical section
      numchanges = 0;
      while (true) {
         // Find the line whose intersection with the oldk-th line occurs next
         double newgamma(oldgamma);
         int newk(-1);
         if (A(oldk) == INFINITY) {
            // Not going to do any better as gamma gets bigger
            break;
         } else if (A(oldk) == -INFINITY || isnan(A(oldk))) {
            newgamma = INFINITY;
            // Find if/when this "line" changes from +INFINITY to -INFINITY or NaN
            for (Uint m(0); m<M; ++m) {
               if ((H(oldk, m) == INFINITY && dir(m) < 0) ||
                   (H(oldk, m) == -INFINITY && dir(m) > 0)) {
                  newgamma = min(newgamma, -p(m) / dir(m));
                  // Find gamma s.t. p_m + dir_m * gamma = 0
                  // The first point where this occurs (under the
                  // conditions in that if statement above) should be
                  // where the "line" becomes -INFINITY
               }
            }
            if (newgamma <= oldgamma || newgamma == INFINITY) {
               break;
            } else {
               // Find maximum at curgamma.
               pt = newgamma * dir + p;
               C = ublas::prec_prod(H, pt);
               newk = my_vector_max_index(C);
               while (newk != -1 && found[newk]) {
                  //while we've seen this newk already - catch to make
                  //sure we select a good value
                  C(newk) = -INFINITY;
                  newk = my_vector_max_index(C);
                  if (C(newk) == -INFINITY && found[newk])
                     newk = -1;
               }
               //newk = -1 || found[newk] == false
            } // if
         } else {
            for (Uint k(0); k<K; ++k) {
               //check to see if we haven't seen yet, not just if he
               //wasn't the last one picked
               if (!found[k]) {
                  double curgamma(0.0f);
                  if (A(k) == INFINITY || isnan(A(k))) {
                     curgamma = -INFINITY;
                     // Find where this "line" changes from -INFINITY or
                     // NaN to +INFINITY
                     for (Uint m(0); m<M; ++m) {
                        if ((H(k, m) == INFINITY && dir(m) > 0) ||
                            (H(k, m) == -INFINITY && dir(m) < 0)) {
                           curgamma = max(curgamma, -p(m) / dir(m));
                           // Find gamma s.t. p_m + dir_m * gamma = 0
                           // The last point where this occurs (under
                           // the conditions in that if statement
                           // above) should be where the "line"
                           // becomes +INFINITY
                        } // if
                     } // for
                  } else {
                     curgamma = (B(k) - B(oldk)) / (A(oldk) - A(k));
                     // curgamma = (B[k] - B[oldk]) / (A[k] - A[oldk])
                     // This is the x component in the intersection of
                     // the lines:
                     // y = A[k] * x + B[k] , y = A[oldk] * x + B[oldk]
                  } // if
                  if (curgamma > oldgamma && (newk == -1 || curgamma < newgamma)) {
                     newgamma = curgamma;
                     newk = k;
                  } // if
               } // if
            } // for
         } // if

         //gamma unacceptable value (-inf, inf, nan) or no new intersection found
         if (newk == -1 || !finite(newgamma)) {
            // no new intersections
            break;
         } // if

         // Remember stuff for this intersection
         assert(numchanges < K);
         gamma[numchanges] = newgamma;
         dBLEU[numchanges] = scoreStats[newk] - scoreStats[oldk];
         numchanges++;
         oldk = newk;
         oldgamma = newgamma;
         found[newk] = true;
      } // while

      if (numchanges > 0) {
         myHeappoint = new linemaxpt();
         myHeappoint->gamma = gamma[0];
         myHeappoint->s = s;
         myHeappoint->i = 0;
      } else {
         myHeappoint = NULL;
      } // if
   } // ends LineMax<ScoreStats>::findSentenceIntervals

   ////////////////////////////////////////
   // LINEMAX

   /**
    * Maximize the score along the line p + gamma * dir over all source
    * sentences, then move p to the best point found (p += maxgamma * dir).
    * @param p              [in/out] starting point; updated to the optimum
    * @param dir            [in/out] search direction; scaled by maxgamma
    * @param record_history when true, (gamma, score) pairs are appended to
    *                       the history member
    */
   template <class ScoreStats>
   void LineMax<ScoreStats>::operator()(uVector& p, uVector& dir, bool record_history)
   {
      // If the best range found is (-\infty, t) or (t, \infty), we use
      // t - SMALL or t + SMALL respectively as the final gamma.
      const double SMALL(1.0f);
      Uint numchanges[S];
      this->record_history = record_history;
      if (record_history) history.clear();
      ScoreStats curScoreStats; // Accumulate the current BLEU statistics.
      // Store the linemaxpt values for the least gamma in each partition;
      // will subsequently become a heap.
      linemaxpt* heappoints[S];
      int s;
#pragma omp parallel for private(s)
      for (s=0; s<int(S); ++s) {
         findSentenceIntervals(numchanges[s],
               gammaWorkSpace[s],
               scoreWorkSpace[s],
               curScoreStats,     // Needs a one time lock
               heappoints[s],     // clean up null pointers
               s,                 // const
               p,                 // const
               dir,               // const
               vH[s],             // const
               allScoreStats[s]); // const
      } // for

      // Remove the empty heap points and recalculate the heap size
      linemaxpt** last_heappoint = remove_if(heappoints, heappoints+S, linemaxpt::isNull);
      int heapsize(last_heappoint - heappoints); // Number of members in heappoints

      /* Using the previous computations, we now determine the intervals on
         which the BLEU score is constant.  Essentially, we order the
         gamma[s][i]'s from least to greatest:
         gamma[s_1][i_1] <= gamma[s_2][i_2] <= .. <= gamma[s_N][i_N]
         and compute the BLEU score on the intervals (-\infty,
         gamma[s_1][i_1]), (gamma[s_N][i_N], \infty), and (gamma[s_n][i_n],
         gamma[s_{n+1}][i_{n+1}]) for all n.
         The BLEU stats for (-\infty, gamma[s_1][i_1]) are already stored in
         curScore.  For each (s,i), we have recorded (in scoreWorkSpace) the
         change in the BLEU stats from the interval just before gamma[s][i] to
         the interval just after gamma[s][i].  By iterating through the (s,
         i)'s in order by gamma[s][i] (ie. iterating through the (s_n, i_n)'s
         in order by n), the BLEU stats for each interval are computed by
         updating the stats for the previous interval.
         In practice here, we already have gamma[s][0] <= gamma[s][1] <= .. <=
         gamma[s][numchanges[s] - 1] for each s, and this can be used to order
         the gamma[s][i]'s more efficiently (similar to mergesort).  We use a
         heap containing triples (gamma, s, i), with the ordering that puts the
         triple with the least value for gamma at the root of the heap.  The
         following outlines how we iterate through the (s, i) in order:
         i) Initially, produce a heap containing (gamma[s][0], s, 0) for each
            s.  (The heap has at it's root the triple (gamma, s, i) for which
            gamma is least.)
            (In the special case that numchanges[s] = 0 for some s, we
            obviously cannot have (gamma[s][0], s, 0) to the heap since there
            is no gamma[s][0].)
         ii) At each iteration, remove the top, (gamma, s, i), of the heap (the
            next lowest gamma) and if i+1 < numchanges[s], add (gamma[s][i+1],
            s, i+1) to the heap.
         iii) Repeat (ii) until the heap is empty.
         Our heap is contained in the array heappoints.
       */

      double maxscore(0.0f); // Will hold the best BLEU score
      double maxgamma(0.0f); // Will hold the gamma which produces the best BLEU score
      if (heapsize == 0) {
         // Special situation: no matter what gamma is, the BLEU score is
         // the same.
         maxscore = curScoreStats.score();
         maxgamma = 0; // TODO: is 0 appropriate?  I think so
         // cerr << "score at \\gamma = 0: " << curScoreStats.score() << endl;
      } else {
         // Create the heap
         make_heap(heappoints, heappoints + heapsize, linemaxpt::greater);

         maxscore = curScoreStats.score();
         maxgamma = heappoints[0]->gamma - SMALL;
         double oldgamma(0.0f); // Holds the left endpoint of the interval whose stats are in curScoreStats
         while (true) {
            // Put max element at end of heap
            pop_heap(heappoints, heappoints + heapsize, linemaxpt::greater);

            // Update BLEU statistics
            curScoreStats += scoreWorkSpace[heappoints[heapsize - 1]->s][heappoints[heapsize - 1]->i];

            // Save left endpoint of the new interval
            oldgamma = heappoints[heapsize - 1]->gamma;

            heappoints[heapsize - 1]->i++;
            // Determine whether there is a new point to add to the heap
            // (same s, but i increases)
            if (heappoints[heapsize - 1]->i < numchanges[heappoints[heapsize - 1]->s]) {
               // Add point (gamma[s][i], s, i) to the heap
               heappoints[heapsize - 1]->gamma =
                  gammaWorkSpace[heappoints[heapsize - 1]->s][heappoints[heapsize - 1]->i];
               push_heap(heappoints, heappoints + heapsize, linemaxpt::greater);
            } else {
               // Decrease heap size
               delete heappoints[heapsize - 1];
               --heapsize;
            } // if

            // Exit loop if there are no new points (heap is empty)
            if (heapsize == 0) {
               break;
            } // for

            // Determine the BLEU score for the interval (oldgamma, heappoints[0]->gamma).
            const double curscore = curScoreStats.score();

            // Determine if this is the new best score AND if the interval is non-empty
            if (curscore > maxscore && heappoints[0]->gamma != oldgamma) {
               // New best score
               maxscore = curscore;
               // Use the midpoint of this range.
               maxgamma = (heappoints[0]->gamma + oldgamma) / 2;
            }
            if (record_history)
               history.push_back(make_pair((heappoints[0]->gamma + oldgamma) / 2, curscore));
         }

         // Consider final score
         const double curscore = curScoreStats.score();
         if (curscore > maxscore) {
            // New best score
            maxscore = curscore;
            maxgamma = oldgamma + SMALL;
         }
      }

      // Return values appropriately.
      dir *= maxgamma;
      // dir = (maxgamma - 1) * dir + dir = maxgamma * dir
      p += dir;
   } // ends LineMax<ScoreStats>::operator()
} // ends namespace Portage
c-omp.c
/* This file contains routines to construct GNU OpenMP constructs,
   called from parsing in the C and C++ front ends.
   Copyright (C) 2005, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>,
		  Diego Novillo <dnovillo@redhat.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "function.h"
#include "c-common.h"
#include "toplev.h"
#include "gimple.h"
#include "bitmap.h"
#include "langhooks.h"


/* Complete a #pragma omp master construct.  STMT is the structured-block
   that follows the pragma.  LOC is the location of the #pragma.  */

tree
c_finish_omp_master (location_t loc, tree stmt)
{
  tree t = add_stmt (build1 (OMP_MASTER, void_type_node, stmt));
  SET_EXPR_LOCATION (t, loc);
  return t;
}

/* Complete a #pragma omp critical construct.  STMT is the structured-block
   that follows the pragma, NAME is the identifier in the pragma, or null
   if it was omitted.  LOC is the location of the #pragma.  */

tree
c_finish_omp_critical (location_t loc, tree body, tree name)
{
  tree stmt = make_node (OMP_CRITICAL);
  TREE_TYPE (stmt) = void_type_node;
  OMP_CRITICAL_BODY (stmt) = body;
  OMP_CRITICAL_NAME (stmt) = name;
  SET_EXPR_LOCATION (stmt, loc);
  return add_stmt (stmt);
}

/* Complete a #pragma omp ordered construct.  STMT is the structured-block
   that follows the pragma.  LOC is the location of the #pragma.  */

tree
c_finish_omp_ordered (location_t loc, tree stmt)
{
  tree t = build1 (OMP_ORDERED, void_type_node, stmt);
  SET_EXPR_LOCATION (t, loc);
  return add_stmt (t);
}


/* Complete a #pragma omp barrier construct.  LOC is the location of
   the #pragma.  Emits a call to the GOMP runtime barrier function.  */

void
c_finish_omp_barrier (location_t loc)
{
  tree x;

  x = built_in_decls[BUILT_IN_GOMP_BARRIER];
  x = build_call_expr_loc (loc, x, 0);
  add_stmt (x);
}


/* Complete a #pragma omp taskwait construct.  LOC is the location of the
   pragma.  Emits a call to the GOMP runtime taskwait function.  */

void
c_finish_omp_taskwait (location_t loc)
{
  tree x;

  x = built_in_decls[BUILT_IN_GOMP_TASKWAIT];
  x = build_call_expr_loc (loc, x, 0);
  add_stmt (x);
}


/* Complete a #pragma omp atomic construct.  The expression to be
   implemented atomically is LHS code= RHS.  LOC is the location of the
   atomic statement.  The value returned is either error_mark_node (if
   the construct was erroneous) or an OMP_ATOMIC node which should be
   added to the current statement tree with add_stmt.  */

tree
c_finish_omp_atomic (location_t loc, enum tree_code code, tree lhs, tree rhs)
{
  tree x, type, addr;

  if (lhs == error_mark_node || rhs == error_mark_node)
    return error_mark_node;

  /* ??? According to one reading of the OpenMP spec, complex type are
     supported, but there are no atomic stores for any architecture.
     But at least icc 9.0 doesn't support complex types here either.
     And lets not even talk about vector types...  */
  type = TREE_TYPE (lhs);
  if (!INTEGRAL_TYPE_P (type)
      && !POINTER_TYPE_P (type)
      && !SCALAR_FLOAT_TYPE_P (type))
    {
      error_at (loc, "invalid expression type for %<#pragma omp atomic%>");
      return error_mark_node;
    }

  /* ??? Validate that rhs does not overlap lhs.  */

  /* Take and save the address of the lhs.  From then on we'll reference it
     via indirection.  */
  addr = build_unary_op (loc, ADDR_EXPR, lhs, 0);
  if (addr == error_mark_node)
    return error_mark_node;
  addr = save_expr (addr);
  if (TREE_CODE (addr) != SAVE_EXPR
      && (TREE_CODE (addr) != ADDR_EXPR
	  || TREE_CODE (TREE_OPERAND (addr, 0)) != VAR_DECL))
    {
      /* Make sure LHS is simple enough so that goa_lhs_expr_p can recognize
	 it even after unsharing function body.  */
      tree var = create_tmp_var_raw (TREE_TYPE (addr), NULL);
      DECL_CONTEXT (var) = current_function_decl;
      addr = build4 (TARGET_EXPR, TREE_TYPE (addr), var, addr, NULL, NULL);
    }
  lhs = build_indirect_ref (loc, addr, RO_NULL);

  /* There are lots of warnings, errors, and conversions that need to happen
     in the course of interpreting a statement.  Use the normal mechanisms
     to do this, and then take it apart again.  */
  x = build_modify_expr (input_location, lhs, NULL_TREE, code,
			 input_location, rhs, NULL_TREE);
  if (x == error_mark_node)
    return error_mark_node;
  gcc_assert (TREE_CODE (x) == MODIFY_EXPR);
  rhs = TREE_OPERAND (x, 1);

  /* Punt the actual generation of atomic operations to common code.  */
  x = build2 (OMP_ATOMIC, void_type_node, addr, rhs);
  SET_EXPR_LOCATION (x, loc);
  return x;
}


/* Complete a #pragma omp flush construct.  We don't do anything with the
   variable list that the syntax allows.  LOC is the location of the
   #pragma.  */

void
c_finish_omp_flush (location_t loc)
{
  tree x;

  x = built_in_decls[BUILT_IN_SYNCHRONIZE];
  x = build_call_expr_loc (loc, x, 0);
  add_stmt (x);
}


/* Check and canonicalize #pragma omp for increment expression.
   Helper function for c_finish_omp_for.  EXP is the candidate increment
   expression, DECL the iteration variable; returns the canonicalized
   increment amount, or error_mark_node when EXP is not of the required
   DECL +/- invariant form.  */

static tree
check_omp_for_incr_expr (location_t loc, tree exp, tree decl)
{
  tree t;

  if (!INTEGRAL_TYPE_P (TREE_TYPE (exp))
      || TYPE_PRECISION (TREE_TYPE (exp)) < TYPE_PRECISION (TREE_TYPE (decl)))
    return error_mark_node;

  if (exp == decl)
    return build_int_cst (TREE_TYPE (exp), 0);

  switch (TREE_CODE (exp))
    {
    CASE_CONVERT:
      t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl);
      if (t != error_mark_node)
        return fold_convert_loc (loc, TREE_TYPE (exp), t);
      break;
    case MINUS_EXPR:
      t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl);
      if (t != error_mark_node)
        return fold_build2_loc (loc, MINUS_EXPR,
				TREE_TYPE (exp), t, TREE_OPERAND (exp, 1));
      break;
    case PLUS_EXPR:
      t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl);
      if (t != error_mark_node)
        return fold_build2_loc (loc, PLUS_EXPR,
				TREE_TYPE (exp), t, TREE_OPERAND (exp, 1));
      t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 1), decl);
      if (t != error_mark_node)
        return fold_build2_loc (loc, PLUS_EXPR,
				TREE_TYPE (exp), TREE_OPERAND (exp, 0), t);
      break;
    default:
      break;
    }

  return error_mark_node;
}

/* Validate and emit code for the OpenMP directive #pragma omp for.
   DECLV is a vector of iteration variables, for each collapsed loop.
   INITV, CONDV and INCRV are vectors containing initialization
   expressions, controlling predicates and increment expressions.
   BODY is the body of the loop and PRE_BODY statements that go before
   the loop.
*/ tree c_finish_omp_for (location_t locus, tree declv, tree initv, tree condv, tree incrv, tree body, tree pre_body) { location_t elocus; bool fail = false; int i; gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (initv)); gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (condv)); gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (incrv)); for (i = 0; i < TREE_VEC_LENGTH (declv); i++) { tree decl = TREE_VEC_ELT (declv, i); tree init = TREE_VEC_ELT (initv, i); tree cond = TREE_VEC_ELT (condv, i); tree incr = TREE_VEC_ELT (incrv, i); elocus = locus; if (EXPR_HAS_LOCATION (init)) elocus = EXPR_LOCATION (init); /* Validate the iteration variable. */ if (!INTEGRAL_TYPE_P (TREE_TYPE (decl)) && TREE_CODE (TREE_TYPE (decl)) != POINTER_TYPE) { error_at (elocus, "invalid type for iteration variable %qE", decl); fail = true; } /* In the case of "for (int i = 0...)", init will be a decl. It should have a DECL_INITIAL that we can turn into an assignment. */ if (init == decl) { elocus = DECL_SOURCE_LOCATION (decl); init = DECL_INITIAL (decl); if (init == NULL) { error_at (elocus, "%qE is not initialized", decl); init = integer_zero_node; fail = true; } init = build_modify_expr (elocus, decl, NULL_TREE, NOP_EXPR, /* FIXME diagnostics: This should be the location of the INIT. */ elocus, init, NULL_TREE); } gcc_assert (TREE_CODE (init) == MODIFY_EXPR); gcc_assert (TREE_OPERAND (init, 0) == decl); if (cond == NULL_TREE) { error_at (elocus, "missing controlling predicate"); fail = true; } else { bool cond_ok = false; if (EXPR_HAS_LOCATION (cond)) elocus = EXPR_LOCATION (cond); if (TREE_CODE (cond) == LT_EXPR || TREE_CODE (cond) == LE_EXPR || TREE_CODE (cond) == GT_EXPR || TREE_CODE (cond) == GE_EXPR || TREE_CODE (cond) == NE_EXPR || TREE_CODE (cond) == EQ_EXPR) { tree op0 = TREE_OPERAND (cond, 0); tree op1 = TREE_OPERAND (cond, 1); /* 2.5.1. The comparison in the condition is computed in the type of DECL, otherwise the behavior is undefined. 
For example: long n; int i; i < n; according to ISO will be evaluated as: (long)i < n; We want to force: i < (int)n; */ if (TREE_CODE (op0) == NOP_EXPR && decl == TREE_OPERAND (op0, 0)) { TREE_OPERAND (cond, 0) = TREE_OPERAND (op0, 0); TREE_OPERAND (cond, 1) = fold_build1_loc (elocus, NOP_EXPR, TREE_TYPE (decl), TREE_OPERAND (cond, 1)); } else if (TREE_CODE (op1) == NOP_EXPR && decl == TREE_OPERAND (op1, 0)) { TREE_OPERAND (cond, 1) = TREE_OPERAND (op1, 0); TREE_OPERAND (cond, 0) = fold_build1_loc (elocus, NOP_EXPR, TREE_TYPE (decl), TREE_OPERAND (cond, 0)); } if (decl == TREE_OPERAND (cond, 0)) cond_ok = true; else if (decl == TREE_OPERAND (cond, 1)) { TREE_SET_CODE (cond, swap_tree_comparison (TREE_CODE (cond))); TREE_OPERAND (cond, 1) = TREE_OPERAND (cond, 0); TREE_OPERAND (cond, 0) = decl; cond_ok = true; } if (TREE_CODE (cond) == NE_EXPR || TREE_CODE (cond) == EQ_EXPR) { if (!INTEGRAL_TYPE_P (TREE_TYPE (decl))) cond_ok = false; else if (operand_equal_p (TREE_OPERAND (cond, 1), TYPE_MIN_VALUE (TREE_TYPE (decl)), 0)) TREE_SET_CODE (cond, TREE_CODE (cond) == NE_EXPR ? GT_EXPR : LE_EXPR); else if (operand_equal_p (TREE_OPERAND (cond, 1), TYPE_MAX_VALUE (TREE_TYPE (decl)), 0)) TREE_SET_CODE (cond, TREE_CODE (cond) == NE_EXPR ? LT_EXPR : GE_EXPR); else cond_ok = false; } } if (!cond_ok) { error_at (elocus, "invalid controlling predicate"); fail = true; } } if (incr == NULL_TREE) { error_at (elocus, "missing increment expression"); fail = true; } else { bool incr_ok = false; if (EXPR_HAS_LOCATION (incr)) elocus = EXPR_LOCATION (incr); /* Check all the valid increment expressions: v++, v--, ++v, --v, v = v + incr, v = incr + v and v = v - incr. 
*/ switch (TREE_CODE (incr)) { case POSTINCREMENT_EXPR: case PREINCREMENT_EXPR: case POSTDECREMENT_EXPR: case PREDECREMENT_EXPR: if (TREE_OPERAND (incr, 0) != decl) break; incr_ok = true; if (POINTER_TYPE_P (TREE_TYPE (decl)) && TREE_OPERAND (incr, 1)) { tree t = fold_convert_loc (elocus, sizetype, TREE_OPERAND (incr, 1)); if (TREE_CODE (incr) == POSTDECREMENT_EXPR || TREE_CODE (incr) == PREDECREMENT_EXPR) t = fold_build1_loc (elocus, NEGATE_EXPR, sizetype, t); t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (decl), decl, t); incr = build2 (MODIFY_EXPR, void_type_node, decl, t); } break; case MODIFY_EXPR: if (TREE_OPERAND (incr, 0) != decl) break; if (TREE_OPERAND (incr, 1) == decl) break; if (TREE_CODE (TREE_OPERAND (incr, 1)) == PLUS_EXPR && (TREE_OPERAND (TREE_OPERAND (incr, 1), 0) == decl || TREE_OPERAND (TREE_OPERAND (incr, 1), 1) == decl)) incr_ok = true; else if ((TREE_CODE (TREE_OPERAND (incr, 1)) == MINUS_EXPR || (TREE_CODE (TREE_OPERAND (incr, 1)) == POINTER_PLUS_EXPR)) && TREE_OPERAND (TREE_OPERAND (incr, 1), 0) == decl) incr_ok = true; else { tree t = check_omp_for_incr_expr (elocus, TREE_OPERAND (incr, 1), decl); if (t != error_mark_node) { incr_ok = true; t = build2 (PLUS_EXPR, TREE_TYPE (decl), decl, t); incr = build2 (MODIFY_EXPR, void_type_node, decl, t); } } break; default: break; } if (!incr_ok) { error_at (elocus, "invalid increment expression"); fail = true; } } TREE_VEC_ELT (initv, i) = init; TREE_VEC_ELT (incrv, i) = incr; } if (fail) return NULL; else { tree t = make_node (OMP_FOR); TREE_TYPE (t) = void_type_node; OMP_FOR_INIT (t) = initv; OMP_FOR_COND (t) = condv; OMP_FOR_INCR (t) = incrv; OMP_FOR_BODY (t) = body; OMP_FOR_PRE_BODY (t) = pre_body; SET_EXPR_LOCATION (t, locus); return add_stmt (t); } } /* Divide CLAUSES into two lists: those that apply to a parallel construct, and those that apply to a work-sharing construct. Place the results in *PAR_CLAUSES and *WS_CLAUSES respectively. In addition, add a nowait clause to the work-sharing list. 
LOC is the location of the OMP_PARALLEL*. */ void c_split_parallel_clauses (location_t loc, tree clauses, tree *par_clauses, tree *ws_clauses) { tree next; *par_clauses = NULL; *ws_clauses = build_omp_clause (loc, OMP_CLAUSE_NOWAIT); for (; clauses ; clauses = next) { next = OMP_CLAUSE_CHAIN (clauses); switch (OMP_CLAUSE_CODE (clauses)) { case OMP_CLAUSE_PRIVATE: case OMP_CLAUSE_SHARED: case OMP_CLAUSE_FIRSTPRIVATE: case OMP_CLAUSE_LASTPRIVATE: case OMP_CLAUSE_REDUCTION: case OMP_CLAUSE_COPYIN: case OMP_CLAUSE_IF: case OMP_CLAUSE_NUM_THREADS: case OMP_CLAUSE_DEFAULT: OMP_CLAUSE_CHAIN (clauses) = *par_clauses; *par_clauses = clauses; break; case OMP_CLAUSE_SCHEDULE: case OMP_CLAUSE_ORDERED: case OMP_CLAUSE_COLLAPSE: OMP_CLAUSE_CHAIN (clauses) = *ws_clauses; *ws_clauses = clauses; break; default: gcc_unreachable (); } } } /* True if OpenMP sharing attribute of DECL is predetermined. */ enum omp_clause_default_kind c_omp_predetermined_sharing (tree decl) { /* Variables with const-qualified type having no mutable member are predetermined shared. */ if (TREE_READONLY (decl)) return OMP_CLAUSE_DEFAULT_SHARED; return OMP_CLAUSE_DEFAULT_UNSPECIFIED; }
region_layer.c
#include "region_layer.h"
#include "activations.h"
#include "blas.h"
#include "box.h"
#include "dark_cuda.h"
#include "utils.h"

#include <stdio.h>
#include <assert.h>
#include <string.h>
#include <stdlib.h>

/* Non-zero: boxes are predicted in absolute (grid-normalized) units
   rather than relative to the anchor size.  */
#define DOABS 1

/* Allocate and initialize a YOLOv2-style region (detection) layer.
   N is the number of anchors per cell, CLASSES the number of object
   classes, COORDS the number of box coordinates (normally 4), and
   MAX_BOXES the maximum number of ground-truth boxes per image.  */
region_layer make_region_layer(int batch, int w, int h, int n, int classes, int coords, int max_boxes)
{
    region_layer l = { (LAYER_TYPE)0 };
    l.type = REGION;

    l.n = n;
    l.batch = batch;
    l.h = h;
    l.w = w;
    l.classes = classes;
    l.coords = coords;
    l.cost = (float*)xcalloc(1, sizeof(float));
    l.biases = (float*)xcalloc(n * 2, sizeof(float));
    l.bias_updates = (float*)xcalloc(n * 2, sizeof(float));
    /* Per cell: n anchors, each with coords + objectness + class scores.  */
    l.outputs = h*w*n*(classes + coords + 1);
    l.inputs = l.outputs;
    l.max_boxes = max_boxes;
    l.truth_size = 4 + 2;
    l.truths = max_boxes*l.truth_size;
    l.delta = (float*)xcalloc(batch * l.outputs, sizeof(float));
    l.output = (float*)xcalloc(batch * l.outputs, sizeof(float));
    int i;
    /* Default anchor sizes of 0.5 until real biases are loaded.  */
    for(i = 0; i < n*2; ++i){
        l.biases[i] = .5;
    }

    l.forward = forward_region_layer;
    l.backward = backward_region_layer;
#ifdef GPU
    l.forward_gpu = forward_region_layer_gpu;
    l.backward_gpu = backward_region_layer_gpu;
    l.output_gpu = cuda_make_array(l.output, batch*l.outputs);
    l.delta_gpu = cuda_make_array(l.delta, batch*l.outputs);
#endif

    fprintf(stderr, "detection\n");
    srand(time(0));

    return l;
}

/* Resize the layer's buffers for a new input resolution W x H,
   reallocating the CPU (and GPU) output/delta arrays.  */
void resize_region_layer(layer *l, int w, int h)
{
#ifdef GPU
    int old_w = l->w;
    int old_h = l->h;
#endif

    l->w = w;
    l->h = h;

    l->outputs = h*w*l->n*(l->classes + l->coords + 1);
    l->inputs = l->outputs;

    l->output = (float*)xrealloc(l->output, l->batch * l->outputs * sizeof(float));
    l->delta = (float*)xrealloc(l->delta, l->batch * l->outputs * sizeof(float));

#ifdef GPU
    //if (old_w < w || old_h < h)
    {
        cuda_free(l->delta_gpu);
        cuda_free(l->output_gpu);

        l->delta_gpu = cuda_make_array(l->delta, l->batch*l->outputs);
        l->output_gpu = cuda_make_array(l->output, l->batch*l->outputs);
    }
#endif
}

/* Decode the raw network outputs at INDEX into a box for anchor N at
   grid cell (I, J).  x/y pass through a logistic; w/h are exponentiated
   and scaled by the anchor biases (and by the grid size when DOABS).  */
box get_region_box(float *x, float *biases, int n, int index, int i, int j, int w, int h)
{
    box b;
    b.x = (i + logistic_activate(x[index + 0])) / w;
    b.y = (j + logistic_activate(x[index + 1])) / h;
    b.w = exp(x[index + 2]) * biases[2*n];
    b.h = exp(x[index + 3]) * biases[2*n+1];
    if(DOABS){
        b.w = exp(x[index + 2]) * biases[2*n] / w;
        b.h = exp(x[index + 3]) * biases[2*n+1] / h;
    }
    return b;
}

/* Write coordinate gradients for one predicted box into DELTA, scaled
   by SCALE, and return the IoU of the prediction against TRUTH.  */
float delta_region_box(box truth, float *x, float *biases, int n, int index, int i, int j, int w, int h, float *delta, float scale)
{
    box pred = get_region_box(x, biases, n, index, i, j, w, h);
    float iou = box_iou(pred, truth);

    /* Targets in the network's raw parameterization (inverse of
       get_region_box).  */
    float tx = (truth.x*w - i);
    float ty = (truth.y*h - j);
    float tw = log(truth.w / biases[2*n]);
    float th = log(truth.h / biases[2*n + 1]);
    if(DOABS){
        tw = log(truth.w*w / biases[2*n]);
        th = log(truth.h*h / biases[2*n + 1]);
    }

    delta[index + 0] = scale * (tx - logistic_activate(x[index + 0])) * logistic_gradient(logistic_activate(x[index + 0]));
    delta[index + 1] = scale * (ty - logistic_activate(x[index + 1])) * logistic_gradient(logistic_activate(x[index + 1]));
    delta[index + 2] = scale * (tw - x[index + 2]);
    delta[index + 3] = scale * (th - x[index + 3]);
    return iou;
}

/* Write classification gradients for the prediction at INDEX into DELTA.
   With a softmax tree HIER, walk up the hierarchy from CLASS_ID and
   supervise each group along the path; otherwise use plain (or focal)
   cross-entropy over CLASSES.  *AVG_CAT accumulates the probability
   assigned to the true class for logging.  */
void delta_region_class(float *output, float *delta, int index, int class_id, int classes, tree *hier, float scale, float *avg_cat, int focal_loss)
{
    int i, n;
    if(hier){
        float pred = 1;
        while(class_id >= 0){
            pred *= output[index + class_id];
            int g = hier->group[class_id];
            int offset = hier->group_offset[g];
            /* Zero-target all siblings in the group, then set the
               positive target for the true node.  */
            for(i = 0; i < hier->group_size[g]; ++i){
                delta[index + offset + i] = scale * (0 - output[index + offset + i]);
            }
            delta[index + class_id]   = scale * (1 - output[index + class_id]);

            class_id = hier->parent[class_id];
        }
        *avg_cat += pred;
    } else {
        // Focal loss
        if (focal_loss) {
            // Focal Loss
            float alpha = 0.5;    // 0.25 or 0.5
            //float gamma = 2;    // hardcoded in many places of the grad-formula

            int ti = index + class_id;
            float pt = output[ti] + 0.000000000000001F;
            // http://fooplot.com/#W3sidHlwZSI6MCwiZXEiOiItKDEteCkqKDIqeCpsb2coeCkreC0xKSIsImNvbG9yIjoiIzAwMDAwMCJ9LHsidHlwZSI6MTAwMH1d
            float grad = -(1 - pt) * (2 * pt*logf(pt) + pt - 1);    // http://blog.csdn.net/linmingan/article/details/77885832
            //float grad = (1 - pt) * (2 * pt*logf(pt) + pt - 1);    // https://github.com/unsky/focal-loss

            for (n = 0; n < classes; ++n) {
                delta[index + n] = scale * (((n == class_id) ? 1 : 0) - output[index + n]);

                delta[index + n] *= alpha*grad;

                if (n == class_id) *avg_cat += output[index + n];
            }
        }
        else {
            // default
            for (n = 0; n < classes; ++n) {
                delta[index + n] = scale * (((n == class_id) ? 1 : 0) - output[index + n]);
                if (n == class_id) *avg_cat += output[index + n];
            }
        }
    }
}

/* Inverse of the logistic function.  */
float logit(float x)
{
    return log(x/(1.-x));
}

/* NaN test that avoids depending on isnan().  */
float tisnan(float x)
{
    return (x != x);
}

/* Flat index of ENTRY (coordinate/objectness/class channel) for the
   prediction at LOCATION in batch element BATCH, in the planar
   (channel-major) layout used on the GPU path.  */
static int entry_index(layer l, int batch, int location, int entry)
{
    int n =   location / (l.w*l.h);
    int loc = location % (l.w*l.h);
    return batch*l.outputs + n*l.w*l.h*(l.coords + l.classes + 1) + entry*l.w*l.h + loc;
}

void softmax_tree(float *input, int batch, int inputs, float temp, tree *hierarchy, float *output);

/* CPU forward pass: activate predictions and, when training, compute
   the region loss gradients into l.delta and the scalar cost into
   *l.cost.  Prints running averages of IoU/class/objectness stats.  */
void forward_region_layer(const region_layer l, network_state state)
{
    int i,j,b,t,n;
    int size = l.coords + l.classes + 1;
    memcpy(l.output, state.input, l.outputs*l.batch*sizeof(float));
#ifndef GPU
    /* CPU path works on interleaved (box-major) layout.  */
    flatten(l.output, l.w*l.h, size*l.n, l.batch, 1);
#endif
    /* Objectness (entry 4) always goes through a logistic.  */
    for (b = 0; b < l.batch; ++b){
        for(i = 0; i < l.h*l.w*l.n; ++i){
            int index = size*i + b*l.outputs;
            l.output[index + 4] = logistic_activate(l.output[index + 4]);
        }
    }

#ifndef GPU
    if (l.softmax_tree){
        for (b = 0; b < l.batch; ++b){
            for(i = 0; i < l.h*l.w*l.n; ++i){
                int index = size*i + b*l.outputs;
                softmax_tree(l.output + index + 5, 1, 0, 1, l.softmax_tree, l.output + index + 5);
            }
        }
    } else if (l.softmax){
        for (b = 0; b < l.batch; ++b){
            for(i = 0; i < l.h*l.w*l.n; ++i){
                int index = size*i + b*l.outputs;
                softmax(l.output + index + 5, l.classes, 1, l.output + index + 5, 1);
            }
        }
    }
#endif
    if(!state.train) return;
    memset(l.delta, 0, l.outputs * l.batch * sizeof(float));
    float avg_iou = 0;
    float recall = 0;
    float avg_cat = 0;
    float avg_obj = 0;
    float avg_anyobj = 0;
    int count = 0;
    int class_count = 0;
    *(l.cost) = 0;
    for (b = 0; b < l.batch; ++b) {
        if(l.softmax_tree){
            /* Classification-only labels are encoded with huge x/y;
               supervise only the best-matching class path for them.  */
            int onlyclass_id = 0;
            for(t = 0; t < l.max_boxes; ++t){
                box truth = float_to_box(state.truth + t*l.truth_size + b*l.truths);
                if(!truth.x) break; // continue;
                int class_id = state.truth[t*l.truth_size + b*l.truths + 4];
                float maxp = 0;
                int maxi = 0;
                if(truth.x > 100000 && truth.y > 100000){
                    for(n = 0; n < l.n*l.w*l.h; ++n){
                        int index = size*n + b*l.outputs + 5;
                        float scale =  l.output[index-1];
                        float p = scale*get_hierarchy_probability(l.output + index, l.softmax_tree, class_id);
                        if(p > maxp){
                            maxp = p;
                            maxi = n;
                        }
                    }
                    int index = size*maxi + b*l.outputs + 5;
                    delta_region_class(l.output, l.delta, index, class_id, l.classes, l.softmax_tree, l.class_scale, &avg_cat, l.focal_loss);
                    ++class_count;
                    onlyclass_id = 1;
                    break;
                }
            }
            if(onlyclass_id) continue;
        }
        /* Pass 1: penalize objectness of every prediction that does not
           overlap any truth box well enough.  */
        for (j = 0; j < l.h; ++j) {
            for (i = 0; i < l.w; ++i) {
                for (n = 0; n < l.n; ++n) {
                    int index = size*(j*l.w*l.n + i*l.n + n) + b*l.outputs;
                    box pred = get_region_box(l.output, l.biases, n, index, i, j, l.w, l.h);
                    float best_iou = 0;
                    int best_class_id = -1;
                    for(t = 0; t < l.max_boxes; ++t){
                        box truth = float_to_box(state.truth + t*l.truth_size + b*l.truths);
                        int class_id = state.truth[t * l.truth_size + b*l.truths + 4];
                        if (class_id >= l.classes) continue; // if label contains class_id more than number of classes in the cfg-file
                        if(!truth.x) break; // continue;
                        float iou = box_iou(pred, truth);
                        if (iou > best_iou) {
                            best_class_id = state.truth[t*l.truth_size + b*l.truths + 4];
                            best_iou = iou;
                        }
                    }
                    avg_anyobj += l.output[index + 4];
                    l.delta[index + 4] = l.noobject_scale * ((0 - l.output[index + 4]) * logistic_gradient(l.output[index + 4]));
                    if(l.classfix == -1) l.delta[index + 4] = l.noobject_scale * ((best_iou - l.output[index + 4]) * logistic_gradient(l.output[index + 4]));
                    else{
                        /* High-IoU predictions get no objectness penalty.  */
                        if (best_iou > l.thresh) {
                            l.delta[index + 4] = 0;
                            if(l.classfix > 0){
                                delta_region_class(l.output, l.delta, index + 5, best_class_id, l.classes, l.softmax_tree, l.class_scale*(l.classfix == 2 ? l.output[index + 4] : 1), &avg_cat, l.focal_loss);
                                ++class_count;
                            }
                        }
                    }

                    /* Early in training, nudge all boxes toward their
                       anchor shape to stabilize learning.  */
                    if(*(state.net.seen) < 12800){
                        box truth = {0};
                        truth.x = (i + .5)/l.w;
                        truth.y = (j + .5)/l.h;
                        truth.w = l.biases[2*n];
                        truth.h = l.biases[2*n+1];
                        if(DOABS){
                            truth.w = l.biases[2*n]/l.w;
                            truth.h = l.biases[2*n+1]/l.h;
                        }
                        delta_region_box(truth, l.output, l.biases, n, index, i, j, l.w, l.h, l.delta, .01);
                    }
                }
            }
        }
        /* Pass 2: for each truth box, find the best-matching anchor in
           its cell and supervise coords, objectness and class there.  */
        for(t = 0; t < l.max_boxes; ++t){
            box truth = float_to_box(state.truth + t*l.truth_size + b*l.truths);
            int class_id = state.truth[t * l.truth_size + b*l.truths + 4];
            if (class_id >= l.classes) {
                printf("\n Warning: in txt-labels class_id=%d >= classes=%d in cfg-file. In txt-labels class_id should be [from 0 to %d] \n", class_id, l.classes, l.classes-1);
                getchar();
                continue; // if label contains class_id more than number of classes in the cfg-file
            }

            if(!truth.x) break; // continue;
            float best_iou = 0;
            int best_index = 0;
            int best_n = 0;
            i = (truth.x * l.w);
            j = (truth.y * l.h);
            //printf("%d %f %d %f\n", i, truth.x*l.w, j, truth.y*l.h);
            /* Compare shapes only: center both boxes at the origin.  */
            box truth_shift = truth;
            truth_shift.x = 0;
            truth_shift.y = 0;
            //printf("index %d %d\n",i, j);
            for(n = 0; n < l.n; ++n){
                int index = size*(j*l.w*l.n + i*l.n + n) + b*l.outputs;
                box pred = get_region_box(l.output, l.biases, n, index, i, j, l.w, l.h);
                if(l.bias_match){
                    pred.w = l.biases[2*n];
                    pred.h = l.biases[2*n+1];
                    if(DOABS){
                        pred.w = l.biases[2*n]/l.w;
                        pred.h = l.biases[2*n+1]/l.h;
                    }
                }
                //printf("pred: (%f, %f) %f x %f\n", pred.x, pred.y, pred.w, pred.h);
                pred.x = 0;
                pred.y = 0;
                float iou = box_iou(pred, truth_shift);
                if (iou > best_iou){
                    best_index = index;
                    best_iou = iou;
                    best_n = n;
                }
            }
            //printf("%d %f (%f, %f) %f x %f\n", best_n, best_iou, truth.x, truth.y, truth.w, truth.h);

            float iou = delta_region_box(truth, l.output, l.biases, best_n, best_index, i, j, l.w, l.h, l.delta, l.coord_scale);
            if(iou > .5) recall += 1;
            avg_iou += iou;

            //l.delta[best_index + 4] = iou - l.output[best_index + 4];
            avg_obj += l.output[best_index + 4];
            l.delta[best_index + 4] = l.object_scale * (1 - l.output[best_index + 4]) * logistic_gradient(l.output[best_index + 4]);
            if (l.rescore) {
                /* Rescore: objectness target is the IoU, not 1.  */
                l.delta[best_index + 4] = l.object_scale * (iou - l.output[best_index + 4]) * logistic_gradient(l.output[best_index + 4]);
            }

            if (l.map) class_id = l.map[class_id];
            delta_region_class(l.output, l.delta, best_index + 5, class_id, l.classes, l.softmax_tree, l.class_scale, &avg_cat, l.focal_loss);
            ++count;
            ++class_count;
        }
    }
    //printf("\n");
#ifndef GPU
    flatten(l.delta, l.w*l.h, size*l.n, l.batch, 0);
#endif
    /* Total cost is the squared L2 norm of the gradient.  */
    *(l.cost) = pow(mag_array(l.delta, l.outputs * l.batch), 2);
    printf("Region Avg IOU: %f, Class: %f, Obj: %f, No Obj: %f, Avg Recall: %f, count: %d\n", avg_iou/count, avg_cat/class_count, avg_obj/count, avg_anyobj/(l.w*l.h*l.n*l.batch), recall/count, count);
}

/* Backward pass: propagate l.delta into the upstream gradient.  */
void backward_region_layer(const region_layer l, network_state state)
{
    axpy_cpu(l.batch*l.inputs, 1, l.delta, 1, state.delta, 1);
}

/* Extract decoded boxes and per-class probabilities (scaled to image
   size W x H) from the layer output.  Probabilities below THRESH are
   zeroed; MAP optionally remaps softmax-tree classes.  */
void get_region_boxes(layer l, int w, int h, float thresh, float **probs, box *boxes, int only_objectness, int *map)
{
    int i;
    float *const predictions = l.output;
    #pragma omp parallel for
    for (i = 0; i < l.w*l.h; ++i){
        int j, n;
        int row = i / l.w;
        int col = i % l.w;
        for(n = 0; n < l.n; ++n){
            int index = i*l.n + n;
            int p_index = index * (l.classes + 5) + 4;
            float scale = predictions[p_index];
            if(l.classfix == -1 && scale < .5) scale = 0;
            int box_index = index * (l.classes + 5);
            boxes[index] = get_region_box(predictions, l.biases, n, box_index, col, row, l.w, l.h);
            boxes[index].x *= w;
            boxes[index].y *= h;
            boxes[index].w *= w;
            boxes[index].h *= h;

            int class_index = index * (l.classes + 5) + 5;
            if(l.softmax_tree){
                hierarchy_predictions(predictions + class_index, l.classes, l.softmax_tree, 0);
                int found = 0;
                if(map){
                    /* NOTE(review): 200 appears to assume a fixed-size
                       class map (e.g. ImageNet detection) -- confirm.  */
                    for(j = 0; j < 200; ++j){
                        float prob = scale*predictions[class_index+map[j]];
                        probs[index][j] = (prob > thresh) ? prob : 0;
                    }
                } else {
                    /* Keep only the deepest confident node in the tree.  */
                    for(j = l.classes - 1; j >= 0; --j){
                        if(!found && predictions[class_index + j] > .5){
                            found = 1;
                        } else {
                            predictions[class_index + j] = 0;
                        }
                        float prob = predictions[class_index+j];
                        probs[index][j] = (scale > thresh) ? prob : 0;
                    }
                }
            } else {
                for(j = 0; j < l.classes; ++j){
                    float prob = scale*predictions[class_index+j];
                    probs[index][j] = (prob > thresh) ? prob : 0;
                }
            }
            if(only_objectness){
                probs[index][0] = scale;
            }
        }
    }
}

#ifdef GPU

/* GPU forward pass: activate on the GPU, then pull the result to the
   CPU and reuse forward_region_layer for loss computation.  */
void forward_region_layer_gpu(const region_layer l, network_state state)
{
    /*
       if(!state.train){
       copy_ongpu(l.batch*l.inputs, state.input, 1, l.output_gpu, 1);
       return;
       }
     */
    flatten_ongpu(state.input, l.h*l.w, l.n*(l.coords + l.classes + 1), l.batch, 1, l.output_gpu);
    if(l.softmax_tree){
        int i;
        int count = 5;
        /* One softmax per hierarchy group, offset past the 5 box entries.  */
        for (i = 0; i < l.softmax_tree->groups; ++i) {
            int group_size = l.softmax_tree->group_size[i];
            softmax_gpu(l.output_gpu+count, group_size, l.classes + 5, l.w*l.h*l.n*l.batch, 1, l.output_gpu + count);
            count += group_size;
        }
    }else if (l.softmax){
        softmax_gpu(l.output_gpu+5, l.classes, l.classes + 5, l.w*l.h*l.n*l.batch, 1, l.output_gpu + 5);
    }

    float* in_cpu = (float*)xcalloc(l.batch * l.inputs, sizeof(float));
    float *truth_cpu = 0;
    if(state.truth){
        int num_truth = l.batch*l.truths;
        truth_cpu = (float*)xcalloc(num_truth, sizeof(float));
        cuda_pull_array(state.truth, truth_cpu, num_truth);
    }
    cuda_pull_array(l.output_gpu, in_cpu, l.batch*l.inputs);
    //cudaStreamSynchronize(get_cuda_stream());
    network_state cpu_state = state;
    cpu_state.train = state.train;
    cpu_state.truth = truth_cpu;
    cpu_state.input = in_cpu;
    forward_region_layer(l, cpu_state);
    //cuda_push_array(l.output_gpu, l.output, l.batch*l.outputs);
    free(cpu_state.input);
    if(!state.train) return;
    /* Push the CPU-computed gradients back to the GPU for backward.  */
    cuda_push_array(l.delta_gpu, l.delta, l.batch*l.outputs);
    //cudaStreamSynchronize(get_cuda_stream());
    if(cpu_state.truth) free(cpu_state.truth);
}

/* GPU backward pass: un-flatten the gradients into the upstream delta.  */
void backward_region_layer_gpu(region_layer l, network_state state)
{
    flatten_ongpu(l.delta_gpu, l.h*l.w, l.n*(l.coords + l.classes + 1), l.batch, 0, state.delta);
}
#endif

/* Undo letterbox scaling: map boxes from network coordinates
   (NETW x NETH with padding) back to image coordinates (W x H).
   When RELATIVE is zero, boxes are additionally scaled to pixels.  */
void correct_region_boxes(detection *dets, int n, int w, int h, int netw, int neth, int relative)
{
    int i;
    int new_w = 0;
    int new_h = 0;
    /* Size of the image as it was fitted inside the network input.  */
    if (((float)netw / w) < ((float)neth / h)) {
        new_w = netw;
        new_h = (h * netw) / w;
    }
    else {
        new_h = neth;
        new_w = (w * neth) / h;
    }
    for (i = 0; i < n; ++i) {
        box b = dets[i].bbox;
        b.x = (b.x - (netw - new_w) / 2. / netw) / ((float)new_w / netw);
        b.y = (b.y - (neth - new_h) / 2. / neth) / ((float)new_h / neth);
        b.w *= (float)netw / new_w;
        b.h *= (float)neth / new_h;
        if (!relative) {
            b.x *= w;
            b.w *= w;
            b.y *= h;
            b.h *= h;
        }
        dets[i].bbox = b;
    }
}

/* Fill DETS with decoded detections from the layer output, averaging
   with a horizontally-flipped copy when batch == 2, and correct the
   boxes for letterboxing.  */
void get_region_detections(layer l, int w, int h, int netw, int neth, float thresh, int *map, float tree_thresh, int relative, detection *dets)
{
    int i, j, n, z;
    float *predictions = l.output;
    /* Test-time augmentation: batch slot 1 holds the flipped image;
       mirror it back and average with slot 0.  */
    if (l.batch == 2) {
        float *flip = l.output + l.outputs;
        for (j = 0; j < l.h; ++j) {
            for (i = 0; i < l.w / 2; ++i) {
                for (n = 0; n < l.n; ++n) {
                    for (z = 0; z < l.classes + l.coords + 1; ++z) {
                        int i1 = z*l.w*l.h*l.n + n*l.w*l.h + j*l.w + i;
                        int i2 = z*l.w*l.h*l.n + n*l.w*l.h + j*l.w + (l.w - i - 1);
                        float swap = flip[i1];
                        flip[i1] = flip[i2];
                        flip[i2] = swap;
                        if (z == 0) {
                            /* x-offsets change sign under a mirror.  */
                            flip[i1] = -flip[i1];
                            flip[i2] = -flip[i2];
                        }
                    }
                }
            }
        }
        for (i = 0; i < l.outputs; ++i) {
            l.output[i] = (l.output[i] + flip[i]) / 2.;
        }
    }
    for (i = 0; i < l.w*l.h; ++i) {
        int row = i / l.w;
        int col = i % l.w;
        for (n = 0; n < l.n; ++n) {
            int index = n*l.w*l.h + i;
            for (j = 0; j < l.classes; ++j) {
                dets[index].prob[j] = 0;
            }
            int obj_index = entry_index(l, 0, n*l.w*l.h + i, l.coords);
            int box_index = entry_index(l, 0, n*l.w*l.h + i, 0);
            int mask_index = entry_index(l, 0, n*l.w*l.h + i, 4);
            float scale = l.background ? 1 : predictions[obj_index];
            dets[index].bbox = get_region_box(predictions, l.biases, n, box_index, col, row, l.w, l.h);// , l.w*l.h);
            dets[index].objectness = scale > thresh ? scale : 0;
            if (dets[index].mask) {
                for (j = 0; j < l.coords - 4; ++j) {
                    dets[index].mask[j] = l.output[mask_index + j*l.w*l.h];
                }
            }

            int class_index = entry_index(l, 0, n*l.w*l.h + i, l.coords + !l.background);
            if (l.softmax_tree) {

                hierarchy_predictions(predictions + class_index, l.classes, l.softmax_tree, 0);// , l.w*l.h);
                if (map) {
                    for (j = 0; j < 200; ++j) {
                        int class_index = entry_index(l, 0, n*l.w*l.h + i, l.coords + 1 + map[j]);
                        float prob = scale*predictions[class_index];
                        dets[index].prob[j] = (prob > thresh) ? prob : 0;
                    }
                }
                else {
                    int j = hierarchy_top_prediction(predictions + class_index, l.softmax_tree, tree_thresh, l.w*l.h);
                    dets[index].prob[j] = (scale > thresh) ? scale : 0;
                }
            }
            else {
                if (dets[index].objectness) {
                    for (j = 0; j < l.classes; ++j) {
                        int class_index = entry_index(l, 0, n*l.w*l.h + i, l.coords + 1 + j);
                        float prob = scale*predictions[class_index];
                        dets[index].prob[j] = (prob > thresh) ? prob : 0;
                    }
                }
            }
        }
    }
    correct_region_boxes(dets, l.w*l.h*l.n, w, h, netw, neth, relative);
}

/* Zero the objectness channel of every prediction in the layer.  */
void zero_objectness(layer l)
{
    int i, n;
    for (i = 0; i < l.w*l.h; ++i) {
        for (n = 0; n < l.n; ++n) {
            int obj_index = entry_index(l, 0, n*l.w*l.h + i, l.coords);
            l.output[obj_index] = 0;
        }
    }
}
interp_kernel_arm.c
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * License); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*
 * Copyright (c) 2021, OPEN AI LAB
 * Author: haitao@openailab.com
 */

#include "interp_kernel_arm.h"

#include "utility/sys_port.h"

#include <math.h>
#include <arm_neon.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* Precompute, for each output column DX of a W -> OUTW linear resize,
   the left source index xofs[dx] and the two interpolation weights
   alpha[dx*2] (left) and alpha[dx*2+1] (right).  ALIGN_CORNER selects
   the corner-aligned coordinate mapping.  */
static void linear_coeffs(int w, int outw, int* xofs, float* alpha, int align_corner)
{
    double scale = (double)w / outw;
    if (align_corner)
    {
        scale = (double)(w - 1) / (outw - 1);
    }

    for (int dx = 0; dx < outw; dx++)
    {
        /* Half-pixel-center mapping by default.  */
        float fx = (float)((dx + 0.5) * scale - 0.5);
        if (align_corner)
        {
            fx = (float)(dx * scale);
        }
        int sx = floor(fx);
        fx -= sx;

        /* Clamp to the valid [0, w-2] source window.  */
        if (sx < 0)
        {
            sx = 0;
            fx = 0.f;
        }
        if (sx >= w - 1)
        {
            sx = w - 2;
            fx = 1.f;
        }

        xofs[dx] = sx;

        alpha[dx * 2] = 1.f - fx;
        alpha[dx * 2 + 1] = fx;
    }
}

/* Bilinear resize of a single-channel IN_H x IN_W float image into
   OUT_H x OUT_W, using precomputed horizontal (xofs/alpha) and
   vertical (yofs/beta) coefficients.  Horizontal passes are NEON
   vectorized; two row buffers are reused and rotated so that each
   source row is horizontally resized at most once.  */
static void resize_bilinear_image(float* src, float* dst, float* alpha, int* xofs, float* beta, int* yofs, int out_h,
                                  int out_w, int in_h, int in_w)
{
    int w = out_w; //dst.w;
    int h = out_h; //dst.h;

    // loop body
    float* rowsbuf0 = (float*)sys_malloc(w * sizeof(float));
    float* rowsbuf1 = (float*)sys_malloc(w * sizeof(float));
    float* rows0 = rowsbuf0;
    float* rows1 = rowsbuf1;

    int prev_sy1 = -2;

    for (int dy = 0; dy < h; dy++)
    {
        int sy = yofs[dy];

        if (sy == prev_sy1)
        {
            // reuse all rows
        }
        else if (sy == prev_sy1 + 1)
        {
            // hresize one row
            /* The old rows1 becomes rows0; only row sy+1 is new.  */
            float* rows0_old = rows0;
            rows0 = rows1;
            rows1 = rows0_old;
            const float* S1 = src + (sy + 1) * in_w; //src.row(sy+1);

            const float* alphap = alpha;
            float* rows1p = rows1;

            // neon
            /* Processes two output pixels per iteration: a holds the
               four weights, S1/S1n the two source pairs; the pairwise
               add collapses each weighted pair into one output.
               NOTE(review): odd W leaves the last pixel unwritten here
               -- presumably callers use even widths; confirm.  */
            for (int dx = 0; dx + 1 < w; dx += 2)
            {
                int sx = xofs[dx];
                int sxn = xofs[dx + 1];
                const float* S1p = S1 + sx;
                const float* S1np = S1 + sxn;

                float32x4_t _a = vld1q_f32(alphap);
                float32x2_t _S1 = vld1_f32(S1p);
                float32x2_t _S1n = vld1_f32(S1np);

                float32x4_t _S1S1n = vcombine_f32(_S1, _S1n);
                float32x4_t _ms1 = vmulq_f32(_S1S1n, _a);
                float32x2_t _rows1 = vpadd_f32(vget_low_f32(_ms1), vget_high_f32(_ms1));

                vst1_f32(rows1p + dx, _rows1);

                alphap += 4;
            }
        }
        else
        {
            // hresize two rows
            const float* S0 = src + sy * in_w;       //src.row(sy);
            const float* S1 = src + (sy + 1) * in_w; //src.row(sy+1);

            const float* alphap = alpha;
            float* rows0p = rows0;
            float* rows1p = rows1;
            for (int dx = 0; dx + 1 < w; dx += 2)
            {
                int sx = xofs[dx];
                int sxn = xofs[dx + 1];
                const float* S0p = S0 + sx;
                const float* S1p = S1 + sx;
                const float* S0np = S0 + sxn;
                const float* S1np = S1 + sxn;

                float32x4_t _a = vld1q_f32(alphap);
                float32x2_t _S0 = vld1_f32(S0p);
                float32x2_t _S1 = vld1_f32(S1p);
                float32x2_t _S0n = vld1_f32(S0np);
                float32x2_t _S1n = vld1_f32(S1np);

                float32x4_t _S0S0n = vcombine_f32(_S0, _S0n);
                float32x4_t _S1S1n = vcombine_f32(_S1, _S1n);
                float32x4_t _ms0 = vmulq_f32(_S0S0n, _a);
                float32x4_t _ms1 = vmulq_f32(_S1S1n, _a);
                float32x2_t _rows0 = vpadd_f32(vget_low_f32(_ms0), vget_high_f32(_ms0));
                float32x2_t _rows1 = vpadd_f32(vget_low_f32(_ms1), vget_high_f32(_ms1));

                vst1_f32(rows0p + dx, _rows0);
                vst1_f32(rows1p + dx, _rows1);

                alphap += 4;
            }
        }

        prev_sy1 = sy;

        // vresize
        /* Blend the two horizontally-resized rows with the vertical
           weights b0/b1; 8 pixels per NEON iteration, scalar tail.  */
        float b0 = beta[0];
        float b1 = beta[1];

        float* rows0p = rows0;
        float* rows1p = rows1;
        float* Dp = dst + dy * out_w; //dst.row(dy);

        int nn = w >> 3;
        int remain = w - (nn << 3);

        float32x4_t _b0 = vdupq_n_f32(b0);
        float32x4_t _b1 = vdupq_n_f32(b1);
        for (; nn > 0; nn--)
        {
            float32x4_t _rows0 = vld1q_f32(rows0p);
            float32x4_t _rows1 = vld1q_f32(rows1p);

            float32x4_t _D = vmulq_f32(_rows0, _b0);
            _D = vmlaq_f32(_D, _rows1, _b1);

            vst1q_f32(Dp, _D);

            float32x4_t _rows0n = vld1q_f32(rows0p + 4);
            float32x4_t _rows1n = vld1q_f32(rows1p + 4);

            float32x4_t _Dn = vmulq_f32(_rows0n, _b0);
            _Dn = vmlaq_f32(_Dn, _rows1n, _b1);

            vst1q_f32(Dp + 4, _Dn);

            Dp += 8;
            rows0p += 8;
            rows1p += 8;
        }
        for (; remain; --remain)
        {
            *Dp++ = *rows0p++ * b0 + *rows1p++ * b1;
        }

        beta += 2;
    }

    sys_free(rowsbuf0);
    sys_free(rowsbuf1);
}

/* Compute the four Catmull-Rom-style cubic weights (A = -0.75) for a
   fractional offset FX in [0, 1), written to coeffs[0..3].  */
static inline void interpolate_cubic(float fx, float* coeffs)
{
    const float A = -0.75f;

    float fx0 = fx + 1;
    float fx1 = fx;
    float fx2 = 1 - fx;
    // float fx3 = 2 - fx;

    coeffs[0] = A * fx0 * fx0 * fx0 - 5 * A * fx0 * fx0 + 8 * A * fx0 - 4 * A;
    coeffs[1] = (A + 2) * fx1 * fx1 * fx1 - (A + 3) * fx1 * fx1 + 1;
    coeffs[2] = (A + 2) * fx2 * fx2 * fx2 - (A + 3) * fx2 * fx2 + 1;
    /* Weights sum to 1; derive the last from the other three.  */
    coeffs[3] = 1.f - coeffs[0] - coeffs[1] - coeffs[2];
}

/* Precompute, for each output column of a W -> OUTW bicubic resize,
   the source index xofs[dx] and four weights alpha[dx*4 .. dx*4+3].
   Border taps are folded into the in-range weights so the kernel can
   always read the fixed window S[-1..2] around xofs[dx].  */
static void cubic_coeffs(int w, int outw, int* xofs, float* alpha)
{
    double scale = (double)w / outw;

    for (int dx = 0; dx < outw; dx++)
    {
        float fx = (float)((dx + 0.5) * scale - 0.5);
        int sx = floor(fx);
        fx -= sx;

        interpolate_cubic(fx, alpha + dx * 4);

        if (sx <= -1)
        {
            sx = 1;
            alpha[dx * 4 + 0] = 1.f - alpha[dx * 4 + 3];
            alpha[dx * 4 + 1] = alpha[dx * 4 + 3];
            alpha[dx * 4 + 2] = 0.f;
            alpha[dx * 4 + 3] = 0.f;
        }
        if (sx == 0)
        {
            sx = 1;
            alpha[dx * 4 + 0] = alpha[dx * 4 + 0] + alpha[dx * 4 + 1];
            alpha[dx * 4 + 1] = alpha[dx * 4 + 2];
            alpha[dx * 4 + 2] = alpha[dx * 4 + 3];
            alpha[dx * 4 + 3] = 0.f;
        }
        if (sx == w - 2)
        {
            sx = w - 3;
            alpha[dx * 4 + 3] = alpha[dx * 4 + 2] + alpha[dx * 4 + 3];
            alpha[dx * 4 + 2] = alpha[dx * 4 + 1];
            alpha[dx * 4 + 1] = alpha[dx * 4 + 0];
            alpha[dx * 4 + 0] = 0.f;
        }
        if (sx >= w - 1)
        {
            sx = w - 3;
            alpha[dx * 4 + 3] = 1.f - alpha[dx * 4 + 0];
            alpha[dx * 4 + 2] = alpha[dx * 4 + 0];
            alpha[dx * 4 + 1] = 0.f;
            alpha[dx * 4 + 0] = 0.f;
        }

        xofs[dx] = sx;
    }
}

/* Bicubic resize of a single-channel IN_H x IN_W float image into
   OUT_H x OUT_W.  Four row buffers hold the horizontally-resized
   source rows sy-1..sy+2; on each step they are rotated so that only
   the rows not already resized for the previous output row are
   recomputed (0, 1, 2 or all 4 of them).  */
static void resize_bicubic_image(float* src, float* dst, float* alpha, int* xofs, float* beta, int* yofs, int out_h,
                                 int out_w, int in_h, int in_w)
{
    int w = out_w; //dst.w;
    int h = out_h; //dst.h;

    // loop body
    float* rowsbuf0 = (float*)sys_malloc(w * sizeof(float));
    float* rowsbuf1 = (float*)sys_malloc(w * sizeof(float));
    float* rowsbuf2 = (float*)sys_malloc(w * sizeof(float));
    float* rowsbuf3 = (float*)sys_malloc(w * sizeof(float));
    float* rows0 = rowsbuf0;
    float* rows1 = rowsbuf1;
    float* rows2 = rowsbuf2;
    float* rows3 = rowsbuf3;

    int prev_sy1 = -3;

    for (int dy = 0; dy < h; dy++)
    {
        int sy = yofs[dy];

        if (sy == prev_sy1)
        {
            // reuse all rows
        }
        else if (sy == prev_sy1 + 1)
        {
            // hresize one row
            float* rows0_old = rows0;
            rows0 = rows1;
            rows1 = rows2;
            rows2 = rows3;
            rows3 = rows0_old;
            const float* S3 = src + (sy + 2) * in_w; //src.row(sy+2);

            const float* alphap = alpha;
            float* rows3p = rows3;
            for (int dx = 0; dx < w; dx++)
            {
                int sx = xofs[dx];
                const float* S3p = S3 + sx;

                float a0 = alphap[0];
                float a1 = alphap[1];
                float a2 = alphap[2];
                float a3 = alphap[3];
                rows3p[dx] = S3p[-1] * a0 + S3p[0] * a1 + S3p[1] * a2 + S3p[2] * a3;

                alphap += 4;
            }
        }
        else if (sy == prev_sy1 + 2)
        {
            // hresize two rows
            float* rows0_old = rows0;
            float* rows1_old = rows1;
            rows0 = rows2;
            rows1 = rows3;
            rows2 = rows0_old;
            rows3 = rows1_old;
            const float* S2 = src + (sy + 1) * in_w; //src.row(sy+1);
            const float* S3 = src + (sy + 2) * in_w; //src.row(sy+2);

            const float* alphap = alpha;
            float* rows2p = rows2;
            float* rows3p = rows3;
            for (int dx = 0; dx < w; dx++)
            {
                int sx = xofs[dx];
                const float* S2p = S2 + sx;
                const float* S3p = S3 + sx;

                float a0 = alphap[0];
                float a1 = alphap[1];
                float a2 = alphap[2];
                float a3 = alphap[3];
                rows2p[dx] = S2p[-1] * a0 + S2p[0] * a1 + S2p[1] * a2 + S2p[2] * a3;
                rows3p[dx] = S3p[-1] * a0 + S3p[0] * a1 + S3p[1] * a2 + S3p[2] * a3;

                alphap += 4;
            }
        }
        else if (sy == prev_sy1 + 3)
        {
            // hresize three rows
            float* rows0_old = rows0;
            float* rows1_old = rows1;
            float* rows2_old = rows2;
            rows0 = rows3;
            rows1 = rows0_old;
            rows2 = rows1_old;
            rows3 = rows2_old;
            const float* S1 = src + sy * in_w;       //src.row(sy);
            const float* S2 = src + (sy + 1) * in_w; //src.row(sy+1);
            const float* S3 = src + (sy + 2) * in_w; //src.row(sy+2);

            const float* alphap = alpha;
            float* rows1p = rows1;
            float* rows2p = rows2;
            float* rows3p = rows3;
            for (int dx = 0; dx < w; dx++)
            {
                int sx = xofs[dx];
                const float* S1p = S1 + sx;
                const float* S2p = S2 + sx;
                const float* S3p = S3 + sx;

                float a0 = alphap[0];
                float a1 = alphap[1];
                float a2 = alphap[2];
                float a3 = alphap[3];
                rows1p[dx] = S1p[-1] * a0 + S1p[0] * a1 + S1p[1] * a2 + S1p[2] * a3;
                rows2p[dx] = S2p[-1] * a0 + S2p[0] * a1 + S2p[1] * a2 + S2p[2] * a3;
                rows3p[dx] = S3p[-1] * a0 + S3p[0] * a1 + S3p[1] * a2 + S3p[2] * a3;

                alphap += 4;
            }
        }
        else
        {
            // hresize four rows
            const float* S0 = src + (sy - 1) * in_w; //src.row(sy-1);
            const float* S1 = src + sy * in_w;       //src.row(sy);
            const float* S2 = src + (sy + 1) * in_w; //src.row(sy+1);
            const float* S3 = src + (sy + 2) * in_w; //src.row(sy+2);

            const float* alphap = alpha;
            float* rows0p = rows0;
            float* rows1p = rows1;
            float* rows2p = rows2;
            float* rows3p = rows3;
            for (int dx = 0; dx < w; dx++)
            {
                int sx = xofs[dx];
                const float* S0p = S0 + sx;
                const float* S1p = S1 + sx;
                const float* S2p = S2 + sx;
                const float* S3p = S3 + sx;

                float a0 = alphap[0];
                float a1 = alphap[1];
                float a2 = alphap[2];
                float a3 = alphap[3];
                rows0p[dx] = S0p[-1] * a0 + S0p[0] * a1 + S0p[1] * a2 + S0p[2] * a3;
                rows1p[dx] = S1p[-1] * a0 + S1p[0] * a1 + S1p[1] * a2 + S1p[2] * a3;
                rows2p[dx] = S2p[-1] * a0 + S2p[0] * a1 + S2p[1] * a2 + S2p[2] * a3;
                rows3p[dx] = S3p[-1] * a0 + S3p[0] * a1 + S3p[1] * a2 + S3p[2] * a3;

                alphap += 4;
            }
        }

        prev_sy1 = sy;

        // vresize
        /* Blend the four resized rows with the vertical cubic weights.  */
        float b0 = beta[0];
        float b1 = beta[1];
        float b2 = beta[2];
        float b3 = beta[3];

        float* rows0p = rows0;
        float* rows1p = rows1;
        float* rows2p = rows2;
        float* rows3p = rows3;
        float* Dp = dst + dy * out_w; //dst.row(dy);
        for (int dx = 0; dx < w; dx++)
        {
            *Dp++ = *rows0p++ * b0 + *rows1p++ * b1 + *rows2p++ * b2 + *rows3p++ * b3;
        }

        beta += 4;
    }

    sys_free(rowsbuf0);
    sys_free(rowsbuf1);
    sys_free(rowsbuf2);
sys_free(rowsbuf3); } int interp_run(struct tensor* output_tensor, struct tensor* input_tensor, struct interp_param* interp_param, int num_thread) { int resize_type = interp_param->resize_type; int out_w = interp_param->output_width; int out_h = interp_param->output_height; float width_scale = interp_param->width_scale; float height_scale = interp_param->height_scale; int in_c = input_tensor->dims[1]; int in_h = input_tensor->dims[2]; int in_w = input_tensor->dims[3]; float* data = (float*)input_tensor->data; float* out_data = (float*)output_tensor->data; if (out_h == 0 || out_w == 0) { out_h = in_h * height_scale; out_w = in_w * width_scale; } if (out_h == in_h && out_w == in_w) { out_data = data; return 0; } int out_channel_size = out_h * out_w; int in_channel_size = in_h * in_w; if (input_tensor->dim_num == 1) { #pragma omp parallel for num_threads(num_thread) for (int q = 0; q < input_tensor->dims[0]; ++q) { for (int i = 0; i < out_h * out_w; i++) { out_data[q * out_h * out_w + i] = data[q]; } } return 0; } if (resize_type == 1) // nearest { #pragma omp parallel for num_threads(num_thread) for (int q = 0; q < in_c; q++) { for (int y = 0; y < out_h; ++y) { const int in_y = MIN((int)(y / height_scale), (in_h - 1)); for (int x = 0; x < out_w; ++x) { const int in_x = MIN((int)(x / width_scale), (in_w - 1)); out_data[out_w * y + x + out_w * out_h * q] = data[in_y * in_w + in_x + q * in_w * in_h]; } } } } else if (resize_type == 2 || resize_type == 4) // bilinear { int* buf = (int*)sys_malloc((out_w + out_h + out_w * 2 + out_h * 2) * sizeof(int)); int* xofs = buf; // new int[ow]; int* yofs = buf + out_w; // new int[oh]; float* alpha = (float*)(buf + out_w + out_h); // new float[ow * 2]; float* beta = (float*)(buf + out_w + out_h + out_w * 2); // new float[oh * 2]; int align_corner = interp_param->resize_type == 2 ? 
0 : 1; linear_coeffs(in_w, out_w, xofs, alpha, align_corner); linear_coeffs(in_h, out_h, yofs, beta, align_corner); #pragma omp parallel for num_threads(num_thread) for (int q = 0; q < in_c; ++q) { resize_bilinear_image(data + in_channel_size * q, out_data + out_channel_size * q, alpha, xofs, beta, yofs, out_h, out_w, in_h, in_w); } sys_free(buf); } else if (resize_type == 3) // bicubic { int* buf = (int*)sys_malloc((out_w + out_h + out_w * 4 + out_h * 4) * sizeof(int)); int* xofs = buf; // new int[ow]; int* yofs = buf + out_w; // new int[oh]; float* alpha = (float*)(buf + out_w + out_h); // new float[ow * 4]; float* beta = (float*)(buf + out_w + out_h + out_w * 4); // new float[oh * 4]; cubic_coeffs(in_w, out_w, xofs, alpha); cubic_coeffs(in_h, out_h, yofs, beta); #pragma omp parallel for num_threads(num_thread) for (int q = 0; q < in_c; q++) { resize_bicubic_image(data + in_channel_size * q, out_data + out_channel_size * q, alpha, xofs, beta, yofs, out_h, out_w, in_h, in_w); } sys_free(buf); return 0; } return 0; }
gi_extrema_region_builder.h
/*
 * Copyright (C) 2018 Attila Gyulassy <jediati@sci.utah.edu>
 * All rights reserved.
 *
 * This software may be modified and distributed under the terms
 * of the BSD license. See the LICENSE file for details.
 */
#ifndef EXTREMAL_REGION_BUILDER
#define EXTREMAL_REGION_BUILDER

#include <set>
#include <queue>
#include <vector>
#include <stack>
#include <unordered_map>
#include "gi_basic_types.h"
#include "gi_vectors.h"
#include "gi_labeling.h"
//#include "gi_regular_grid.h"
//#include "gi_regular_grid_trilinear_function.h"
#include "gi_timing.h"
#include "gi_union_find_labeling.h"
//#include "gi_topological_regular_grid.h"
#include "gi_array_index_partition.h"
#include "omp.h"
//#include "gi_numeric_integrator_expanding_region_stop_filtered2.h"

//#define OUTPUTINTERMEDIATE

namespace GInt {

    // Union-find based merge graph over the extrema of a scalar function on a
    // mesh. Nodes are extrema (MakeSet/AddNode), edges are saddle connections
    // (AddArc); SimplifyToThreshold repeatedly cancels the lowest-persistence
    // arc, merging the two extremum sets, until no arc below the threshold
    // remains.
    template<class FUNC_TYPE, class MESH_TYPE>
    class UFMergeGraph {
    protected:
        FUNC_TYPE* mFunc;
        MESH_TYPE* mMesh;

        // One union-find node per extremum: the cell id of the extremum, the
        // list id of its current representative, and its function value.
        struct ExtremumSet {
            INDEX_TYPE extremum_cell_index;
            INT_TYPE representative_list_id;
            typename FUNC_TYPE::DType extremum_fval;
            ExtremumSet(INDEX_TYPE gid, INT_TYPE lid, typename FUNC_TYPE::DType val) :
                extremum_cell_index(gid), representative_list_id(lid), extremum_fval(val) {}
            void printme() const {
                printf("extremum_cell_index=%llu, rep=%d, val=%f\n", extremum_cell_index, representative_list_id, extremum_fval);
            }
        };

        // A candidate cancellation: the saddle cell connecting two extremum
        // sets, plus its persistence (|saddle value - extremum value|).
        struct MergeArc {
            INDEX_TYPE saddle_cell_index;
            INT_TYPE extremum1_list_id;
            INT_TYPE extremum2_list_id;
            typename FUNC_TYPE::DType merge_persistence;
            typename FUNC_TYPE::DType merge_saddle_value;
            void printme() const {
                printf("saddle_cell_index=%llu, e1=%d, e2=%d, p=%f, s=%f\n", saddle_cell_index, extremum1_list_id, extremum2_list_id, merge_persistence, merge_saddle_value);
            }
            // Comparator for the priority queue: inverted so the queue yields
            // the SMALLEST persistence first (ties broken by saddle index).
            bool operator()(const MergeArc& _Left, const MergeArc& _Right) {
                if (_Left.merge_persistence < _Right.merge_persistence) return false;
                if (_Left.merge_persistence > _Right.merge_persistence) return true;
                return _Left.saddle_cell_index > _Right.saddle_cell_index;
            }
        };

        // number of cancellations actually performed (returned by SimplifyToThreshold)
        int countcancels;

    public:
        std::unordered_map<INDEX_TYPE, INT_TYPE> mCellIndexToListIdMap; // extremum cell id -> list id in mExtrema
        std::vector<ExtremumSet> mExtrema;
        std::priority_queue<MergeArc, std::vector< MergeArc>, MergeArc > mMergesToCancel;

        // Create a singleton union-find set for extremum cell gid.
        void MakeSet(INDEX_TYPE gid) {
            //printf("%llu gid\n", gid);
            INT_TYPE lid = mExtrema.size();
            //printf("%d lid\n", lid);
            mExtrema.push_back(ExtremumSet{ gid, lid, mFunc->cellValue(gid) });
            //printf("extrema size %d\n", mExtrema.size());
            //printf("value = %f\n", mExtrema[mExtrema.size()-1].extremum_fval);
            mCellIndexToListIdMap[gid] = lid;
            //printf("idmap size %d\n", mCellIndexToListIdMap.size());
        }

        // Union-find "find" with path compression (recursive).
        INT_TYPE FindRepresentative(INT_TYPE lid) {
            ExtremumSet& e = mExtrema[lid];
            //printf("%d->%d\n", lid, e.representative_list_id);
            if (e.representative_list_id == lid) return lid;
            INT_TYPE i = FindRepresentative(e.representative_list_id);
            e.representative_list_id = i;
            return i;
        }

        // true = building a minimum hierarchy (basins), false = maximum hierarchy
        bool mDoMinHierarchy;

        // Union of two sets: the less extreme representative is redirected to
        // point at the more extreme one (direction flipped by mDoMinHierarchy).
        void MergeByVal(INT_TYPE lid1, INT_TYPE lid2) {
            ExtremumSet& e1 = mExtrema[lid1];
            ExtremumSet& e2 = mExtrema[lid2];
            // if reverse order is false, set the lower one to point
            // to the higher one - so reverse order true used for basins
            if (mFunc->lessThan(e1.extremum_cell_index, e2.extremum_cell_index) ^ mDoMinHierarchy) {
                e1.representative_list_id = e2.representative_list_id;
                //printf("%f -> %f\n", e1.extremum_fval, e2.extremum_fval);
            }
            else {
                e2.representative_list_id = e1.representative_list_id;
                //printf("%f -> %f\n", e2.extremum_fval, e1.extremum_fval);
            }
        }

        // Attempt one cancellation. If the arc's endpoints were already merged
        // it is a loop and is dropped; if its endpoints' representatives have
        // changed since it was queued, its persistence is recomputed and it is
        // re-queued; otherwise the two sets are merged.
        void PerformCancel(MergeArc& m) {
            if (m.merge_persistence > gThreshold) {
#ifdef DEBUGGSERB
                printf("threshold too great %f %f\n", m.merge_persistence, gThreshold);
#endif
                return;
            }
            //printf("e1=%d, e2=%d\n", m.extremum1_list_id, m.extremum2_list_id);
            INT_TYPE current_extrep1_lid = FindRepresentative(m.extremum1_list_id);
            INT_TYPE current_extrep2_lid = FindRepresentative(m.extremum2_list_id);
            //printf("found %d %d\n", current_extrep1_lid, current_extrep2_lid);
            if (current_extrep1_lid == current_extrep2_lid) return; // do nothing for loops
            // NOTE(review): current_extrep1 aliases mExtrema[current_extrep2_lid]
            // and vice versa -- the "1"/"2" names are crossed relative to the
            // lids. The d1/d2 re-queue logic below depends on this pairing;
            // verify against the original author's intent before touching it.
            ExtremumSet& current_extrep1 = mExtrema[current_extrep2_lid];
            ExtremumSet& current_extrep2 = mExtrema[current_extrep1_lid];
            // if this arc is no longer valid then insert the new arc
            // if the current_extrep2 is not the same then we need to modify
            if (m.extremum1_list_id != current_extrep1_lid || m.extremum2_list_id != current_extrep2_lid) {
                // reinsert?
                // NOTE(review): unqualified abs() may resolve to the integer
                // overload when DType is floating point -- confirm fabs/std::abs
                // is picked up for the instantiated DType.
                typename FUNC_TYPE::DType d1 = abs(m.merge_saddle_value - current_extrep1.extremum_fval);
                typename FUNC_TYPE::DType d2 = abs(m.merge_saddle_value - current_extrep2.extremum_fval);
                if (d1 < d2) {
                    if (d1 > gThreshold) {
#ifdef DEBUGGSERB
                        printf("thresha too big %f, %f, %f\n", d1, d2, gThreshold);
#endif
                        return;
                    }
                    m.merge_persistence = d1;
                    m.extremum1_list_id = current_extrep2_lid;
                    m.extremum2_list_id = current_extrep1_lid;
                    mMergesToCancel.push(m);
                    //struct MergeArc {
                    //	INDEX_TYPE saddle_cell_index;
                    //	INT_TYPE extremum1_list_id;
                    //	INT_TYPE headExtId;
                    //	typename FUNC_TYPE::DType merge_persistence;
                    //	typename FUNC_TYPE::DType merge_saddle_value;
                }
                else {
                    if (d2 > gThreshold) {
#ifdef DEBUGGSERB
                        printf("threshb too big %f, %f, %f\n", d1, d2, gThreshold);
#endif
                        return;
                    }
                    m.merge_persistence = d2;
                    m.extremum1_list_id = current_extrep1_lid;
                    m.extremum2_list_id = current_extrep2_lid;
                    mMergesToCancel.push(m);
                }
                //printf("merged pers %f\n", m.merge_persistence);
                return;
            }
            //if (m.merge_saddle_value == -1) {
            //	printf("pers = %f, sad = %f\n", m.merge_persistence, m.merge_saddle_value);
            //	m.printme();
            //	current_extrep1.printme();
            //	current_extrep2.printme();
            //	printf("frommesh: dim=%d, val=%f, boundary=%d\n", mMesh->dimension(m.saddle_cell_index), mFunc->cellValue(m.saddle_cell_index), mMesh->boundaryValue(m.saddle_cell_index));
            //	MESH_TYPE::FacetsIterator fit(mMesh);
            //	for (fit.begin(m.saddle_cell_index); fit.valid(); fit.advance()) {
            //		printf(" --> %f\n", mFunc->cellValue(fit.value()));
            //	}
            //}
            //return;
            // otherwise actually do the merge
            MergeByVal(current_extrep1_lid, current_extrep2_lid);
            countcancels++;
        }

        // persistence threshold for the current SimplifyToThreshold run
        typename FUNC_TYPE::DType gThreshold;

    public:
        enum Direction { MAXIMAL, MINIMAL };

        // d selects whether this graph merges minima (basins) or maxima.
        UFMergeGraph(FUNC_TYPE* func, MESH_TYPE* mesh, Direction d ) : mFunc(func), mMesh(mesh) {
            if (d == MINIMAL) {
                mDoMinHierarchy = true;
            }
            else {
                mDoMinHierarchy = false;
            }
            countcancels = 0;
        }

        INT_TYPE NumExtrema() const { return this->mExtrema.size(); }
        INT_TYPE Representative(INT_TYPE nid) const { return this->mExtrema[nid].representative_list_id; }
        INDEX_TYPE TopoIndex(INT_TYPE nid) const { return this->mExtrema[nid].extremum_cell_index; }

        void AddNode(INDEX_TYPE key) {
            MakeSet(key);
        }

        // Register a saddle s connecting extrema a and b as a candidate merge;
        // persistence is the smaller |extremum value - saddle value|.
        void AddArc(INDEX_TYPE a, INDEX_TYPE b, INDEX_TYPE s) {
            if (a == b) return; // ignore these
            // ignore connectors that span different boundary types
            DIM_TYPE bva = mMesh->boundaryValue(a);
            DIM_TYPE bvb = mMesh->boundaryValue(b);
            //if (bvb != bva) return;
            DIM_TYPE bvs = mMesh->boundaryValue(s);
            //if (bva != bvs) return;
            // so this could be a valid cancellation so add the mergearc
            MergeArc m;
            m.saddle_cell_index = s;
            m.extremum1_list_id = mCellIndexToListIdMap[a];
            m.extremum2_list_id = mCellIndexToListIdMap[b];
            m.merge_saddle_value = mFunc->cellValue(s);
            //printf("sadval = %f\n", m.merge_saddle_value);
            // reinsert?
            typename FUNC_TYPE::DType d1 = mExtrema[m.extremum1_list_id].extremum_fval - m.merge_saddle_value ;
            typename FUNC_TYPE::DType d2 = mExtrema[m.extremum2_list_id].extremum_fval - m.merge_saddle_value ;
            if (d1 < 0) d1 *= -1;
            if (d2 < 0) d2 *= -1;
            if (d1 < d2) {
                m.merge_persistence = d1;
            }
            else {
                m.merge_persistence = d2;
            }
            // printf("found %f pers d1=%f d2=%f s=%f e1=%f e2=%f\n", m.merge_persistence, d1, d2, m.merge_saddle_value, mExtrema[m.extremum1_list_id].extremum_fval, mExtrema[m.extremum2_list_id].extremum_fval);
            mMergesToCancel.push(m);
        }

        // Drain the queue, cancelling every arc with persistence <= threshold,
        // then flatten the union-find so later queries are O(1).
        // Returns the number of cancellations performed.
        int SimplifyToThreshold(typename FUNC_TYPE::DType threshold) {
            gThreshold = threshold;
            ///printf("simplifying to %f\n", threshold);
            while (!mMergesToCancel.empty()) {
                MergeArc edge = mMergesToCancel.top();
                mMergesToCancel.pop();
                //printf("got %d left\n", mMergesToCancel.size());
                PerformCancel(edge);
            }
            //printf("gothere - siplified now flattening\n");
            // flatten UF so all subsequent queries are CONST
            for (INT_TYPE esid = 0; esid < mExtrema.size(); esid++) {
                FindRepresentative(esid);
            }
            //printf("done!\n");
            //printf("performed %d merges\n", countcancels);
            return countcancels;
        }
    };

    // we will use FUNC_TYPE to access index comparator
    // Builds simplified min and max merge graphs from a discrete gradient:
    // collects critical cells, traces 1-saddles down to minima and 2-saddles
    // up to maxima, and feeds the resulting arcs into two UFMergeGraphs.
    template<class MESH_TYPE, class FUNC_TYPE, class GRAD_TYPE>
    class SimplifiedExtremumGraph {
    public:
        // global context
        GRAD_TYPE* mGrad;
        MESH_TYPE* mMesh;
        FUNC_TYPE* mFunc;
        UFMergeGraph<FUNC_TYPE, MESH_TYPE>* mMinGraph;
        UFMergeGraph<FUNC_TYPE, MESH_TYPE>* mMaxGraph;
        bool do_mins;
        bool do_maxs;

        // Follow the discrete gradient downward (via facets) from cellid until
        // a critical cell is reached; returns that cell.
        INDEX_TYPE rec_td( INDEX_TYPE cellid) const {
            //printf("a %llu d=%d\n", cellid, mMesh->dimension(cellid));
            INDEX_TYPE current = cellid;
            if (mGrad->getCritical(current)) return current;
            //printf("b\n");
            INDEX_TYPE pair = mGrad->getPair(current);
            //printf("c pair = %llu, d=%d\n", pair, mMesh->dimension(pair));
            if (mGrad->getCritical(pair)) return pair; // should NEVER happen
            //printf("c.1\n");
            typename MESH_TYPE::FacetsIterator facets(mMesh);
            facets.begin(pair);
            INDEX_TYPE next = facets.value();
            //printf("next = %llu\n", next);
            if (next == current) {
                facets.advance();
                next = facets.value();
            }
            //printf("next = %llu\n", next);
            //printf("d\n");
            return rec_td(next);
        }

        // Trace both facets of a 1-saddle down to their terminating minima.
        void trace_down_1saddle(INDEX_TYPE start, INDEX_TYPE& min1, INDEX_TYPE& min2) const {
            typename MESH_TYPE::FacetsIterator facets(mMesh);
            facets.begin(start);
            min1 = rec_td(facets.value());
            facets.advance();
            if (!facets.valid()) {
                // error
                printf("should never get here in tarce down 1saddle\n");
                min2 = min1;
                return;
            }
            min2 = rec_td(facets.value());
            return;
        }

        // Follow the discrete gradient upward (via cofacets) from cellid until
        // a critical cell is reached; returns that cell.
        INDEX_TYPE rec_tu(INDEX_TYPE cellid) const {
            INDEX_TYPE current = cellid;
            if (mGrad->getCritical(current)) return current;
            INDEX_TYPE pair = mGrad->getPair(current);
            if (mGrad->getCritical(pair)) return pair; // should NEVER happen
            typename MESH_TYPE::CofacetsIterator cofacets(mMesh);
            cofacets.begin(pair);
            INDEX_TYPE next = cofacets.value();
            if (next == current) {
                cofacets.advance();
                if (!cofacets.valid()) {
                    printf("WHOATHERE\n");
                }
                next = cofacets.value();
            }
            return rec_tu(next);
        }

        // Trace both cofacets of a 2-saddle up to their terminating maxima.
        // Boundary saddles are skipped (max1 = max2 = -1).
        void trace_up_2saddle(const INDEX_TYPE& start, INDEX_TYPE& max1, INDEX_TYPE& max2) const {
            if (mMesh->boundaryValue(start) != 0) {
                max1 = max2 = -1;
                return;
            }
            typename MESH_TYPE::CofacetsIterator cofacets(mMesh);
            cofacets.begin(start);
            max1 = rec_tu(cofacets.value());
            cofacets.advance();
            if (!cofacets.valid()) {
                // error
                printf("should never get here in tarce up 2saddle\n");
                max2 = max1;
                return;
            }
            max2 = rec_tu(cofacets.value());
            return;
        }

    public:
        SimplifiedExtremumGraph(MESH_TYPE* mesh, FUNC_TYPE* func, GRAD_TYPE* grad) :
            mGrad(grad), mMesh(mesh), mFunc(func){
            mMinGraph = new UFMergeGraph < FUNC_TYPE, MESH_TYPE >(func, mesh, UFMergeGraph < FUNC_TYPE, MESH_TYPE >::MINIMAL);
            mMaxGraph = new UFMergeGraph < FUNC_TYPE, MESH_TYPE >(func, mesh, UFMergeGraph < FUNC_TYPE, MESH_TYPE >::MAXIMAL);
        }

        enum EXTGRAPHMODE { NONE, BOTH, MINS, MAXS };

        // Select which of the two graphs ComputeMinMapFromGradient fills.
        void SetMode(EXTGRAPHMODE m) {
            switch (m) {
            case NONE:
                this->do_maxs = false;
                this->do_mins = false;
                return;
            case BOTH:
                this->do_maxs = true;
this->do_mins = true;
                return;
            case MINS:
                this->do_maxs = false;
                this->do_mins = true;
                return;
            case MAXS:
                this->do_maxs = true;
                this->do_mins = false;
                return;
            }
        }

        // Parallel (OpenMP) construction + simplification of the min/max merge
        // graphs: (1) scan all cells for critical minima/1-saddles and
        // 2-saddles/maxima, (2) trace each saddle to its two extrema and add
        // the arc, (3) simplify both graphs to THRESHOLD.
        void ComputeMinMapFromGradient(typename FUNC_TYPE::DType THRESHOLD) {
            std::vector<INDEX_TYPE> saddles1;
            std::vector<INDEX_TYPE> saddles2;
            std::vector<INDEX_TYPE> topo_index_partition;
            int num_threads;
#pragma omp parallel
            {
                // START PARALLEL CONSTRUCTION
                // divide index space
#pragma omp single
                {
                    num_threads = omp_get_num_threads();
                    ArrayIndexPartitioner::EvenChunkSplit(mMesh->numCells(), num_threads, topo_index_partition);
                }
#pragma omp barrier
                // SCAN ALL CELLS COLLECT MINIMA, MAXIMA, 1SADDLES, 2SADDLES
                int thread_num = omp_get_thread_num();
                // in parallel go through and find all 2-saddles
                std::vector<INDEX_TYPE> lminima;
                std::vector<INDEX_TYPE> l1saddles;
                std::vector<INDEX_TYPE> lmaxima;
                std::vector<INDEX_TYPE> l2saddles;
                //#pragma omp critical
                //			{
                //				printf("thread %d doing index %d-%d\n", thread_num, topo_index_partition[thread_num], topo_index_partition[thread_num + 1]);
                //			}
                typename MESH_TYPE::AllCellsIterator cellit(mMesh, topo_index_partition[thread_num], topo_index_partition[thread_num + 1]);
                for (cellit.begin(); cellit.valid(); cellit.advance()) {
                    INDEX_TYPE cell_id = cellit.value();
                    if (mGrad->getCritical(cell_id)) {
                        DIM_TYPE d = mMesh->dimension(cell_id);
                        if (do_mins) {
                            if (d == 0) {
                                lminima.push_back(cell_id);
                            }
                            else if (d == 1) {
                                l1saddles.push_back(cell_id);
                            }
                        }
                        if (do_maxs) {
                            if (d == 2) {
                                l2saddles.push_back(cell_id);
                            }
                            else if (d == 3) {
                                lmaxima.push_back(cell_id);
                            }
                        }
                    }
                }
#pragma omp barrier
                // COMBINE CRITICAL POINT LISTS
#pragma omp critical
                {
                    //printf("gothere mins %d, saddles %d\n", lminima.size(), l1saddles.size());
                    for (auto id : lminima) {
                        mMinGraph->AddNode(id);
                    }
                    //printf("hothere\n");
                    saddles1.insert(saddles1.end(), l1saddles.begin(), l1saddles.end() );
                    //printf("asdfasdfasdfasdf\n");
                }
#pragma omp critical
                {
                    //printf("gothere maxs\n");
                    for (auto id : lmaxima) {
                        mMaxGraph->AddNode(id);
                    }
                    saddles2.insert(saddles2.end(), l2saddles.begin(), l2saddles.end() );
                }
#pragma omp barrier
                // TRACE 1-SADDLES DOWN TO MINIMA AND ADD MIN-GRAPH ARCS
                int num1s = saddles1.size();
#pragma omp single
                {
                    //printf("gothere2 \n");
                    ArrayIndexPartitioner::EvenChunkSplit(num1s, num_threads, topo_index_partition);
                }
#pragma omp barrier
                thread_num = omp_get_thread_num();
                for (int index = topo_index_partition[thread_num]; index < topo_index_partition[thread_num + 1]; index++) {
                    INDEX_TYPE sad1 = saddles1[index];
                    INDEX_TYPE min1, min2;
                    trace_down_1saddle(sad1, min1, min2);
                    if (min1 != min2) {
#pragma omp critical
                        {
                            //printf("adding %llu, %llu, %llu\n", sad1, min1, min2);
                            mMinGraph->AddArc(min1, min2, sad1);
                            //printf("added!\n");
                        }
                    }
                }
#pragma omp barrier
                // TRACE 2-SADDLES UP TO MAXIMA AND ADD MAX-GRAPH ARCS
                int num2s = saddles2.size();
#pragma omp single
                {
                    //printf("gothere3 \n");
                    ArrayIndexPartitioner::EvenChunkSplit(num2s, num_threads, topo_index_partition);
                }
#pragma omp barrier
                thread_num = omp_get_thread_num();
                for (int index = topo_index_partition[thread_num]; index < topo_index_partition[thread_num + 1]; index++) {
                    INDEX_TYPE sad2 = saddles2[index];
                    INDEX_TYPE max1, max2;
                    trace_up_2saddle(sad2, max1, max2);
                    if (max1 != max2) {
#pragma omp critical
                        {
                            mMaxGraph->AddArc(max1, max2, sad2);
                        }
                    }
                }
#pragma omp barrier
#pragma omp single
                {
                    printf("built minmaxgraph:\n mingraph = %d nodes, %d arcs\n maxgraph = %d nodes, %d arcs\n", this->mMinGraph->NumExtrema(), this->mMinGraph->mMergesToCancel.size(), this->mMaxGraph->NumExtrema(), this->mMaxGraph->mMergesToCancel.size());
                }
#pragma omp single
                {
                    int numc = mMinGraph->SimplifyToThreshold(THRESHOLD);
                    printf("MinGraph did %d cancellations\n", numc);
                }
#pragma omp single
                {
                    int numc = mMaxGraph->SimplifyToThreshold(THRESHOLD);
                    printf("MaxGraph did %d cancellations\n", numc);
                }
            } // END OMP PARALLEL
        }

        // phase 0: (build nodes)
        // -- gather extrema
        // -- for each extremum, set itself as the representative_list_id
        // Phase 1: (build directed graph)
        // -- for each saddle s, take less extreme node nl, if |v(s) - v(nl)| < t
        // where t is max simp thresh, if s is "closer" to nl than nl's current saddle
        // replace nl's current saddle with s
        // Phase 2: (union-find representatives)
        //need map<INDEX_TYPE> -> int to find extrema when tracing paths
        //need struct arc { saddle_value; more_extreme_node_id; }
    };

    //typedef IndexCompareLessThan Comparer;
    // Segments a regular 3D grid into extremal regions: finds every vertex
    // that is extremal in its 6-neighborhood, then grows each region outward
    // in priority order, labeling a vertex only once all of its "earlier"
    // neighbors agree on a label.
    template< class Comparer>
    class GridExtremalRegionBuilder {
    protected:
        DenseLabeling<int>* m_dest_label; // per-vertex region label (-1 = unassigned)
        RegularGrid3D* m_grid;
        RegularGridTrilinearFunction* m_func;
        Vec3i m_xyz;
        Vec3b m_periodic;

        inline bool AComesBeforeB(INDEX_TYPE a, INDEX_TYPE b) const {
            return mCompare->Compare(a, b);
        }

        // True if no same-boundary 6-neighbor precedes id in the comparator order.
        bool IsExtremeVertexIn6Neighborhood(INDEX_TYPE id) const {
            Vec3l t_neighbors[6];
            Vec3l t_coords = m_grid->XYZ3d(id);
            int t_num_neighbors = m_grid->GatherExistingNeighborsSameBdry6(t_coords, t_neighbors);
            INDEX_TYPE t_current_lowest = id;
            for (int i = 0; i < t_num_neighbors; i++) {
                INDEX_TYPE t_neighbor_vertex = m_grid->Index3d(t_neighbors[i]);
                if (AComesBeforeB(t_neighbor_vertex, t_current_lowest)) {
                    return false;
                }
            }
            return true;
        }

        // Push every unlabeled, not-yet-enqueued neighbor that comes AFTER the
        // current vertex onto the growing front.
        void Enqueue_Later_Neighbors(Vec3l xyz, std::priority_queue<INDEX_TYPE, std::vector<INDEX_TYPE>, Comparer > &expansion,
            std::set<INDEX_TYPE>&enqueued_set) {
            INDEX_TYPE currentVID = m_grid->Index3d(xyz);
            Vec3l neighbors[6];
            int num_neighbors = m_grid->GatherExistingNeighborsSameBdry6(xyz, neighbors);
            for (int i = 0; i < num_neighbors; i++) {
                INDEX_TYPE neighborVID = m_grid->Index3d(neighbors[i]);
                if (m_dest_label->GetLabel(neighborVID) == -1 &&
                    AComesBeforeB(currentVID, neighborVID) &&
                    enqueued_set.count(neighborVID) == 0) {
                    enqueued_set.insert(neighborVID);
                    expansion.push(neighborVID);
                }
            }
        }

        // look at neighborhood of currentVID, for each neighbor, if it's "earlier"
        // check if it has been assigned- if not, then this point is unassigned
        // if it is assigned, check that each point has same assignment,
        // if there are no neighbors, return its original label
        int InspectPriorRegions(INDEX_TYPE currentVID) {
            INDEX_TYPE neighborVID;
            int extremal_certain = m_dest_label->GetLabel(currentVID);
            bool has_extremal = false;
            Vec3l neighbors[6];
            int num_neighbors = m_grid->GatherExistingNeighborsSameBdry6(m_grid->XYZ3d(currentVID), neighbors);
            for (int i = 0; i < num_neighbors; i++) {
                INDEX_TYPE neighborVID = m_grid->Index3d(neighbors[i]);
                if (AComesBeforeB(neighborVID, currentVID)) {
                    if (m_dest_label->GetLabel(neighborVID) == -1) return -1; // if a extremal one is uncertain, we are uncertain
                    if (!has_extremal) {
                        extremal_certain = m_dest_label->GetLabel(neighborVID);
                        has_extremal = true;
                    }
                    else {
                        if (extremal_certain != m_dest_label->GetLabel(neighborVID)) return -1;
                    }
                }
            }
            //if (!has_extremal) {
            //	printf("ERROR should never get here2\n");
            //	Vec3l coords = m_grid->XYZ3d(currentVID); coords.PrintInt();
            //	return -1;
            //}
            return extremal_certain;
        }

        // Flood-fill one region from its extremum, labeling vertices whose
        // earlier neighbors all agree (per InspectPriorRegions).
        void Expand_Lower_Neighborhood(INDEX_TYPE startid, int start_label) {
            Vec3l xyz = m_grid->XYZ3d(startid);
            std::set<INDEX_TYPE> enqueued_set;
            INDEX_TYPE currentVID = startid;
            // the natural ordering using the < operator on pairs will give us the highest
            // element first, simulating region growing from high to low
            std::priority_queue<INDEX_TYPE, std::vector<INDEX_TYPE>, Comparer > growing_front(*mCompare);
            enqueued_set.insert(startid);
            m_dest_label->SetLabel(startid, start_label);
            Enqueue_Later_Neighbors(xyz, growing_front, enqueued_set);
            while (!growing_front.empty()) {
                INDEX_TYPE currid = growing_front.top();
                growing_front.pop();
                int cellvale = InspectPriorRegions(currid); // find extremals
                // cellvalue >=0 indicates that there is certainty here, so lets expand
                if (cellvale >= 0) {
                    m_dest_label->SetLabel(currid, cellvale);
                    Enqueue_Later_Neighbors(m_grid->XYZ3d(currid), growing_front, enqueued_set);
                }
            }
        }

        Comparer* mCompare;
        std::vector<INDEX_TYPE> mExtrema; // one seed vertex per region

    public:
        GridExtremalRegionBuilder(RegularGridTrilinearFunction* func, RegularGrid3D* grid) :
            m_xyz(func->GetGrid()->XYZ()), m_periodic(func->GetGrid()->Periodic()), m_func(func), m_grid(grid) {
            mCompare = new
m_grid->GatherExistingNeighborsAll6(xyz, neighbors); // we want to grow towards the middle?
            for (int i = 0; i < num_neighbors; i++) {
                INDEX_TYPE neighborVID = m_grid->Index3d(neighbors[i]);
                if (m_dest_label->GetLabel(neighborVID) == -1 &&
                    AComesBeforeB(currentVID, neighborVID) &&
                    enqueued_set.count(neighborVID) == 0) {
#ifdef DEBUGGSERB
                    testout->SetLabel(neighborVID, 1);
#endif
                    enqueued_set.insert(neighborVID);
                    expansion.push(neighborVID);
                }
            }
        }

        // look at neighborhood of currentVID, for each neighbor, if it's "earlier"
        // check if it has been assigned- if not, then this point is unassigned
        // if it is assigned, check that each point has same assignment,
        // if there are no neighbors, return its original label
#ifdef DEBUGGSERB
        int InspectPriorRegions(INDEX_TYPE currentVID, std::set<INDEX_TYPE>& enqueued_set)
#else
        int InspectPriorRegions(INDEX_TYPE currentVID)
#endif
        {
            INDEX_TYPE neighborVID;
            int extremal_certain = m_dest_label->GetLabel(currentVID);
            bool has_extremal = false;
            Vec3l neighbors[6];
            int num_neighbors = m_grid->GatherExistingNeighborsAll6(m_grid->XYZ3d(currentVID), neighbors);
            for (int i = 0; i < num_neighbors; i++) {
                INDEX_TYPE neighborVID = m_grid->Index3d(neighbors[i]);
                if (AComesBeforeB(neighborVID, currentVID)) {
                    if (m_dest_label->GetLabel(neighborVID) == -1) {
#ifdef DEBUGGSERB
                        //printf("%f 's before neighbor %f has -1 label : c%d, n%d\n",
                        //	m_func->SampleImage(currentVID),
                        //	m_func->SampleImage(neighborVID),
                        //	enqueued_set.count(currentVID),
                        //	enqueued_set.count(neighborVID));
                        //auto v1 = m_grid->XYZ3d(currentVID);
                        //auto v2 = m_grid->XYZ3d(neighborVID);
                        //v1.PrintInt();
                        //v2.PrintInt();
#endif
                        return -1; // if a extremal one is uncertain, we are uncertain
                    }
                    if (!has_extremal) {
                        extremal_certain = m_dest_label->GetLabel(neighborVID);
                        has_extremal = true;
                    }
                    else {
                        if (extremal_certain != m_dest_label->GetLabel(neighborVID)) {
                            return -1;
                        }
                    }
                }
            }
            //if (!has_extremal) {
            //	printf("ERROR should never get here2\n");
            //	Vec3l coords = m_grid->XYZ3d(currentVID); coords.PrintInt();
            //	return -1;
            //}
            return extremal_certain;
        }

#ifdef DEBUGGSERB
        DenseLabeling<int>* testout;
#endif

        // Flood-fill one region from a SET of seed vertices (all the cells the
        // simplified graph mapped to the same representative extremum).
        void Expand_Lower_Neighborhood(const std::vector<INDEX_TYPE>& startids, int start_label) {
            std::set<INDEX_TYPE> enqueued_set;
            std::priority_queue<INDEX_TYPE, std::vector<INDEX_TYPE>, Comparer > growing_front(*mCompare);
            // first mark every seed with the region label...
            for (auto startid : startids) {
                //if (! this->IsExtremeVertexIn6Neighborhood(startid)) {
                //	printf("NONMAX START ID\n");
                //}
                // the natural ordering using the < operator on pairs will give us the highest
                // element first, simulating region growing from high to low
                enqueued_set.insert(startid);
                m_dest_label->SetLabel(startid, start_label);
#ifdef DEBUGGSERB
                testout->SetLabel(startid, 2);
#endif
            }
#ifdef DEBUGGSERB
            //int numvalid = 0;
            //for (INDEX_TYPE i = 0; i < m_grid->NumElements(); i++) {
            //	if (this->IsExtremeVertexIn6Neighborhood(i)) {
            //		if (m_dest_label->GetLabel(i) == -1) {
            //			printf("UNFOUND EXTREMA!! %llu\n", i);
            //		}
            //		numvalid++;// printf("extremum validated: %llu\n", i);
            //	}
            //	else {
            //		//growing_front.push(i);
            //		//enqueued_set.insert(i);
            //	}
            //}
            //printf("validated %d extrema\n", numvalid);
            //while (!growing_front.empty()) {
            //	INDEX_TYPE currid = growing_front.top();
            //	growing_front.pop();
            //	int cellvale = InspectPriorRegions(currid, enqueued_set);
            //	if (cellvale >= 0) {
            //		m_dest_label->SetLabel(currid, cellvale);
            //		testout->SetLabel(currid, 2);
            //	}
            //}
            //return;
#endif
            // ...then seed the growing front from every seed's later neighbors.
            for (auto startid : startids) {
#ifdef DEBUGGSERB
                INDEX_TYPE TESTID = InspectPriorRegions(startid, enqueued_set);
                if (TESTID == -1) printf("WHOATHERE %ll %llu\n", TESTID, startid);
#endif
                Vec3l xyz = m_grid->XYZ3d(startid);
                Enqueue_Later_Neighbors(xyz, growing_front, enqueued_set);
            }
#ifdef DEBUGGSERB
            printf("growing front size %d\n", growing_front.size());
#endif
            while (!growing_front.empty()) {
                INDEX_TYPE currid = growing_front.top();
                growing_front.pop();
                //printf("asdf %f\n", m_func->SampleImage(currid));
#ifdef DEBUGGSERB
                int cellvale = InspectPriorRegions(currid, enqueued_set);
#else
                int cellvale = InspectPriorRegions(currid);
#endif
                // find extremals
                //continue;
                // cellvalue >=0 indicates that there is certainty here, so lets expand
                if (cellvale >= 0) {
                    m_dest_label->SetLabel(currid, cellvale);
#ifdef DEBUGGSERB
                    testout->SetLabel(currid, 2);
#endif
                    Enqueue_Later_Neighbors(m_grid->XYZ3d(currid), growing_front, enqueued_set);
                }
            }
        }

        std::unordered_map<INT_TYPE, std::vector<INDEX_TYPE> > mIdToVIndexMap; // region label -> seed vertex list
        Comparer* mCompare;
        //std::vector<INDEX_TYPE> mExtrema;
        TopoGridType* mMesh;

    public:
        GridSimplifiedExtremalRegionBuilder(GridFuncType* func, RegularGrid3D* grid, TopoGridType* tgrid) :
            m_func(func), m_grid(grid), mMesh(tgrid) {
            mCompare = new Comparer(func);
#ifdef DEBUGGSERB
            testout = new DenseLabeling<int>(grid->NumElements());
#endif
        }

        DenseLabeling<int>* GetOutputLabels() { return m_dest_label; }
        RegularGrid3D* GetGrid() { return m_grid; }
        GridFuncType* GetFunction() { return m_func; }
        // NOTE(review): mExtrema is commented out in this class (see above);
        // instantiating a call to GetExtrema() will fail to compile -- verify
        // whether this accessor is dead code or mExtrema should be restored.
        const std::vector<INDEX_TYPE>& GetExtrema() const { return this->mExtrema; }
        const std::unordered_map<INT_TYPE, std::vector<INDEX_TYPE> >& GetIdMap() { return mIdToVIndexMap; }

        // the extremum map takes in the GID of a VERTEX (actually any cell but it gets the vertex number by just
        // rounding down. to get the right vertex in a cell, pass in a map which uses max labeling to replace each
        // cell with its highest vertex.
        void BeginIntegration(const std::unordered_map<INDEX_TYPE, INT_TYPE>& extremaGIDtoLabelMap, bool verbose = false) {
#ifdef DEBUGGSERB
            printf("gothere1 %d\n", extremaGIDtoLabelMap.size());
            mCompare->PrintRule();
            testout->SetAll(0);
#endif
            ThreadedTimer gtimer(1);
            gtimer.StartGlobal();
            m_dest_label = new DenseLabeling<int>(m_grid->NumElements());
            mIdToVIndexMap.clear();
            const INDEX_TYPE t_num_vertices = m_grid->NumElements();
            m_dest_label->SetAll(-1);
            printf("starting expansion\n");
            //#pragma omp parallel for
            //		for (INDEX_TYPE i = 0; i < t_num_vertices; i++) {
            //			m_dest_label->SetLabel(i, -1);
            //
            //			if (IsExtremeVertexIn6Neighborhood(i)) {
            //#pragma omp critical
            //			{
            //				mExtrema.push_back(i);
            //			}
            //			}
            //		}
#ifdef DEBUGGSERB
            printf("gothere2\n");
#endif
            // group the seed cells by their (simplified) region label
            std::vector<INT_TYPE> uniqueids;
            for (auto p : extremaGIDtoLabelMap) {
                if (mIdToVIndexMap.count(p.second) == 0) uniqueids.push_back(p.second);
                mIdToVIndexMap[p.second].push_back(mMesh->VertexNumberFromCellID(p.first));
            }
#ifdef DEBUGGSERB
            for (auto vv : mIdToVIndexMap) {
                printf("%d: %d ids\n", vv.first, vv.second.size());
            }
#endif
            //for (auto m : mExtrema) {
            //	INDEX_TYPE tgridid = mMesh->CellIDFromVertexNumber(m);
            //	if (extremaGIDtoLabelMap.count(tgridid) == 0) {
            //		printf("WHOA THERE EXTMAP DOES NOT CONTAIN CP\n");
            //	}
            //}
            int num_unique = uniqueids.size();
            printf("Found and expanding %d regions\n", num_unique);
#pragma omp parallel
            {
#pragma omp for schedule(dynamic)  nowait
                for (int m = 0; m < num_unique; m++) {
                    Expand_Lower_Neighborhood(mIdToVIndexMap[uniqueids[m]], m);
                }
            }
#ifdef DEBUGGSERB
            printf("gothere3\n");
            testout->OutputToIntFile("testoutasdfasdfasdf.raw");
#endif
        }
    };
}
#endif
threshold.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % TTTTT H H RRRR EEEEE SSSSS H H OOO L DDDD % % T H H R R E SS H H O O L D D % % T HHHHH RRRR EEE SSS HHHHH O O L D D % % T H H R R E SS H H O O L D D % % T H H R R EEEEE SSSSS H H OOO LLLLL DDDD % % % % % % MagickCore Image Threshold Methods % % % % Software Design % % Cristy % % October 1996 % % % % % % Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/property.h" #include "MagickCore/blob.h" #include "MagickCore/cache-view.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colormap.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/configure.h" #include "MagickCore/constitute.h" #include "MagickCore/decorate.h" #include "MagickCore/draw.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/effect.h" #include "MagickCore/fx.h" #include "MagickCore/gem.h" #include "MagickCore/geometry.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/montage.h" #include "MagickCore/option.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/quantize.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/random_.h" #include "MagickCore/random-private.h" #include "MagickCore/resize.h" #include "MagickCore/resource_.h" #include "MagickCore/segment.h" #include "MagickCore/shear.h" #include "MagickCore/signature-private.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/threshold.h" #include "MagickCore/token.h" #include "MagickCore/transform.h" #include "MagickCore/xml-tree.h" #include "MagickCore/xml-tree-private.h" /* Define declarations. */ #define ThresholdsFilename "thresholds.xml" /* Typedef declarations. */ struct _ThresholdMap { char *map_id, *description; size_t width, height; ssize_t divisor, *levels; }; /* Static declarations. 
*/
static const char
  *MinimalThresholdMap =
    "<?xml version=\"1.0\"?>"
    "<thresholds>"
    " <threshold map=\"threshold\" alias=\"1x1\">"
    " <description>Threshold 1x1 (non-dither)</description>"
    " <levels width=\"1\" height=\"1\" divisor=\"2\">"
    " 1"
    " </levels>"
    " </threshold>"
    " <threshold map=\"checks\" alias=\"2x1\">"
    " <description>Checkerboard 2x1 (dither)</description>"
    " <levels width=\"2\" height=\"2\" divisor=\"3\">"
    " 1 2"
    " 2 1"
    " </levels>"
    " </threshold>"
    "</thresholds>";

/*
  Forward declarations.
*/
static ThresholdMap
  *GetThresholdMapFile(const char *,const char *,const char *,ExceptionInfo *);

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     A d a p t i v e T h r e s h o l d I m a g e                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AdaptiveThresholdImage() selects an individual threshold for each pixel
%  based on the range of intensity values in its local neighborhood.  This
%  allows for thresholding of an image whose global intensity histogram
%  doesn't contain distinctive peaks.
%
%  The format of the AdaptiveThresholdImage method is:
%
%      Image *AdaptiveThresholdImage(const Image *image,const size_t width,
%        const size_t height,const double bias,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o width: the width of the local neighborhood.
%
%    o height: the height of the local neighborhood.
%
%    o bias: the mean bias.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AdaptiveThresholdImage(const Image *image,
  const size_t width,const size_t height,const double bias,
  ExceptionInfo *exception)
{
#define AdaptiveThresholdImageTag  "AdaptiveThreshold/Image"

  CacheView
    *image_view,
    *threshold_view;

  Image
    *threshold_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickSizeType
    number_pixels;

  ssize_t
    y;

  /*
    Initialize threshold image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  threshold_image=CloneImage(image,0,0,MagickTrue,exception);
  if (threshold_image == (Image *) NULL)
    return((Image *) NULL);
  status=SetImageStorageClass(threshold_image,DirectClass,exception);
  if (status == MagickFalse)
    {
      threshold_image=DestroyImage(threshold_image);
      return((Image *) NULL);
    }
  /*
    Threshold image.
  */
  status=MagickTrue;
  progress=0;
  number_pixels=(MagickSizeType) width*height;
  image_view=AcquireVirtualCacheView(image,exception);
  threshold_view=AcquireAuthenticCacheView(threshold_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,threshold_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      channel_bias[MaxPixelChannels],
      channel_sum[MaxPixelChannels];

    register const Quantum
      *magick_restrict p,
      *magick_restrict pixels;

    register Quantum
      *magick_restrict q;

    register ssize_t
      i,
      x;

    ssize_t
      center,
      u,
      v;

    if (status == MagickFalse)
      continue;
    /*
      The virtual view is widened by the neighborhood size so the window can
      hang off the image edges; center indexes the pixel under the window.
    */
    p=GetCacheViewVirtualPixels(image_view,-((ssize_t) width/2L),y-(ssize_t)
      (height/2L),image->columns+width,height,exception);
    q=QueueCacheViewAuthenticPixels(threshold_view,0,y,threshold_image->columns,
      1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    center=(ssize_t) GetPixelChannels(image)*(image->columns+width)*(height/2L)+
      GetPixelChannels(image)*(width/2);
    /*
      Prime the running window sums for the first column of this row;
      channel_bias accumulates the window's last column so it can be
      subtracted when the window slides one pixel to the right.
    */
    for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
    {
      PixelChannel channel = GetPixelChannelChannel(image,i);
      PixelTrait traits = GetPixelChannelTraits(image,channel);
      PixelTrait threshold_traits=GetPixelChannelTraits(threshold_image,
        channel);
      if ((traits == UndefinedPixelTrait) ||
          (threshold_traits == UndefinedPixelTrait))
        continue;
      if ((threshold_traits & CopyPixelTrait) != 0)
        {
          SetPixelChannel(threshold_image,channel,p[center+i],q);
          continue;
        }
      pixels=p;
      channel_bias[channel]=0.0;
      channel_sum[channel]=0.0;
      for (v=0; v < (ssize_t) height; v++)
      {
        for (u=0; u < (ssize_t) width; u++)
        {
          if (u == (ssize_t) (width-1))
            channel_bias[channel]+=pixels[i];
          channel_sum[channel]+=pixels[i];
          pixels+=GetPixelChannels(image);
        }
        pixels+=GetPixelChannels(image)*image->columns;
      }
    }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          mean;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait threshold_traits=GetPixelChannelTraits(threshold_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (threshold_traits == UndefinedPixelTrait))
          continue;
        if ((threshold_traits & CopyPixelTrait) != 0)
          {
            SetPixelChannel(threshold_image,channel,p[center+i],q);
            continue;
          }
        /*
          Slide the window: drop the previous rightmost column, then add the
          new rightmost column while recording it as the next bias.
        */
        channel_sum[channel]-=channel_bias[channel];
        channel_bias[channel]=0.0;
        pixels=p;
        for (v=0; v < (ssize_t) height; v++)
        {
          channel_bias[channel]+=pixels[i];
          pixels+=(width-1)*GetPixelChannels(image);
          channel_sum[channel]+=pixels[i];
          pixels+=GetPixelChannels(image)*(image->columns+1);
        }
        mean=(double) (channel_sum[channel]/number_pixels+bias);
        SetPixelChannel(threshold_image,channel,(Quantum) ((double)
          p[center+i] <= mean ? 0 : QuantumRange),q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(threshold_image);
    }
    if (SyncCacheViewAuthenticPixels(threshold_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,AdaptiveThresholdImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  threshold_image->type=image->type;
  threshold_view=DestroyCacheView(threshold_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    threshold_image=DestroyImage(threshold_image);
  return(threshold_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     A u t o T h r e s h o l d I m a g e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AutoThresholdImage() automatically performs image thresholding
%  dependent on which method you specify.
%
%  The format of the AutoThresholdImage method is:
%
%      MagickBooleanType AutoThresholdImage(Image *image,
%        const AutoThresholdMethod method,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: The image to auto-threshold.
%
%    o method: choose from Kapur, OTSU, or Triangle.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static double KapurThreshold(const Image *image,const double *histogram,
  ExceptionInfo *exception)
{
#define MaxIntensity  255

  double
    *black_entropy,
    *cumulative_histogram,
    entropy,
    epsilon,
    maximum_entropy,
    *white_entropy;

  register ssize_t
    i,
    j;

  size_t
    threshold;

  /*
    Compute optimal threshold from the entropy of the histogram.
  */
  cumulative_histogram=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
    sizeof(*cumulative_histogram));
  black_entropy=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
    sizeof(*black_entropy));
  white_entropy=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
    sizeof(*white_entropy));
  if ((cumulative_histogram == (double *) NULL) ||
      (black_entropy == (double *) NULL) || (white_entropy == (double *) NULL))
    {
      if (white_entropy != (double *) NULL)
        white_entropy=(double *) RelinquishMagickMemory(white_entropy);
      if (black_entropy != (double *) NULL)
        black_entropy=(double *) RelinquishMagickMemory(black_entropy);
      if (cumulative_histogram != (double *) NULL)
        cumulative_histogram=(double *)
          RelinquishMagickMemory(cumulative_histogram);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(-1.0);  /* sentinel: caller treats a negative threshold as error */
    }
  /*
    Entropy for black and white parts of the histogram.  The histogram is
    assumed normalized, so cumulative_histogram[j] is the probability mass of
    bins 0..j and 1.0-cumulative_histogram[j] the mass of bins j+1..255.
  */
  cumulative_histogram[0]=histogram[0];
  for (i=1; i <= MaxIntensity; i++)
    cumulative_histogram[i]=cumulative_histogram[i-1]+histogram[i];
  epsilon=MagickMinimumValue;
  for (j=0; j <= MaxIntensity; j++)
  {
    /*
      Black entropy.
    */
    black_entropy[j]=0.0;
    if (cumulative_histogram[j] > epsilon)
      {
        entropy=0.0;
        for (i=0; i <= j; i++)
          if (histogram[i] > epsilon)
            entropy-=histogram[i]/cumulative_histogram[j]*
              log(histogram[i]/cumulative_histogram[j]);
        black_entropy[j]=entropy;
      }
    /*
      White entropy.
    */
    white_entropy[j]=0.0;
    if ((1.0-cumulative_histogram[j]) > epsilon)
      {
        entropy=0.0;
        for (i=j+1; i <= MaxIntensity; i++)
          if (histogram[i] > epsilon)
            entropy-=histogram[i]/(1.0-cumulative_histogram[j])*
              log(histogram[i]/(1.0-cumulative_histogram[j]));
        white_entropy[j]=entropy;
      }
  }
  /*
    Find histogram bin with maximum entropy.
  */
  maximum_entropy=black_entropy[0]+white_entropy[0];
  threshold=0;
  for (j=1; j <= MaxIntensity; j++)
    if ((black_entropy[j]+white_entropy[j]) > maximum_entropy)
      {
        maximum_entropy=black_entropy[j]+white_entropy[j];
        threshold=(size_t) j;
      }
  /*
    Free resources.
  */
  white_entropy=(double *) RelinquishMagickMemory(white_entropy);
  black_entropy=(double *) RelinquishMagickMemory(black_entropy);
  cumulative_histogram=(double *) RelinquishMagickMemory(cumulative_histogram);
  return(100.0*threshold/MaxIntensity);  /* threshold as a percentage */
}

static double OTSUThreshold(const Image *image,const double *histogram,
  ExceptionInfo *exception)
{
  double
    max_sigma,
    *myu,
    *omega,
    *probability,
    *sigma,
    threshold;

  register ssize_t
    i;

  /*
    Compute optimal threshold from maximization of inter-class variance.
  */
  myu=(double *) AcquireQuantumMemory(MaxIntensity+1UL,sizeof(*myu));
  omega=(double *) AcquireQuantumMemory(MaxIntensity+1UL,sizeof(*omega));
  probability=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
    sizeof(*probability));
  sigma=(double *) AcquireQuantumMemory(MaxIntensity+1UL,sizeof(*sigma));
  if ((myu == (double *) NULL) || (omega == (double *) NULL) ||
      (probability == (double *) NULL) || (sigma == (double *) NULL))
    {
      if (sigma != (double *) NULL)
        sigma=(double *) RelinquishMagickMemory(sigma);
      if (probability != (double *) NULL)
        probability=(double *) RelinquishMagickMemory(probability);
      if (omega != (double *) NULL)
        omega=(double *) RelinquishMagickMemory(omega);
      if (myu != (double *) NULL)
        myu=(double *) RelinquishMagickMemory(myu);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(-1.0);  /* sentinel: caller treats a negative threshold as error */
    }
  /*
    Calculate probability density.
  */
  for (i=0; i <= (ssize_t) MaxIntensity; i++)
    probability[i]=histogram[i];
  /*
    Generate probability of graylevels and mean value for separation.
*/ omega[0]=probability[0]; myu[0]=0.0; for (i=1; i <= (ssize_t) MaxIntensity; i++) { omega[i]=omega[i-1]+probability[i]; myu[i]=myu[i-1]+i*probability[i]; } /* Sigma maximization: inter-class variance and compute optimal threshold. */ threshold=0; max_sigma=0.0; for (i=0; i < (ssize_t) MaxIntensity; i++) { sigma[i]=0.0; if ((omega[i] != 0.0) && (omega[i] != 1.0)) sigma[i]=pow(myu[MaxIntensity]*omega[i]-myu[i],2.0)/(omega[i]*(1.0- omega[i])); if (sigma[i] > max_sigma) { max_sigma=sigma[i]; threshold=(double) i; } } /* Free resources. */ myu=(double *) RelinquishMagickMemory(myu); omega=(double *) RelinquishMagickMemory(omega); probability=(double *) RelinquishMagickMemory(probability); sigma=(double *) RelinquishMagickMemory(sigma); return(100.0*threshold/MaxIntensity); } static double TriangleThreshold(const double *histogram) { double a, b, c, count, distance, inverse_ratio, max_distance, segment, x1, x2, y1, y2; register ssize_t i; ssize_t end, max, start, threshold; /* Compute optimal threshold with triangle algorithm. */ start=0; /* find start bin, first bin not zero count */ for (i=0; i <= (ssize_t) MaxIntensity; i++) if (histogram[i] > 0.0) { start=i; break; } end=0; /* find end bin, last bin not zero count */ for (i=(ssize_t) MaxIntensity; i >= 0; i--) if (histogram[i] > 0.0) { end=i; break; } max=0; /* find max bin, bin with largest count */ count=0.0; for (i=0; i <= (ssize_t) MaxIntensity; i++) if (histogram[i] > count) { max=i; count=histogram[i]; } /* Compute threshold at split point. 
*/ x1=(double) max; y1=histogram[max]; x2=(double) end; if ((max-start) >= (end-max)) x2=(double) start; y2=0.0; a=y1-y2; b=x2-x1; c=(-1.0)*(a*x1+b*y1); inverse_ratio=1.0/sqrt(a*a+b*b+c*c); threshold=0; max_distance=0.0; if (x2 == (double) start) for (i=start; i < max; i++) { segment=inverse_ratio*(a*i+b*histogram[i]+c); distance=sqrt(segment*segment); if ((distance > max_distance) && (segment > 0.0)) { threshold=i; max_distance=distance; } } else for (i=end; i > max; i--) { segment=inverse_ratio*(a*i+b*histogram[i]+c); distance=sqrt(segment*segment); if ((distance > max_distance) && (segment < 0.0)) { threshold=i; max_distance=distance; } } return(100.0*threshold/MaxIntensity); } MagickExport MagickBooleanType AutoThresholdImage(Image *image, const AutoThresholdMethod method,ExceptionInfo *exception) { CacheView *image_view; char property[MagickPathExtent]; double gamma, *histogram, sum, threshold; MagickBooleanType status; register ssize_t i; ssize_t y; /* Form histogram. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); histogram=(double *) AcquireQuantumMemory(MaxIntensity+1UL, sizeof(*histogram)); if (histogram == (double *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); status=MagickTrue; (void) memset(histogram,0,(MaxIntensity+1UL)*sizeof(*histogram)); image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register ssize_t x; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { double intensity = GetPixelIntensity(image,p); histogram[ScaleQuantumToChar(ClampToQuantum(intensity))]++; p+=GetPixelChannels(image); } } image_view=DestroyCacheView(image_view); /* Normalize histogram. 
*/ sum=0.0; for (i=0; i <= (ssize_t) MaxIntensity; i++) sum+=histogram[i]; gamma=PerceptibleReciprocal(sum); for (i=0; i <= (ssize_t) MaxIntensity; i++) histogram[i]=gamma*histogram[i]; /* Discover threshold from histogram. */ switch (method) { case KapurThresholdMethod: { threshold=KapurThreshold(image,histogram,exception); break; } case OTSUThresholdMethod: default: { threshold=OTSUThreshold(image,histogram,exception); break; } case TriangleThresholdMethod: { threshold=TriangleThreshold(histogram); break; } } histogram=(double *) RelinquishMagickMemory(histogram); if (threshold < 0.0) status=MagickFalse; if (status == MagickFalse) return(MagickFalse); /* Threshold image. */ (void) FormatLocaleString(property,MagickPathExtent,"%g%%",threshold); (void) SetImageProperty(image,"auto-threshold:threshold",property,exception); return(BilevelImage(image,QuantumRange*threshold/100.0,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % B i l e v e l I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % BilevelImage() changes the value of individual pixels based on the % intensity of each pixel channel. The result is a high-contrast image. % % More precisely each channel value of the image is 'thresholded' so that if % it is equal to or less than the given value it is set to zero, while any % value greater than that give is set to it maximum or QuantumRange. % % This function is what is used to implement the "-threshold" operator for % the command line API. % % If the default channel setting is given the image is thresholded using just % the gray 'intensity' of the image, rather than the individual channels. % % The format of the BilevelImage method is: % % MagickBooleanType BilevelImage(Image *image,const double threshold, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. 
% % o threshold: define the threshold values. % % o exception: return any errors or warnings in this structure. % % Aside: You can get the same results as operator using LevelImages() % with the 'threshold' value for both the black_point and the white_point. % */ MagickExport MagickBooleanType BilevelImage(Image *image,const double threshold, ExceptionInfo *exception) { #define ThresholdImageTag "Threshold/Image" CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); if (IsGrayColorspace(image->colorspace) != MagickFalse) (void) SetImageColorspace(image,sRGBColorspace,exception); /* Bilevel threshold image. */ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double pixel; register ssize_t i; pixel=GetPixelIntensity(image,q); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; if (image->channel_mask != DefaultChannels) pixel=(double) q[i]; q[i]=(Quantum) (pixel <= threshold ? 
0 : QuantumRange); } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ThresholdImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % B l a c k T h r e s h o l d I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % BlackThresholdImage() is like ThresholdImage() but forces all pixels below % the threshold into black while leaving all pixels at or above the threshold % unchanged. % % The format of the BlackThresholdImage method is: % % MagickBooleanType BlackThresholdImage(Image *image, % const char *threshold,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o threshold: define the threshold value. % % o exception: return any errors or warnings in this structure. 
%
*/
MagickExport MagickBooleanType BlackThresholdImage(Image *image,
  const char *thresholds,ExceptionInfo *exception)
{
#define ThresholdImageTag  "Threshold/Image"

  CacheView
    *image_view;

  GeometryInfo
    geometry_info;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    threshold;

  MagickStatusType
    flags;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (thresholds == (const char *) NULL)
    return(MagickTrue);  /* no thresholds given: nothing to do */
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace,exception);
  /*
    Parse the geometry string: rho applies to every channel; optional
    sigma/xi/psi/chi override green/blue/alpha (black in CMYK) in turn.
  */
  GetPixelInfo(image,&threshold);
  flags=ParseGeometry(thresholds,&geometry_info);
  threshold.red=geometry_info.rho;
  threshold.green=geometry_info.rho;
  threshold.blue=geometry_info.rho;
  threshold.black=geometry_info.rho;
  threshold.alpha=100.0;
  if ((flags & SigmaValue) != 0)
    threshold.green=geometry_info.sigma;
  if ((flags & XiValue) != 0)
    threshold.blue=geometry_info.xi;
  if ((flags & PsiValue) != 0)
    threshold.alpha=geometry_info.psi;
  if (threshold.colorspace == CMYKColorspace)
    {
      if ((flags & PsiValue) != 0)
        threshold.black=geometry_info.psi;
      if ((flags & ChiValue) != 0)
        threshold.alpha=geometry_info.chi;
    }
  if ((flags & PercentValue) != 0)
    {
      /* a trailing '%' scales all components from percent to quantum range */
      threshold.red*=(MagickRealType) (QuantumRange/100.0);
      threshold.green*=(MagickRealType) (QuantumRange/100.0);
      threshold.blue*=(MagickRealType) (QuantumRange/100.0);
      threshold.black*=(MagickRealType) (QuantumRange/100.0);
      threshold.alpha*=(MagickRealType) (QuantumRange/100.0);
    }
  /*
    Black threshold image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        pixel;

      register ssize_t
        i;

      pixel=GetPixelIntensity(image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        if (image->channel_mask != DefaultChannels)
          pixel=(double) q[i];
        /* below threshold: force to black; at or above: leave unchanged */
        if (pixel < GetPixelInfoChannel(&threshold,channel))
          q[i]=(Quantum) 0;
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ThresholdImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     C l a m p I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ClampImage() sets each pixel whose value is below zero to zero and any
%  pixel whose value is above the quantum range to the quantum range (e.g.
%  65535) otherwise the pixel value remains unchanged.
%
%  The format of the ClampImage method is:
%
%      MagickBooleanType ClampImage(Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ClampImage(Image *image,ExceptionInfo *exception)
{
#define ClampImageTag  "Clamp/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    {
      /*
        PseudoClass images: clamp the colormap entries instead of the
        per-pixel data, then sync pixels from the colormap.
      */
      register ssize_t
        i;

      register PixelInfo
        *magick_restrict q;

      q=image->colormap;
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        q->red=(double) ClampPixel(q->red);
        q->green=(double) ClampPixel(q->green);
        q->blue=(double) ClampPixel(q->blue);
        q->alpha=(double) ClampPixel(q->alpha);
        q++;
      }
      return(SyncImage(image,exception));
    }
  /*
    Clamp image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ClampPixel((MagickRealType) q[i]);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ClampImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     D e s t r o y T h r e s h o l d M a p                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyThresholdMap() de-allocate the given ThresholdMap
%
%  The format of the DestroyThresholdMap method is:
%
%      ThresholdMap *DestroyThresholdMap(ThresholdMap *map)
%
%  A description of each parameter follows.
% % o map: Pointer to the Threshold map to destroy % */ MagickExport ThresholdMap *DestroyThresholdMap(ThresholdMap *map) { assert(map != (ThresholdMap *) NULL); if (map->map_id != (char *) NULL) map->map_id=DestroyString(map->map_id); if (map->description != (char *) NULL) map->description=DestroyString(map->description); if (map->levels != (ssize_t *) NULL) map->levels=(ssize_t *) RelinquishMagickMemory(map->levels); map=(ThresholdMap *) RelinquishMagickMemory(map); return(map); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t T h r e s h o l d M a p % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetThresholdMap() loads and searches one or more threshold map files for the % map matching the given name or alias. % % The format of the GetThresholdMap method is: % % ThresholdMap *GetThresholdMap(const char *map_id, % ExceptionInfo *exception) % % A description of each parameter follows. % % o map_id: ID of the map to look for. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport ThresholdMap *GetThresholdMap(const char *map_id, ExceptionInfo *exception) { ThresholdMap *map; map=GetThresholdMapFile(MinimalThresholdMap,"built-in",map_id,exception); if (map != (ThresholdMap *) NULL) return(map); #if !defined(MAGICKCORE_ZERO_CONFIGURATION_SUPPORT) { const StringInfo *option; LinkedListInfo *options; options=GetConfigureOptions(ThresholdsFilename,exception); option=(const StringInfo *) GetNextValueInLinkedList(options); while (option != (const StringInfo *) NULL) { map=GetThresholdMapFile((const char *) GetStringInfoDatum(option), GetStringInfoPath(option),map_id,exception); if (map != (ThresholdMap *) NULL) break; option=(const StringInfo *) GetNextValueInLinkedList(options); } options=DestroyConfigureOptions(options); } #endif return(map); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t T h r e s h o l d M a p F i l e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetThresholdMapFile() look for a given threshold map name or alias in the % given XML file data, and return the allocated the map when found. % % The format of the ListThresholdMaps method is: % % ThresholdMap *GetThresholdMap(const char *xml,const char *filename, % const char *map_id,ExceptionInfo *exception) % % A description of each parameter follows. % % o xml: The threshold map list in XML format. % % o filename: The threshold map XML filename. % % o map_id: ID of the map to look for in XML list. % % o exception: return any errors or warnings in this structure. 
% */
/*
  Parse one threshold-map XML document; if it contains a <threshold> element
  whose "map" or "alias" attribute matches map_id, build and return a freshly
  allocated ThresholdMap from it.  Returns NULL when the map is absent or the
  entry is malformed (a malformed entry also raises an exception).
*/
static ThresholdMap *GetThresholdMapFile(const char *xml,const char *filename,
  const char *map_id,ExceptionInfo *exception)
{
  char
    *p;

  const char
    *attribute,
    *content;

  double
    value;

  register ssize_t
    i;

  ThresholdMap
    *map;

  XMLTreeInfo
    *description,
    *levels,
    *threshold,
    *thresholds;

  (void) LogMagickEvent(ConfigureEvent,GetMagickModule(),
    "Loading threshold map file \"%s\" ...",filename);
  map=(ThresholdMap *) NULL;
  thresholds=NewXMLTree(xml,exception);
  if (thresholds == (XMLTreeInfo *) NULL)
    return(map);
  /*
    Find the <threshold> element whose map or alias attribute matches map_id.
  */
  for (threshold=GetXMLTreeChild(thresholds,"threshold");
       threshold != (XMLTreeInfo *) NULL;
       threshold=GetNextXMLTreeTag(threshold))
  {
    attribute=GetXMLTreeAttribute(threshold,"map");
    if ((attribute != (char *) NULL) && (LocaleCompare(map_id,attribute) == 0))
      break;
    attribute=GetXMLTreeAttribute(threshold,"alias");
    if ((attribute != (char *) NULL) && (LocaleCompare(map_id,attribute) == 0))
      break;
  }
  if (threshold == (XMLTreeInfo *) NULL)
    {
      /* Not found in this document: not an error, just return NULL. */
      thresholds=DestroyXMLTree(thresholds);
      return(map);
    }
  /*
    Both <description> and <levels> children are mandatory.
  */
  description=GetXMLTreeChild(threshold,"description");
  if (description == (XMLTreeInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingElement", "<description>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      return(map);
    }
  levels=GetXMLTreeChild(threshold,"levels");
  if (levels == (XMLTreeInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingElement", "<levels>, map \"%s\"", map_id);
      thresholds=DestroyXMLTree(thresholds);
      return(map);
    }
  /*
    Allocate the map; initialize owned pointers to NULL so a partially built
    map can always be handed to DestroyThresholdMap() on any error below.
  */
  map=(ThresholdMap *) AcquireCriticalMemory(sizeof(*map));
  map->map_id=(char *) NULL;
  map->description=(char *) NULL;
  map->levels=(ssize_t *) NULL;
  attribute=GetXMLTreeAttribute(threshold,"map");
  if (attribute != (char *) NULL)
    map->map_id=ConstantString(attribute);
  content=GetXMLTreeContent(description);
  if (content != (char *) NULL)
    map->description=ConstantString(content);
  /*
    Required numeric attributes: width > 0, height > 0, divisor >= 2.
  */
  attribute=GetXMLTreeAttribute(levels,"width");
  if (attribute == (char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingAttribute", "<levels width>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  map->width=StringToUnsignedLong(attribute);
  if (map->width == 0)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlInvalidAttribute", "<levels width>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  attribute=GetXMLTreeAttribute(levels,"height");
  if (attribute == (char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingAttribute", "<levels height>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  map->height=StringToUnsignedLong(attribute);
  if (map->height == 0)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlInvalidAttribute", "<levels height>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  attribute=GetXMLTreeAttribute(levels,"divisor");
  if (attribute == (char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingAttribute", "<levels divisor>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  map->divisor=(ssize_t) StringToLong(attribute);
  if (map->divisor < 2)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlInvalidAttribute", "<levels divisor>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  content=GetXMLTreeContent(levels);
  if (content == (char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingContent", "<levels>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  /*
    Parse exactly width*height whitespace-separated integers from the element
    content; each level must lie in [0, divisor].
  */
  map->levels=(ssize_t *) AcquireQuantumMemory((size_t)
    map->width,map->height*sizeof(*map->levels));
  if (map->levels == (ssize_t *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"UnableToAcquireThresholdMap");
  for (i=0; i < (ssize_t) (map->width*map->height); i++)
  {
    map->levels[i]=(ssize_t) strtol(content,&p,10);
    if (p == content)
      {
        /* strtol consumed nothing: the content ran out of numbers. */
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlInvalidContent", "<level> too few values, map \"%s\"",map_id);
        thresholds=DestroyXMLTree(thresholds);
        map=DestroyThresholdMap(map);
        return(map);
      }
    if ((map->levels[i] < 0) || (map->levels[i] > map->divisor))
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlInvalidContent", "<level> %.20g out of range, map \"%s\"",
          (double) map->levels[i],map_id);
        thresholds=DestroyXMLTree(thresholds);
        map=DestroyThresholdMap(map);
        return(map);
      }
    content=p;
  }
  /*
    One more trial parse: if it succeeds, the content held more values than
    width*height allows, which is also an error.
  */
  value=(double) strtol(content,&p,10);
  (void) value;
  if (p != content)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlInvalidContent", "<level> too many values, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  thresholds=DestroyXMLTree(thresholds);
  return(map);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   L i s t T h r e s h o l d M a p F i l e                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ListThresholdMapFile() lists the threshold maps and their descriptions
%  in the given XML file data.
%
%  The format of the ListThresholdMaps method is:
%
%      MagickBooleanType ListThresholdMaps(FILE *file,const char*xml,
%        const char *filename,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o file: A pointer to the output FILE.
%
%    o xml: The threshold map list in XML format.
%
%    o filename: The threshold map XML filename.
%
%    o exception: return any errors or warnings in this structure.
% */
/*
  Print a "Map / Alias / Description" table row for every <threshold> element
  in one XML document.  Returns MagickFalse on malformed entries.
*/
MagickBooleanType ListThresholdMapFile(FILE *file,const char *xml,
  const char *filename,ExceptionInfo *exception)
{
  const char
    *alias,
    *content,
    *map;

  XMLTreeInfo
    *description,
    *threshold,
    *thresholds;

  assert( xml != (char *) NULL );
  assert( file != (FILE *) NULL );
  (void) LogMagickEvent(ConfigureEvent,GetMagickModule(),
    "Loading threshold map file \"%s\" ...",filename);
  thresholds=NewXMLTree(xml,exception);
  if ( thresholds == (XMLTreeInfo *) NULL )
    return(MagickFalse);
  (void) FormatLocaleFile(file,"%-16s %-12s %s\n","Map","Alias","Description");
  (void) FormatLocaleFile(file,
    "----------------------------------------------------\n");
  threshold=GetXMLTreeChild(thresholds,"threshold");
  for ( ; threshold != (XMLTreeInfo *) NULL;
          threshold=GetNextXMLTreeTag(threshold))
  {
    /* "map" attribute and <description> child are mandatory; alias is not. */
    map=GetXMLTreeAttribute(threshold,"map");
    if (map == (char *) NULL)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlMissingAttribute", "<map>");
        thresholds=DestroyXMLTree(thresholds);
        return(MagickFalse);
      }
    alias=GetXMLTreeAttribute(threshold,"alias");
    description=GetXMLTreeChild(threshold,"description");
    if (description == (XMLTreeInfo *) NULL)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlMissingElement", "<description>, map \"%s\"",map);
        thresholds=DestroyXMLTree(thresholds);
        return(MagickFalse);
      }
    content=GetXMLTreeContent(description);
    if (content == (char *) NULL)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlMissingContent", "<description>, map \"%s\"", map);
        thresholds=DestroyXMLTree(thresholds);
        return(MagickFalse);
      }
    (void) FormatLocaleFile(file,"%-16s %-12s %s\n",map,alias ?
      alias : "", content);
  }
  thresholds=DestroyXMLTree(thresholds);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   L i s t T h r e s h o l d M a p s                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ListThresholdMaps() lists the threshold maps and their descriptions
%  as defined by "threshold.xml" to a file.
%
%  The format of the ListThresholdMaps method is:
%
%      MagickBooleanType ListThresholdMaps(FILE *file,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o file: A pointer to the output FILE.
%
%    o exception: return any errors or warnings in this structure.
%
*/
/*
  List every threshold map from every configured threshold.xml file to the
  given stream (stdout when file is NULL).  The per-file results are AND-ed
  into a single boolean status.
*/
MagickExport MagickBooleanType ListThresholdMaps(FILE *file,
  ExceptionInfo *exception)
{
  const StringInfo
    *option;

  LinkedListInfo
    *options;

  MagickStatusType
    status;

  status=MagickTrue;
  if (file == (FILE *) NULL)
    file=stdout;
  options=GetConfigureOptions(ThresholdsFilename,exception);
  (void) FormatLocaleFile(file,
    "\n Threshold Maps for Ordered Dither Operations\n");
  option=(const StringInfo *) GetNextValueInLinkedList(options);
  while (option != (const StringInfo *) NULL)
  {
    (void) FormatLocaleFile(file,"\nPath: %s\n\n",GetStringInfoPath(option));
    status&=ListThresholdMapFile(file,(const char *) GetStringInfoDatum(option),
      GetStringInfoPath(option),exception);
    option=(const StringInfo *) GetNextValueInLinkedList(options);
  }
  options=DestroyConfigureOptions(options);
  return(status != 0 ? MagickTrue : MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   O r d e r e d   D i t h e r I m a g e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  OrderedDitherImage() will perform a ordered dither based on a number
%  of pre-defined dithering threshold maps, but over multiple intensity
%  levels, which can be different for different channels, according to the
%  input argument.
%
%  The format of the OrderedDitherImage method is:
%
%      MagickBooleanType OrderedDitherImage(Image *image,
%        const char *threshold_map,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o threshold_map: A string containing the name of the threshold dither
%      map to use, followed by zero or more numbers representing the number
%      of color levels to dither between.
%
%      Any level number less than 2 will be equivalent to 2, and means only
%      binary dithering will be applied to each color channel.
%
%      No numbers also means a 2 level (bitmap) dither will be applied to all
%      channels, while a single number is the number of levels applied to each
%      channel in sequence.  More numbers will be applied in turn to each of
%      the color channels.
%
%      For example: "o3x3,6" will generate a 6 level posterization of the
%      image with an ordered 3x3 diffused pixel dither being applied between
%      each level.  While checker,8,8,4 will produce a 332 colormapped image
%      with only a single checkerboard hash pattern (50% grey) between each
%      color level, to basically double the number of color levels with
%      a bare minimum of dithering.
%
%    o exception: return any errors or warnings in this structure.
% */ MagickExport MagickBooleanType OrderedDitherImage(Image *image, const char *threshold_map,ExceptionInfo *exception) { #define DitherImageTag "Dither/Image" CacheView *image_view; char token[MagickPathExtent]; const char *p; double levels[CompositePixelChannel]; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; ssize_t y; ThresholdMap *map; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (threshold_map == (const char *) NULL) return(MagickTrue); p=(char *) threshold_map; while (((isspace((int) ((unsigned char) *p)) != 0) || (*p == ',')) && (*p != '\0')) p++; threshold_map=p; while (((isspace((int) ((unsigned char) *p)) == 0) && (*p != ',')) && (*p != '\0')) { if ((p-threshold_map) >= (MagickPathExtent-1)) break; token[p-threshold_map]=(*p); p++; } token[p-threshold_map]='\0'; map=GetThresholdMap(token,exception); if (map == (ThresholdMap *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : '%s'","ordered-dither",threshold_map); return(MagickFalse); } for (i=0; i < MaxPixelChannels; i++) levels[i]=2.0; p=strchr((char *) threshold_map,','); if ((p != (char *) NULL) && (isdigit((int) ((unsigned char) *(++p))) != 0)) { GetNextToken(p,&p,MagickPathExtent,token); for (i=0; (i < MaxPixelChannels); i++) levels[i]=StringToDouble(token,(char **) NULL); for (i=0; (*p != '\0') && (i < MaxPixelChannels); i++) { GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); levels[i]=StringToDouble(token,(char **) NULL); } } for (i=0; i < MaxPixelChannels; i++) if (fabs(levels[i]) >= 1) levels[i]-=1.0; if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); status=MagickTrue; progress=0; 
image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; ssize_t n; n=0; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { ssize_t level, threshold; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; if (fabs(levels[n]) < MagickEpsilon) { n++; continue; } threshold=(ssize_t) (QuantumScale*q[i]*(levels[n]*(map->divisor-1)+1)); level=threshold/(map->divisor-1); threshold-=level*(map->divisor-1); q[i]=ClampToQuantum((double) (level+(threshold >= map->levels[(x % map->width)+map->width*(y % map->height)]))* QuantumRange/levels[n]); n++; } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,DitherImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); map=DestroyThresholdMap(map); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P e r c e p t i b l e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PerceptibleImage() set each pixel whose value is less than |epsilon| to % epsilon or -epsilon (whichever is 
closer) otherwise the pixel value remains
%  unchanged.
%
%  The format of the PerceptibleImage method is:
%
%      MagickBooleanType PerceptibleImage(Image *image,const double epsilon,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o epsilon: the epsilon threshold (e.g. 1.0e-9).
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Clamp a channel value away from zero: values with magnitude below epsilon
  are pushed to +/-epsilon (preserving sign); anything else is unchanged.
  The sign test is only meaningful for signed Quantum builds (HDRI).
*/
static inline Quantum PerceptibleThreshold(const Quantum quantum,
  const double epsilon)
{
  double
    sign;

  sign=(double) quantum < 0.0 ? -1.0 : 1.0;
  if ((sign*quantum) >= epsilon)
    return(quantum);
  return((Quantum) (sign*epsilon));
}

MagickExport MagickBooleanType PerceptibleImage(Image *image,
  const double epsilon,ExceptionInfo *exception)
{
#define PerceptibleImageTag "Perceptible/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    {
      register ssize_t
        i;

      register PixelInfo
        *magick_restrict q;

      /*
        PseudoClass: thresholding the colormap entries is sufficient; the
        pixel indexes themselves are untouched.
      */
      q=image->colormap;
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        q->red=(double) PerceptibleThreshold(ClampToQuantum(q->red),
          epsilon);
        q->green=(double) PerceptibleThreshold(ClampToQuantum(q->green),
          epsilon);
        q->blue=(double) PerceptibleThreshold(ClampToQuantum(q->blue),
          epsilon);
        q->alpha=(double) PerceptibleThreshold(ClampToQuantum(q->alpha),
          epsilon);
        q++;
      }
      return(SyncImage(image,exception));
    }
  /*
    Perceptible image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        /* Unlike the other threshold ops, every defined channel is updated. */
        if (traits == UndefinedPixelTrait)
          continue;
        q[i]=PerceptibleThreshold(q[i],epsilon);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,PerceptibleImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R a n d o m T h r e s h o l d I m a g e                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RandomThresholdImage() changes the value of individual pixels based on the
%  intensity of each pixel compared to a random threshold.  The result is a
%  low-contrast, two color image.
%
%  The format of the RandomThresholdImage method is:
%
%      MagickBooleanType RandomThresholdImage(Image *image,
%        const double min_threshold,const double max_threshold,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
% % o low,high: Specify the high and low thresholds. These values range from % 0 to QuantumRange. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType RandomThresholdImage(Image *image, const double min_threshold, const double max_threshold,ExceptionInfo *exception) { #define ThresholdImageTag "Threshold/Image" CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; PixelInfo threshold; RandomInfo **magick_restrict random_info; ssize_t y; #if defined(MAGICKCORE_OPENMP_SUPPORT) unsigned long key; #endif assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); GetPixelInfo(image,&threshold); /* Random threshold image. 
*/ status=MagickTrue; progress=0; random_info=AcquireRandomInfoThreadSet(); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) key=GetRandomSecretKey(random_info[0]); #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,key == ~0UL) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double threshold; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; if ((double) q[i] < min_threshold) threshold=min_threshold; else if ((double) q[i] > max_threshold) threshold=max_threshold; else threshold=(double) (QuantumRange* GetPseudoRandomValue(random_info[id])); q[i]=(double) q[i] <= threshold ? 
0 : QuantumRange; } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ThresholdImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); random_info=DestroyRandomInfoThreadSet(random_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R a n g e T h r e s h o l d I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RangeThresholdImage() applies soft and hard thresholding. % % The format of the RangeThresholdImage method is: % % MagickBooleanType RangeThresholdImage(Image *image, % const double low_black,const double low_white,const double high_white, % const double high_black,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o low_black: Define the minimum black threshold value. % % o low_white: Define the minimum white threshold value. % % o high_white: Define the maximum white threshold value. % % o high_black: Define the maximum black threshold value. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport MagickBooleanType RangeThresholdImage(Image *image, const double low_black,const double low_white,const double high_white, const double high_black,ExceptionInfo *exception) { #define ThresholdImageTag "Threshold/Image" CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); if (IsGrayColorspace(image->colorspace) != MagickFalse) (void) TransformImageColorspace(image,sRGBColorspace,exception); /* Range threshold image. */ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double pixel; register ssize_t i; pixel=GetPixelIntensity(image,q); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; if (image->channel_mask != DefaultChannels) pixel=(double) q[i]; if (pixel < low_black) q[i]=0; else if ((pixel >= low_black) && (pixel < low_white)) q[i]=ClampToQuantum(QuantumRange* PerceptibleReciprocal(low_white-low_black)*(pixel-low_black)); else if ((pixel >= low_white) && (pixel <= high_white)) q[i]=QuantumRange; else if ((pixel > high_white) && (pixel <= high_black)) 
q[i]=ClampToQuantum(QuantumRange*PerceptibleReciprocal( high_black-high_white)*(high_black-pixel)); else if (pixel > high_black) q[i]=0; else q[i]=0; } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ThresholdImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % W h i t e T h r e s h o l d I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WhiteThresholdImage() is like ThresholdImage() but forces all pixels above % the threshold into white while leaving all pixels at or below the threshold % unchanged. % % The format of the WhiteThresholdImage method is: % % MagickBooleanType WhiteThresholdImage(Image *image, % const char *threshold,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o threshold: Define the threshold value. % % o exception: return any errors or warnings in this structure. 
% */
/*
  Force every updated channel whose intensity (or channel value, when a
  channel mask is active) exceeds the parsed per-channel threshold to
  QuantumRange; values at or below the threshold are left unchanged.  The
  threshold string is parsed as a geometry: rho[,sigma[,xi[,psi[,chi]]]],
  optionally suffixed with '%' to scale by QuantumRange/100.
*/
MagickExport MagickBooleanType WhiteThresholdImage(Image *image,
  const char *thresholds,ExceptionInfo *exception)
{
#define ThresholdImageTag "Threshold/Image"

  CacheView
    *image_view;

  GeometryInfo
    geometry_info;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    threshold;

  MagickStatusType
    flags;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (thresholds == (const char *) NULL)
    return(MagickTrue);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) TransformImageColorspace(image,sRGBColorspace,exception);
  /*
    A single rho value seeds every channel; sigma/xi/psi(/chi) override
    individual channels when present.  Alpha defaults to 100 (fully opaque
    threshold) unless explicitly given.
  */
  GetPixelInfo(image,&threshold);
  flags=ParseGeometry(thresholds,&geometry_info);
  threshold.red=geometry_info.rho;
  threshold.green=geometry_info.rho;
  threshold.blue=geometry_info.rho;
  threshold.black=geometry_info.rho;
  threshold.alpha=100.0;
  if ((flags & SigmaValue) != 0)
    threshold.green=geometry_info.sigma;
  if ((flags & XiValue) != 0)
    threshold.blue=geometry_info.xi;
  if ((flags & PsiValue) != 0)
    threshold.alpha=geometry_info.psi;
  if (threshold.colorspace == CMYKColorspace)
    {
      /* CMYK shifts the meaning: psi is black, chi is alpha. */
      if ((flags & PsiValue) != 0)
        threshold.black=geometry_info.psi;
      if ((flags & ChiValue) != 0)
        threshold.alpha=geometry_info.chi;
    }
  if ((flags & PercentValue) != 0)
    {
      /* Percent form: scale all thresholds from [0,100] to [0,QuantumRange]. */
      threshold.red*=(MagickRealType) (QuantumRange/100.0);
      threshold.green*=(MagickRealType) (QuantumRange/100.0);
      threshold.blue*=(MagickRealType) (QuantumRange/100.0);
      threshold.black*=(MagickRealType) (QuantumRange/100.0);
      threshold.alpha*=(MagickRealType) (QuantumRange/100.0);
    }
  /*
    White threshold image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        pixel;

      register ssize_t
        i;

      pixel=GetPixelIntensity(image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        /* With an explicit channel mask, threshold each channel directly. */
        if (image->channel_mask != DefaultChannels)
          pixel=(double) q[i];
        if (pixel > GetPixelInfoChannel(&threshold,channel))
          q[i]=QuantumRange;
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ThresholdImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
GB_binop__isgt_int16.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

// NOTE(review): generated instantiation for the ISGT operator on int16;
// any change belongs in the Generator/ template, not here.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__isgt_int16)
// A.*B function (eWiseMult):       GB (_AemultB_01__isgt_int16)
// A.*B function (eWiseMult):       GB (_AemultB_02__isgt_int16)
// A.*B function (eWiseMult):       GB (_AemultB_03__isgt_int16)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__isgt_int16)
// A*D function (colscale):         GB (_AxD__isgt_int16)
// D*A function (rowscale):         GB (_DxB__isgt_int16)
// C+=B function (dense accum):     GB (_Cdense_accumB__isgt_int16)
// C+=b function (dense accum):     GB (_Cdense_accumb__isgt_int16)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__isgt_int16)
// C=scalar+B                       GB (_bind1st__isgt_int16)
// C=scalar+B'                      GB (_bind1st_tran__isgt_int16)
// C=A+scalar                       GB (_bind2nd__isgt_int16)
// C=A'+scalar                      GB (_bind2nd_tran__isgt_int16)

// C type:   int16_t
// A type:   int16_t
// B,b type: int16_t
// BinaryOp: cij = (aij > bij)

#define GB_ATYPE \
    int16_t

#define GB_BTYPE \
    int16_t

#define GB_CTYPE \
    int16_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    int16_t aij = GBX (Ax, pA, A_iso)

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    int16_t bij = GBX (Bx, pB, B_iso)

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int16_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator: ISGT yields an int16_t 0 or 1 from the comparison
#define GB_BINOP(z,x,y,i,j) \
    z = (x > y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ISGT || GxB_NO_INT16 || GxB_NO_ISGT_INT16)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__isgt_int16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__isgt_int16) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__isgt_int16) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int16_t int16_t bwork = (*((int16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__isgt_int16) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t 
*A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *restrict Cx = (int16_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__isgt_int16) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *restrict Cx = (int16_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__isgt_int16) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__isgt_int16) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict 
C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__isgt_int16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__isgt_int16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__isgt_int16) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__isgt_int16) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *Cx = (int16_t *) Cx_output ; int16_t x = (*((int16_t *) x_input)) ; int16_t *Bx = (int16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) 
schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int16_t bij = GBX (Bx, p, false) ; Cx [p] = (x > bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__isgt_int16) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int16_t *Cx = (int16_t *) Cx_output ; int16_t *Ax = (int16_t *) Ax_input ; int16_t y = (*((int16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int16_t aij = GBX (Ax, p, false) ; Cx [p] = (aij > y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x > aij) ; \ } GrB_Info GB (_bind1st_tran__isgt_int16) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t x = (*((const int16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij > y) ; \ } GrB_Info GB (_bind2nd_tran__isgt_int16) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t y = (*((const int16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
pdplgsy.c
/**
 *
 * @file pdplgsy.c
 *
 *  PLASMA auxiliary routines
 *  PLASMA is a software package provided by Univ. of Tennessee,
 *  Univ. of California Berkeley and Univ. of Colorado Denver
 *
 * @version 2.6.0
 * @author Mathieu Faverge
 * @date 2010-11-15
 * @generated d Tue Jan 7 11:45:12 2014
 *
 **/
#include "common.h"
#if defined(USE_OMPEXT)
#include <omp_ext.h>
#endif

/* Address of tile (m,n) of A, viewed as doubles. */
#define A(m,n) BLKADDR(A, double, m, n)

/***************************************************************************//**
 *  Parallel tile generation of a random symmetric matrix - dynamic scheduling.
 *  One task is spawned per tile; each task fills its tile via CORE_dplgsy.
 *  NOTE(review): the original header said "Cholesky factorization", which
 *  looks like a copy-paste from pdpotrf — dplgsy generates, it does not
 *  factor; confirm against the PLASMA docs.
 *
 *  @param bump  value added to the diagonal to make the matrix diagonally
 *               dominant (passed straight through to CORE_dplgsy)
 *  @param A     tile descriptor of the matrix to fill
 *  @param seed  RNG seed; together with the tile's global offsets it makes
 *               generation deterministic and independent of task order
 **/
void plasma_pdplgsy_quark( double bump, PLASMA_desc A, unsigned long long int seed)
{
    int m, n;

    for (m = 0; m < A.mt; m++) {
        /* Last tile row may be short. */
        const int rows = (m == A.mt-1) ? A.m - m*A.mb : A.mb;
        const int ldam = BLKLDD(A, m);
        for (n = 0; n < A.nt; n++) {
            /* Last tile column may be narrow. */
            const int cols = (n == A.nt-1) ? A.n - n*A.nb : A.nb;
            double *tile = A(m, n);
#if defined(USE_OMPEXT)
            /* Pin the task near the core that will later consume the tile. */
            omp_set_task_affinity( (n%4)*6 + (m%6) );
#endif
#pragma omp task depend(out:tile[0:ldam*cols])
            CORE_dplgsy( bump, rows, cols, tile, ldam, A.m, m*A.mb, n*A.nb, seed );
        }
    }
}
matrix_test.c
// Uses OpenMP for parallelization and enables vectorization through use
// of GCC ivdep pragma
// Based on the approach in Ulrich Drepper's What Every Programmer Should
// Know About Memory:
// https://people.freebsd.org/~lstewart/articles/cpumemory.pdf
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <unistd.h>
#include <emmintrin.h>

// Print message describing the error and die
#define handle_error(msg)                                                      \
  do {                                                                         \
    fprintf(stderr, "%s: %s (%s:%d)\n", (msg), strerror(errno), __FILE__,      \
            __LINE__);                                                         \
    exit(EXIT_FAILURE);                                                        \
  } while (0)

// Convenience macro for row-major indexing of an N x N matrix
#define IDX(N, r, c) ((N) * (r) + (c))

// Cache line size in bytes (set once in main before any multiplication)
long g_linesize;

// Used for tic() and toc()
struct timeval g_t1, g_t2;

// Simple timer function. tic() records the current time, and toc() returns
// the elapsed time since the last tic()
void tic() { gettimeofday(&g_t1, NULL); }

// Return time since last invocation of tic() in milliseconds
double toc() {
  double ret;
  gettimeofday(&g_t2, NULL);
  ret = (g_t2.tv_sec - g_t1.tv_sec) * 1000.0;
  ret += (g_t2.tv_usec - g_t1.tv_usec) / 1000.0;
  return ret;
}

// Generate random double in range [min, max]
double rand_double(double min, double max) {
  return min + (max - min) * ((double)rand() / (double)RAND_MAX);
}

// Simple struct to hold a square matrix (row-major, dimension N x N)
struct matrix {
  double *data;
  size_t N;
};

// Allocate backing storage for an N x N matrix; dies on allocation failure
void matrix_init(struct matrix *m, size_t N) {
  m->N = N;
  if ((m->data = (double *)malloc(N * N * sizeof(*m->data))) == NULL) {
    handle_error("malloc");
  }
}

// Release storage owned by m (new helper: the matrices were never freed)
void matrix_free(struct matrix *m) {
  free(m->data);
  m->data = NULL;
  m->N = 0;
}

// Set every element of m to a random value in the range [-1,1]
void matrix_randomize(struct matrix *m) {
  size_t N = m->N;
  for (size_t i = 0; i < N * N; ++i) {
    m->data[i] = rand_double(-1.0, 1.0);
  }
}

// Zero-out matrix m
void matrix_zero(struct matrix *m) {
  size_t N = m->N;
#pragma omp parallel for
  for (size_t i = 0; i < N * N; ++i) {
    m->data[i] = 0.0;
  }
}

// Naive implementation of matrix multiplication
// Computes a*b and stores result in res
// a, b, and res must all have the same dimension
void matrix_mult_naive(struct matrix *a, struct matrix *b, struct matrix *res) {
  size_t N = a->N;
  matrix_zero(res);
  // Each thread owns a distinct set of output rows i, so no two threads
  // ever touch the same res element; j and k are declared inside the
  // parallel region and are therefore private per thread.
#pragma omp parallel for
  for (size_t i = 0; i < N; ++i)
    for (size_t j = 0; j < N; ++j)
      for (size_t k = 0; k < N; ++k)
        res->data[IDX(N, i, j)] +=
            a->data[IDX(N, i, k)] * b->data[IDX(N, k, j)];
}

// Cache-friendly (blocked) implementation of matrix multiplication
// Computes a*b and stores result in res
// a, b, and res must all have the same dimension, and the dimension must
// be a power of 2
void matrix_mult_fast(struct matrix *a, struct matrix *b, struct matrix *res) {
  size_t N = a->N;
  matrix_zero(res);
  // Block edge: as many doubles as fit in four cache lines
  size_t SM = 4 * g_linesize / sizeof(double);
  // BUGFIX: for N < SM the i2/k2/j2 loops walked past the end of the
  // matrices (e.g. N=2 with SM=32). Clamp the block size; since N is a
  // power of 2, N >= SM guarantees SM divides N, so no remainder handling
  // is needed.
  if (SM > N)
    SM = N;
  // BUGFIX: the inner loop counters and row pointers used to be declared
  // before the parallel region, making them shared between threads — a
  // data race that silently corrupted results. Declaring each inside the
  // parallel loop makes them private per thread.
#pragma omp parallel for
  for (size_t i = 0; i < N; i += SM) {
    for (size_t j = 0; j < N; j += SM) {
      for (size_t k = 0; k < N; k += SM) {
        double *rres = &res->data[IDX(N, i, j)];
        double *ra = &a->data[IDX(N, i, k)];
        for (size_t i2 = 0; i2 < SM; ++i2, rres += N, ra += N) {
          double *rb = &b->data[IDX(N, k, j)];
          for (size_t k2 = 0; k2 < SM; ++k2, rb += N) {
#pragma GCC ivdep
            for (size_t j2 = 0; j2 < SM; ++j2) {
              rres[j2] += ra[k2] * rb[j2];
            }
          }
        }
      }
    }
  }
}

// Return 1 if a == b, 0 otherwise.
// Exact floating-point comparison is intentional here: both multiply
// routines accumulate the k products in the same ascending order, so
// their rounding behavior is identical.
int matrix_is_equal(struct matrix *a, struct matrix *b) {
  size_t N = a->N;
  if (b->N != N) {
    return 0;
  }
  for (size_t i = 0; i < N * N; ++i) {
    if (a->data[i] != b->data[i])
      return 0;
  }
  return 1;
}

int main(int argc, char *argv[]) {
  if (argc != 2) {
    printf("Usage: %s dim\n", argv[0]);
    printf("DIM must be a power of 2\n");
    exit(EXIT_FAILURE);
  }
  errno = 0;
  size_t N = strtoul(argv[1], NULL, 0);
  if (errno) {
    // BUGFIX: message named the wrong function ("atoul")
    handle_error("strtoul");
  }
  if (N < 2 || (N & (N - 1))) {
    printf("DIM must be >= 2 and be a power of 2\n");
    exit(EXIT_FAILURE);
  }
  printf("Matrix dimension N=%lu\n", (unsigned long)N);
  //if ((g_linesize = sysconf(_SC_LEVEL1_DCACHE_LINESIZE)) == -1) {
  //  handle_error("sysconf");
  //}
  g_linesize = 64;
  printf("Cache line size: %ld\n", g_linesize);
  struct matrix a, b, res_naive, res_fast;
  printf("Preparing matrices ...\n");
  tic();
  matrix_init(&a, N);
  matrix_randomize(&a);
  matrix_init(&b, N);
  matrix_randomize(&b);
  matrix_init(&res_naive, N);
  matrix_init(&res_fast, N);
  printf("Done. %.3lf ms\n", toc());
  printf("Performing naive multiplication ...\n");
  tic();
  matrix_mult_naive(&a, &b, &res_naive);
  printf("Done. %.3lf ms\n", toc());
  printf("Performing fast multiplication ...\n");
  tic();
  matrix_mult_fast(&a, &b, &res_fast);
  printf("Done. %.3lf ms\n", toc());
  printf("Verifying ...\n");
  tic();
  int ok = matrix_is_equal(&res_naive, &res_fast);
  printf("%s", ok ? "PASSED" : "FAILED");
  printf(" %lf ms\n", toc());
  // BUGFIX: the four matrices were leaked, and the exit status ignored
  // the verification result.
  matrix_free(&a);
  matrix_free(&b);
  matrix_free(&res_naive);
  matrix_free(&res_fast);
  return ok ? EXIT_SUCCESS : EXIT_FAILURE;
}
life.c
#include <stdio.h>   /* Standard I/O Library: printf */
#include <stdlib.h>  /* Standard Library: malloc, calloc, free, ralloc */
#include <stdbool.h> /* BUGFIX: 'bool' is not a built-in type in C; without
                        this header the file only compiled as C++ */

/* True mathematical modulus: result is always in [0, b) even when a is
 * negative, which plain '%' does not guarantee in C. Used to wrap grid
 * indices so the board behaves as a torus. */
#define MOD(a,b) ((((a)%(b))+(b))%(b))

#ifndef Generations
#define Generations 50
#endif
#ifndef N
#define N 20
#endif

#define RowDim N
#define ColDim N

/* The 3x3 Moore neighborhood of a cell, center included. */
struct Neighborhood {
    bool up;
    bool center;
    bool down;
    bool left;
    bool left_up;
    bool left_down;
    bool right;
    bool right_up;
    bool right_down;
};

/* Clear the whole grid, then seed it with a T-tetromino around (10,10). */
void initialize(bool ** matrix){
    for (int i=0; i<RowDim; ++i){
        for (int j=0; j<ColDim; ++j)
            matrix[i][j] = 0;
    }
    matrix[10][10] = 1;
    matrix[10][11] = 1;
    matrix[11][11] = 1;
    matrix[10][12] = 1;
}

/* Gather the neighborhood of cell (row,col); indices wrap toroidally. */
struct Neighborhood neighborhood(bool ** matrix, int row, int col){
    struct Neighborhood nbhd;
    nbhd.up         = matrix[ MOD(row - 1,RowDim) ][ col ];
    nbhd.down       = matrix[ MOD(row + 1,RowDim) ][ col ];
    nbhd.center     = matrix[row][col];
    nbhd.left       = matrix[ row ][ MOD(col - 1,ColDim) ];
    nbhd.left_up    = matrix[ MOD(row - 1,RowDim) ][ MOD(col - 1,ColDim) ];
    nbhd.left_down  = matrix[ MOD(row + 1,RowDim) ][ MOD(col - 1,ColDim) ];
    nbhd.right      = matrix[ row ][ MOD(col + 1,ColDim) ];
    nbhd.right_up   = matrix[ MOD(row - 1,RowDim) ][ MOD(col + 1,ColDim) ];
    nbhd.right_down = matrix[ MOD(row + 1,RowDim) ][ MOD(col + 1,ColDim) ];
    // printf("Neighborhood of %d,%d\n", row, col);
    // printf("%d\t%d\t%d\n%d\t%d\t%d\n%d\t%d\t%d\n",
    //        nbhd.left_up,nbhd.up,nbhd.right_up,
    //        nbhd.left,nbhd.center,nbhd.right,
    //        nbhd.left_down,nbhd.down,nbhd.right_down);
    return nbhd;
}

/* Conway's rule: a live cell survives with 2 or 3 live neighbors; a dead
 * cell is born with exactly 3. Returns the cell's next state.
 * (Same truth table as the original nested ternary, written readably.) */
bool function(struct Neighborhood nbhd){
    int sum = nbhd.left_up   + nbhd.up   + nbhd.right_up +
              nbhd.left                  + nbhd.right +
              nbhd.left_down + nbhd.down + nbhd.right_down;
    int site = nbhd.center;
    int life = (sum == 3) || (site == 1 && sum == 2);
    return life;
}

/* Pretty-print the grid to stdout (live cells as 1, dead cells blank). */
void see(bool ** matrix){
    printf("\n");
    int i = 0;
    for (i=0; i<RowDim; ++i){
        int j = 0;
        for (j=0; j<ColDim; ++j){
            if(matrix[i][j] == 0)
                printf(" \t");
            else
                printf("%d\t",matrix[i][j]);
        }
        printf("\n");
    }
    printf("\n");
}

/* Dump the grid as tab-separated 0/1 values to file 'name'. */
void save(bool ** matrix, const char * name){
    FILE * pf = fopen(name,"w");
    for (int i=0; i<RowDim; ++i){
        for (int j=0; j<ColDim; ++j)
            fprintf(pf, "%d\t", matrix[i][j]);
        fprintf(pf, "\n");
    }
    fclose(pf);
}

/* Run 'Generations' synchronous steps: compute the next state into 'out'
 * from 'in', then copy it back so 'in' always holds the current board.
 * Both loops parallelize over rows; every (i,j) write is disjoint. */
void evolve(bool ** in,bool ** out){
    for (int g = 1; g <= Generations; ++g){
#pragma omp parallel for num_threads(4)
        for (int i = 0; i < RowDim; ++i){
            for (int j = 0; j < ColDim; ++j){
                struct Neighborhood nbhd = neighborhood(in,i,j);
                out[i][j] = function(nbhd);
            }
        }
#pragma omp parallel for num_threads(4)
        for (int i = 0; i < RowDim; ++i){
            for (int j = 0; j < ColDim; ++j){
                in[i][j] = out[i][j];
            }
        }
    }
}

/* Verify the expected traffic-light pattern produced by the seed after
 * 'Generations' steps on a 20x20 torus; prints Ok/Fail. */
void check(bool ** in){
    bool a = (in[6][11] == 1);
    bool b = (in[7][11] == 1);
    bool c = (in[8][11] == 1);
    bool d = (in[12][11] == 1);
    bool e = (in[13][11] == 1);
    bool f = (in[14][11] == 1);
    bool g = (in[10][7] == 1);
    bool h = (in[10][8] == 1);
    bool i = (in[10][9] == 1);
    bool j = (in[10][13] == 1);
    bool k = (in[10][14] == 1);
    bool l = (in[10][15] == 1);
    bool result = a && b && c && d && e && f && g && h && i && j && k && l;
    printf("%d \t %d \t %s \t ", RowDim , ColDim, (result==1) ? "Ok": "Fail");
}

int main(int argc, char const **argv) {
    /* Two boards: 'in' is the current generation, 'out' the scratch board. */
    bool ** in  = (bool **) malloc(RowDim*sizeof( bool *));
    bool ** out = (bool **) malloc(RowDim*sizeof( bool *));
    for (int i=0; i<RowDim; ++i){
        in[i]  = (bool *) malloc(ColDim*sizeof(bool));
        out[i] = (bool *) malloc(ColDim*sizeof(bool));
    }
    initialize(in);
    initialize(out);
    evolve(in,out);
    check(in);
    /* -- Releasing resources -- */
    for (int i=0; i<RowDim; ++i)
        free(in[i]);
    free(in);
    for (int i=0; i<RowDim; ++i)
        free(out[i]);
    free(out);
    return EXIT_SUCCESS;
}
pdf_fmt_plug.c
/* PDF cracker patch for JtR. Hacked together during Monsoon of 2012 by * Dhiru Kholia <dhiru.kholia at gmail.com> . * * This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com> * * Uses code from Sumatra PDF and MuPDF which are under GPL * * Edited by Shane Quigley 2013 */ #if FMT_EXTERNS_H extern struct fmt_main fmt_pdf; #elif FMT_REGISTERS_H john_register_one(&fmt_pdf); #else #include <string.h> #include "arch.h" #include "params.h" #include "common.h" #include "formats.h" #include "misc.h" #include "md5.h" #include "rc4.h" #include "pdfcrack_md5.h" #include "aes.h" #include "sha2.h" #include "loader.h" #ifdef _OPENMP #include <omp.h> #ifndef OMP_SCALE #define OMP_SCALE 64 #endif #endif #include "memdbg.h" #define FORMAT_LABEL "PDF" #define FORMAT_NAME "" #define FORMAT_TAG "$pdf$" #define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1) #define FORMAT_TAG_OLD "$pdf$Standard*" #define FORMAT_TAG_OLD_LEN (sizeof(FORMAT_TAG_OLD)-1) #define ALGORITHM_NAME "MD5 SHA2 RC4/AES 32/" ARCH_BITS_STR #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1000 #define PLAINTEXT_LENGTH 32 #define BINARY_SIZE 0 #define SALT_SIZE sizeof(struct custom_salt) #define BINARY_ALIGN 1 #define SALT_ALIGN sizeof(int) #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #if defined (_OPENMP) static int omp_t = 1; #endif static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static int *cracked; static int any_cracked; static size_t cracked_size; static struct custom_salt { int V; int R; int P; char encrypt_metadata; unsigned char u[127]; unsigned char o[127]; unsigned char ue[32]; unsigned char oe[32]; unsigned char id[32]; int length; int length_id; int length_u; int length_o; int length_ue; int length_oe; } *crypt_out; static struct fmt_tests pdf_tests[] = { {"$pdf$4*4*128*-1028*1*16*e03460febe17a048b0adc7f7631bcc56*32*3456205208ad52066d5604018d498a6400000000000000000000000000000000*32*6d598152b22f8fa8085b19a866dce1317f645788a065a74831588a739a579ac4", "openwall"}, 
{"$pdf$2*3*128*-4*1*16*34b1b6e593787af681a9b63fa8bf563b*32*289ece9b5ce451a5d7064693dab3badf101112131415161718191a1b1c1d1e1f*32*badad1e86442699427116d3e5d5271bc80a27814fc5e80f815efeef839354c5f", "test"}, {"$pdf$4*4*128*-1028*1*16*c015cff8dbf99345ac91c84a45667784*32*0231a4c9cae29b53892874e168cfae9600000000000000000000000000000000*32*137ad7063db5114a66ce1900d47e5cab9c5d7053487d92ac978f54db86eca393", "testpassword"}, {"$pdf$5*6*256*-1028*1*16*05e5abeb21ad2e47adac1c2b2c7b7a31*127*51d3a6a09a675503383e5bc0b53da77ec5d5ea1d1998fb94e00a02a1c2e49313c177905272a4e8e68b382254ec8ed74800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000*127*dc38f01ef129aae2fca847396465ed518f9c7cf4f2c8cb4399a849d0fe9110227739ab88ddc9a6cf388ae11941270af500000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000*32*b8e137baf316e0789ffa73f888d26495c14d31f2cfff3799e339e2fa078649f5*32*835a9e07461992791914c3d62d37493e07d140937529ab43e26ac2a657152c3c", "testpassword"}, {"$pdf$5*5*256*-1028*1*16*762896ef582ca042a15f380c63ab9f2c*127*8713e2afdb65df1d3801f77a4c4da4905c49495e7103afc2deb06d9fba7949a565143288823871270d9d882075a75da600000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000*127*15d0b992974ff80529e4b616b8c4c79d787705b6c8a9e0f85446498ae2432e0027d8406b57f78b60b11341a0757d7c4a00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000*32*a7a0f3891b469ba7261ce04752dad9c6de0db9c4155c4180e721938a7d9666c7*32*2fa9a0c52badebae2c19dfa7b0005a9cfc909b92babbe7db66a794e96a9f91e3", "openwall"}, /* following are old-style hashes */ 
{"$pdf$Standard*badad1e86442699427116d3e5d5271bc80a27814fc5e80f815efeef839354c5f*289ece9b5ce451a5d7064693dab3badf101112131415161718191a1b1c1d1e1f*16*34b1b6e593787af681a9b63fa8bf563b*1*1*0*1*4*128*-4*3*2", "test"}, {"$pdf$Standard*9a1156c38ab8177598d1608df7d7e340ae639679bd66bc4cda9bc9a4eedeb170*1f300cd939dd5cf0920c787f12d16be22205e55a5bec5c9c6d563ab4fd0770d7*16*c015cff8dbf99345ac91c84a45667784*1*1*0*1*6*40*-4*2*1", "testpassword"}, {"$pdf$Standard*7303809eaf677bdb5ca64b9d8cb0ccdd47d09a7b28ad5aa522c62685c6d9e499*bf38d7a59daaf38365a338e1fc07976102f1dfd6bdb52072032f57920109b43a*16*c56bbc4145d25b468a873618cd71c2d3*1*1*0*1*6*40*-4*2*1", "test"}, {"$pdf$Standard*137ad7063db5114a66ce1900d47e5cab9c5d7053487d92ac978f54db86eca393*0231a4c9cae29b53892874e168cfae9600000000000000000000000000000000*16*c015cff8dbf99345ac91c84a45667784*1*1*0*1*6*128*-1028*3*2", "testpassword"}, {"$pdf$Standard*d83a8ab680f144dfb2ff2334c206a6060779e007701ab881767f961aecda7984*a5ed4de7e078cb75dfdcd63e8da7a25800000000000000000000000000000000*16*06a7f710cf8dfafbd394540d40984ae2*1*1*0*1*4*128*-1028*3*2", "July2099"}, {"$pdf$Standard*6a80a547b8b8b7636fcc5b322f1c63ce4b670c9b01f2aace09e48d85e1f19f83*e64eb62fc46be66e33571d50a29b464100000000000000000000000000000000*16*14a8c53ffa4a79b3ed9421ef15618420*1*1*0*1*4*128*-1028*3*2", "38r285a9"}, {"$pdf$Standard*2446dd5ed2e18b3ce1ac9b56733226018e3f5c2639051eb1c9b2b215b30bc820*fa3af175d761963c8449ee7015b7770800000000000000000000000000000000*16*12a4da1abe6b7a1ceb84610bad87236d*1*1*0*1*4*128*-1028*3*2", "WHATwhatWHERE?"}, {"$pdf$Standard*e600ecc20288ad8b0d64a929c6a83ee2517679aa0218beceea8b7986726a8cdb*38aca54678d67c003a8193381b0fa1cc101112131415161718191a1b1c1d1e1f*16*1521fbe61419fcad51878cc5d478d5ff*1*1*0*1*4*128*-3904*3*2", ""}, /* CMIYC 2013 "pro" hashes */ {"$pdf$4*4*128*-4*1*16*f7bc2744e1652cf61ca83cac8fccb535*32*f55cc5032f04b985c5aeacde5ec4270f0122456a91bae5134273a6db134c87c4*32*785d891cdcb5efa59893c78f37e7b75acef8924951039b4fa13f62d92bb3b660", "L4sV3g4z"}, 
{"$pdf$4*4*128*-4*1*16*ec8ea2af2977db1faa4a955904dc956f*32*fc413edb049720b1f8eac87a358faa740122456a91bae5134273a6db134c87c4*32*1ba7aed2f19c77ac6b5061230b62e80b48fc42918f92aef689ceb07d26204991", "ZZt0pr0x"}, {"$pdf$4*4*128*-4*1*16*56761d6da774d8d47387dccf1a84428c*32*640782cab5b7c8f6cf5eab82c38016540122456a91bae5134273a6db134c87c4*32*b5720d5f3d9675a280c6bb8050cbb169e039b578b2de4a42a40dc14765e064cf", "24Le`m0ns"}, {NULL} }; static void init(struct fmt_main *self) { #if defined (_OPENMP) omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif saved_key = mem_calloc(sizeof(*saved_key), self->params.max_keys_per_crypt); any_cracked = 0; cracked_size = sizeof(*cracked) * self->params.max_keys_per_crypt; cracked = mem_calloc(sizeof(*cracked), self->params.max_keys_per_crypt); } static void done(void) { MEM_FREE(cracked); MEM_FREE(saved_key); } static int valid(char *ciphertext, struct fmt_main *self) { char *ctcopy, *keeptr; char *p; int res; if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN) != 0) return 0; ctcopy = strdup(ciphertext); keeptr = ctcopy; ctcopy += FORMAT_TAG_LEN; if ((p = strtokm(ctcopy, "*")) == NULL) /* V */ goto err; if (!isdec(p)) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* R */ goto err; if (!isdec(p)) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* length */ goto err; if (!isdec(p)) goto err; res = atoi(p); if (res > 256) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* P */ goto err; if (!isdec_negok(p)) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* encrypt_metadata */ goto err; if ((p = strtokm(NULL, "*")) == NULL) /* length_id */ goto err; if (!isdec(p)) goto err; res = atoi(p); if (res > 32) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* id */ goto err; if (strlen(p) != res * 2) goto err; if (!ishexlc(p)) goto err; if ((p = strtokm(NULL, "*")) == NULL) /* length_u */ goto err; if (!isdec(p)) goto err; res = atoi(p); if (res > 127) goto err; 
	/* (remainder of valid(): the u and o hex fields) */
	if ((p = strtokm(NULL, "*")) == NULL)	/* u */
		goto err;
	if (strlen(p) != res * 2)	/* must match length_u parsed just above */
		goto err;
	if (!ishexlc(p))	/* lowercase hex only */
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* length_o */
		goto err;
	if (!isdec(p))
		goto err;
	res = atoi(p);
	if (res > 127)	/* o[] in struct custom_salt holds 127 bytes */
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* o */
		goto err;
	if (strlen(p) != res * 2)
		goto err;
	if (!ishexlc(p))
		goto err;
	MEM_FREE(keeptr);
	return 1;

err:
	MEM_FREE(keeptr);
	return 0;
}

/* Validate a hash in the legacy "$pdf$Standard*..." layout.  Only checks
 * field presence and shape; convert_old_to_new() below reorders the fields
 * into the current "$pdf$" layout when the hash is accepted. */
static int old_valid(char *ciphertext, struct fmt_main *self)
{
	char *ctcopy, *ptr, *keeptr;
	int res;

	if (strncmp(ciphertext, FORMAT_TAG_OLD, FORMAT_TAG_OLD_LEN))
		return 0;
	/* work on a copy: strtokm consumes the buffer */
	if (!(ctcopy = strdup(ciphertext)))
		return 0;
	keeptr = ctcopy;	/* remember the allocation for MEM_FREE */
	ctcopy += FORMAT_TAG_OLD_LEN;
	if (!(ptr = strtokm(ctcopy, "*")))	/* o_string */
		goto error;
	if (!ishexlc(ptr))
		goto error;
	if (!(ptr = strtokm(NULL, "*")))	/* u_string */
		goto error;
	if (!ishexlc(ptr))
		goto error;
	if (!(ptr = strtokm(NULL, "*")))	/* fileIDLen */
		goto error;
	if (strncmp(ptr, "16", 2))	/* legacy format always carries a 16-byte id */
		goto error;
	if (!(ptr = strtokm(NULL, "*")))	/* fileID */
		goto error;
	if (!ishexlc(ptr))
		goto error;
	if (!(ptr = strtokm(NULL, "*")))	/* encryptMetaData */
		goto error;
	res = atoi(ptr);
	if (res != 0 && res != 1)	/* boolean flag */
		goto error;
	if (!(ptr = strtokm(NULL, "*")))	/* work_with_user */
		goto error;
	res = atoi(ptr);
	if (res != 0 && res != 1)
		goto error;
	if (!(ptr = strtokm(NULL, "*")))	/* have_userpassword */
		goto error;
	res = atoi(ptr);
	if (res != 0 && res != 1)
		goto error;
	if (!(ptr = strtokm(NULL, "*")))	/* version_major */
		goto error;
	if (!(ptr = strtokm(NULL, "*")))	/* version_minor */
		goto error;
	if (!(ptr = strtokm(NULL, "*")))	/* length */
		goto error;
	res = atoi(ptr);
	if (res < 0 || res > 256)	/* key length in bits */
		goto error;
	if (!(ptr = strtokm(NULL, "*")))	/* permissions */
		goto error;
	if (!(ptr = strtokm(NULL, "*")))	/* revision */
		goto error;
	if (!(ptr = strtokm(NULL, "*")))	/* version */
		goto error;
	MEM_FREE(keeptr);
	return 1;

error:
	MEM_FREE(keeptr);
	return 0;
}

/* Rewrite a legacy "$pdf$Standard*" hash into the current "$pdf$" layout
 * (fields reordered; u/o hex lengths hard-coded to 32 bytes). */
char * convert_old_to_new(char ciphertext[])
{
	char *ctcopy =
/*
 * John the Ripper "pdf" format: salt parsing, PDF standard-security-handler
 * key derivation (RC4/AES, revisions 2-6) and the crypt/compare plumbing.
 *
 * NOTE(review): this chunk opens inside convert_old_to_new(); the function
 * signature and the lvalue of the strdup() call below are above the visible
 * region, so the tail is kept verbatim.
 */
strdup(ciphertext);
	char *keeptr = ctcopy;
	/* NOTE(review): out is sized strlen(ctcopy) yet gains FORMAT_TAG and two
	   "*32*" runs below -- presumably the dropped old-format fields leave
	   enough slack; confirm against old_valid(). */
	char *out = mem_alloc_tiny(strlen(ctcopy), MEM_ALIGN_NONE);
	const char *fields[14];
	char *p;
	int c = 0;

	/* split the old-style hash into its 14 '*'-separated fields */
	p = strtokm(ctcopy, "*");
	for (c = 0; c < 14; c++) {
		fields[c] = p;
		p = strtokm(NULL, "*");
	}
	/* re-emit the fields in the new "$pdf$" order */
	strcpy(out,FORMAT_TAG);
	strcat(out,fields[13]);
	strcat(out,"*");
	strcat(out,fields[12]);
	strcat(out,"*");
	strcat(out,fields[10]);
	strcat(out,"*");
	strcat(out,fields[11]);
	strcat(out,"*");
	strcat(out,fields[5]);
	strcat(out,"*");
	strcat(out,fields[3]);
	strcat(out,"*");
	strcat(out,fields[4]);
	strcat(out,"*32*");
	strcat(out,fields[2]);
	strcat(out,"*32*");
	strcat(out,fields[1]);
	MEM_FREE(keeptr);
	return out;
}

/* Canonicalize user input: old-format hashes are rewritten into the new
 * "$pdf$" layout, anything else is passed through untouched. */
static char *prepare(char *split_fields[10], struct fmt_main *self)
{
	// if it is the old format
	if (strncmp(split_fields[1], FORMAT_TAG_OLD, FORMAT_TAG_OLD_LEN) == 0){
		if(old_valid(split_fields[1], self)) {
			char * in_new_format = convert_old_to_new(split_fields[1]);
			// following line segfaults!
			// strcpy(split_fields[1], in_new_format);
			return in_new_format;
		}else{
			//Return something invalid
			return "";
		}
	}
	return split_fields[1];
}

/* Parse a "$pdf$V*R*length*P*encrypt_metadata*length_id*id*length_u*u*
 * length_o*o" hash into a custom_salt.  Returns a pointer to a static
 * buffer, so the result is only valid until the next call (standard JtR
 * salt idiom -- the caller copies SALT_SIZE bytes). */
static void *get_salt(char *ciphertext)
{
	char *ctcopy = strdup(ciphertext);
	char *keeptr = ctcopy;
	int i;
	char *p;
	static struct custom_salt cs;

	memset(&cs, 0, sizeof(cs));
	ctcopy += FORMAT_TAG_LEN;	/* skip over "$pdf$" marker */
	p = strtokm(ctcopy, "*");
	cs.V = atoi(p);
	p = strtokm(NULL, "*");
	cs.R = atoi(p);
	p = strtokm(NULL, "*");
	cs.length = atoi(p);
	p = strtokm(NULL, "*");
	cs.P = atoi(p);
	p = strtokm(NULL, "*");
	cs.encrypt_metadata = atoi(p);
	p = strtokm(NULL, "*");
	cs.length_id = atoi(p);
	p = strtokm(NULL, "*");
	/* hex-decode the document /ID, /U and /O strings */
	for (i = 0; i < cs.length_id; i++)
		cs.id[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])];
	p = strtokm(NULL, "*");
	cs.length_u = atoi(p);
	p = strtokm(NULL, "*");
	for (i = 0; i < cs.length_u; i++)
		cs.u[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])];
	p = strtokm(NULL, "*");
	cs.length_o = atoi(p);
	p = strtokm(NULL, "*");
	for (i = 0; i < cs.length_o; i++)
		cs.o[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])];
	MEM_FREE(keeptr);
	return (void *)&cs;
}

/* Remember the current salt; crypt_all() and the key-derivation helpers
 * read it through the global crypt_out pointer. */
static void set_salt(void *salt)
{
	crypt_out = (struct custom_salt *)salt;
}

/* Store a candidate password, truncated to PLAINTEXT_LENGTH. */
static void pdf_set_key(char *key, int index)
{
	int saved_len = strlen(key);
	if (saved_len > PLAINTEXT_LENGTH)
		saved_len = PLAINTEXT_LENGTH;
	memcpy(saved_key[index], key, saved_len);
	saved_key[index][saved_len] = 0;
}

static char *get_key(int index)
{
	return saved_key[index];
}

/* The fixed 32-byte password padding string used by the PDF standard
 * security handler (consumed by algorithm 3.2 below). */
static const unsigned char padding[32] =
{
	0x28, 0xbf, 0x4e, 0x5e, 0x4e, 0x75, 0x8a, 0x41,
	0x64, 0x00, 0x4e, 0x56, 0xff, 0xfa, 0x01, 0x08,
	0x2e, 0x2e, 0x00, 0xb6, 0xd0, 0x68, 0x3e, 0x80,
	0x2f, 0x0c, 0xa9, 0xfe, 0x64, 0x53, 0x69, 0x7a
};

/* Compute an encryption key (PDF 1.7 algorithm 3.2) */
/* password/pwlen: candidate; key receives crypt_out->length/8 bytes. */
static void pdf_compute_encryption_key(unsigned char *password, int pwlen,
    unsigned char *key)
{
	unsigned char buf[32];
	unsigned int p;
	int n;
	MD5_CTX md5;

	n = crypt_out->length / 8;

	/* Step 1 - copy and pad password string */
	if (pwlen > 32)
		pwlen = 32;
	memcpy(buf, password, pwlen);
	memcpy(buf + pwlen, padding, 32 - pwlen);

	/* Step 2 - init md5 and pass value of step 1 */
	MD5_Init(&md5);
	MD5_Update(&md5, buf, 32);
	/* Step 3 - pass O value */
	MD5_Update(&md5, crypt_out->o, 32);
	/* Step 4 - pass P value as unsigned int, low-order byte first */
	p = (unsigned int) crypt_out->P;
	buf[0] = (p) & 0xFF;
	buf[1] = (p >> 8) & 0xFF;
	buf[2] = (p >> 16) & 0xFF;
	buf[3] = (p >> 24) & 0xFF;
	MD5_Update(&md5, buf, 4);
	/* Step 5 - pass first element of ID array */
	MD5_Update(&md5, crypt_out->id, crypt_out->length_id);
	/* Step 6 (revision 4 or greater) - if metadata is not encrypted pass 0xFFFFFFFF */
	if (crypt_out->R >= 4) {
		if (!crypt_out->encrypt_metadata) {
			buf[0] = 0xFF;
			buf[1] = 0xFF;
			buf[2] = 0xFF;
			buf[3] = 0xFF;
			MD5_Update(&md5, buf, 4);
		}
	}
	/* Step 7 - finish the hash */
	MD5_Final(buf, &md5);
	/* Step 8 (revision 3 or greater) - do some voodoo 50 times */
	if (crypt_out->R >= 3) {
		/* for (i = 0; i < 50; i++) {
			MD5_Init(&md5);
			MD5_Update(&md5, buf, n);
			MD5_Final(buf, &md5);
		} */
		/* md5_50 is presumably an optimized equivalent of the
		   commented reference loop above */
		md5_50(buf);
	}
	/* Step 9 - the key is the first 'n' bytes of the result */
	memcpy(key, buf, n);
}

/* Compute an encryption key (PDF 1.7 ExtensionLevel 3 algorithm 3.2a) */
/* ownerkey != 0 selects the owner-key variant (salted with O's validation
 * salt plus the 48-byte U string) instead of the user-key variant. */
static void pdf_compute_encryption_key_r5(unsigned char *password, int pwlen,
    int ownerkey, unsigned char *validationkey)
{
	unsigned char buffer[128 + 8 + 48];
	SHA256_CTX sha256;

	/* Step 2 - truncate UTF-8 password to 127 characters */
	if (pwlen > 127)
		pwlen = 127;
	/* Step 3/4 - test password against owner/user key and compute encryption key */
	memcpy(buffer, password, pwlen);
	if (ownerkey) {
		memcpy(buffer + pwlen, crypt_out->o + 32, 8);
		memcpy(buffer + pwlen + 8, crypt_out->u, 48);
	} else
		memcpy(buffer + pwlen, crypt_out->u + 32, 8);
	SHA256_Init(&sha256);
	SHA256_Update(&sha256, buffer, pwlen + 8 + (ownerkey ? 48 : 0));
	SHA256_Final(validationkey, &sha256);
}

/* SumatraPDF: support crypt version 5 revision 6 */
/*
 * Compute an encryption key (PDF 1.7 ExtensionLevel 8 algorithm 3.2b)
 * http://esec-lab.sogeti.com/post/The-undocumented-password-validation-algorithm-of-Adobe-Reader-X
 */
static void pdf_compute_hardened_hash_r6(unsigned char *password, int pwlen,
    unsigned char salt[8], unsigned char *ownerkey, unsigned char hash[32])
{
	unsigned char data[(128 + 64 + 48) * 64];
	unsigned char block[64];
	int block_size = 32;
	int data_len = 0;
	int i, j, sum;
	SHA256_CTX sha256;
	SHA512_CTX sha384;
	SHA512_CTX sha512;
	AES_KEY aes;

	/* Step 1: calculate initial data block */
	SHA256_Init(&sha256);
	SHA256_Update(&sha256, password, pwlen);
	SHA256_Update(&sha256, salt, 8);
	if (ownerkey)
		SHA256_Update(&sha256, ownerkey, 48);
	SHA256_Final(block, &sha256);

	/* at least 64 rounds, then keep going while i < last_byte(data) + 32;
	 * the i < 64 test short-circuits the data[] read on the first passes,
	 * while data_len is still 0 */
	for (i = 0; i < 64 || i < data[data_len * 64 - 1] + 32; i++)
	{
		/* Step 2: repeat password and data block 64 times */
		memcpy(data, password, pwlen);
		memcpy(data + pwlen, block, block_size);
		// ownerkey is always NULL
		// memcpy(data + pwlen + block_size, ownerkey, ownerkey ? 48 : 0);
		data_len = pwlen + block_size + (ownerkey ? 48 : 0);
		for (j = 1; j < 64; j++)
			memcpy(data + j * data_len, data, data_len);

		/* Step 3: encrypt data using data block as key and iv */
		AES_set_encrypt_key(block, 128, &aes);
		// aes_crypt_cbc(&aes, AES_ENCRYPT, data_len * 64, block + 16, data, data);
		AES_cbc_encrypt(data, data, data_len * 64, &aes, block + 16, AES_ENCRYPT);

		/* Step 4: determine SHA-2 hash size for this round */
		for (j = 0, sum = 0; j < 16; j++)
			sum += data[j];

		/* Step 5: calculate data block for next round */
		block_size = 32 + (sum % 3) * 16;
		switch (block_size)
		{
		case 32:
			SHA256_Init(&sha256);
			SHA256_Update(&sha256, data, data_len * 64);
			SHA256_Final(block, &sha256);
			break;
		case 48:
			SHA384_Init(&sha384);
			SHA384_Update(&sha384, data, data_len * 64);
			SHA384_Final(block, &sha384);
			break;
		case 64:
			SHA512_Init(&sha512);
			SHA512_Update(&sha512, data, data_len * 64);
			SHA512_Final(block, &sha512);
			break;
		}
	}
	memset(data, 0, sizeof(data));
	memcpy(hash, block, 32);
}

/* Computing the user password (PDF 1.7 algorithm 3.4 and 3.5) */
/* Derives, for the revision in crypt_out->R, the value that should match
 * the stored /U entry when `password` is correct; result goes to output. */
static void pdf_compute_user_password(unsigned char *password,
    unsigned char *output)
{
	int pwlen = strlen((char*)password);
	unsigned char key[128];

	if (crypt_out->R == 2) {
		RC4_KEY arc4;
		int n;
		n = crypt_out->length / 8;
		pdf_compute_encryption_key(password, pwlen, key);
		RC4_set_key(&arc4, n, key);
		RC4(&arc4, 32, padding, output);
	}

	if (crypt_out->R == 3 || crypt_out->R == 4) {
		unsigned char xor[32];
		unsigned char digest[16];
		MD5_CTX md5;
		RC4_KEY arc4;
		int i, x, n;

		n = crypt_out->length / 8;
		pdf_compute_encryption_key(password, pwlen, key);
		MD5_Init(&md5);
		MD5_Update(&md5, (char*)padding, 32);
		MD5_Update(&md5, crypt_out->id, crypt_out->length_id);
		MD5_Final(digest, &md5);
		RC4_set_key(&arc4, n, key);
		RC4(&arc4, 16, digest, output);
		/* 19 extra RC4 passes, each keyed with (key XOR pass number) */
		for (x = 1; x <= 19; x++) {
			for (i = 0; i < n; i++)
				xor[i] = key[i] ^ x;
			RC4_set_key(&arc4, n, xor);
			RC4(&arc4, 16, output, output);
		}
		/* crypt_all only compares 16 bytes for R3/R4; the tail is
		   filled with padding */
		memcpy(output + 16, padding, 16);
	}

	if (crypt_out->R == 5) {
		pdf_compute_user_password_r5(password, output);
	}
}
RelativeNeighborhoodGraph.h
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #ifndef _SPTAG_COMMON_RNG_H_ #define _SPTAG_COMMON_RNG_H_ #include "NeighborhoodGraph.h" namespace SPTAG { namespace COMMON { class RelativeNeighborhoodGraph: public NeighborhoodGraph { public: RelativeNeighborhoodGraph() { m_pNeighborhoodGraph.SetName("RNG"); } void RebuildNeighbors(VectorIndex* index, const SizeType node, SizeType* nodes, const BasicResult* queryResults, const int numResults) { DimensionType count = 0; for (int j = 0; j < numResults && count < m_iNeighborhoodSize; j++) { const BasicResult& item = queryResults[j]; if (item.VID < 0) break; if (item.VID == node) continue; bool good = true; for (DimensionType k = 0; k < count; k++) { if (index->ComputeDistance(index->GetSample(nodes[k]), index->GetSample(item.VID)) <= item.Dist) { good = false; break; } } if (good) nodes[count++] = item.VID; } for (DimensionType j = count; j < m_iNeighborhoodSize; j++) nodes[j] = -1; } void InsertNeighbors(VectorIndex* index, const SizeType node, SizeType insertNode, float insertDist) { std::lock_guard<std::mutex> lock(m_dataUpdateLock[node]); SizeType* nodes = m_pNeighborhoodGraph[node]; SizeType tmpNode; float tmpDist; for (DimensionType k = 0; k < m_iNeighborhoodSize; k++) { tmpNode = nodes[k]; if (tmpNode < -1) break; if (tmpNode < 0 || (tmpDist = index->ComputeDistance(index->GetSample(node), index->GetSample(tmpNode))) > insertDist || (insertDist == tmpDist && insertNode < tmpNode)) { bool good = true; for (DimensionType t = 0; t < k; t++) { if (index->ComputeDistance(index->GetSample(insertNode), index->GetSample(nodes[t])) < insertDist) { good = false; break; } } if (good) { nodes[k] = insertNode; while (tmpNode >= 0 && ++k < m_iNeighborhoodSize && nodes[k] >= -1 && index->ComputeDistance(index->GetSample(tmpNode), index->GetSample(insertNode)) >= index->ComputeDistance(index->GetSample(node), index->GetSample(tmpNode))) { std::swap(tmpNode, nodes[k]); } } break; } 
} } float GraphAccuracyEstimation(VectorIndex* index, const SizeType samples, const std::unordered_map<SizeType, SizeType>* idmap = nullptr) { DimensionType* correct = new DimensionType[samples]; #pragma omp parallel for schedule(dynamic) for (SizeType i = 0; i < samples; i++) { SizeType x = COMMON::Utils::rand(m_iGraphSize); //int x = i; COMMON::QueryResultSet<void> query(nullptr, m_iCEF); for (SizeType y = 0; y < m_iGraphSize; y++) { if ((idmap != nullptr && idmap->find(y) != idmap->end())) continue; float dist = index->ComputeDistance(index->GetSample(x), index->GetSample(y)); query.AddPoint(y, dist); } query.SortResult(); SizeType * exact_rng = new SizeType[m_iNeighborhoodSize]; RebuildNeighbors(index, x, exact_rng, query.GetResults(), m_iCEF); correct[i] = 0; for (DimensionType j = 0; j < m_iNeighborhoodSize; j++) { if (exact_rng[j] == -1) { correct[i] += m_iNeighborhoodSize - j; break; } for (DimensionType k = 0; k < m_iNeighborhoodSize; k++) if ((m_pNeighborhoodGraph)[x][k] == exact_rng[j]) { correct[i]++; break; } } delete[] exact_rng; } float acc = 0; for (SizeType i = 0; i < samples; i++) acc += float(correct[i]); acc = acc / samples / m_iNeighborhoodSize; delete[] correct; return acc; } }; } } #endif
rectangle.h
// This is AutoMine/GraphZero implementation #if 0 #pragma omp parallel for schedule(dynamic,1) reduction(+:counter) for (vidType v0 = 0; v0 < g.V(); v0++) { for (vidType v1 : g.N(v0)) { if (v1 >= v0) break; for (vidType v2 : g.N(v0)) { if (v2 >= v1) break; //counter += bounded_intersect(g, v1, v2, v0); counter += intersection_num(g.N(v1), g.N(v2), v0); } } } #else #pragma omp parallel for schedule(dynamic,1) reduction(+:counter) for (vidType v0 = 0; v0 < g.V(); v0++) { VertexSet y0 = g.N(v0); VertexSet y0f0 = bounded(y0,v0); for(vidType idx1 = 0; idx1 < y0f0.size(); idx1++) { vidType v1 = y0f0.begin()[idx1]; VertexSet y1 = g.N(v1); VertexSet n0f0y1; difference_set(n0f0y1,y1, y0); VertexSet y0f0n1f1 = difference_set(y0f0, y1, v1); for (vidType idx2 = 0; idx2 < y0f0n1f1.size(); idx2++) { vidType v2 = y0f0n1f1.begin()[idx2]; VertexSet y2 = g.N(v2); counter += intersection_num(n0f0y1, y2, v0); } } } #endif
dynamic_module.c
// RUN: %libomptarget-compile-generic -DSHARED -fPIC -shared -o %t.so && %libomptarget-compile-generic %t.so && %libomptarget-run-generic 2>&1 | %fcheck-generic #ifdef SHARED void foo() {} #else #include <stdio.h> int main() { #pragma omp target ; // CHECK: DONE. printf("%s\n", "DONE."); return 0; } #endif
hw_1.c
#include <stdio.h> #include <math.h> #include <stdlib.h> #include <omp.h> #define PI 3.141592653589793 #define FLOAT double #define BETA 1.0 void initialize(FLOAT *x, FLOAT *v, int N); FLOAT F(FLOAT x, FLOAT x_before, FLOAT x_after); FLOAT energy(FLOAT *x, FLOAT *v, int m, int k); int main(int argc, char **argv){ FLOAT delta_t = 5E-3; int N = 64; FLOAT T = 5.0 * pow(N,2.2); int N_T = T/delta_t; FLOAT E_k_1 = 0.0; FLOAT E_k_2 = 0.0; FLOAT E_k_3 = 0.0; FLOAT* x = malloc(N * sizeof(FLOAT)); FLOAT* v = malloc(N * sizeof(FLOAT)); int t; int n; int n_proc; FLOAT F_x; n_proc = atoi(argv[1]); initialize(x, v, N); omp_set_num_threads(n_proc); for(t = 0; t < N_T; t++){ #pragma omp parallel for shared(v,x) private(F_x) for(n = 1; n < N-1; n++){ F_x = F(x[n], x[n-1], x[n+1]); v[n] += F_x * delta_t * 0.5; } #pragma omp parallel for shared(v,x) for(n = 1; n < N-1; n++){ x[n] += v[n] * delta_t; } #pragma omp parallel for shared(v,x) private(F_x) for(n = 1; n < N-1; n++){ F_x = F(x[n], x[n-1], x[n+1]); v[n] += F_x * delta_t * 0.5; } if(!(t%(N_T/1000))){ E_k_1 = energy(x, v, N, 1); E_k_2 = energy(x, v, N, 2); E_k_3 = energy(x, v, N, 3); printf("%d %e %e %e\n", t, E_k_1, E_k_2, E_k_3); } } return(0); } void initialize(FLOAT *x, FLOAT *v, int m){ int i; for(i=0; i<m; i++){ x[i] = sin(1.0*PI*(i)/(FLOAT)(m-1)); v[i] = 0.0; } } FLOAT F(FLOAT x, FLOAT x_before, FLOAT x_after){ FLOAT F_value; F_value = (x_after - 2.0 * x + x_before); F_value += BETA * (x_after- x) * (x_after - x); F_value -= BETA * (x-x_before) * (x-x_before); return F_value; } FLOAT energy(FLOAT *x, FLOAT *v, int m, int k){ int i; FLOAT A_k, A_k_dot, omega_k_2, E_k; A_k = 0.0; for(i=0;i<m ;i++){ A_k += sqrt(2.0/(m+1)) * x[i] * sin(1.0 * (i) * k * PI /(m)); } A_k_dot = 0.0; for(i=0;i<m ;i++){ A_k_dot += sqrt(2.0/(m+1)) * v[i] * sin(1.0 * (i) * k * PI /(m)); } omega_k_2 = 4.0* pow(sin(1.0 * k * PI / (2.0*(m))), 2.0); E_k = 0.5 * (A_k_dot * A_k_dot + omega_k_2 * A_k * A_k); return E_k; }
fusedmmh.h
#ifndef DGL_ARRAY_CPU_FUSEDMMH_H_ #define DGL_ARRAY_CPU_FUSEDMMH_H_ #include <dgl/array.h> #include <dgl/bcast.h> #include <math.h> #include <iostream> #include <omp.h> #include "../selector.h" #include "sddmm.h" //#include "./FusedMM/kernels/include/kernels.h" #include "./FusedMM/fusedMM.h" //#include "fusedMM.h" #define SM_TABLE_SIZE 2048 #define SM_BOUND 5.0 #define SM_RESOLUTION SM_TABLE_SIZE/(2.0 * SM_BOUND) using namespace std; VALUETYPE *SM_TABLE; void uinit_SM_TABLE() { VALUETYPE x; SM_TABLE = (VALUETYPE*)malloc(SM_TABLE_SIZE*sizeof(VALUETYPE)); assert(SM_TABLE); for(INDEXTYPE i = 0; i < SM_TABLE_SIZE; i++) { x = 2.0 * SM_BOUND * i / SM_TABLE_SIZE - SM_BOUND; SM_TABLE[i] = 1.0 / (1 + exp(-x)); } } inline VALUETYPE uscale_SM(VALUETYPE val) { VALUETYPE sval; /* hopefully compiler will figure out and replace it max min instruction */ sval = (val > SM_BOUND) ? SM_BOUND : val; sval = (val < -SM_BOUND) ? -SM_BOUND : val; return(sval); } VALUETYPE ufast_SM(VALUETYPE v) { if (v > SM_BOUND) return 1.0; else if (v < -SM_BOUND) return 0.0; return SM_TABLE[(INDEXTYPE)((v + SM_BOUND) * SM_RESOLUTION)]; } int SOP_UDEF_FUNC(VALUETYPE val, VALUETYPE &out) { out = 1.0 - ufast_SM(val); return FUSEDMM_SUCCESS_RETURN; } VALUETYPE scale(VALUETYPE v){ if(v > SM_BOUND) return SM_BOUND; else if(v < -SM_BOUND) return -SM_BOUND; return v; } namespace dgl { namespace aten { namespace cpu { template <typename DType> DType scale(DType v){ if(v > SM_BOUND) return SM_BOUND; else if(v < -SM_BOUND) return -SM_BOUND; return v; } template <typename DType> DType fast_SM(DType v, DType *sm_table){ if(v > SM_BOUND) return 1.0; else if(v < -SM_BOUND) return 0.0; return sm_table[(int)((v + SM_BOUND) * SM_RESOLUTION)]; } template <typename IdType, typename DType> void init_SM_TABLE(DType *sm_table){ DType x; for(IdType i = 0; i < SM_TABLE_SIZE; i++){ x = 2.0 * SM_BOUND * i / SM_TABLE_SIZE - SM_BOUND; sm_table[i] = 1.0 / (1 + exp(-x)); } } template <typename IdType, typename DType> void 
FUSEDMMCsrTdist(const IdType *indptr, const IdType *indices, const IdType *edges, const DType *X, const DType *Y, DType *O, const IdType N, const int64_t dim) { #pragma omp parallel for for (IdType rid = 0; rid < N; ++rid) { const IdType row_start = indptr[rid], row_end = indptr[rid + 1]; const IdType iindex = rid * dim; DType T[dim]; for (IdType j = row_start; j < row_end; ++j){ const IdType cid = indices[j]; const IdType jindex = cid * dim; DType attrc = 0; for (int64_t k = 0; k < dim; ++k) { T[k] = X[iindex + k] - Y[jindex + k]; attrc += T[k] * T[k]; } DType d1 = -2.0 / (1.0 + attrc); for (int64_t k = 0; k < dim; ++k) { T[k] = scale<DType>(T[k] * d1); O[iindex+k] = O[iindex+k] + T[k]; } } } } template <typename IdType, typename DType> void FUSEDMMCsrSigmoid(const IdType *indptr, const IdType *indices, const IdType *edges, const DType *X, const DType *Y, DType *O, const IdType N, const int64_t dim) { DType *sm_table; sm_table = static_cast<DType *> (::operator new (sizeof(DType[SM_TABLE_SIZE]))); init_SM_TABLE<IdType, DType>(sm_table); cout << "Calling FUSEDMMCsrSigmoid..." << indptr[0] << ":" << indices[0] << ":" << edges[0] << ":" << X[0] << endl; #pragma omp parallel for for (IdType rid = 0; rid < N; ++rid){ const IdType row_start = indptr[rid], row_end = indptr[rid + 1]; const IdType iindex = rid * dim; for (IdType j = row_start; j < row_end; ++j){ const IdType cid = indices[j]; const IdType jindex = cid * dim; DType attrc = 0; for (int64_t k = 0; k < dim; ++k) { attrc += X[iindex + k] * X[jindex + k]; } DType d1 = 0.5;//fast_SM<DType>(attrc, sm_table); for (int64_t k = 0; k < dim; ++k) { O[iindex+k] = O[iindex+k] + (1.0 - d1) * X[jindex + k]; } } } cout << "End of Calling FUSEDMMCsrSigmoid..." << endl; } template <typename IdType, typename DType> void FUSEDMMCsrGCN(const IdType *indptr, const IdType *indices, const IdType *edges, const DType *X, const DType *Y, DType *O, const IdType N, const int64_t dim) { cout << "Calling FUSEDMMCsrGCN..." 
<< indptr[0] << ":" << indices[0] << ":" << edges[0] << ":" << X[0] << endl; #pragma omp parallel for for (IdType rid = 0; rid < N; ++rid){ const IdType row_start = indptr[rid], row_end = indptr[rid + 1]; const IdType iindex = rid * dim; for (IdType j = row_start; j < row_end; ++j){ const IdType cid = indices[j]; const IdType jindex = cid * dim; for (int64_t k = 0; k < dim; ++k) { O[iindex+k] = O[iindex+k] + X[jindex + k]; } } } cout << "End of Calling FUSEDMMCsrGCN..." << endl; } template <typename IdType, typename DType> void FUSEDMMCsr(const BcastOff& bcast, const CSRMatrix& csr, NDArray lhs, NDArray rhs, NDArray out, int ftype = 1) { const IdType* indptr = csr.indptr.Ptr<IdType>(); const IdType* indices = csr.indices.Ptr<IdType>(); const IdType* edges = csr.data.Ptr<IdType>(); const DType* X = lhs.Ptr<DType>(); const DType* Y = rhs.Ptr<DType>(); const int32_t dim = bcast.out_len; std::cout << "From FusedMMCsr function...(dim):" << dim << ", num_rows:" << csr.num_rows << endl; DType* O = out.Ptr<DType>(); FUSEDMMCsrGCN<IdType, DType>(indptr, indices, edges, X, Y, O, csr.num_rows, dim); std::cout << "Returning from FusedMMCsr function..." << endl; /* if(ftype == 1){ uinit_SM_TABLE(); int32_t imsg; imsg = VOP_COPY_RHS | ROP_DOT | SOP_UDEF | VSC_MUL | AOP_ADD; fusedMM_csr(imsg, csr.num_rows, csr.num_rows, dim, 1.0, 0.0, csr.num_rows, csr.num_rows, NULL, (const long int*)indices, (const long int*)indptr, (const long int*)indptr+1, (const float*)X, dim, (const float*)Y, dim, 1.0, (float*)O, dim); }else{ int32_t imsg; imsg = VOP_SUBR | ROP_UDEF | SOP_UDEF | VSC_MUL | AOP_ADD; fusedMM_csr(imsg, csr.num_rows, csr.num_rows, dim, 1.0, 0.0, csr.num_rows, csr.num_rows, NULL, (const long int*)indices, (const long int*)indptr, (const long int*)indptr+1, (const float*)X, dim, (const float*)Y, dim, 1.0, (float*)O, dim); } */ } } } } #endif
rf_matrix.h
#ifndef RF_MATRIX_H #define RF_MATRIX_H // headers #include <cstdio> #include <cstdlib> #include <cstring> #include <algorithm> #include <vector> #include <cmath> #include <cstddef> #include <assert.h> #include <omp.h> #include <iostream> #include <fstream> #include <sstream> #if __cplusplus >= 201103L || (defined(_MSC_VER) && (_MSC_VER >= 1500)) // Visual Studio 2008 #define CPP11 #endif #ifdef _MSC_VER #if _MSC_VER >= 1600 #include <cstdint> #else typedef __int8 int8_t; typedef __int16 int16_t; typedef __int32 int32_t; typedef __int64 int64_t; typedef unsigned __int8 uint8_t; typedef unsigned __int16 uint16_t; typedef unsigned __int32 uint32_t; typedef unsigned __int64 uint64_t; #endif #else #if !defined(_MSC_VER) && defined(CPP11) #include <cstdint> #else typedef short int int16_t; typedef int int32_t; typedef long int64_t; typedef unsigned char uint8_t; typedef unsigned short int uint16_t; typedef unsigned int uint32_t; typedef unsigned long uint64_t; #endif #endif /* random number genrator: simulate the interface of python random module*/ #include <limits> #if defined(CPP11) #include <random> template<typename engine_t=std::mt19937> struct random_number_generator : public engine_t { typedef typename engine_t::result_type result_type; random_number_generator(unsigned seed=0): engine_t(seed){ } result_type randrange(result_type end=engine_t::max()) { return engine_t::operator()() % end; } template<class T=double, class T2=double> T uniform(T start=0.0, T2 end=1.0) { return std::uniform_real_distribution<T>(start, (T)end)(*this); } template<class T=double> T normal(T mean=0.0, T stddev=1.0) { return std::normal_distribution<T>(mean, stddev)(*this); } template<class T=int, class T2=T> T randint(T start=0, T2 end=std::numeric_limits<T>::max()) { return std::uniform_int_distribution<T>(start, end)(*this); } template<class RandIter> void shuffle(RandIter first, RandIter last) { std::shuffle(first, last, *this); } }; #else #include <tr1/random> template<typename 
engine_t=std::tr1::mt19937> struct random_number_generator : public engine_t { typedef typename engine_t::result_type result_type; random_number_generator(unsigned seed=0): engine_t(seed) { } result_type operator()() { return engine_t::operator()(); } result_type operator()(result_type n) { return randint(result_type(0), result_type(n-1)); } result_type randrange(result_type end=engine_t::max()) { return engine_t::operator()() % end; } template<class T, class T2> T uniform(T start=0.0, T2 end=1.0) { typedef std::tr1::uniform_real<T> dist_t; return std::tr1::variate_generator<engine_t*, dist_t>(this, dist_t(start,(T)end))(); } template<class T, class T2> T normal(T mean=0.0, T2 stddev=1.0) { typedef std::tr1::normal_distribution<T> dist_t; return std::tr1::variate_generator<engine_t*, dist_t>(this, dist_t(mean, (T)stddev))(); } template<class T, class T2> T randint(T start=0, T2 end=std::numeric_limits<T>::max()) { typedef std::tr1::uniform_int<T> dist_t; return std::tr1::variate_generator<engine_t*, dist_t>(this, dist_t(start,end))(); } template<class RandIter> void shuffle(RandIter first, RandIter last) { std::random_shuffle(first, last, *this); } }; #endif typedef random_number_generator<> rng_t; template<typename T> void gen_permutation_pair(size_t size, std::vector<T> &perm, std::vector<T> &inv_perm, int seed=0) { perm.resize(size); for(size_t i = 0; i < size; i++) perm[i] = i; rng_t rng(seed); rng.shuffle(perm.begin(), perm.end()); //std::srand(seed); //std::random_shuffle(perm.begin(), perm.end()); inv_perm.resize(size); for(size_t i = 0; i < size; i++) inv_perm[perm[i]] = i; } //#include "zlib_util.h" #define MALLOC(type, size) (type*)malloc(sizeof(type)*(size)) #define CALLOC(type, size) (type*)calloc((size), sizeof(type)) #define REALLOC(ptr, type, size) (type*)realloc((ptr), sizeof(type)*(size)) typedef unsigned major_t; const major_t ROWMAJOR = 1U; const major_t COLMAJOR = 2U; const major_t default_major = COLMAJOR; // Zip Iterator // Commom usage: 
std::sort(zip_iter(A.begin(),B.begin()), zip_iter(A.end(),B.end())); template<class T1, class T2> struct zip_body; template<class T1, class T2> struct zip_ref; template<class IterT1, class IterT2> struct zip_it; template<class IterT1, class IterT2> zip_it<IterT1, IterT2> zip_iter(IterT1 x, IterT2 y); #define dvec_t dense_vector template<typename val_type> class dvec_t; #define svec_t sparse_vector template<typename val_type> class svec_t; #define sdvec_t sparse_dense_vector template<typename val_type> class sdvec_t; // a dense vector with sparse indices #define gvec_t general_vector template<typename val_type> class gvec_t { public: size_t len; gvec_t(size_t len=0): len(len){} size_t size() const { return len; } virtual bool is_sparse() const {return false;} virtual bool is_dense() const {return false;} svec_t<val_type>& get_sparse() {assert(is_sparse()); return static_cast<svec_t<val_type>&>(*this);} const svec_t<val_type>& get_sparse() const {assert(is_sparse()); return static_cast<const svec_t<val_type>&>(*this);} dvec_t<val_type>& get_dense() {assert(is_dense()); return static_cast<dvec_t<val_type>&>(*this);} const dvec_t<val_type>& get_dense() const {assert(is_dense()); return static_cast<const dvec_t<val_type>&>(*this);} }; #define dmat_t dense_matrix template<typename val_type> class dmat_t; #define smat_t sparse_matrix template<typename val_type> class smat_t; #define eye_t identity_matrix template<typename val_type> class eye_t; #define gmat_t general_matrix template<typename val_type> class gmat_t { public: size_t rows, cols; gmat_t(size_t rows=0, size_t cols=0): rows(rows), cols(cols) { } size_t num_rows() const { return rows; } size_t num_cols() const { return cols; } virtual bool is_sparse() const { return false; } virtual bool is_dense() const { return false; } virtual bool is_identity() const { return false; } bool is_zero() const { return !is_sparse() && !is_dense() && !is_identity(); } smat_t<val_type>& get_sparse() { assert(is_sparse()); return 
static_cast<smat_t<val_type>&>(*this); } const smat_t<val_type>& get_sparse() const { assert(is_sparse()); return static_cast<const smat_t<val_type>&>(*this); } dmat_t<val_type>& get_dense() { assert(is_dense()); return static_cast<dmat_t<val_type>&>(*this); } const dmat_t<val_type>& get_dense() const { assert(is_dense()); return static_cast<const dmat_t<val_type>&>(*this); } virtual dvec_t<val_type>& Xv(const dvec_t<val_type>& v, dvec_t<val_type>& Xv, bool addson=0) const { if(addson == 0) memset(Xv.buf, 0, sizeof(val_type) * Xv.len); return Xv; } virtual dvec_t<val_type>& Xv(const svec_t<val_type>& v, dvec_t<val_type>& Xv, bool addson=0) const { if(addson == 0) memset(Xv.buf, 0, sizeof(val_type) * Xv.len); return Xv; } dvec_t<val_type>& Xv(const gvec_t<val_type>& v, dvec_t<val_type>& Xv, bool addson=0) const { if(v.is_sparse()) return this->Xv(v.get_sparse(), Xv, addson); else if(v.is_dense()) return this->Xv(v.get_dense(), Xv, addson); else // Should not be here return Xv; } virtual dvec_t<val_type>& XTu(const dvec_t<val_type>& u, dvec_t<val_type>& XTu, bool addson=0) const { if(addson == 0) memset(XTu.buf, 0, sizeof(val_type) * XTu.len); return XTu; } virtual dvec_t<val_type>& XTu(const svec_t<val_type>& u, dvec_t<val_type>& XTu, bool addson=0) const { if(addson == 0) memset(XTu.buf, 0, sizeof(val_type) * XTu.len); return XTu; } dvec_t<val_type>& XTu(const gvec_t<val_type>& u, dvec_t<val_type>& XTu, bool addson=0) const { if(u.is_sparse()) return this->XTu(u.get_sparse(), XTu, addson); else if(u.is_dense()) return this->XTu(u.get_dense(), XTu, addson); else // Should not be here return XTu; } }; #define coo_t coo_matrix template<typename val_type> class coo_t; template<typename val_type> class entry_t; template<typename val_type> class entry_iterator_t; // iterator base class template<typename val_type> class file_iterator_t; // iterator for files with (i,j,v) tuples template<typename val_type> class svmlight_file_iterator_t; // iterator for svmlight files 
template<typename val_type> class coo_iterator_t; //iterator for three vectors (I, J, V) tuples template<typename val_type> class smat_iterator_t; // iterator for nonzero entries in smat_t template<typename val_type> class smat_subset_iterator_t; // iterator for nonzero entries in a subset template<typename val_type> class dmat_iterator_t; // iterator for nonzero entries in dmat_t /*------------------- Essential Linear Algebra Operations -------------------*/ // H = X*W, (X: m*n, W: n*k, H: m*k) template<typename val_type> dmat_t<val_type>& dmat_x_dmat(const dmat_t<val_type> &X, const dmat_t<val_type> &W, dmat_t<val_type> &H); template<typename val_type> dmat_t<val_type> operator*(const dmat_t<val_type> &X, const dmat_t<val_type> &W); template<typename val_type> dmat_t<val_type>& smat_x_dmat(const smat_t<val_type>& X, const dmat_t<val_type> &W, dmat_t<val_type> &H); template<typename val_type> dmat_t<val_type>& gmat_x_dmat(const gmat_t<val_type>& X, const dmat_t<val_type> &W, dmat_t<val_type> &H); template<typename val_type> dmat_t<val_type> operator*(const smat_t<val_type> &X, const dmat_t<val_type> &W); // H = a*X*W + H0, (X: m*n, W: n*k, H: m*k) template<typename val_type, typename T2> dmat_t<val_type>& dmat_x_dmat(T2 a, const dmat_t<val_type> &X, const dmat_t<val_type> &W, const dmat_t<val_type> &H0, dmat_t<val_type> &H); template<typename val_type, typename T2> dmat_t<val_type>& smat_x_dmat(T2 a, const smat_t<val_type> &X, const dmat_t<val_type> &W, const dmat_t<val_type> &H0, dmat_t<val_type> &H); template<typename val_type, typename T2> dmat_t<val_type>& gmat_x_dmat(T2 a, const gmat_t<val_type> &X, const dmat_t<val_type> &W, const dmat_t<val_type> &H0, dmat_t<val_type> &H); // H = a*X*W + b*H0, (X: m*n, W: n*k, H: m*k) template<typename val_type, typename T2, typename T3> dmat_t<val_type>& dmat_x_dmat(T2 a, const dmat_t<val_type> &X, const dmat_t<val_type> &W, T3 b, const dmat_t<val_type> &H0, dmat_t<val_type> &H); template<typename val_type, typename T2, 
typename T3> dmat_t<val_type>& smat_x_dmat(T2 a, const smat_t<val_type> &X, const dmat_t<val_type> &W, T3 b, const dmat_t<val_type> &H0, dmat_t<val_type> &H); template<typename val_type, typename T2, typename T3> dmat_t<val_type>& gmat_x_dmat(T2 a, const gmat_t<val_type> &X, const dmat_t<val_type> &W, T3 b, const dmat_t<val_type> &H0, dmat_t<val_type> &H); // trace(W'*X*H) template<typename val_type> val_type trace_dmat_T_smat_dmat(const dmat_t<val_type>& W, const smat_t<val_type>& X, const dmat_t<val_type>& H); // trace(W'*diag(D)*H) template<typename val_type> val_type trace_dmat_T_diag_dmat(const dmat_t<val_type>& W, const dvec_t<val_type>& D, const dmat_t<val_type>& H); /*-------------- Essential Linear Algebra Solvers -------------------*/ // Solve AX = B using Cholesky Factorization (A: Positive Definite) template<typename val_type> dmat_t<val_type> ls_solve_chol(const dmat_t<val_type>& A, const dmat_t<val_type>& B, bool A_as_workspace); // Solve Ax = b using Cholesky Factorization (A: Positive Definite) template<typename val_type> dvec_t<val_type> ls_solve_chol(const dmat_t<val_type>& A, const dvec_t<val_type>& b, bool A_as_workspace); // SVD: A = USV' template<typename val_type> void svd(const dmat_t<val_type>& A, dmat_t<val_type>& U, dvec_t<val_type>& S, dmat_t<val_type>& V, bool reduced=true, bool A_as_workspace=false); /*-------------- Vectors & Matrices -------------------*/ // Dense Vector template<typename val_type> class dvec_t : public gvec_t<val_type> { friend class dmat_t<val_type>; private: bool mem_alloc_by_me; void zero_init() { len = 0; buf = NULL; mem_alloc_by_me = false; } public: // size_t len; inherited from gvec_t using gvec_t<val_type>::len; val_type *buf; // Default Constructor dvec_t() { zero_init(); } // Copy Constructor dvec_t(const dvec_t& v) { zero_init(); *this = v; } // Copy Assignment dvec_t& operator=(const dvec_t& other) { if(this == &other) { return *this; } if(other.is_view()) { // view to view copy if(mem_alloc_by_me) 
clear_space(); memcpy(static_cast<void*>(this), &other, sizeof(dvec_t)); } else { // deep to deep copy resize(other.size()); memcpy(buf, other.buf, sizeof(val_type)*len); } return *this; } // View Constructor: allocate space (w/ all 0) if buf == NULL explicit dvec_t(size_t len, val_type *buf=NULL): gvec_t<val_type>(len), mem_alloc_by_me(false), buf(buf) { if(buf == NULL && len != 0) { this->buf = MALLOC(val_type, len); memset(this->buf, 0, sizeof(val_type)*len); mem_alloc_by_me = true; } } // Fill Constructor explicit dvec_t(size_t len, const val_type &x) { zero_init(); resize(len, x); } // Constructor - dense_matrix => dense_vector: // Having the same status (view or deep) as m (the dense matrix). // (expand the matrix using row major) dvec_t(const dmat_t<val_type>& m) { zero_init(); if(m.is_view()) { len = m.rows * m.cols; buf = m.buf; } else { resize(m.rows * m.cols); memcpy(buf, m.buf, sizeof(val_type) * len); } } // Constructor - sparse_vector => dense_vector: // Always deep. dvec_t(const svec_t<val_type>& v) { zero_init(); resize(v.len); memset(buf, 0, sizeof(val_type) * len); for(size_t i = 0; i < v.nnz; i++) buf[v.idx[i]] = v.val[i]; } #if defined(CPP11) // Move Constructor dvec_t(dvec_t&& m) { zero_init(); *this = std::move(m); } // Move Assignment dvec_t& operator=(dvec_t&& other) { if(this == &other) { return *this; } clear_space(); memcpy(static_cast<void*>(this), &other, sizeof(dvec_t)); other.zero_init(); return *this; } #endif ~dvec_t() { clear_space(); } bool is_view() const { return mem_alloc_by_me == false; } bool is_dense() const { return true; } void clear_space() { if(mem_alloc_by_me) { free(buf); } zero_init(); } dvec_t get_view() const { return dvec_t(len, buf); // using view constructor } dvec_t& grow_body() { if(is_view()) { dvec_t tmp_view = *this; // Copy Assignment: View to view this->resize(len); memcpy(buf, tmp_view.buf, sizeof(val_type)*len); } return *this; } // Similar to operator=, but operator= uses view to view, deep to deep. 
// "assign" will directly change the underlying data, no matter view or deep. dvec_t& assign(const dvec_t& other) { assert(len == other.len); return assign((val_type)1.0, other); } // "assign" will directly change the underlying data, no matter view or deep. dvec_t& assign(val_type a, const dvec_t& other) { assert(len == other.len); if(a == val_type(0)) memset(buf, 0, sizeof(val_type)*len); else if(a == val_type(1)) { if(this == &other) return *this; #pragma omp parallel for schedule(static) for(size_t idx = 0; idx < len; idx++) at(idx) = other.at(idx); } else { #pragma omp parallel for schedule(static) for(size_t idx = 0; idx < len; idx++) at(idx) = a*other.at(idx); } return *this; } // resize will always grow body => is_view() becomes false void resize(size_t len_, const val_type &x) { resize(len_); if(x == 0) memset(buf, 0, sizeof(val_type) * len); else { std::fill_n(buf, len_, x); /* for(size_t i = 0; i < len; i++) { buf[i] = x; } */ } } // resize will always grow body => is_view() becomes false // (values in buf are not initialized) void resize(size_t len_) { if(mem_alloc_by_me) buf = REALLOC(buf, val_type, len_); else buf = MALLOC(val_type, len_); mem_alloc_by_me = true; len = len_; } val_type& at(size_t idx) { return buf[idx]; } const val_type& at(size_t idx) const { return buf[idx]; } val_type& operator[](size_t idx) { return buf[idx]; } const val_type& operator[](size_t idx) const { return buf[idx]; } val_type* data() { return buf; } const val_type* data() const { return buf; } val_type& back() { return buf[len - 1]; } const val_type& back() const { return buf[len - 1]; } void print(const char *str="") const { printf("%s dvec_t: len %lu, is_view %d, buf %p\n", str, len, is_view(), buf); for(size_t i = 0; i < len; i ++) printf("%.3f ", buf[i]); puts(""); } }; // Sparse Vector template<typename val_type> class svec_t : public gvec_t<val_type> { friend class smat_t<val_type>; private: bool mem_alloc_by_me; void zero_init() { len = nnz = 0; idx = NULL; val = 
NULL; mem_alloc_by_me = false; } public: // size_t len; inherited from gvec_t using gvec_t<val_type>::len; size_t nnz; unsigned *idx; val_type *val; // Default Constructor svec_t() { zero_init(); } // Copy Constructor svec_t(const svec_t& v) { zero_init(); *this = v; } // Copy Assignment svec_t& operator=(const svec_t& other) { if(this == &other) return *this; if(other.is_view()) { // view to view copy if(mem_alloc_by_me) clear_space(); memcpy(this, &other, sizeof(svec_t)); } else { // deep to deep copy resize(other.len, other.nnz); memcpy(idx, other.idx, sizeof(unsigned) * nnz); memcpy(val, other.val, sizeof(val_type) * nnz); } return *this; } // View Constructor: // If idx != NULL and val != NULL, we create a view copy. (view) // Otherwise, we will allocate nnz space for both idx and val. (deep) explicit svec_t(size_t len, size_t nnz, unsigned *idx, val_type *val) : gvec_t<val_type>(len), mem_alloc_by_me(false), nnz(nnz) { if(nnz == 0){ this->idx = NULL; this->val = NULL; } else { if(idx != NULL && val != NULL) { this->idx = idx; this->val = val; } else { zero_init(); resize(len, nnz); } } } /* (Don't delete yet, so can understand codes not yet adapted elsewhere) // Fill Constructor: // Always deep. // If idx == NULL, we fill this->idx with 0. // If idx != NULL, we still allocate this->idx and copy from idx. explicit svec_t(size_t len, size_t nnz, const unsigned *idx=NULL, const val_type &x=0) { zero_init(); resize(len, nnz, x, idx); } */ // Constructor - sparse_matrix => sparse_vector: // Always deep. (expand using row major) svec_t(const smat_t<val_type>& m) { zero_init(); resize(m.rows * m.cols, m.nnz); for(int i = 0; i < m.rows; i++) { for(int j = m.row_ptr[i]; j < m.row_ptr[i+1]; j++) { idx[j] = m.cols * i + m.col_idx[j]; val[j] = m.val_t[j]; } } } // Constructor - dense_vector => sparse_vector: // Always deep. 
// Conversion constructor: dense vector -> sparse vector.  Always deep.
// Entries with |v[i]| < threshold are treated as zero and dropped.
svec_t(const dvec_t<val_type>& v, double threshold=1e-12) {
    zero_init();
    len = v.size();
    // First pass: count surviving nonzeros so we allocate exactly once.
    for(size_t i = 0; i < v.size(); i++)
        if(fabs((double)v.at(i)) >= threshold)
            nnz ++;
    resize(len, nnz);
    // Second pass: record the index/value pairs of the surviving entries.
    int k = 0;
    for(size_t i = 0; i < v.size(); i++)
        if(fabs((double)v.at(i)) >= threshold) {
            idx[k] = i;
            val[k] = v.at(i);
            k++;
        }
}
#if defined(CPP11)
// Move Constructor
svec_t(svec_t&& m) { zero_init(); *this = std::move(m); }
// Move Assignment: steal the other vector's buffers via a bitwise copy of
// the whole object, then reset `other` so its destructor frees nothing.
svec_t& operator=(svec_t&& other) {
    if(this == &other)
        return *this;
    clear_space();
    memcpy(static_cast<void*>(this), &other, sizeof(svec_t));
    other.zero_init();
    return *this;
}
#endif
~svec_t() { clear_space(); }
size_t get_nnz() const { return nnz; }
// A view does not own idx/val; whoever allocated them frees them.
bool is_view() const { return mem_alloc_by_me == false; }
bool is_sparse() const { return true; }
// Release owned buffers (no-op for views) and return to the empty state.
void clear_space() {
    if(mem_alloc_by_me){
        free(idx);
        free(val);
    }
    zero_init();
}
svec_t get_view() const {
    return svec_t(len, nnz, idx, val); // using view constructor
}
// Turn a view into a deep copy of the data it currently points at.
// A vector that already owns its buffers is returned unchanged.
svec_t& grow_body() {
    if(is_view()) {
        svec_t tmp_view = *this; // Copy Assignment: View to view
        this->resize(len, nnz);  // resize() always allocates fresh owned buffers
        memcpy(idx, tmp_view.idx, sizeof(unsigned)*nnz);
        memcpy(val, tmp_view.val, sizeof(val_type)*nnz);
    }
    return *this;
}
// Similar to operator=, but operator= uses view to view, deep to deep.
// "assign" will directly change the underlying data, no matter view or deep.
// (so we assert that the sparse vector is not a view on sparse matrix)
svec_t& assign(const svec_t& other) {
    assert(len == other.len && nnz == other.nnz);
    return assign((val_type)1.0, other);
}
// "assign" will directly change the underlying data, no matter view or deep.
// (so we assert that the sparse vector is not a view on sparse matrix) svec_t& assign(val_type a, const svec_t& other) { assert(len == other.len && nnz == other.nnz); if(a == val_type(0)) memset(val, 0, sizeof(val_type)*nnz); else if(a == val_type(1) && this == &other) { return *this; } else { #pragma omp parallel for schedule(static) for(int k = 0; k < nnz; k++){ idx[k] = other.idx[k]; val[k] = a*other.val[k]; } } } /* (Don't delete yet, so can understand codes not yet adapted elsewhere) // "resize" will always grow body => is_view() becomes false // (we will copy the whole idx to this->idx) void resize(size_t len_, size_t nnz_, const val_type &x, const unsigned *idx=NULL) { resize(len_, nnz_); if(idx == NULL) memset(this->idx, 0, sizeof(unsigned)*nnz); else memcpy(this->idx, idx, sizeof(unsigned)*nnz); for(size_t k = 0; k < nnz; k++) this->val[k] = x; } */ // "resize" will always grow body => is_view() becomes false // (values in idx, val are not initialized) void resize(size_t len_, size_t nnz_) { if(mem_alloc_by_me){ idx = REALLOC(idx, unsigned, nnz_); val = REALLOC(val, val_type, nnz_); } else{ idx = MALLOC(unsigned, nnz_); val = MALLOC(val_type, nnz_); } mem_alloc_by_me = true; len = len_; nnz = nnz_; } void print(const char *str="") const { printf("%s svec_t: len %lu, nnz %lu, is_view %d\n", str, len, nnz, is_view()); size_t j = 0; for(size_t i = 0; i < len; i++){ if(j < nnz && idx[j] == i){ printf("%.3f ", val[j]); j++; } else printf("0.000 "); } puts(""); } }; // Sparse Dense Vector template<typename val_type> class sdvec_t : public dvec_t<val_type> { friend class smat_t<val_type>; public: using gvec_t<val_type>::len; using dvec_t<val_type>::buf; std::vector<unsigned> nz_idx; std::vector<unsigned char> is_nonzero; size_t nnz; sdvec_t(size_t len=0) : dvec_t<val_type>(len), nz_idx(len), is_nonzero(len), nnz(0){ } size_t get_nnz() const { return nnz; } void resize(size_t len_) { if(len != len_) { dvec_t<val_type>::resize(len_, 0.0); nz_idx.clear(); 
nz_idx.resize(len_); is_nonzero.clear(); is_nonzero.resize(len_); nnz = 0; } } template<typename V> void init_with_svec(const svec_t<V>& svec) { clear(); nnz = svec.nnz; for(size_t t = 0; t < svec.nnz; t++) { size_t idx = svec.idx[t]; V val = svec.val[t]; is_nonzero[idx] = 1; nz_idx[t] = idx; buf[idx] = val; } } template<typename I, typename V> val_type& add_nonzero_at(I idx, V val) { buf[idx] += static_cast<val_type>(val); if(!is_nonzero[idx]) { is_nonzero[idx] = 1; nz_idx[nnz++] = static_cast<unsigned>(idx); } return buf[idx]; } sdvec_t& update_nz_idx() { for(size_t t = 0 ; t < nnz; t++) { if(buf[nz_idx[t]] == static_cast<val_type>(0)) { std::swap(nz_idx[t], nz_idx[nnz - 1]); is_nonzero[nz_idx[t]] = 0; t -= 1; nnz -= 1; } } std::sort(nz_idx.data(), nz_idx.data() + nnz); nnz = std::unique(nz_idx.data(), nz_idx.data() + nnz) - nz_idx.data(); return *this; } void clear() { if(nnz < (len >> 2)) { for(size_t t = 0; t < nnz; t++) { buf[nz_idx[t]] = 0; is_nonzero[nz_idx[t]] = 0; } } else { memset(buf, 0, sizeof(val_type) * len); memset(is_nonzero.data(), 0, sizeof(unsigned char) * len); } nnz = 0; } }; // Dense Matrix template<typename val_type> class dmat_t : public gmat_t<val_type> { friend class dvec_t<val_type>; public: // size_t rows, cols; inherited from gmat_t using gmat_t<val_type>::rows; using gmat_t<val_type>::cols; val_type *buf; static dmat_t rand(rng_t &rng, size_t m, size_t n, double lower=0.0, double upper=1.0, major_t major_type_=default_major) { dmat_t ret(m, n, major_type_); if(lower >= upper) lower = upper; for(size_t idx = 0; idx < m*n; idx++) ret.buf[idx] = (val_type)rng.uniform(lower, upper); return ret; } static dmat_t randn(rng_t &rng, size_t m, size_t n, double mean=0.0, double std=1.0, major_t major_type_=default_major) { dmat_t ret(m, n, major_type_); for(size_t idx = 0; idx < m*n; idx++) ret.buf[idx] = (val_type)rng.normal(mean, std); return ret; } private: bool mem_alloc_by_me; major_t major_type; typedef dvec_t<val_type> vec_t; void 
zero_init() { rows = 0; cols = 0; buf = NULL; major_type = default_major; mem_alloc_by_me = false; } public: // Default Constructor dmat_t() { zero_init(); } // Copy Constructor: // Having the same status (view or deep) as other. // Using the same major_type as other. dmat_t(const dmat_t& other) { zero_init(); *this = other; } // Copy Assignment: // Having the same status (view or deep) as other. // Using the same major_type as other. dmat_t& operator=(const dmat_t& other) { if(this == &other) return *this; if(other.is_view()) { // for view if(mem_alloc_by_me) clear_space(); rows = other.rows; cols = other.cols; buf = other.buf; major_type = other.major_type; mem_alloc_by_me = false; } else { // deep copy if(is_view() || rows!=other.rows || cols!=other.cols || major_type!=other.major_type) { major_type = other.major_type; resize(other.rows, other.cols); } memcpy(buf, other.buf, sizeof(val_type)*rows*cols); } return *this; } // View Constructor: // If buf != NULL, it creates a view on buf. // If buf == NULL, it creates a deep matrix w/ all 0. explicit dmat_t(size_t rows_, size_t cols_, major_t major_type_=default_major, val_type *buf=NULL): gmat_t<val_type>(rows_,cols_), buf(buf), mem_alloc_by_me(false), major_type(major_type_) { if(buf == NULL && rows * cols != 0){ this->buf = MALLOC(val_type, rows * cols); memset(this->buf, 0, sizeof(val_type) * rows * cols); mem_alloc_by_me = true; } } // Fill Constructor: fill in dense_vector based on the major_type. // Always Deep. explicit dmat_t(size_t nr_copy, const dvec_t<val_type>& v, major_t major_type_=default_major) { zero_init(); major_type = major_type_; resize(nr_copy, v); } // Constructor: dense_vector => dense_matrix: // Having the same status (view or deep) as v (the dense vector). dmat_t(const dvec_t<val_type>& v, major_t major_type_=default_major) { zero_init(); major_type = major_type_; if(!v.is_view()) resize(1, v); else { rows = is_rowmajor()? 1: v.size(); cols = is_colmajor()? 
1: v.size(); buf = v.buf; } } // Constructor: sparse_matrix => dense_matrix: // Always deep. template<typename T> dmat_t(const smat_t<T>& sm, major_t major_type_=default_major) { zero_init(); major_type = major_type_; resize(sm.rows, sm.cols); memset(buf, 0, sizeof(val_type)*rows*cols); for(size_t i = 0; i < sm.rows; i++) for(size_t idx = sm.row_ptr[i]; idx != sm.row_ptr[i+1]; idx++) at(i, sm.col_idx[idx]) = sm.val_t[idx]; } // Constructor: identity_matrix => dense_matrix: // Always deep. template<typename T> dmat_t(const eye_t<T>& eye, major_t major_type_=default_major) { zero_init(); major_type = major_type_; resize(eye.rows, eye.cols); memset(buf, 0, sizeof(val_type)*rows*cols); for(size_t i = 0; i < rows; i++) at(i,i) = 1; } #if defined(CPP11) // Move Constructor dmat_t(dmat_t&& m){ zero_init(); *this = std::move(m); } // Move Assignment dmat_t& operator=(dmat_t&& other) { if(this == &other) return *this; clear_space(); rows = other.rows; cols = other.cols; buf = other.buf; mem_alloc_by_me = other.mem_alloc_by_me; major_type = other.major_type; other.zero_init(); return *this; } #endif ~dmat_t() { clear_space(); } bool is_view() const { return mem_alloc_by_me==false; } bool is_dense() const { return true; } bool is_rowmajor() const { return major_type==ROWMAJOR; } bool is_colmajor() const { return major_type==COLMAJOR; } major_t get_major() const { return major_type; } void clear_space() { if(mem_alloc_by_me) { free(buf); } zero_init(); } // The view of the current dense matrix is returned. // (Using View Constructor) dmat_t get_view() const { return dmat_t(rows,cols,major_type,buf); } /* (Not yet deleted, to understand the behavior for unsync code elsewhere) // For ROWMAJOR, the view of a single row is returned. // For COLMAJOR, the view of a single column is returned. 
dvec_t<val_type> get_single_view(const size_t &idx) const {
    if(is_rowmajor())
        return dvec_t<val_type>(cols, &buf[idx * cols]);
    else
        return dvec_t<val_type>(rows, &buf[idx * rows]);
}
*/
// Return a view on the idx-th row of the dense matrix.
// (Can only called when the matrix is ROWMAJOR)
// In release builds (assert compiled out) a wrong major order yields an
// empty vector instead of aborting.
dvec_t<val_type> get_row(const size_t &idx) const {
    assert(is_rowmajor());
    if(is_rowmajor())
        return dvec_t<val_type>(cols, &buf[idx * cols]);
    else
        return dvec_t<val_type>();
}
// Return a view on the idx-th col of the dense matrix.
// (Can only called when the matrix is COLMAJOR)
// In release builds a wrong major order yields an empty vector.
dvec_t<val_type> get_col(const size_t &idx) const {
    assert(is_colmajor());
    if(is_colmajor())
        return dvec_t<val_type>(rows, &buf[idx * rows]);
    else
        return dvec_t<val_type>();
}
// For grow_body():
// Deep, View => Deep.
// (this is the sole purpose of this function)
dmat_t& grow_body() {
    if(is_view()) {
        dmat_t tmp_view = *this;   // still points at the borrowed buffer
        this->resize(rows,cols);   // allocates an owned buffer
        memcpy(buf, tmp_view.buf, sizeof(val_type) * rows * cols);
    }
    return *this;
}
// For transpose():
// It will return a view of the transpose of *this.
// (the major for ret will be the opposite of *this)
dmat_t transpose() const {
    dmat_t ret = get_view();
    ret.to_transpose();
    return ret;
}
// ====================================================
// ================ In-place functions ================
// ====================================================
// For assign():
// Deep => Deep.
// View => View.
// Note: It differents from copy assignment!
// After copy assignment, *this have the same status(View or Deep) as other.
// But assign() directly overwrites the values in buf.
// (it can modify the values it is viewing)
dmat_t& assign(const dmat_t& other) { return assign((val_type)1.0, other); }
// Similar to the above assign(), but now *this = a * other.
template<typename T> dmat_t& assign(T a, const dmat_t& other) { if(a == T(0)) memset(buf, 0, sizeof(val_type)*rows*cols); else if(a == T(1)) { if(this == &other) return *this; if(is_rowmajor()) { #pragma omp parallel for schedule(static) for(size_t r = 0; r < rows; r++) for(size_t c = 0; c < cols; c++) at(r,c) = other.at(r,c); } else { #pragma omp parallel for schedule(static) for(size_t c = 0; c < cols; c++) for(size_t r = 0; r < rows; r++) at(r,c) = other.at(r,c); } } else { if(is_rowmajor()) { #pragma omp parallel for schedule(static) for(size_t r = 0; r < rows; r++) for(size_t c = 0; c < cols; c++) at(r,c) = a * other.at(r,c); } else { #pragma omp parallel for schedule(static) for(size_t c = 0; c < cols; c++) for(size_t r = 0; r < rows; r++) at(r,c) = a * other.at(r,c); } } return *this; } // After to_transpose(): // Deep => Deep. // View => View. // major_type will change. dmat_t& to_transpose() { std::swap(rows,cols); major_type = is_rowmajor()? COLMAJOR: ROWMAJOR; return *this; } // After inv_major(): // View, Deep => Deep. dmat_t& inv_major() { if(rows == cols && !is_view()) { // inplace for deep square matrix for(size_t r = 0; r < rows; r++) for(size_t c = 0; c < r; c++) std::swap(at(r,c),at(c,r)); major_type = is_rowmajor()? COLMAJOR: ROWMAJOR; } else { dmat_t tmp(*this); major_type = is_rowmajor()? COLMAJOR: ROWMAJOR; resize(rows,cols); for(size_t r = 0; r < rows; r++) for(size_t c = 0; c < cols; c++) at(r,c) = tmp.at(r,c); } return *this; } // After to_rowmajor(): // Deep => Deep. // View => View (if originally rowmajor), Deep (if originally colmajor). dmat_t& to_rowmajor() { if(is_colmajor()) inv_major(); return *this; } // After to_colmajor(): // Deep => Deep. // View => View (if originally colmajor), Deep (if originally rowmajor). dmat_t& to_colmajor() { if(is_rowmajor()) inv_major(); return *this; } // After apply_permutation(): // Deep => Deep. // View => View. // apply_permutation() directly overwrites the values in buf. 
// (thus it can modify the values dmat is viewing) dmat_t& apply_permutation(const std::vector<unsigned> &row_perm, const std::vector<unsigned> &col_perm) { return apply_permutation(row_perm.size()==rows? &row_perm[0]: NULL, col_perm.size()==cols? &col_perm[0] : NULL); } dmat_t& apply_permutation(const unsigned *row_perm=NULL, const unsigned *col_perm=NULL) { dmat_t tmp(*this); tmp.grow_body(); for(size_t r = 0; r < rows; r++) for(size_t c = 0; c < cols; c++) at(r,c) = tmp.at(row_perm? row_perm[r]: r, col_perm? col_perm[c]: c); return *this; } template<typename V1, typename V2> dmat_t& apply_scale(const V1 *row_scale, const V2 *col_scale) { if(row_scale != NULL && col_scale != NULL) { for(size_t r = 0; r < rows; r++) { for(size_t c = 0; c < cols; c++) { at(r, c) *= row_scale[r] * col_scale[c]; } } } else if(row_scale != NULL && col_scale == NULL) { for(size_t r = 0; r < rows; r++) { for(size_t c = 0; c < cols; c++) { at(r, c) *= row_scale[r]; } } } else if(row_scale == NULL && col_scale != NULL) { for(size_t r = 0; r < rows; r++) { for(size_t c = 0; c < cols; c++) { at(r, c) *= col_scale[c]; } } } return *this; } template<typename V> dmat_t& apply_scale(const dense_vector<V>& row_scale, const dense_vector<V>& col_scale) { return apply_scale(row_scale.data(), col_scale.data()); } template<typename V> dmat_t& apply_row_scale(const dense_vector<V>& row_scale) { return apply_scale<V, V>(row_scale.data(), NULL); } template<typename V> dmat_t& apply_col_scale(const dense_vector<V>& col_scale) { return apply_scale<V, V>(NULL, col_scale.data()); } // After resize(): // View, Deep => Deep. 
// Fill-resize: rebuild the matrix as nr_copy copies of the vector v.
// ROWMAJOR: nr_copy rows, each a copy of v (rows = nr_copy, cols = v.size()).
// COLMAJOR: nr_copy cols, each a copy of v (rows = v.size(), cols = nr_copy).
// After resize(): View, Deep => Deep.
void resize(size_t nr_copy, const vec_t &v) {
    if(is_rowmajor()) {
        size_t rows_ = nr_copy, cols_ = v.size();
        resize(rows_, cols_);
        size_t unit = sizeof(val_type)*v.size();
        // rows are contiguous in row-major storage: memcpy one row at a time
        for(size_t r = 0; r < rows; r++)
            memcpy(buf + r * cols, v.data(), unit);
    } else {
        size_t rows_ = v.size(), cols_ = nr_copy;
        resize(rows_, cols_);
        size_t unit = sizeof(val_type)*v.size();
        // columns are contiguous in col-major storage
        for(size_t c = 0; c < cols; c++)
            memcpy(buf + c * rows, v.data(), unit);
    }
}
// After resize():
// View, Deep => Deep.
// The buffer is reallocated only when the element count changes; a view
// always gets a freshly allocated (uninitialized) buffer of its own.
dmat_t& resize(size_t rows_, size_t cols_) {
    if(mem_alloc_by_me) {
        if(rows_ == rows && cols_ == cols)
            return *this; // same shape: nothing to do
        if(rows_*cols_ != rows*cols)
            buf = REALLOC(buf, val_type, rows_*cols_);
    } else {
        // *this was a view: never realloc memory we do not own
        buf = MALLOC(val_type, rows_*cols_);
    }
    mem_alloc_by_me = true;
    rows = rows_;
    cols = cols_;
    return *this;
}
// After lazy_resize():
// Deep => Deep.
// View => (If possible) ? View : Deep.
// A view stays a view when only the shape (not the element count or the
// requested major order) changes; otherwise falls back to a real resize.
// NOTE(review): major_t(0) serves as a "no preference" sentinel here --
// assumes no real major_t enumerator equals 0; confirm against the enum.
dmat_t& lazy_resize(size_t rows_, size_t cols_, major_t major_type_=0) {
    if(is_view() && rows_*cols_==rows*cols && (major_type_ == 0 || major_type==major_type_))
        reshape(rows_,cols_);
    else {
        if(major_type_ != 0)
            major_type = major_type_;
        resize(rows_, cols_);
    }
    return *this;
}
// After reshape:
// Deep => Deep.
// View => View.
dmat_t& reshape(size_t rows_, size_t cols_) { assert(rows_*cols_ == rows*cols); if(rows_ != rows || cols != cols) { rows = rows_; cols = cols_; } return *this; } // ==================================================== // ============ Dmat-Vector Multiplication ============ // ==================================================== dvec_t<val_type>& Xv(const dvec_t<val_type>& v, dvec_t<val_type>& Xv, bool addson=0) const { assert(v.size() == this->cols); if(Xv.size() != this->rows) Xv.resize(this->rows, 0.0); for(size_t i = 0; i < rows; i++) { if(addson == 0) Xv[i] = 0; for(size_t j = 0; j < cols; j++) Xv[i] += at(i, j) * v[j]; } return Xv; } dvec_t<val_type>& Xv(const svec_t<val_type>& v, dvec_t<val_type>& Xv, bool addson=0) const { assert(v.size() == this->cols); if(Xv.size() != this->rows) Xv.resize(this->rows, 0.0); for(size_t i = 0; i < rows; i++) { if(addson == 0) Xv[i] = 0; for(size_t p = 0; p < v.get_nnz(); p++) Xv[i] += at(i, v.idx[p]) * v.val[p]; } return Xv; } dvec_t<val_type>& XTu(const dvec_t<val_type>& u, dvec_t<val_type>& XTu, bool addson=0) const { assert(u.size() == this->rows); if(XTu.size() != this->cols) XTu.resize(this->rows, 0.0); for(size_t i = 0; i < cols; i++) { if(addson == 0) XTu[i] = 0; for(size_t j = 0; j < rows; j++) XTu[i] += at(j, i) * u[j]; } return XTu; } dvec_t<val_type>& XTu(const svec_t<val_type>& u, dvec_t<val_type>& XTu, bool addson=0) const { assert(u.size() == this->rows); if(XTu.size() != this->cols) XTu.resize(this->rows, 0.0); for(size_t i = 0; i < cols; i++) { if(addson == 0) XTu[i] = 0; for(size_t p = 0; p < u.get_nnz(); p++) XTu[i] += at(u.idx[p], i) * u.val[p]; } return XTu; } // ==================================================== // ==================== IO Methods ==================== // ==================================================== void load_from_binary(const char *filename, major_t major_type_=default_major) { FILE *fp = fopen(filename, "rb"); if(fp == NULL) { fprintf(stderr, "Error: can't read the file 
(%s)!!\n", filename); return; } load_from_binary(fp, major_type_, filename); fclose(fp); } void load_from_binary(FILE *fp, major_t major_type_=default_major, const char *filename=NULL) { clear_space(); zero_init(); size_t rows_, cols_; if(fread(&rows_, sizeof(size_t), 1, fp) != 1) fprintf(stderr, "Error: wrong input stream in %s.\n", filename); if(fread(&cols_, sizeof(size_t), 1, fp) != 1) fprintf(stderr, "Error: wrong input stream in %s.\n", filename); std::vector<double> tmp(rows_*cols_); if(fread(&tmp[0], sizeof(double), rows_*cols_, fp) != rows_*cols_) fprintf(stderr, "Error: wrong input stream in %s.\n", filename); dmat_t<double> tmp_view(rows_, cols_, ROWMAJOR, &tmp[0]); major_type = major_type_; resize(rows_, cols_); for(size_t r = 0; r < rows; r++) for(size_t c = 0; c < cols; c++) at(r,c) = tmp_view.at(r,c); } void save_binary_to_file(const char *filename) { FILE *fp = fopen(filename, "wb"); if(fp == NULL) { fprintf(stderr,"Error: can't open file %s\n", filename); exit(1); } save_binary_to_file(fp); fclose(fp); } void save_binary_to_file(FILE *fp) { fwrite(&rows, sizeof(size_t), 1, fp); fwrite(&cols, sizeof(size_t), 1, fp); std::vector<double> tmp(rows*cols); size_t idx = 0; for(size_t r = 0; r < rows; r++) for(size_t c = 0; c < cols; c++) tmp[idx++] = (double)at(r,c); fwrite(&tmp[0], sizeof(double), tmp.size(), fp); } val_type& at(size_t r, size_t c) { return is_rowmajor()? buf[r*cols+c] : buf[c*rows+r]; } const val_type& at(size_t r, size_t c) const { return is_rowmajor()? 
buf[r*cols+c] : buf[c*rows+r]; } val_type* data() { return buf; } const val_type* data() const { return buf; } void print_mat(const char *str="", FILE *fp=stdout) const { fprintf(fp, "===>%s<===\n", str); fprintf(fp, "rows %ld cols %ld mem_alloc_by_me %d row_major %d\nbuf %p\n", rows, cols, mem_alloc_by_me, is_rowmajor(), buf); for(size_t r = 0; r < rows; r++) { for(size_t c = 0; c < cols; c++) fprintf(fp, "%.3f ", at(r,c)); fprintf(fp, "\n"); } } }; // Identity Matrix template<typename val_type> class eye_t : public gmat_t<val_type> { public: // size_t rows, cols; inherited from gmat_t using gmat_t<val_type>::rows; using gmat_t<val_type>::cols; eye_t (size_t rows_ = 0) : gmat_t<val_type>(rows_, rows_){} bool is_identity() const { return true; } dvec_t<val_type>& Xv(const dvec_t<val_type>& v, dvec_t<val_type>& Xv, bool addson=0) const { assert(v.size() == this->cols); if(Xv.size() != this->rows) Xv.resize(this->rows, 0.0); return addson? do_axpy(1, v, Xv): Xv.assign(v); } dvec_t<val_type>& Xv(const svec_t<val_type>& v, dvec_t<val_type>& Xv, bool addson=0) const { assert(v.size() == this->cols); if(Xv.size() != this->rows) Xv.resize(this->rows, 0.0); dvec_t<val_type> dv(v); return addson? do_axpy(1, dv, Xv): Xv.assign(dv); } dvec_t<val_type>& XTu(const dvec_t<val_type>& u, dvec_t<val_type>& XTu, bool addson=0) const { assert(u.size() == this->rows); if(XTu.size() != this->cols) XTu.resize(this->rows, 0.0); return addson? do_axpy(1, u, XTu): XTu.assign(u); } dvec_t<val_type>& XTu(const svec_t<val_type>& u, dvec_t<val_type>& XTu, bool addson=0) const { assert(u.size() == this->rows); if(XTu.size() != this->cols) XTu.resize(this->rows, 0.0); dvec_t<val_type> du(u); return addson? 
do_axpy(1, du, XTu): XTu.assign(du); } }; // Sparse Matrix (format CSC & CSR) template<typename val_type> class smat_t : public gmat_t<val_type> { private: bool mem_alloc_by_me; void zero_init() { mem_alloc_by_me = false; val=val_t=NULL; col_ptr=row_ptr=NULL; row_idx=col_idx=NULL; rows=cols=nnz=max_col_nnz=max_row_nnz=0; } void allocate_space(size_t rows_, size_t cols_, size_t nnz_) { if(mem_alloc_by_me) clear_space(); rows = rows_; cols = cols_; nnz = nnz_; val = MALLOC(val_type, nnz); val_t = MALLOC(val_type, nnz); row_idx = MALLOC(unsigned, nnz); col_idx = MALLOC(unsigned, nnz); row_ptr = MALLOC(size_t, rows+1); col_ptr = MALLOC(size_t, cols+1); memset(row_ptr, 0, sizeof(size_t)*(rows+1)); memset(col_ptr, 0, sizeof(size_t)*(cols+1)); mem_alloc_by_me = true; } void csc_to_csr_old() { memset(row_ptr, 0, sizeof(size_t)*(rows+1)); for(size_t idx = 0; idx < nnz; idx++) row_ptr[row_idx[idx]+1]++; for(size_t r = 1; r <= rows; r++) row_ptr[r] += row_ptr[r-1]; for(size_t c = 0; c < cols; c++) { for(size_t idx = col_ptr[c]; idx != col_ptr[c+1]; idx++) { size_t r = (size_t) row_idx[idx]; col_idx[row_ptr[r]] = c; val_t[row_ptr[r]++] = val[idx]; } } for(size_t r = rows; r > 0; r--) row_ptr[r] = row_ptr[r-1]; row_ptr[0] = 0; } void csc_to_csr() { smat_t tmp = this->transpose(); tmp.csr_to_csc(); } void csr_to_csc() { memset(col_ptr, 0, sizeof(size_t) * (cols + 1)); for(size_t idx = 0; idx < nnz; idx++) { col_ptr[col_idx[idx] + 1]++; } for(size_t c = 1; c <= cols; c++) { col_ptr[c] += col_ptr[c - 1]; } for(size_t r = 0; r < rows; r++) { for(size_t idx = row_ptr[r]; idx != row_ptr[r + 1]; idx++) { size_t c = (size_t) col_idx[idx]; row_idx[col_ptr[c]] = r; val[col_ptr[c]++] = val_t[idx]; } } for(size_t c = cols; c > 0; c--) { col_ptr[c] = col_ptr[c - 1]; } col_ptr[0] = 0; } void update_max_nnz() { max_row_nnz = max_col_nnz = 0; for(size_t c = 0; c < cols; c++) max_col_nnz = std::max(max_col_nnz, nnz_of_col(c)); for(size_t r = 0; r < rows; r++) max_row_nnz = std::max(max_row_nnz, 
nnz_of_row(r)); } // Comparator for sorting rates into row/column comopression storage class SparseLess { public: const unsigned *row_idx; const unsigned *col_idx; SparseLess(const unsigned *row_idx_, const unsigned *col_idx_, bool isCSR=true) { row_idx = (isCSR)? row_idx_: col_idx_; col_idx = (isCSR)? col_idx_: row_idx_; } bool operator()(size_t x, size_t y) const { return (row_idx[x] < row_idx[y]) || ((row_idx[x] == row_idx[y]) && (col_idx[x] < col_idx[y])); } }; class SparseEq { public: const unsigned *row_idx; const unsigned *col_idx; SparseEq(const unsigned *row_idx_, const unsigned *col_idx_) { row_idx = row_idx_; col_idx = col_idx_; } bool operator()(size_t x, size_t y) const { return (row_idx[x] == row_idx[y]) && (col_idx[x] == col_idx[y]); } }; public: // static methods static smat_t rand(rng_t &rng, size_t m, size_t n, double sparsity=0.01, double lower=0.0, double upper=1.0) { if(lower > upper) lower = upper; smat_t ret; size_t nnz_ = (size_t)(m*n*sparsity); ret.allocate_space(m, n, nnz_); for(size_t idx = 0; idx < nnz_; idx++) { ret.val_t[idx] = rng.uniform(lower, upper); ret.col_idx[idx] = rng.randint(0, n-1); ret.row_ptr[rng.randint(1, m)] += 1; } for(size_t i = 1; i <= m; i++) ret.row_ptr[i] += ret.row_ptr[i-1]; ret.csr_to_csc(); ret.update_max_nnz(); return ret; } static smat_t randn(rng_t &rng, size_t m, size_t n, double sparsity=0.01, double mean=0.0, double std=1.0) { smat_t ret; size_t nnz_ = (size_t)(m*n*sparsity); ret.allocate_space(m, n, nnz_); for(size_t idx = 0; idx < nnz_; idx++) { ret.val_t[idx] = (val_type)rng.normal(mean, std); ret.col_idx[idx] = rng.randint(0, n-1); ret.row_ptr[rng.randint(1,m)] += 1; } for(size_t i = 1; i <= m; i++) ret.row_ptr[i] += ret.row_ptr[i-1]; ret.csr_to_csc(); ret.update_max_nnz(); return ret; } // rows, cols are inherited from gmat_t using gmat_t<val_type>::rows; using gmat_t<val_type>::cols; size_t nnz, max_row_nnz, max_col_nnz; val_type *val, *val_t; size_t *col_ptr, *row_ptr; unsigned *row_idx, *col_idx; 
// filetypes for loading smat_t enum format_t { TXT=0, PETSc=1, SVMLIGHT=2, BINARY=3, COMPRESSION=4 }; // Default Constructor smat_t() { zero_init(); } // Copy Constructor smat_t(const smat_t& m) { zero_init(); *this = m; } // Copy Assignment // view => view, deep => deep. smat_t& operator=(const smat_t& other) { if(this == &other) { return *this; } if(mem_alloc_by_me) { clear_space(); } if(other.is_view()) { // for view memcpy(static_cast<void*>(this), &other, sizeof(smat_t)); } else { // deep copy *this = other.get_view(); grow_body(); } return *this; } // View Constructor: explicit smat_t(size_t rows, size_t cols, size_t nnz, val_type *val, val_type *val_t, size_t *col_ptr, size_t *row_ptr, unsigned *row_idx, unsigned *col_idx) : gmat_t<val_type>(rows, cols), nnz(nnz), val(val), val_t(val_t), col_ptr(col_ptr), row_ptr(row_ptr), row_idx(row_idx), col_idx(col_idx) { mem_alloc_by_me = false; update_max_nnz(); } // Constructor: dense matrix => sparse matrix smat_t(const dmat_t<val_type>& m) { zero_init(); dmat_iterator_t<val_type> entry_it(m); load_from_iterator(m.rows, m.cols, entry_it.get_nnz(), &entry_it); } // Constructor: identity matrix => sparse matrix smat_t(const eye_t<val_type>& eye) { zero_init(); allocate_space(eye.rows, eye.rows, eye.rows); for(size_t i = 0; i < eye.rows; i++) { row_ptr[i+1] = i+1; col_idx[i] = i; val_t[i] = (val_type)1; } for(size_t j = 0; j < eye.cols; j++) { col_ptr[j+1] = j+1; row_idx[j] = j; val[j] = (val_type)1; } } smat_t(size_t rows_, size_t cols_, size_t nnz_=0){ zero_init(); allocate_space(rows_, cols_, nnz_); } #if defined(CPP11) // Move Constructor smat_t(smat_t&& m){ zero_init(); *this = std::move(m); } // Move Assignment smat_t& operator=(smat_t&& other) { if(this == &other) { return *this; } clear_space(); memcpy(static_cast<void*>(this), &other, sizeof(smat_t)); other.zero_init(); return *this; } #endif // Destructor ~smat_t(){ clear_space(); } size_t get_nnz() const { return nnz; } bool is_view() const { return 
mem_alloc_by_me==false; } bool is_sparse() const { return true; } void clear_space() { if(mem_alloc_by_me) { if(val) { free(val); } if(val_t) { free(val_t); } if(row_ptr) { free(row_ptr); } if(row_idx) { free(row_idx); } if(col_ptr) { free(col_ptr); } if(col_idx) { free(col_idx); } } zero_init(); } smat_t get_view() const { if(is_view()) { return *this; } else { smat_t tmp; memcpy(static_cast<void*>(&tmp), this, sizeof(smat_t)); tmp.mem_alloc_by_me = false; return tmp; } } /* (Don't delete yet, so can understand codes not yet adapted elsewhere) svec_t<val_type> get_single_view(const size_t &idx, const major_t &major=default_major) const { if(major == ROWMAJOR) return svec_t<val_type>(cols, nnz_of_row(idx), &col_idx[row_ptr[idx]], &val_t[row_ptr[idx]], 0); else return svec_t<val_type>(rows, nnz_of_col(idx), &row_idx[col_ptr[idx]], &val[col_ptr[idx]], 0); } */ // For get_row and get_col, a sparse vector view is returned. // Caveat: If you directly modify the returned sparse vector view, // it will change the sparse matrix's underlying data. // And because we store both column and row major format, // the modification on the returned svec_t will only effect one of the format. // Resulting in an inconsistency within the sparse matrix. // Summary: Do not directly modify the returned sparse vector view. // (if the view becomes a deep vector afterwards, then things will be fine.) 
svec_t<val_type> get_row(const size_t &idx) const { return svec_t<val_type>(cols, nnz_of_row(idx), &col_idx[row_ptr[idx]], &val_t[row_ptr[idx]]); } svec_t<val_type> get_col(const size_t &idx) const { return svec_t<val_type>(rows, nnz_of_col(idx), &row_idx[col_ptr[idx]], &val[col_ptr[idx]]); } smat_t& grow_body() { if(is_view()) { smat_t tmp = *this; // a copy of the view col_ptr = MALLOC(size_t, cols + 1); memcpy(col_ptr, tmp.col_ptr, sizeof(size_t) * (cols + 1)); row_idx = MALLOC(unsigned, nnz); memcpy(row_idx, tmp.row_idx, sizeof(unsigned) * nnz); val = MALLOC(val_type, nnz); memcpy(val, tmp.val, sizeof(val_type) * nnz); row_ptr = MALLOC(size_t, rows + 1); memcpy(row_ptr, tmp.row_ptr, sizeof(size_t) * (rows + 1)); col_idx = MALLOC(unsigned, nnz); memcpy(col_idx, tmp.col_idx, sizeof(unsigned) * nnz); val_t = MALLOC(val_type, nnz); memcpy(val_t, tmp.val_t, sizeof(val_type) * nnz); mem_alloc_by_me = true; } return *this; } smat_t transpose() const{ smat_t<val_type> mt = get_view().to_transpose(); return mt; } // ==================================================== // ================ In-place functions ================ // ==================================================== smat_t& to_transpose() { std::swap(rows,cols); std::swap(val,val_t); std::swap(row_ptr,col_ptr); std::swap(row_idx,col_idx); std::swap(max_col_nnz, max_row_nnz); return *this; } smat_t& apply_permutation(const std::vector<unsigned> &row_perm, const std::vector<unsigned> &col_perm) { return apply_permutation(row_perm.size()==rows? &row_perm[0]: NULL, col_perm.size()==cols? 
&col_perm[0]: NULL); } smat_t& apply_permutation(const unsigned *row_perm=NULL, const unsigned *col_perm=NULL) { if(row_perm != NULL) { for(size_t idx = 0; idx < nnz; idx++) { row_idx[idx] = row_perm[row_idx[idx]]; } csc_to_csr(); csr_to_csc(); } if(col_perm != NULL) { for(size_t idx = 0; idx < nnz; idx++) { col_idx[idx] = col_perm[col_idx[idx]]; } csr_to_csc(); csc_to_csr(); } return *this; } template<typename V1, typename V2> smat_t& apply_scale(const V1 *row_scale, const V2 *col_scale) { if(row_scale != NULL && col_scale != NULL) { for(size_t r = 0; r < rows; r++) { val_type alpha = row_scale[r]; for(size_t idx = row_ptr[r]; idx != row_ptr[r + 1]; idx++) { val_t[idx] *= alpha * col_scale[col_idx[idx]]; } } for(size_t c = 0; c < cols; c++) { val_type alpha = col_scale[c]; for(size_t idx = col_ptr[c]; idx != col_ptr[c + 1]; idx++) { val[idx] *= alpha * row_scale[row_idx[idx]]; } } } else if(row_scale != NULL && col_scale == NULL) { for(size_t r = 0; r < rows; r++) { if(nnz_of_row(r)) { for(size_t idx = row_ptr[r]; idx < row_ptr[r + 1]; idx++) { val_t[idx] *= row_scale[r]; } } } for(size_t idx = 0; idx < nnz; idx++) { val[idx] *= row_scale[row_idx[idx]]; } } else if(row_scale == NULL && col_scale != NULL) { for(size_t c = 0; c < cols; c++) { if(nnz_of_col(c)) { for(size_t idx = col_ptr[c]; idx < col_ptr[c + 1]; idx++) { val[idx] *= col_scale[c]; } } } for(size_t idx = 0; idx < nnz; idx++) { val_t[idx] *= col_scale[col_idx[idx]]; } } return *this; } template<typename V1, typename V2> smat_t& apply_scale(const dvec_t<V1> &row_scale, const dvec_t<V2> &col_scale) { return apply_scale(row_scale.data(), col_scale.data()); } template<typename V> smat_t& apply_row_scale(const dvec_t<V> &row_scale) { return apply_scale<V, V>(row_scale.data(), NULL); } template<typename V> smat_t& apply_col_scale(const dvec_t<V> &col_scale) { return apply_scale<V, V>(NULL, col_scale.data()); } smat_t row_subset(const std::vector<unsigned> &subset) const { return row_subset(&subset[0], 
(int)subset.size()); } smat_t row_subset(const unsigned *subset, int subset_size) const { smat_subset_iterator_t<val_type> it(*this, subset, subset_size, ROWMAJOR); smat_t<val_type> sub_smat; sub_smat.load_from_iterator(subset_size, cols, it.get_nnz(), &it); return sub_smat; } smat_t col_subset(const std::vector<unsigned> &subset) const { return col_subset(&subset[0], (int)subset.size()); } smat_t col_subset(const unsigned *subset, int subset_size) const { smat_subset_iterator_t<val_type> it(*this, subset, subset_size, COLMAJOR); smat_t<val_type> sub_smat; sub_smat.load_from_iterator(rows, subset_size, it.get_nnz(), &it); return sub_smat; } size_t nnz_of_row(unsigned i) const { return (row_ptr[i+1] - row_ptr[i]); } size_t nnz_of_col(unsigned i) const { return (col_ptr[i+1] - col_ptr[i]); } // ==================================================== // ============ Smat-Vector Multiplication ============ // ==================================================== val_type* Xv(const val_type* v, val_type* Xv, bool addson=0) const { for(size_t i = 0; i < rows; i++) { if(addson == 0) Xv[i] = 0; for(size_t idx = row_ptr[i]; idx < row_ptr[i+1]; idx++) Xv[i] += val_t[idx] * v[col_idx[idx]]; } return Xv; } dvec_t<val_type>& Xv(const dvec_t<val_type>& v, dvec_t<val_type>& Xv, bool addson=0) const { assert(v.size() == this->cols); if(Xv.size() != this->rows) Xv.resize(this->rows, 0.0); this->Xv(v.data(), Xv.data(), addson); return Xv; } dvec_t<val_type>& Xv(const svec_t<val_type>& v, dvec_t<val_type>& Xv, bool addson=0) const { assert(v.size() == this->cols); if(Xv.size() != this->rows) Xv.resize(this->rows, 0.0); if(addson == 0) { for(size_t i = 0; i < Xv.size(); i++) { Xv[i] = 0; } } for(size_t k = 0; k < v.nnz; k++) { size_t col_idx = static_cast<size_t>(v.idx[k]); const val_type& alpha = v.val[k]; do_axpy(alpha, get_col(col_idx), Xv); } /* slower implementatoin dvec_t<val_type> dv(v); this->Xv(dv.data(), Xv.data(), addson); */ return Xv; } val_type* XTu(const val_type* u, 
val_type* XTu, bool addson=0) const { for(size_t i = 0; i < cols; i++) { if(addson == 0) XTu[i] = 0; for(size_t idx = col_ptr[i]; idx < col_ptr[i+1]; idx++) XTu[i] += val[idx] * u[row_idx[idx]]; } return XTu; } dvec_t<val_type>& XTu(const dvec_t<val_type>& u, dvec_t<val_type>& XTu, bool addson=0) const { assert(u.size() == this->rows); if(XTu.size() != this->cols) XTu.resize(this->rows, 0.0); this->XTu(u.data(), XTu.data(), addson); return XTu; } dvec_t<val_type>& XTu(const svec_t<val_type>& u, dvec_t<val_type>& XTu, bool addson=0) const { assert(u.size() == this->rows); if(XTu.size() != this->cols) XTu.resize(this->rows, 0.0); if(addson == 0) { for(size_t i = 0; i < XTu.size(); i++) { XTu[i] = 0; } } for(size_t k = 0; k < u.nnz; k++) { size_t row_idx = static_cast<size_t>(u.idx[k]); const val_type& alpha = u.val[k]; do_axpy(alpha, get_row(row_idx), XTu); } /* slower implementatoin dvec_t<val_type> du(u); this->XTu(du.data(), XTu.data(), addson); */ return XTu; } // ==================================================== // ==================== IO Methods ==================== // ==================================================== // The entry_iterator can be in arbitrary order (sort+unique is applied). 
void load_from_iterator(size_t _rows, size_t _cols, size_t _nnz, entry_iterator_t<val_type>* entry_it) { clear_space(); // clear any pre-allocated space in case of memory leak rows =_rows, cols=_cols, nnz=_nnz; allocate_space(rows,cols,nnz); // a trick to utilize the space that have been allocated std::vector<size_t> perm(nnz); unsigned *tmp_row_idx = col_idx; unsigned *tmp_col_idx = row_idx; val_type *tmp_val = val; for(size_t idx = 0; idx < nnz; idx++){ entry_t<val_type> rate = entry_it->next(); tmp_row_idx[idx] = rate.i; tmp_col_idx[idx] = rate.j; tmp_val[idx] = rate.v; perm[idx] = idx; } // TODO can change to O(n) method // sort entries into row-majored ordering std::sort(perm.begin(), perm.end(), SparseLess(tmp_row_idx, tmp_col_idx)); // add up the values in the same position (i, j) size_t cur_nnz = 0; for(size_t idx = 0; idx < nnz; idx++) { if(cur_nnz > 0 && tmp_row_idx[perm[idx]] == tmp_row_idx[perm[cur_nnz-1]] && tmp_col_idx[perm[idx]] == tmp_col_idx[perm[cur_nnz-1]]) tmp_val[perm[cur_nnz-1]] += tmp_val[perm[idx]]; else { tmp_row_idx[perm[cur_nnz]] = tmp_row_idx[perm[idx]]; tmp_col_idx[perm[cur_nnz]] = tmp_col_idx[perm[idx]]; tmp_val[perm[cur_nnz]] = tmp_val[perm[idx]]; cur_nnz ++; } } nnz = cur_nnz; for(size_t idx = 0; idx < nnz; idx++){ row_ptr[tmp_row_idx[perm[idx]] + 1] ++; col_ptr[tmp_col_idx[perm[idx]] + 1] ++; } // Generate CSR format for(size_t idx = 0; idx < nnz; idx++) { val_t[idx] = tmp_val[perm[idx]]; col_idx[idx] = tmp_col_idx[perm[idx]]; } // Calculate nnz for each row and col max_row_nnz = max_col_nnz = 0; for(size_t r = 1; r <= rows; r++) { max_row_nnz = std::max(max_row_nnz, row_ptr[r]); row_ptr[r] += row_ptr[r-1]; } for(size_t c = 1; c <= cols; c++) { max_col_nnz = std::max(max_col_nnz, col_ptr[c]); col_ptr[c] += col_ptr[c-1]; } // Transpose CSR into CSC matrix for(size_t r = 0; r < rows; r++){ for(size_t idx = row_ptr[r]; idx < row_ptr[r+1]; idx++){ size_t c = (size_t) col_idx[idx]; row_idx[col_ptr[c]] = r; val[col_ptr[c]++] = val_t[idx]; 
} } for(size_t c = cols; c > 0; c--) col_ptr[c] = col_ptr[c-1]; col_ptr[0] = 0; } void load(size_t _rows, size_t _cols, size_t _nnz, const char *filename, format_t fmt) { if(fmt == smat_t<val_type>::TXT) { file_iterator_t<val_type> entry_it(_nnz, filename); load_from_iterator(_rows, _cols, _nnz, &entry_it); } else if(fmt == smat_t<val_type>::PETSc) { load_from_PETSc(filename); } else if(fmt == smat_t<val_type>::SVMLIGHT) { load_from_svmlight(filename); } else { fprintf(stderr, "Error: filetype %d not supported\n", fmt); return; } } void load_from_svmlight(const char *filename, size_t nr_skips=1, bool zero_based=false, double append_bias=-1.0) { svmlight_file_iterator_t<val_type> entry_it(filename, nr_skips, zero_based, append_bias); load_from_iterator(entry_it.get_rows(), entry_it.get_cols(), entry_it.get_nnz(), &entry_it); } void load_from_PETSc(const char *filename) { FILE *fp = fopen(filename, "rb"); if(fp == NULL) { fprintf(stderr, "Error: can't read the file (%s)!!\n", filename); return; } load_from_PETSc(fp, filename); fclose(fp); } void load_from_PETSc(FILE *fp, const char *filename=NULL) { clear_space(); // clear any pre-allocated space in case of memory leak const int UNSIGNED_FILE = 1211216, LONG_FILE = 1015; int32_t int_buf[3]; size_t headersize = 0; headersize += sizeof(int)*fread(int_buf, sizeof(int), 3, fp); int filetype = int_buf[0]; rows = (size_t) int_buf[1]; cols = (size_t) int_buf[2]; if(filetype == UNSIGNED_FILE) { headersize += sizeof(int)*fread(int_buf, sizeof(int32_t), 1, fp); nnz = (size_t) int_buf[0]; } else if (filetype == LONG_FILE){ headersize += sizeof(size_t)*fread(&nnz, sizeof(int64_t), 1, fp); } else { fprintf(stderr, "Error: wrong PETSc format in %s.\n", filename); } allocate_space(rows,cols,nnz); // load CSR from the binary PETSc format { // read row_ptr std::vector<int32_t> nnz_row(rows); headersize += sizeof(int32_t)*fread(&nnz_row[0], sizeof(int32_t), rows, fp); row_ptr[0] = 0; for(size_t r = 1; r <= rows; r++) row_ptr[r] = 
row_ptr[r-1] + nnz_row[r-1]; // read col_idx headersize += sizeof(int)*fread(&col_idx[0], sizeof(unsigned), nnz, fp); // read val_t const size_t chunksize = 1024; double buf[chunksize]; size_t idx = 0; while(idx + chunksize < nnz) { headersize += sizeof(double)*fread(&buf[0], sizeof(double), chunksize, fp); for(size_t i = 0; i < chunksize; i++) val_t[idx+i] = (val_type) buf[i]; idx += chunksize; } size_t remaining = nnz - idx; headersize += sizeof(double)*fread(&buf[0], sizeof(double), remaining, fp); for(size_t i = 0; i < remaining; i++) val_t[idx+i] = (val_type) buf[i]; } csr_to_csc(); update_max_nnz(); } void save_PETSc_to_file(const char *filename) const { FILE *fp = fopen(filename, "wb"); if(fp == NULL) { fprintf(stderr,"Error: can't open file %s\n", filename); exit(1); } save_PETSc_to_file(fp); } void save_PETSc_to_file(FILE *fp) const { const int UNSIGNED_FILE = 1211216, LONG_FILE = 1015; int32_t int_buf[3] = {(int32_t)LONG_FILE, (int32_t)rows, (int32_t)cols}; std::vector<int32_t> nnz_row(rows); for(size_t r = 0; r < rows; r++) nnz_row[r] = (int)nnz_of_row(r); fwrite(&int_buf[0], sizeof(int32_t), 3, fp); fwrite(&nnz, sizeof(size_t), 1, fp); fwrite(&nnz_row[0], sizeof(int32_t), rows, fp); fwrite(&col_idx[0], sizeof(unsigned), nnz, fp); // the following part == fwrite(val_t, sizeof(double), nnz, fp); const size_t chunksize = 1024; double buf[chunksize]; size_t idx = 0; while(idx + chunksize < nnz) { for(size_t i = 0; i < chunksize; i++) buf[i] = (double) val_t[idx+i]; fwrite(&buf[0], sizeof(double), chunksize, fp); idx += chunksize; } size_t remaining = nnz - idx; for(size_t i = 0; i < remaining; i++) buf[i] = (double) val_t[idx+i]; fwrite(&buf[0], sizeof(double), remaining, fp); } val_type get_global_mean() const { val_type sum=0; for(size_t idx = 0; idx < nnz; idx++) sum += val[idx]; return sum / (val_type)nnz; } void remove_bias(val_type bias=0) { if(bias) { for(size_t idx = 0; idx < nnz; idx++) { val[idx] -= bias; val_t[idx] -= bias; } } } void 
print_mat(const char *str="", FILE *fp=stdout) const { fprintf(fp, "===>%s<===\n", str); fprintf(fp, "rows %lu, cols %lu, nnz %lu\n", rows, cols, nnz); fprintf(fp, "col_ptr, row_idx, val = %p, %p, %p\n", col_ptr, row_idx, val); fprintf(fp, "row_ptr, col_idx, val_t = %p, %p, %p\n", row_ptr, col_idx, val_t); fprintf(fp, "mem_alloc_by_me = %d\n", mem_alloc_by_me); fprintf(fp, "Matrix:\n"); for(size_t i = 0; i < rows; i++) { size_t it = row_ptr[i]; for(size_t j = 0; j < cols; j++) { if(it < row_ptr[i+1] && col_idx[it] == j) { fprintf(fp, "%.3f ", val_t[it]); it ++; } else fprintf(fp, "0.000 "); } fprintf(fp, "\n"); } fprintf(fp, "Matrix^T:\n"); for(size_t i = 0; i < cols; i++) { size_t it = col_ptr[i]; for(size_t j = 0; j < rows; j++) { if(it < col_ptr[i+1] && row_idx[it] == j) { fprintf(fp, "%.3f ", val[it]); it ++; } else fprintf(fp, "0.000 "); } fprintf(fp, "\n"); } } // =========================================== // ========= Friend Functions/Classes ======== // =========================================== template<typename VX, typename VY, typename VZ> friend smat_t<VZ>& smat_x_smat(const smat_t<VX> &X, const smat_t<VY> &Y, smat_t<VZ> &Z, int threads); template<typename VX, typename VY, typename VZ> friend smat_t<VZ>& smat_x_smat_single_thread(const smat_t<VX> &X, const smat_t<VY> &Y, smat_t<VZ> &Z); }; #ifdef __cplusplus extern "C" { #endif // rows, cols, nnz, &row_ptr, &col_ptr, &val_ptr typedef void(*py_coo_allocator_t)(uint64_t, uint64_t, uint64_t, void*, void*, void*); #ifdef __cplusplus } // extern #endif template<typename val_type> struct coo_t { size_t rows; size_t cols; std::vector<size_t> row_idx; std::vector<size_t> col_idx; std::vector<val_type> val; coo_t(size_t rows=0, size_t cols=0): rows(rows), cols(cols) {} size_t nnz() const { return val.size(); } void reshape(size_t rows_, size_t cols_) { rows = rows_; cols = cols_; clear(); } void clear() { row_idx.clear(); col_idx.clear(); val.clear(); } void reserve(size_t capacity) { 
row_idx.reserve(capacity); col_idx.reserve(capacity); val.reserve(capacity); } void swap(coo_t& other) { std::swap(rows, other.rows); std::swap(cols, other.cols); row_idx.swap(other.row_idx); col_idx.swap(other.col_idx); val.swap(other.val); } void extends(coo_t& other) { std::copy(other.row_idx.begin(), other.row_idx.end(), std::back_inserter(row_idx)); std::copy(other.col_idx.begin(), other.col_idx.end(), std::back_inserter(col_idx)); std::copy(other.val.begin(), other.val.end(), std::back_inserter(val)); } template<typename I, typename V> void push_back(I i, I j, V x, double threshold=0) { if(fabs(x) >= threshold) { row_idx.push_back(i); col_idx.push_back(j); val.push_back(x); } } void create_smat(smat_t<val_type>& X) { coo_iterator_t<val_type> it(nnz(), row_idx.data(), col_idx.data(), val.data()); X.load_from_iterator(rows, cols, nnz(), &it); } void create_pycoo(const py_coo_allocator_t& alloc) const { uint64_t* row_ptr=NULL; uint64_t* col_ptr=NULL; val_type* val_ptr=NULL; alloc(rows, cols, nnz(), &row_ptr, &col_ptr, &val_ptr); for(size_t i = 0; i < nnz(); i++) { row_ptr[i] = row_idx[i]; col_ptr[i] = col_idx[i]; val_ptr[i] = val[i]; } } }; /*-------------- Iterators -------------------*/ template<typename val_type> class entry_t{ public: unsigned i, j; val_type v, weight; entry_t(int _i=0, int _j=0, val_type _v=0, val_type _w=1.0): i(_i), j(_j), v(_v), weight(_w){} }; template<typename val_type> class entry_iterator_t { public: // Number of elements left to iterate size_t nnz; // When no next entry, return entry_t(0, 0, 0, -1); virtual entry_t<val_type> next() = 0; size_t get_nnz() const { return nnz; } }; #define MAXLINE 10240 // Iterator for files with (i,j,v) tuples template<typename val_type> class file_iterator_t: public entry_iterator_t<val_type> { public: using entry_iterator_t<val_type>::nnz; file_iterator_t(size_t nnz_, const char* filename, size_t start_pos=0) { nnz = nnz_; fp = fopen(filename,"rb"); if(fp == NULL) { fprintf(stderr, "Error: cannot 
read the file (%s)!!\n", filename); return; } fseek(fp, start_pos, SEEK_SET); } ~file_iterator_t(){ if (fp) fclose(fp); } entry_t<val_type> next() { const int base10 = 10; if(nnz > 0) { --nnz; if(fgets(&line[0], MAXLINE, fp)==NULL) fprintf(stderr, "Error: reading error !!\n"); char *head_ptr = &line[0]; size_t i = strtol(head_ptr, &head_ptr, base10); size_t j = strtol(head_ptr, &head_ptr, base10); double v = strtod(head_ptr, &head_ptr); return entry_t<val_type>(i - 1, j - 1, (val_type)v); } else { // No more to iterate return entry_t<val_type>(0, 0, 0, -1); } } private: FILE *fp; char line[MAXLINE]; }; template<class val_type> class svmlight_file_iterator_t : public entry_iterator_t<val_type> { public: using entry_iterator_t<val_type>::nnz; svmlight_file_iterator_t( const char* filename, size_t nr_skips=1, bool zero_based=false, double append_bias=-1.0) { std::ifstream fs; std::string line, kv; const int base10 = 10; fs.open(filename, std::ios::in); if(!fs.is_open()) { std::cout << "Unable to open" << filename << std::endl; exit(-1); } I.clear(); J.clear(); V.clear(); nr_rows = nr_cols = 0; while(std::getline(fs, line)) { if(fs.eof()) { break; } std::stringstream line_ss; line_ss.str(line); if(nr_skips != 0) { // skip label part; for(size_t i = 0; i < nr_skips; i++) { line_ss >> kv; } } size_t row_idx = nr_rows; while(line_ss >> kv) { char *head_ptr = const_cast<char*>(kv.c_str()); size_t key = strtol(head_ptr, &head_ptr, base10); head_ptr++; // advancing for the ":" seperator val_type val = static_cast<val_type>(strtod(head_ptr, &head_ptr)); size_t col_idx = (zero_based)? 
key : (key - 1); nr_cols = std::max(nr_cols, col_idx + 1); I.push_back(row_idx); J.push_back(col_idx); V.push_back(val); } nr_rows += 1; } if(append_bias > 0) { size_t col_idx = nr_cols; nr_cols += 1; val_type val = static_cast<val_type>(append_bias); for(size_t row_idx = 0; row_idx < nr_rows; row_idx++) { I.push_back(row_idx); J.push_back(col_idx); V.push_back(val); } } idx = 0; nnz = I.size(); } entry_t<val_type> next() { if(nnz > 0) { ++idx; --nnz; return entry_t<val_type>(I[idx - 1], J[idx - 1], V[idx - 1]); } else { return entry_t<val_type>(0, 0, 0, -1); } } size_t get_rows() const { return nr_rows; } size_t get_cols() const { return nr_cols; } private: size_t nr_rows, nr_cols; size_t idx; std::vector<size_t> I, J; std::vector<val_type> V; }; // Iterator for three vectors (I, J, V) template<typename val_type> class coo_iterator_t: public entry_iterator_t<val_type> { public: using entry_iterator_t<val_type>::nnz; coo_iterator_t(const std::vector<size_t> _I, const std::vector<size_t> _J, const std::vector<val_type> _V){ nnz = std::min(std::min(_I.size(), _J.size()), _V.size()); idx = 0; I = &_I[0]; J = &_J[0]; V = &_V[0]; } coo_iterator_t(const size_t _nnz, const size_t* _I, const size_t* _J, const val_type* _V){ nnz = _nnz; idx = 0; I = _I; J = _J; V = _V; } ~coo_iterator_t(){ } entry_t<val_type> next() { if(nnz > 0) { ++idx; --nnz; return entry_t<val_type>(I[idx - 1], J[idx - 1], V[idx - 1]); } else { return entry_t<val_type>(0, 0, 0, -1); } } private: int idx; const size_t *I, *J; const val_type *V; }; // Iterator for sparse matrix template<typename val_type> class smat_iterator_t: public entry_iterator_t<val_type> { public: using entry_iterator_t<val_type>::nnz; smat_iterator_t(const smat_t<val_type>& M, major_t major = ROWMAJOR) { nnz = M.nnz; col_idx = (major == ROWMAJOR)? M.col_idx: M.row_idx; row_ptr = (major == ROWMAJOR)? M.row_ptr: M.col_ptr; val_t = (major == ROWMAJOR)? M.val_t: M.val; rows = (major==ROWMAJOR)? 
M.rows: M.cols; cols = (major==ROWMAJOR)? M.cols: M.rows; cur_idx = cur_row = 0; } ~smat_iterator_t() {} entry_t<val_type> next() { if (nnz > 0) nnz--; else return entry_t<val_type>(0, 0, 0, -1); while (cur_idx >= row_ptr[cur_row+1]) cur_row++; entry_t<val_type> ret(cur_row, col_idx[cur_idx], val_t[cur_idx]); cur_idx++; return ret; } private: unsigned *col_idx; size_t *row_ptr; val_type *val_t; size_t rows, cols, cur_idx; size_t cur_row; }; // Iterator for a subset of sparse matrix template<typename val_type> class smat_subset_iterator_t: public entry_iterator_t<val_type> { public: using entry_iterator_t<val_type>::nnz; // When ROWMAJOR (COLMAJOR) is used, we sample several rows (columns) according to the order in subset_. // If remapping = true, then we are using the corresponding index (i, j) in the submatrix. // If remapping = false, then we are using the index (i, j) in the original matrix. smat_subset_iterator_t(const smat_t<val_type>& M, const unsigned *subset_, size_t size, major_t major_ = ROWMAJOR, bool remapping_=true) { major = major_; remapping = remapping_; cr_idx = (major == ROWMAJOR)? M.col_idx: M.row_idx; rc_ptr = (major == ROWMAJOR)? M.row_ptr: M.col_ptr; val_t = (major == ROWMAJOR)? M.val_t: M.val; rows = (major==ROWMAJOR)? (remapping? size: M.rows): M.rows; cols = (major==ROWMAJOR)? M.cols: (remapping? size: M.cols); subset.resize(size); nnz = 0; for(size_t i = 0; i < size; i++) { unsigned idx = subset_[i]; subset[i] = idx; nnz += (major == ROWMAJOR)? M.nnz_of_row(idx): M.nnz_of_col(idx); } cur_rc = 0; cur_idx = rc_ptr[subset[cur_rc]]; } smat_subset_iterator_t(const smat_t<val_type>& M, const std::vector<unsigned> &subset_, major_t major_ = ROWMAJOR, bool remapping_=true) { major = major_; remapping = remapping_; cr_idx = (major == ROWMAJOR)? M.col_idx: M.row_idx; rc_ptr = (major == ROWMAJOR)? M.row_ptr: M.col_ptr; val_t = (major == ROWMAJOR)? M.val_t: M.val; rows = (major==ROWMAJOR)? (remapping? 
subset_.size(): M.rows): M.rows; cols = (major==ROWMAJOR)? M.cols: (remapping? subset_.size(): M.cols); subset.resize(subset_.size()); nnz = 0; for(size_t i = 0; i < subset_.size(); i++) { unsigned idx = subset_[i]; subset[i] = idx; nnz += (major == ROWMAJOR)? M.nnz_of_row(idx): M.nnz_of_col(idx); } cur_rc = 0; cur_idx = rc_ptr[subset[cur_rc]]; } ~smat_subset_iterator_t() {} size_t get_rows() { return rows; } size_t get_cols() { return cols; } entry_t<val_type> next() { if (nnz > 0) nnz--; else return entry_t<val_type>(0,0,0, -1); while (cur_idx >= rc_ptr[subset[cur_rc]+1]) { cur_rc++; cur_idx = rc_ptr[subset[cur_rc]]; } entry_t<val_type> ret_rowwise(remapping? cur_rc: subset[cur_rc], cr_idx[cur_idx], val_t[cur_idx]); entry_t<val_type> ret_colwise(cr_idx[cur_idx], remapping? cur_rc: subset[cur_rc], val_t[cur_idx]); cur_idx++; return major==ROWMAJOR? ret_rowwise: ret_colwise; } private: size_t rows, cols; std::vector<unsigned>subset; unsigned *cr_idx; size_t *rc_ptr; val_type *val_t; size_t cur_rc, cur_idx; major_t major; bool remapping; }; // Iterator for a dense matrix template<typename val_type> class dmat_iterator_t: public entry_iterator_t<val_type> { public: using entry_iterator_t<val_type>::nnz; dmat_iterator_t(const dmat_t<val_type>& M, double threshold=1e-12) : M(M), rows(M.rows), cols(M.cols), threshold(fabs(threshold)) { cur_row = 0; cur_col = 0; nnz = 0; bool find_firstnz = true; for(size_t i = 0; i < rows; i++) for(size_t j = 0; j < cols; j++) if(fabs((double)M.at(i,j)) >= threshold) { if(find_firstnz) { cur_row = i; cur_col = j; find_firstnz = false; } nnz++; } } ~dmat_iterator_t() {} entry_t<val_type> next() { if (nnz > 0) nnz--; else return entry_t<val_type>(0,0,0, -1); entry_t<val_type> entry(cur_row, cur_col, M.at(cur_row, cur_col)); do { cur_col ++; if(cur_col == cols) { cur_row ++; cur_col = 0; } } while(fabs((double)M.at(cur_row, cur_col)) < threshold); return entry; } private: const dmat_t<val_type>& M; size_t rows, cols, cur_row, cur_col; 
double threshold; }; /*-------------- Implementation of Linear Algebra Operations --------------*/ // Lapack and Blas support #ifdef _WIN32 #define ddot_ ddot #define sdot_ sdot #define daxpy_ daxpy #define saxpy_ saxpy #define dcopy_ dcopy #define scopy_ scopy #define dgemm_ dgemm #define sgemm_ sgemm #define dposv_ dposv #define sposv_ sposv #define dgesdd_ dgesdd #define sgesdd_ sgesdd #endif extern "C" { double ddot_(ptrdiff_t *, double *, ptrdiff_t *, double *, ptrdiff_t *); float sdot_(ptrdiff_t *, float *, ptrdiff_t *, float *, ptrdiff_t *); ptrdiff_t dscal_(ptrdiff_t *, double *, double *, ptrdiff_t *); ptrdiff_t sscal_(ptrdiff_t *, float *, float *, ptrdiff_t *); ptrdiff_t daxpy_(ptrdiff_t *, double *, double *, ptrdiff_t *, double *, ptrdiff_t *); ptrdiff_t saxpy_(ptrdiff_t *, float *, float *, ptrdiff_t *, float *, ptrdiff_t *); double dcopy_(ptrdiff_t *, double *, ptrdiff_t *, double *, ptrdiff_t *); float scopy_(ptrdiff_t *, float *, ptrdiff_t *, float *, ptrdiff_t *); void dgemm_(char *transa, char *transb, ptrdiff_t *m, ptrdiff_t *n, ptrdiff_t *k, double *alpha, double *a, ptrdiff_t *lda, double *b, ptrdiff_t *ldb, double *beta, double *c, ptrdiff_t *ldc); void sgemm_(char *transa, char *transb, ptrdiff_t *m, ptrdiff_t *n, ptrdiff_t *k, float *alpha, float *a, ptrdiff_t *lda, float *b, ptrdiff_t *ldb, float *beta, float *c, ptrdiff_t *ldc); int dposv_(char *uplo, ptrdiff_t *n, ptrdiff_t *nrhs, double *a, ptrdiff_t *lda, double *b, ptrdiff_t *ldb, ptrdiff_t *info); int sposv_(char *uplo, ptrdiff_t *n, ptrdiff_t *nrhs, float *a, ptrdiff_t *lda, float *b, ptrdiff_t *ldb, ptrdiff_t *info); void dgesdd_(char* jobz, ptrdiff_t* m, ptrdiff_t* n, double* a, ptrdiff_t* lda, double* s, double* u, ptrdiff_t* ldu, double* vt, ptrdiff_t* ldvt, double* work, ptrdiff_t* lwork, ptrdiff_t* iwork, ptrdiff_t* info); void sgesdd_(char* jobz, ptrdiff_t* m, ptrdiff_t* n, float* a, ptrdiff_t* lda, float* s, float* u, ptrdiff_t* ldu, float* vt, ptrdiff_t* ldvt, float* work, 
ptrdiff_t* lwork, ptrdiff_t* iwork, ptrdiff_t* info); } template<typename val_type> val_type dot(ptrdiff_t *, val_type *, ptrdiff_t *, val_type *, ptrdiff_t *); template<> inline double dot(ptrdiff_t *len, double *x, ptrdiff_t *xinc, double *y, ptrdiff_t *yinc) { return ddot_(len,x,xinc,y,yinc);} template<> inline float dot(ptrdiff_t *len, float *x, ptrdiff_t *xinc, float *y, ptrdiff_t *yinc) { return sdot_(len,x,xinc,y,yinc);} template<typename val_type> val_type scal(ptrdiff_t *, val_type *, val_type *, ptrdiff_t *); template<> inline double scal(ptrdiff_t *len, double *a, double *x, ptrdiff_t *xinc) { return dscal_(len,a,x,xinc);} template<> inline float scal(ptrdiff_t *len, float *a, float *x, ptrdiff_t *xinc) { return sscal_(len,a,x,xinc);} template<typename val_type> ptrdiff_t axpy(ptrdiff_t *, val_type *, val_type *, ptrdiff_t *, val_type *, ptrdiff_t *); template<> inline ptrdiff_t axpy(ptrdiff_t *len, double *alpha, double *x, ptrdiff_t *xinc, double *y, ptrdiff_t *yinc) { return daxpy_(len,alpha,x,xinc,y,yinc);}; template<> inline ptrdiff_t axpy(ptrdiff_t *len, float *alpha, float *x, ptrdiff_t *xinc, float *y, ptrdiff_t *yinc) { return saxpy_(len,alpha,x,xinc,y,yinc);}; template<typename val_type> val_type copy(ptrdiff_t *, val_type *, ptrdiff_t *, val_type *, ptrdiff_t *); template<> inline double copy(ptrdiff_t *len, double *x, ptrdiff_t *xinc, double *y, ptrdiff_t *yinc) { return dcopy_(len,x,xinc,y,yinc);} template<> inline float copy(ptrdiff_t *len, float *x, ptrdiff_t *xinc, float *y, ptrdiff_t *yinc) { return scopy_(len,x,xinc,y,yinc);} template<typename val_type> void gemm(char *transa, char *transb, ptrdiff_t *m, ptrdiff_t *n, ptrdiff_t *k, val_type *alpha, val_type *a, ptrdiff_t *lda, val_type *b, ptrdiff_t *ldb, val_type *beta, val_type *c, ptrdiff_t *ldc); template<> inline void gemm(char *transa, char *transb, ptrdiff_t *m, ptrdiff_t *n, ptrdiff_t *k, double *alpha, double *a, ptrdiff_t *lda, double *b, ptrdiff_t *ldb, double *beta, double 
*c, ptrdiff_t *ldc) { dgemm_(transa, transb, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc); } template<> inline void gemm<float>(char *transa, char *transb, ptrdiff_t *m, ptrdiff_t *n, ptrdiff_t *k, float *alpha, float *a, ptrdiff_t *lda, float *b, ptrdiff_t *ldb, float *beta, float *c, ptrdiff_t *ldc) { sgemm_(transa, transb, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc); } template<typename val_type> int posv(char *uplo, ptrdiff_t *n, ptrdiff_t *nrhs, val_type *a, ptrdiff_t *lda, val_type *b, ptrdiff_t *ldb, ptrdiff_t *info); template<> inline int posv(char *uplo, ptrdiff_t *n, ptrdiff_t *nrhs, double *a, ptrdiff_t *lda, double *b, ptrdiff_t *ldb, ptrdiff_t *info) { return dposv_(uplo, n, nrhs, a, lda, b, ldb, info); } template<> inline int posv(char *uplo, ptrdiff_t *n, ptrdiff_t *nrhs, float *a, ptrdiff_t *lda, float *b, ptrdiff_t *ldb, ptrdiff_t *info) { return sposv_(uplo, n, nrhs, a, lda, b, ldb, info); } template<typename val_type> void gesdd(char* jobz, ptrdiff_t* m, ptrdiff_t* n, val_type* a, ptrdiff_t* lda, val_type* s, val_type* u, ptrdiff_t* ldu, val_type* vt, ptrdiff_t* ldvt, val_type* work, ptrdiff_t* lwork, ptrdiff_t* iwork, ptrdiff_t* info); template<> inline void gesdd(char* jobz, ptrdiff_t* m, ptrdiff_t* n, double* a, ptrdiff_t* lda, double* s, double* u, ptrdiff_t* ldu, double* vt, ptrdiff_t* ldvt, double* work, ptrdiff_t* lwork, ptrdiff_t* iwork, ptrdiff_t* info) { return dgesdd_(jobz, m, n, a, lda, s, u, ldu, vt, ldvt, work, lwork, iwork, info); } template<> inline void gesdd(char* jobz, ptrdiff_t* m, ptrdiff_t* n, float* a, ptrdiff_t* lda, float* s, float* u, ptrdiff_t* ldu, float* vt, ptrdiff_t* ldvt, float* work, ptrdiff_t* lwork, ptrdiff_t* iwork, ptrdiff_t* info) { return sgesdd_(jobz, m, n, a, lda, s, u, ldu, vt, ldvt, work, lwork, iwork, info); } // <x,y> template<typename val_type> val_type do_dot_product(const val_type *x, const val_type *y, size_t size) { val_type *xx = const_cast<val_type*>(x); val_type *yy = 
const_cast<val_type*>(y); ptrdiff_t inc = 1; ptrdiff_t len = (ptrdiff_t) size; return dot(&len, xx, &inc, yy, &inc); } template<typename val_type> val_type do_dot_product(const dvec_t<val_type> &x, const dvec_t<val_type> &y) { assert(x.size() == y.size()); return do_dot_product(x.data(), y.data(), x.size()); } template<typename val_type> val_type do_dot_product(const svec_t<val_type> &x, const svec_t<val_type> &y) { if(x.nnz > y.nnz) { return do_dot_product(y, x); } val_type ret = 0; size_t s = 0, t = 0; unsigned *xend = x.idx + x.nnz; unsigned *yend = y.idx + y.nnz; while(s < x.nnz && t < y.nnz) { if(x.idx[s] == y.idx[t]) { ret += x.val[s] * y.val[t]; s++; t++; } else if(x.idx[s] < y.idx[t]) { s = std::lower_bound(x.idx + s, xend, y.idx[t]) - x.idx; } else { t = std::lower_bound(y.idx + t, yend, x.idx[s]) - y.idx; } } return ret; } template<typename val_type> val_type do_dot_product_old(const svec_t<val_type> &x, const svec_t<val_type> &y) { assert(x.size() == y.size()); val_type ret = 0; for(size_t i = 0, j = 0; i < x.get_nnz() && j < y.get_nnz();) { if(x.idx[i] < y.idx[j]) { i ++; } else if(x.idx[i] > y.idx[j]) { j ++; } else { ret += x.val[i] * y.val[j]; i ++; j ++; } } return ret; } template<typename val_type> val_type do_dot_product(const sdvec_t<val_type> &x, const sdvec_t<val_type> &y) { assert(x.size() == y.size()); val_type ret = 0; for(size_t i = 0, j = 0; i < x.get_nnz() && j < y.get_nnz();) { if(x.nz_idx[i] < y.nz_idx[j]) { i++; } else if(x.nz_idx[i] < y.nz_idx[i]) { j++; } else { ret += x[x.nz_idx[i]] * y[y.nz_idx[j]]; i++; j++; } } return ret; } template<typename val_type> val_type do_dot_product(const dvec_t<val_type> &x, const svec_t<val_type> &y) { assert(x.size() == y.size()); val_type ret = 0; for(size_t i = 0; i < y.get_nnz(); i++) ret += x[y.idx[i]] * y.val[i]; return ret; } template<typename val_type> val_type do_dot_product(const svec_t<val_type> &x, const dvec_t<val_type> &y) { assert(x.size() == y.size()); return do_dot_product(y, x); } 
template<typename val_type> val_type do_dot_product(const dvec_t<val_type> &x, const sdvec_t<val_type> &y) { val_type ret = 0; for(size_t i = 0; i < y.get_nnz(); i++) { ret += x[y.nz_idx[i]] * y[y.nz_idx[i]]; } return ret; } template<typename val_type> val_type do_dot_product(const sdvec_t<val_type> &x, const dvec_t<val_type> &y) { return do_dot_product(y, x); } template<typename val_type> val_type do_dot_product_old(const svec_t<val_type> &x, const sdvec_t<val_type> &y) { val_type ret = 0; for(size_t i = 0, j = 0; i < x.get_nnz() && j < y.get_nnz();) { if(x.idx[i] < y.nz_idx[j]) { i++; } else if(x.idx[i] > y.nz_idx[j]) { j++; } else { ret += x.val[i] * y[y.nz_idx[j]]; i++; j++; } } return ret; } template<typename val_type> val_type do_dot_product(const sdvec_t<val_type> &x, const svec_t<val_type> &y) { return do_dot_product(y, x); } template<typename val_type> val_type do_dot_product(const gvec_t<val_type> &x, const gvec_t<val_type> &y) { assert(x.size() == y.size()); if(x.is_sparse() && y.is_sparse()) return do_dot_product(x.get_sparse(), y.get_sparse()); else if(x.is_sparse() && y.is_dense()) return do_dot_product(x.get_sparse(), y.get_dense()); else if(x.is_dense() && y.is_sparse()) return do_dot_product(x.get_dense(), y.get_sparse()); else if(x.is_dense() && y.is_dense()) return do_dot_product(x.get_dense(), y.get_dense()); else return 0; } template<typename val_type> val_type do_dot_product(const dmat_t<val_type> &x, const dmat_t<val_type> &y) { assert(x.rows == y.rows && x.cols == y.cols); if((x.is_rowmajor() && y.is_rowmajor()) || (x.is_colmajor() && y.is_colmajor())) return do_dot_product(x.data(), y.data(), x.rows*x.cols); else { val_type ret = 0.0; const dmat_t<val_type> &xx = (x.rows > x.cols) ? x : x.transpose(); const dmat_t<val_type> &yy = (y.rows > y.cols) ? 
y : y.transpose(); #pragma omp parallel for schedule(static) reduction(+:ret) for(size_t i = 0; i < xx.rows; i++) { double ret_local = 0.0; for(size_t j = 0; j < xx.cols; j++) ret_local += xx.at(i,j)*yy.at(i,j); ret += ret_local; } return (val_type)ret; } } template<typename val_type> val_type do_dot_product(const smat_t<val_type> &x, const smat_t<val_type> &y) { assert(x.rows == y.rows && x.cols == y.cols); val_type ret = 0.0; const smat_t<val_type> &xx = (x.rows > x.cols) ? x : x.transpose(); const smat_t<val_type> &yy = (y.rows > y.cols) ? y : y.transpose(); #pragma omp parallel for schedule(static) reduction(+:ret) for(size_t i = 0; i < xx.rows; i++) { svec_t<val_type> sv1 = xx.get_row(i); svec_t<val_type> sv2 = yy.get_row(i); val_type ret_local = do_dot_product(sv1, sv2); ret += ret_local; } return (val_type)ret; } template<typename val_type> val_type do_dot_product(const smat_t<val_type> &x, const dmat_t<val_type>&y) { assert(x.rows == y.rows && x.cols == y.cols); double ret = 0; const smat_t<val_type> &xx = (x.rows > x.cols) ? 
x : x.transpose(); #pragma omp parallel for schedule(static) reduction(+:ret) for(size_t i = 0; i < xx.rows; i++) { double tmp = 0; for(size_t idx = xx.row_ptr[i]; idx < xx.row_ptr[i + 1]; idx++) { tmp += xx.val[idx] * y.at(i, xx.col_idx[idx]); } ret += tmp; } return static_cast<val_type>(ret); } template<typename val_type> val_type do_dot_product(const dmat_t<val_type>&x, const smat_t<val_type> &y) { return do_dot_product(y, x); } template<typename val_type> val_type do_dot_product(const gmat_t<val_type>&x, const gmat_t<val_type> &y) { assert(x.rows == y.rows && x.cols == y.cols); if(x.is_sparse() && y.is_sparse()) return do_dot_product(x.get_sparse(), y.get_sparse()); else if(x.is_sparse() && y.is_dense()) return do_dot_product(x.get_sparse(), y.get_dense()); else if(x.is_dense() && y.is_sparse()) return do_dot_product(x.get_dense(), y.get_sparse()); else if(x.is_dense() && y.is_dense()) return do_dot_product(x.get_dense(), y.get_dense()); else return 0; } // y = alpha * x + y template<typename val_type, typename T> val_type* do_axpy(T alpha, const val_type *x, val_type *y, size_t size) { if(alpha == 0) return y; val_type alpha_ = (val_type)alpha; ptrdiff_t inc = 1; ptrdiff_t len = (ptrdiff_t) size; val_type *xx = const_cast<val_type*>(x); axpy(&len, &alpha_, xx, &inc, y, &inc); return y; } template<typename val_type, typename T> dvec_t<val_type>& do_axpy(T alpha, const dvec_t<val_type> &x, dvec_t<val_type> &y) { do_axpy(alpha, x.data(), y.data(), x.size()); return y; } template<typename val_type, typename T> dvec_t<val_type>& do_axpy(T alpha, const svec_t<val_type> &x, dvec_t<val_type> &y) { if(alpha == 0) return y; for(size_t i = 0; i < x.get_nnz(); i++) { y[x.idx[i]] += alpha * x.val[i]; } return y; } template<typename XV, typename YV, typename T> sdvec_t<YV>& do_axpy(T alpha, const svec_t<XV>& x, sdvec_t<YV> &y) { if(alpha == 0) return y; for(size_t i = 0; i < x.get_nnz(); i++) { y.add_nonzero_at(x.idx[i], alpha * x.val[i]); } return y; } template<typename 
XV, typename YV, typename T> sdvec_t<YV>& do_axpy(T alpha, const dvec_t<XV>& x, sdvec_t<YV> &y) { if(alpha == 0) return y; for(size_t i = 0; i < x.size(); i++) { y.add_nonzero_at(i, alpha * x[i]); } return y; } template<typename val_type, typename T> dmat_t<val_type>& do_axpy(T alpha, const dmat_t<val_type> &x, dmat_t<val_type> &y) { assert(x.rows == y.rows && x.cols == y.cols); if((x.is_rowmajor() && y.is_rowmajor()) || (x.is_colmajor() && y.is_colmajor())) do_axpy(alpha, x.data(), y.data(), x.rows*x.cols); else { if(x.rows > x.cols) { #pragma omp parallel for schedule(static) for(size_t i = 0; i < x.rows; i++) for(size_t j = 0; j < x.cols; j++) y.at(i,j) += alpha*x.at(i,j); } else { #pragma omp parallel for schedule(static) for(size_t j = 0; j < x.cols; j++) for(size_t i = 0; i < x.rows; i++) y.at(i,j) += alpha*x.at(i,j); } } return y; } // x *= alpha template<typename val_type, typename T> void do_scale(T alpha, val_type *x, size_t size) { if(alpha == 0.0) { memset(x, 0, sizeof(val_type) * size); } else if (alpha == 1.0) { return; } else { val_type alpha_minus_one = (val_type)(alpha - 1); do_axpy(alpha_minus_one, x, x, size); } } template<typename val_type, typename T> void do_scale(T alpha, dvec_t<val_type> &x) { do_scale(alpha, x.data(), x.size()); } template<typename val_type, typename T> void do_scale(T alpha, svec_t<val_type> &x) { do_scale(alpha, x.val, x.get_nnz()); } template<typename val_type, typename T> void do_scale(T alpha, gvec_t<val_type> &x) { if(x.is_sparse()) do_scale(alpha, x.get_sparse()); else if(x.is_dense()) do_scale(alpha, x.get_dense()); } template<typename val_type, typename T> void do_scale(T alpha, dmat_t<val_type> &x) { do_scale(alpha, x.data(), x.rows*x.cols); } template<typename val_type, typename T> void do_scale(T alpha, smat_t<val_type> &x) { do_scale(alpha, x.val, x.get_nnz()); do_scale(alpha, x.val_t, x.get_nnz()); } // H = a*X*W + b H0 (H0 can put H. However H don't need to be pre-allocated, but H0 do.) 
// H = a*X*W + b*H0 for dense X. H0 may alias H; H need not be pre-allocated
// (H0 must be). Asserts are relaxed when b == 0 since H0 is then ignored.
template<typename val_type, typename T2, typename T3> dmat_t<val_type>& dmat_x_dmat(T2 a, const dmat_t<val_type> &X, const dmat_t<val_type> &W, T3 b, const dmat_t<val_type> &H0, dmat_t<val_type> &H) {
    if(b == 0)
        assert(X.cols == W.rows);
    else
        assert(W.cols == H0.cols && X.cols == W.rows && X.rows == H0.rows);
    // Seed H with b*H0, then accumulate a*X*W on top (beta == 1).
    H.lazy_resize(X.rows, W.cols).assign(b, H0);
    return dmat_x_dmat(a, X, W, 1, H);
}

// H = a*X*W + b*H0 for sparse X; branches on the storage order of W and H
// to pick the best traversal.
template<typename val_type, typename T2, typename T3> dmat_t<val_type>& smat_x_dmat(T2 a, const smat_t<val_type> &X, const dmat_t<val_type> &W, T3 b, const dmat_t<val_type> &H0, dmat_t<val_type> &H) {
    if(b == 0)
        assert(X.cols == W.rows);
    else
        assert(W.cols == H0.cols && X.cols == W.rows && X.rows == H0.rows);
    H.lazy_resize(X.rows, W.cols).assign(b, H0);
    // H += aXW
    if(W.is_rowmajor()) {
        if(H.is_rowmajor()) {
            // Both row-major: delegate to the raw-pointer kernel below.
            smat_x_dmat(a, X, W.data(), W.cols, 1.0, H.data(), H.data());
        } else { // H is col_major
#pragma omp parallel for schedule(dynamic, 64) shared(X, W, H)
            for(size_t i = 0; i < X.rows; i++) {
                for(size_t idx = X.row_ptr[i]; idx != X.row_ptr[i+1]; idx++){
                    size_t j = X.col_idx[idx];
                    const val_type &Xij = X.val_t[idx];
                    for(size_t t = 0; t < W.cols; t++)
                        H.at(i,t) += a*Xij*W.at(j,t);
                }
            }
        }
    } else { // W.is_colmajor
        if(H.is_colmajor()) {
            // Column-by-column sparse matrix-vector products.
#pragma omp parallel for schedule(static)
            for(size_t j = 0; j < W.cols; j++) {
                dvec_t<val_type> Wj = W.get_col(j);
                dvec_t<val_type> Hj = H.get_col(j);
                X.Xv(Wj, Hj, true);
            }
        } else { // H.is row_major
#pragma omp parallel for schedule(dynamic, 64) shared(X, W, H)
            for(size_t i = 0; i < X.rows; i++) {
                for(size_t idx = X.row_ptr[i]; idx != X.row_ptr[i+1]; idx++){
                    size_t j = X.col_idx[idx];
                    const val_type &Xij = X.val_t[idx];
                    for(size_t t = 0; t < W.cols; t++)
                        H.at(i,t) += a*Xij*W.at(j,t);
                }
            }
        }
    }
    return H;
}

// H = a*X*W + b*H0 for a general matrix X: dispatch on representation.
template<typename val_type, typename T2, typename T3> dmat_t<val_type>& gmat_x_dmat(T2 a, const gmat_t<val_type> &X, const dmat_t<val_type> &W, T3 b, const dmat_t<val_type> &H0, dmat_t<val_type> &H) {
    if(b == 0)
        assert(X.cols == W.rows);
    else
        assert(W.cols == H0.cols && X.cols == W.rows && X.rows == H0.rows);
    if(X.is_sparse())
        smat_x_dmat(a, X.get_sparse(), W, b, H0, H);
    else if(X.is_dense())
        dmat_x_dmat(a, X.get_dense(), W, b, H0, H);
    else if(X.is_identity()) {
        // I*W == W, so H = b*H0 + a*W.
        H.lazy_resize(X.rows, W.cols).assign(b, H0);
        do_axpy(a, W, H);
    }
    return H;
}

// H = a*X*W + H0 (H0 can put H. However H don't need to be pre-allocated, but H0 do)
template<typename val_type, typename T2> dmat_t<val_type>& dmat_x_dmat(T2 a, const dmat_t<val_type> &X, const dmat_t<val_type> &W, const dmat_t<val_type> &H0, dmat_t<val_type> &H) { return dmat_x_dmat(a, X, W, 1.0, H0, H); }
template<typename val_type, typename T2> dmat_t<val_type>& smat_x_dmat(T2 a, const smat_t<val_type> &X, const dmat_t<val_type> &W, const dmat_t<val_type> &H0, dmat_t<val_type> &H) { return smat_x_dmat(a, X, W, 1.0, H0, H); }
template<typename val_type, typename T2> dmat_t<val_type>& gmat_x_dmat(T2 a, const gmat_t<val_type> &X, const dmat_t<val_type> &W, const dmat_t<val_type> &H0, dmat_t<val_type> &H) { return gmat_x_dmat(a, X, W, 1.0, H0, H); }

// H = X*W (H don't need to be pre-allocated)
template<typename val_type> dmat_t<val_type>& dmat_x_dmat(const dmat_t<val_type> &X, const dmat_t<val_type> &W, dmat_t<val_type> &H) { return dmat_x_dmat(1.0, X, W, 0.0, H, H); }

template<typename val_type> dmat_t<val_type> operator*(const dmat_t<val_type> &X, const dmat_t<val_type> &W) {
    dmat_t<val_type> H(X.rows, W.cols);
    dmat_x_dmat(X, W, H);
    return H;
}

// H = X*W for sparse operands, sequential. Builds H column-by-column in CSC
// form using a sparse-dense accumulator, then derives the CSR copy.
template<typename VX, typename VW, typename VH> smat_t<VH>& smat_x_smat_single_thread(const smat_t<VX> &X, const smat_t<VW> &W, smat_t<VH> &H) {
    std::vector<unsigned> row_idx;
    std::vector<size_t> col_ptr;
    std::vector<VH> val;
    size_t rows = X.rows, cols = W.cols;
    sdvec_t<VH> temp(rows); // accumulator for one output column
    col_ptr.push_back(0);
    size_t total_nnz = 0;
    for(size_t c = 0; c < cols; c++) {
        const svec_t<VW>& Wc = W.get_col(c);
        temp.clear();
        for(size_t s = 0; s < Wc.nnz; s++) {
            // temp += Wc[i] * Xi
            do_axpy(Wc.val[s], X.get_col(Wc.idx[s]), temp);
        }
        temp.update_nz_idx();
        total_nnz += temp.nnz;
        col_ptr.push_back(total_nnz);
        for(size_t s = 0; s < temp.nnz; s++) {
            row_idx.push_back(temp.nz_idx[s]);
            val.push_back(temp[temp.nz_idx[s]]);
        }
    }
    H.allocate_space(rows, cols, total_nnz);
    memcpy(H.val, val.data(), sizeof(VH) * total_nnz);
    memcpy(H.row_idx, row_idx.data(), sizeof(unsigned) * total_nnz);
    memcpy(H.col_ptr, col_ptr.data(), sizeof(size_t) * (cols + 1));
    H.csc_to_csr();
    return H;
}

// H = X*W for sparse operands, parallel over column ranges. threads < 1 means
// "use all cores". When rows > cols the product is computed transposed so the
// parallel dimension is the larger one.
template<typename VX, typename VW, typename VH> smat_t<VH>& smat_x_smat(const smat_t<VX> &X, const smat_t<VW> &W, smat_t<VH> &H, int threads=-1) {
    // Per-thread scratch: a column accumulator plus the CSC triplet pieces
    // produced for this thread's column range.
    struct worker_t {
        worker_t() {}
        sdvec_t<VH> temp;
        std::vector<unsigned> row_idx;
        std::vector<VH> val;
        size_t nnz() const { return row_idx.size(); }
        void set_rows(size_t rows) { temp.resize(rows); }
        void reserve(size_t capacity) { row_idx.reserve(capacity); val.reserve(capacity); }
        void push_back(unsigned idx, VH value) {
            row_idx.push_back(static_cast<unsigned>(idx));
            val.push_back(static_cast<VH>(value));
        }
    };
    size_t rows = X.rows, cols = W.cols;
    if(threads == 1) { return smat_x_smat_single_thread(X, W, H); }
    if(rows > cols) { // maximize the parallelism
        smat_t<VX> Xt = X.transpose();
        smat_t<VW> Wt = W.transpose();
        smat_x_smat(Wt, Xt, H, threads);
        H.to_transpose();
        return H;
    }
    if(threads < 1) { threads = omp_get_num_procs(); }
    threads = std::min(threads, omp_get_num_procs());
    std::vector<worker_t> worker_set(threads);
    std::vector<size_t> col_ptr(cols + 1);
    size_t workload = (cols / threads) + (cols % threads != 0); // ceil(cols/threads)
#pragma omp parallel for schedule(static,1)
    for(int tid = 0; tid < threads; tid++) {
        worker_t& worker = worker_set[tid];
        worker.set_rows(rows);
        worker.reserve(X.nnz + W.nnz);
        size_t c_start = tid * workload;
        size_t c_end = std::min((tid + 1) * workload, cols);
        sdvec_t<VH>& temp = worker.temp;
        for(size_t c = c_start; c < c_end; c++) {
            const svec_t<VW>& Wc = W.get_col(c);
            temp.clear();
            for(size_t s = 0; s < Wc.nnz; s++) {
                // temp += Wc[i] * Xi
                do_axpy(Wc.val[s], X.get_col(Wc.idx[s]), temp);
            }
            temp.update_nz_idx();
            col_ptr[c + 1] = temp.nnz; // per-column counts; prefix-summed below
            for(size_t s = 0; s < temp.nnz; s++) {
                size_t r = temp.nz_idx[s];
                worker.push_back(r, temp[r]);
            }
        }
    }
    // Turn per-column counts into offsets, then let each thread copy its
    // contiguous slice into the final CSC arrays.
    for(size_t c = 1; c <= cols; c++) { col_ptr[c] += col_ptr[c - 1]; }
    size_t total_nnz = col_ptr[cols];
    H.allocate_space(rows, cols, total_nnz);
    memcpy(H.col_ptr, col_ptr.data(), sizeof(size_t) * (cols + 1));
#pragma omp parallel for schedule(static,1)
    for(int tid = 0; tid < threads; tid++) {
        size_t c_start = tid * workload;
        worker_t& worker = worker_set[tid];
        memcpy(&H.val[col_ptr[c_start]], worker.val.data(), sizeof(VH) * worker.nnz());
        memcpy(&H.row_idx[col_ptr[c_start]], worker.row_idx.data(), sizeof(unsigned) * worker.nnz());
    }
    H.csc_to_csr();
    return H;
}

template<typename VX, typename VW> smat_t<VX> operator*(const smat_t<VX> &X, const smat_t<VW>& W) {
    smat_t<VX> H;
    smat_x_smat(X, W, H);
    return H;
}

// H = X*W (H need not be pre-allocated).
template<typename val_type> dmat_t<val_type>& smat_x_dmat(const smat_t<val_type> &X, const dmat_t<val_type> &W, dmat_t<val_type> &H) { return smat_x_dmat(1.0, X, W, 0.0, H, H); }

template<typename val_type> dmat_t<val_type> operator*(const smat_t<val_type> &X, const dmat_t<val_type> &W) {
    dmat_t<val_type> H(X.rows, W.cols);
    smat_x_dmat(X, W, H);
    return H;
}

// NOTE(review): (X*W)^T = W^T * X^T, so the call below looks like it should be
// smat_x_dmat(W.transpose(), X.transpose(), H.transpose()); as written the
// first argument is a dense matrix, which cannot bind to smat_x_dmat's sparse
// parameter -- confirm whether this overload is ever instantiated.
template<typename val_type> dmat_t<val_type> operator*(const dmat_t<val_type> &X, const smat_t<val_type> &W) {
    dmat_t<val_type> H(X.rows, W.cols);
    smat_x_dmat(X.transpose(), W.transpose(), H.transpose());
    return H;
}

template<typename val_type> dmat_t<val_type>& gmat_x_dmat(const gmat_t<val_type> &X, const dmat_t<val_type> &W, dmat_t<val_type> &H) { return gmat_x_dmat(1.0, X, W, 0.0, H, H); }

template<typename val_type> dmat_t<val_type> operator*(const gmat_t<val_type> &X, const dmat_t<val_type> &W) {
    dmat_t<val_type> H(X.rows, W.cols);
    gmat_x_dmat(X, W, H);
    return H;
}

// Computes val[idx] = (gX * gM)(X_row_idx[idx], M_col_idx[idx]) for `len`
// requested entries, choosing a kernel per operand representation.
template<typename val_type, typename I, typename V> void compute_sparse_entries_from_gmat_x_gmat( const gmat_t<val_type> &gX, const gmat_t<val_type> &gM, size_t len, const I *X_row_idx, const I *M_col_idx, V *val) {
    if(gX.is_sparse() && gM.is_sparse()) {
        const smat_t<val_type>& X = gX.get_sparse();
        const smat_t<val_type>& M = gM.get_sparse();
#pragma omp parallel for schedule(dynamic,64)
        for(size_t idx = 0; idx < len; idx++) {
            const svec_t<val_type>& xi = X.get_row(X_row_idx[idx]);
            const svec_t<val_type>& mj = M.get_col(M_col_idx[idx]);
            val[idx] = static_cast<V>(do_dot_product(xi, mj));
        }
    } else if(gX.is_sparse() && gM.is_dense()) {
        const smat_t<val_type>& X = gX.get_sparse();
        const dmat_t<val_type>& M = gM.get_dense();
#pragma omp parallel for schedule(dynamic,64)
        for(size_t idx = 0; idx < len; idx++) {
            const svec_t<val_type>& xi = X.get_row(X_row_idx[idx]);
            I j = M_col_idx[idx];
            double tmp = 0; // accumulate in double regardless of val_type
            for(size_t t = 0; t < xi.nnz; t++) {
                tmp += xi.val[t] * M.at(xi.idx[t], j);
            }
            val[idx] = tmp;
        }
    } else if(gX.is_dense() && gM.is_sparse()) {
        const dmat_t<val_type>& X = gX.get_dense();
        const smat_t<val_type>& M = gM.get_sparse();
#pragma omp parallel for schedule(dynamic,64)
        for(size_t idx = 0; idx < len; idx++) {
            const svec_t<val_type>& mj = M.get_col(M_col_idx[idx]);
            I i = X_row_idx[idx];
            double tmp = 0;
            for(size_t t = 0; t < mj.nnz; t++) {
                tmp += X.at(i, mj.idx[t]) * mj.val[t];
            }
            val[idx] = tmp;
        }
    } else if(gX.is_dense() && gM.is_dense()) {
        const dmat_t<val_type>& X = gX.get_dense();
        const dmat_t<val_type>& M = gM.get_dense();
#pragma omp parallel for schedule(static,64)
        for(size_t idx = 0; idx < len; idx++) {
            I i = X_row_idx[idx];
            I j = M_col_idx[idx];
            double tmp = 0;
            for(size_t t = 0; t < X.cols; t++) {
                tmp += X.at(i, t) * M.at(t, j);
            }
            val[idx] = tmp;
        }
    }
}

// tr(W^T X H) (W, H: dense matrix; X: sparse matrix)
template<typename val_type> val_type trace_dmat_T_smat_dmat(const dmat_t<val_type> &W, const smat_t<val_type> &X, const dmat_t<val_type> &H) {
    assert(W.cols == H.cols && W.rows == X.rows && H.rows == X.cols);
    if(W.is_colmajor() && H.is_colmajor()) {
        // Parallelize over columns; each term is sum_ij X_ij * W_it * H_jt.
        double ret = 0;
#pragma omp parallel for schedule(static) reduction(+:ret)
        for(size_t t = 0; t < W.cols; t++) {
            const dvec_t<val_type> u = W.get_col(t);
            const dvec_t<val_type> v = H.get_col(t);
            double local_sum = 0;
            for(size_t i = 0; i < X.rows; i++) {
                for(size_t idx = X.row_ptr[i]; idx != X.row_ptr[i+1]; idx++)
                    local_sum += X.val_t[idx]*u[i]*v[X.col_idx[idx]];
            }
            ret += local_sum;
        }
        return ret;
    } else {
        // Row-major (or mixed) layout: parallelize over sparse rows instead.
        double ret= 0;
#pragma omp parallel for schedule(dynamic,64) reduction(+:ret)
        for(size_t i = 0; i < X.rows; i++) {
            double local_sum = 0;
            for(size_t idx = X.row_ptr[i]; idx != X.row_ptr[i+1]; idx++) {
                size_t j = X.col_idx[idx];
                double sum = 0;
                for(size_t t = 0; t < W.cols; t++)
                    sum += W.at(i,t)*H.at(j,t);
                local_sum += sum * X.val_t[idx];
            }
            ret += local_sum;
        }
        return ret;
    }
}

// tr(W^T diag(D) H) (W, H: dense matrix; D: dense vector)
template<typename val_type> val_type trace_dmat_T_diag_dmat(const dmat_t<val_type> &W, const dvec_t<val_type> &D, const dmat_t<val_type> &H) {
    assert(W.rows == H.rows && W.rows == D.len && W.cols == H.cols);
    assert(W.is_rowmajor() && H.is_rowmajor());
    return trace_dmat_T_diag_dmat(W.data(),D.data(),H.data(),W.rows,W.cols);
}

// -------------- Implementation of Linear Algebra Solvers --------------

// Solve Ax = b, A is symmetric positive definite, b is overwritten with the result x
// A will be modifed by internal Lapack. Make copy when necessary
template<typename val_type> bool ls_solve_chol(val_type *A, val_type *b, size_t n) {
    ptrdiff_t nn=n, lda=n, ldb=n, nrhs=1, info=0;
    char uplo = 'U';
    posv(&uplo, &nn, &nrhs, A, &lda, b, &ldb, &info);
    return (info == 0); // info != 0 => not positive definite / bad argument
}

// Solve AX = B, A is symmetric positive definite, B is overwritten with the result X
// A is a m-by-m matrix, while B is a m-by-n matrix stored in col_major
// A will be modified by internal Lapack. Make copy when necessary
template<typename val_type> bool ls_solve_chol_matrix_colmajor(val_type *A, val_type *B, size_t m, size_t n = size_t(0)) {
    ptrdiff_t mm=m, lda=m, ldb=m, nrhs=n, info=0;
    char uplo = 'U';
    posv(&uplo, &mm, &nrhs, A, &lda, B, &ldb, &info);
    return (info == 0);
}

// Solve AX = B, A is symmetric positive definite, return X.
// A_as_workspace == true lets LAPACK clobber A's storage (avoids a copy).
template<typename val_type> dmat_t<val_type> ls_solve_chol(const dmat_t<val_type>& A, const dmat_t<val_type>& B, bool A_as_workspace) {
    dmat_t<val_type> X(B);
    X.grow_body().to_colmajor();
    dmat_t<val_type> AA(A);
    if(A_as_workspace == false)
        AA.grow_body();
    if(ls_solve_chol_matrix_colmajor(AA.data(), X.data(), AA.rows, X.cols) == false)
        // NOTE(review): message typo ("cho" for "chol") and missing '\n' --
        // left untouched here since this is a comment-only pass.
        fprintf(stderr, "error when applying ls_solve_cho_matrix_colmajor");
    return X;
}

// Solve Ax = b, A is symmetric positive definite, return x
template<typename val_type> dvec_t<val_type> ls_solve_chol(const dmat_t<val_type>& A, const dvec_t<val_type>& b, bool A_as_workspace) {
    dvec_t<val_type> x(b);
    x.grow_body();
    dmat_t<val_type> AA(A);
    if(A_as_workspace == false)
        AA.grow_body();
    if(ls_solve_chol(AA.data(), x.data(), AA.rows) == false)
        fprintf(stderr, "error when applying ls_solve_chol");
    return x;
}

// SVD: A = USV'
// U, S, V don't necessarily need to be pre-allocated
template<typename val_type> class svd_solver_t {
    private:
        char jobz; // 'S' = reduced (thin) SVD, 'A' = full SVD
        ptrdiff_t mm, nn, min_mn, max_mn, lda, ldu, ldvt, lwork1, lwork2, lwork, info;
        std::vector<val_type> u_buf, v_buf, s_buf, work;
        std::vector<ptrdiff_t> iwork;
        size_t k;
        // Sizes LAPACK workspace and (lazily) shapes U, S, V for ?gesdd.
        void prepare_parameter(const dmat_t<val_type>& A, dmat_t<val_type>& U, dvec_t<val_type>& S, dmat_t<val_type>& V, bool reduced) {
            k = std::min(A.rows, A.cols);
            mm = (ptrdiff_t)A.rows;
            nn = (ptrdiff_t)A.cols;
            min_mn = std::min(mm,nn);
            max_mn = std::max(mm,nn);
            lda = mm; ldu = mm;
            ldvt = reduced? min_mn : nn;
            lwork1 = 3*min_mn*min_mn + std::max(max_mn, 4*min_mn*min_mn + 4*min_mn);
            lwork2 = 3*min_mn + std::max(max_mn, 4*min_mn*min_mn + 3*min_mn + max_mn);
            lwork = 2 * std::max(lwork1, lwork2); // due to differences between lapack 3.1 and 3.4
            info = 0;
            work.resize(lwork);
            iwork.resize((size_t)(8*min_mn));
            if(!S.is_view() || S.size() != k) S.resize(k);
            if(reduced) {
                jobz = 'S';
                U.lazy_resize(A.rows, k, COLMAJOR);
                V.lazy_resize(A.cols, k, ROWMAJOR);
            } else {
                jobz = 'A';
                U.lazy_resize(A.rows, A.rows, COLMAJOR);
                V.lazy_resize(A.cols, A.cols, ROWMAJOR);
            }
        }
    public:
        svd_solver_t() {}
        // Returns true on success (LAPACK info == 0). Row-major input is
        // handled by solving the transposed problem with U and V swapped.
        bool solve(const dmat_t<val_type>& A, dmat_t<val_type>& U, dvec_t<val_type>& S, dmat_t<val_type>& V, bool reduced=true, bool A_as_workspace=false) {
            if(A.is_rowmajor())
                return solve(A.transpose(), V, S, U, reduced, A_as_workspace);
            else {
                dmat_t<val_type> AA(A.get_view());
                if(A_as_workspace == false)
                    AA.grow_body(); // gesdd destroys its input
                prepare_parameter(AA, U, S, V, reduced);
#if defined(CPP11)
                gesdd(&jobz, &mm, &nn, AA.data(), &lda, S.data(), U.data(), &ldu, V.data(), &ldvt, work.data(), &lwork, iwork.data(), &info);
#else
                gesdd(&jobz, &mm, &nn, AA.data(), &lda, S.data(), U.data(), &ldu, V.data(), &ldvt, &work[0], &lwork, &iwork[0], &info);
#endif
                return (info == 0);
            }
        }
};

// Convenience wrapper around svd_solver_t.
template<typename val_type> void svd(const dmat_t<val_type>& A, dmat_t<val_type>& U, dvec_t<val_type>& S, dmat_t<val_type>& V, bool reduced, bool A_as_workspace) {
    svd_solver_t<val_type> solver;
    solver.solve(A, U, S, V, reduced, A_as_workspace);
}

// -------------- Implementation of Miscellaneous Functions --------------

// y = x for pointer to array
template<typename val_type> void do_copy(const val_type *x, val_type *y, size_t size) {
    if(x == y) return;
    ptrdiff_t inc = 1;
    ptrdiff_t len = (ptrdiff_t) size;
    val_type *xx = const_cast<val_type*>(x);
    copy(&len, xx, &inc, y, &inc);
}

// H = a*X*W + b H0
// X is an m*n
// W is an n*k, row-majored array
// H is an m*k, row-majored array
template<typename val_type, typename T2, typename T3> void smat_x_dmat(T2 a, const smat_t<val_type> &X, const val_type *W, const size_t k, T3 b, const val_type *H0, val_type *H) {
    size_t m = X.rows;
    val_type aa = (val_type) a;
    val_type bb = (val_type) b;
    if(a == T2(0)) {
        // Sparse term vanishes: H is just b*H0 (or all zeros).
        if(bb == (val_type)0.0){
            memset(H, 0, sizeof(val_type)*m*k);
            return ;
        } else {
            if(H!=H0) {
                do_copy(H0, H, m*k);
                //memcpy(H, H0, sizeof(val_type)*m*k);
            }
            do_scale(bb, H, m*k);
        }
        return;
    }
#pragma omp parallel for schedule(dynamic,64) shared(X, W, H, H0, aa,bb)
    for(size_t i = 0; i < m; i++) {
        val_type *Hi = &H[k*i];
        // Initialize row i of H with b*H0 row (or zeros), then accumulate.
        if(bb == (val_type)0.0)
            memset(Hi, 0, sizeof(val_type)*k);
        else {
            if(Hi!=&H0[k*i])
                do_copy(&H0[k*i], Hi, k);
            do_scale(bb, Hi, k);
        }
        for(size_t idx = X.row_ptr[i]; idx < X.row_ptr[i+1]; idx++) {
            const val_type Xij = X.val_t[idx];
            const val_type *Wj = &W[X.col_idx[idx]*k];
            for(size_t t = 0; t < k; t++)
                Hi[t] += aa*Xij*Wj[t];
        }
    }
}

// H = a*X*W + H0 (raw-pointer variant with b fixed to 1).
template<typename val_type, typename T2> void smat_x_dmat(T2 a, const smat_t<val_type> &X, const val_type* W, const size_t k, const val_type *H0, val_type *H) {
    smat_x_dmat(a, X, W, k, 1.0, H0, H);
}

// C = alpha*A*B + beta*C
// C : m * n, k is the dimension of the middle
// (1) A, B, C are stored in column major!
template<typename val_type, typename T1, typename T2> void dmat_x_dmat_colmajor(T1 alpha, const val_type *A, bool trans_A, const val_type *B, bool trans_B, T2 beta, val_type *C, size_t m, size_t n, size_t k) {
    ptrdiff_t mm = (ptrdiff_t)m, nn = (ptrdiff_t)n, kk = (ptrdiff_t)k;
    ptrdiff_t lda = trans_A? kk:mm, ldb = trans_B? nn:kk, ldc = mm;
    char transpose = 'T', notranspose = 'N';
    char *transa = trans_A? &transpose: &notranspose;
    char *transb = trans_B? &transpose: &notranspose;
    val_type alpha_ = (val_type) alpha;
    val_type beta_ = (val_type) beta;
    val_type *AA = const_cast<val_type*>(A);
    val_type *BB = const_cast<val_type*>(B);
    gemm(transa, transb, &mm, &nn, &kk, &alpha_, AA, &lda, BB, &ldb, &beta_, C, &ldc);
}
// (2) A, B, C are stored in row major!
template<typename val_type, typename T1, typename T2> void dmat_x_dmat(T1 alpha, const val_type *A, bool trans_A, const val_type *B, bool trans_B, T2 beta, val_type *C, size_t m, size_t n, size_t k) { dmat_x_dmat_colmajor(alpha, B, trans_B, A, trans_A, beta, C, n, m, k); } // C = alpha*A*B + beta*C template<typename val_type, typename T1, typename T2> dmat_t<val_type>& dmat_x_dmat(T1 alpha, const dmat_t<val_type>& A, const dmat_t<val_type>& B, T2 beta, dmat_t<val_type>& C) { assert(A.cols == B.rows); C.lazy_resize(A.rows, B.cols); if (C.is_rowmajor()) { bool trans_A = A.is_rowmajor()? false : true; bool trans_B = B.is_rowmajor()? false : true; dmat_x_dmat(alpha, A.data(), trans_A, B.data(), trans_B, beta, C.data(), C.rows, C.cols, A.cols); } else { bool trans_A = A.is_colmajor()? false : true; bool trans_B = B.is_colmajor()? false : true; dmat_x_dmat_colmajor(alpha, A.data(), trans_A, B.data(), trans_B, beta, C.data(), C.rows, C.cols, A.cols); } return C; } // C = A'*B // C : m*n, k is the dimension of the middle // A, B, C are stored in row major! template<typename val_type> void dmat_trans_x_dmat(const val_type *A, const val_type *B, val_type *C, size_t m, size_t n, size_t k) { bool transpose = true; dmat_x_dmat(val_type(1.0), A, transpose, B, !transpose, val_type(0.0), C, m, n, k); } // C=A*B // A, B, C are stored in row major! 
template<typename val_type> void dmat_x_dmat(const val_type *A, const val_type *B, val_type *C, size_t m, size_t n, size_t k) { bool trans = true; dmat_x_dmat(val_type(1.0), A, !trans, B, !trans, val_type(0.0), C, m, n, k); } // Input: an n*k row-major matrix H // Output: an k*k matrix H^TH template<typename val_type> void doHTH(const val_type *H, val_type *HTH, size_t n, size_t k) { bool transpose = true; dmat_x_dmat_colmajor(val_type(1.0), H, !transpose, H, transpose, val_type(0.0), HTH, k, k, n); } /* trace(W^T X H) X is an m*n, sparse matrix W is an m*k, row-majored array H is an n*k, row-major */ template<typename val_type> val_type trace_dmat_T_smat_dmat(const val_type *W, const smat_t<val_type> &X, const val_type *H, const size_t k) { size_t m = X.rows; double ret = 0; #pragma omp parallel for schedule(dynamic,50) shared(X,H,W) reduction(+:ret) for(size_t i = 0; i < m; i++) { const val_type *Wi = &W[k*i]; for(long idx = X.row_ptr[i]; idx < X.row_ptr[i+1]; idx++) { const val_type *Hj = &H[X.col_idx[idx]*k]; double tmp=0; for(size_t t = 0; t < k; t++) tmp += Wi[t]*Hj[t]; ret += X.val_t[idx]*tmp; } } return (val_type)ret; } /* trace(W^T diag(D) H) D is an m*1 vector W is an m*k, row-majored array H is an m*k, row-major array */ template<typename val_type> val_type trace_dmat_T_diag_dmat(const val_type *W, const val_type *D, const val_type *H, const size_t m, const size_t k) { val_type *w = const_cast<val_type*>(W); val_type *h = const_cast<val_type*>(H); val_type *d = const_cast<val_type*>(D); double ret = 0.0; #pragma omp parallel for schedule(static) shared(w,h,d) reduction(+:ret) for(size_t i = 0; i < m; i++) { val_type *wi = &w[i*k], *hi = &h[i*k]; ret += do_dot_product(wi, wi, k) * d[i]; } return (val_type)ret; } template<typename val_type> val_type trace_dmat_T_diag_dmat(const dmat_t<val_type> &W, const dmat_t<val_type> &D, const dmat_t<val_type> &H) { return trace_dmat_T_diag_dmat(W, dvec_t<val_type>(D.get_view()), H); } //------------------ 
Implementation of zip_it ----------------------- // helpler functions and classes for zip_it template<class T1, class T2> struct zip_body { T1 x; T2 y; zip_body(const zip_ref<T1,T2>& other): x(*other.x), y(*other.y){} bool operator<(const zip_body &other) const {return x < other.x;} bool operator>(zip_body &other) const {return x > other.x;} bool operator==(zip_body &other) const {return x == other.x;} bool operator!=(zip_body &other) const {return x != other.x;} }; template<class T1, class T2> struct zip_ref { T1 *x; T2 *y; zip_ref(T1 &x, T2 &y): x(&x), y(&y){} zip_ref(zip_body<T1,T2>& other): x(&other.x), y(&other.y){} bool operator<(zip_ref other) const {return *x < *other.x;} bool operator>(zip_ref other) const {return *x > *other.x;} bool operator==(zip_ref other) const {return *x == *other.x;} bool operator!=(zip_ref other) const {return *x != *other.x;} zip_ref& operator=(zip_ref& other) { *x = *other.x; *y = *other.y; return *(this); } zip_ref& operator=(zip_body<T1,T2> other) { *x = other.x; *y = other.y; return *(this); } }; template<class T1, class T2> void swap(zip_ref<T1,T2> a, zip_ref<T1,T2> b) { std::swap(*(a.x),*(b.x)); std::swap(*(a.y),*(b.y)); } template<class IterT1, class IterT2> struct zip_it { typedef std::random_access_iterator_tag iterator_category; typedef typename std::iterator_traits<IterT1>::value_type T1; typedef typename std::iterator_traits<IterT2>::value_type T2; typedef zip_body<T1,T2> value_type; typedef zip_ref<T1,T2> reference; typedef zip_body<T1,T2>* pointer; typedef ptrdiff_t difference_type; IterT1 x; IterT2 y; zip_it(IterT1 x, IterT2 y): x(x), y(y){} reference operator*() {return reference(*x, *y);} reference operator[](const difference_type n) const {return reference(x[n],y[n]);} zip_it& operator++() {++x; ++y; return *this;} // prefix ++ zip_it& operator--() {--x; --y; return *this;} // prefix -- zip_it operator++(int) {return zip_it(x++,y++);} // sufix ++ zip_it operator--(int) {return zip_it(x--,y--);} // sufix -- zip_it 
operator+(const difference_type n) {return zip_it(x+n,y+n);} zip_it operator-(const difference_type n) {return zip_it(x-n,y-n);} zip_it& operator+=(const difference_type n) {x+=n; y+=n; return *this;} zip_it& operator-=(const difference_type n) {x-=n; y-=n; return *this;} bool operator<(const zip_it& other) {return x<other.x;} bool operator>(const zip_it& other) {return x>other.x;} bool operator==(const zip_it& other) {return x==other.x;} bool operator!=(const zip_it& other) {return x!=other.x;} difference_type operator-(const zip_it& other) {return x-other.x;} }; template<class IterT1, class IterT2> zip_it<IterT1, IterT2> zip_iter(IterT1 x, IterT2 y) { return zip_it<IterT1,IterT2>(x,y); } // ---------------- Implementation of string split utility -------------- // split utility template<typename Out> void split(const std::string &s, char delim, Out result) { std::stringstream ss; ss.str(s); std::string item; while (std::getline(ss, item, delim)) { *(result++) = item; } } /* std::vector<std::string> split(const std::string &s, char delim) { std::vector<std::string> elems; split(s, delim, std::back_inserter(elems)); return elems; } std::vector<std::string>& split(const std::string &s, char delim, std::vector<std::string>& elems) { elems.clear(); split(s, delim, std::back_inserter(elems)); return elems; } */ #undef coo_t #undef gmat_t #undef eye_t #undef smat_t #undef dmat_t #undef gvec_t #undef sdvec_t #undef svec_t #undef dvec_t // C Interface extern "C" { enum { DENSE_ROWMAJOR = 1, DENSE_COLMAJOR = 2, SPARSE = 3, EYE = 4 }; typedef struct { uint64_t rows, cols, nnz; size_t* row_ptr; size_t* col_ptr; uint32_t* row_idx; uint32_t* col_idx; void* val; void* val_t; int32_t type; } PyMatrix; } // end of extern "C" template<typename val_type> class general_matrix_wrapper { public: typedef sparse_vector<val_type> svec_t; typedef dense_vector<val_type> dvec_t; typedef sparse_dense_vector<val_type> sdvec_t; typedef general_vector<val_type> gvec_t; typedef 
sparse_matrix<val_type> smat_t; typedef dense_matrix<val_type> dmat_t; typedef identity_matrix<val_type> eye_t; typedef general_matrix<val_type> gmat_t; typedef coo_matrix<val_type> coo_t; general_matrix_wrapper() {} general_matrix_wrapper(const PyMatrix* py_mat_ptr) { if(py_mat_ptr->type == DENSE_ROWMAJOR) { dense = dmat_t(py_mat_ptr->rows, py_mat_ptr->cols, ROWMAJOR, static_cast<val_type*>(py_mat_ptr->val)); gmat_ptr = &dense; } else if(py_mat_ptr->type == DENSE_COLMAJOR) { dense = dmat_t(py_mat_ptr->rows, py_mat_ptr->cols, COLMAJOR, static_cast<val_type*>(py_mat_ptr->val)); gmat_ptr = &dense; } else if(py_mat_ptr->type == SPARSE) { sparse = smat_t( py_mat_ptr->rows, py_mat_ptr->cols, py_mat_ptr->nnz, static_cast<val_type*>(py_mat_ptr->val), static_cast<val_type*>(py_mat_ptr->val_t), py_mat_ptr->col_ptr, py_mat_ptr->row_ptr, py_mat_ptr->row_idx, py_mat_ptr->col_idx); gmat_ptr = &sparse; } } size_t rows() const { return gmat_ptr->rows; } size_t cols() const { return gmat_ptr->cols; } gmat_t& get_gmat() { return *gmat_ptr; } const gmat_t& get_gmat() const { return *gmat_ptr; } bool is_sparse() const { return gmat_ptr->is_sparse(); } bool is_dense() const { return gmat_ptr->is_dense(); } bool is_identity() const { return gmat_ptr->is_identity(); } smat_t& get_sparse() { return gmat_ptr->get_sparse(); } const smat_t& get_sparse() const { return gmat_ptr->get_sparse(); } dmat_t& get_dense() { return gmat_ptr->get_dense(); } const dmat_t& get_dense() const { return gmat_ptr->get_dense(); } general_matrix_wrapper<val_type> transpose() const { general_matrix_wrapper gmw; gmw.dense = this->dense.transpose(); if(is_sparse()) { gmw.sparse = this->sparse.transpose(); gmw.gmat_ptr = &gmw.sparse; } else if(is_dense()) { gmw.dense = this->dense.transpose(); gmw.gmat_ptr = &gmw.dense; } else if(is_identity()) { gmw.eye = this->eye; gmw.gmat_ptr = &gmw.eye; } return gmw; } private: smat_t sparse; dmat_t dense; eye_t eye; gmat_t* gmat_ptr; }; #endif // RF_MATRIX_H
sum-mp-noshare.c
/* sum-mp-noshare: OpenMP kernel that sums a global array once per thread.
 * Each thread accumulates into a stack-local counter and writes the shared
 * results array exactly once after its loop, so the hot loop performs no
 * writes to cache lines shared between threads (hence "noshare"). */
#include <omp.h>

#define N 100000000
#define NTHREADS 8

/* static storage duration: zero-initialized per the C standard */
int values[N];

int main(int argc, char *argv[]) {
  int tid;
  /* one result slot per thread; each slot is written exactly once */
  static int sum[NTHREADS];
  omp_set_num_threads(NTHREADS);
  #pragma omp parallel private(tid)
  {
    /* thread-private accumulator keeps the loop free of shared writes */
    int local_sum = 0;
    tid = omp_get_thread_num();
    /* `>> tid` makes each thread compute a distinct per-element value */
    for (int i = 0; i < N; i++)
      local_sum += values[i] >> tid;
    sum[tid] = local_sum;
  }
  /* NOTE(review): sum[] is never read afterwards — presumably this is a
   * benchmark/teaching kernel; an optimizer may elide the loop entirely.
   * Confirm against how the build invokes it. */
}
labelManager.h
#pragma once #include<string> #include<unordered_map> #include<vector> #include<list> #include<fstream> #include<sstream> #include<iostream> // needed for posix io #include<cstdio> #include <sys/types.h> #include <sys/stat.h> #include<omp.h> #include"util.h" using std::string; using std::unordered_map; using std::pair; using std::vector; using std::list; using std::fstream; using std::ios; using std::stringstream; class LabelManager { public: LabelManager(const string& lblPath){ fastLoadLabels(lblPath); }; nodeIdx operator[](const string& lbl) { cache(lbl); return lbl2idxCache[lbl]; } string operator[](nodeIdx idx) const{ return idx2lbl[idx]; } bool contains(const string& lbl) { try{ cache(lbl); // either works or throws exception return true; } catch(std::runtime_error){ return false; } } bool contains(nodeIdx idx) const{ return idx < idx2lbl.size(); } private: unordered_map<string, nodeIdx> lbl2idxCache; vector<string> idx2lbl; void cache(const string& lbl){ if(lbl2idxCache.find(lbl) == lbl2idxCache.end()){ bool found = false; #pragma omp parallel for shared(found) for(size_t i = 0; i < idx2lbl.size(); ++i){ if(found) continue; if(idx2lbl[i] == lbl){ lbl2idxCache[lbl] = i; found = true; } } if(!found){ throw std::runtime_error("Failed to find " + lbl); } } } void cacheAll(){ for(size_t i = 0; i < idx2lbl.size(); ++i){ lbl2idxCache[idx2lbl[i]] = i; } } void fastLoadLabels(const string& filePath){ //get properties of abstract path struct stat st; stat(filePath.c_str(), &st); size_t totalFileSize = st.st_size; vector<size_t> fileStarts; vector<size_t> labelStarts; #pragma omp parallel { unsigned int tid = omp_get_thread_num(); unsigned int totalThreadNum = omp_get_num_threads(); size_t bytesPerThread = totalFileSize / totalThreadNum; #pragma omp single { fileStarts = vector<size_t>(totalThreadNum + 1, 0); fileStarts[totalThreadNum] = totalFileSize; labelStarts = vector<size_t>(totalThreadNum+1, 0); } #pragma omp barrier // each thread puts its start position fstream 
localFile(filePath, ios::in | ios::binary); localFile.seekg(tid * bytesPerThread); string localLine; if(tid > 0){ // jump to next newline getline(localFile, localLine); } fileStarts[tid] = localFile.tellg(); #pragma omp barrier vector<string> localLabels; localLabels.reserve(fileStarts[tid+1] - fileStarts[tid] + 10); // while we are still inside our own section size_t numLines = 0; while(localFile.tellg() < fileStarts[tid+1] && localFile){ getline(localFile, localLine); numLines += 1; localLabels.emplace_back(move(localLine)); } localFile.close(); labelStarts[tid+1] = localLabels.size(); #pragma omp barrier #pragma omp single { // prefix sum for(size_t i = 1; i < labelStarts.size(); ++i){ labelStarts[i] += labelStarts[i-1]; } // create idx2lbl = vector<string>(labelStarts[totalThreadNum]); } #pragma omp barrier size_t start = labelStarts[tid]; size_t end = labelStarts[tid+1]; for(size_t i = start; i < end; ++i){ idx2lbl[i] = move(localLabels[i-start]); } } } };
dlthread_pool.c
/**
 * @file dlthread_pool.c
 * @brief A custom thread pool.
 * @author Dominique LaSalle <lasalle@cs.umn.edu>
 * Copyright (c) 2014-2015, Dominique LaSalle
 * @version 1
 * @date 2015-01-17
 *
 * Design: one task queue per OpenMP thread, each protected by its own
 * pthread mutex. Threads push work onto their own queue and steal from
 * other queues round-robin when theirs is empty. Task ids are stable
 * virtual indices (queue offset + slot), surviving queue compaction.
 */

#ifndef DLTHREAD_POOL_C
#define DLTHREAD_POOL_C

#include "dlthread_pool.h"
#include "dlenv.h"
#include <pthread.h>
#include <omp.h>
/* NOTE(review): memcpy/strcmp are used below but <string.h> is not
 * included here — presumably pulled in via dlthread_pool.h; confirm. */

/******************************************************************************
* TYPES ***********************************************************************
******************************************************************************/

/* lifecycle of a task slot */
typedef enum task_state_t {
  TASK_STATE_WAITING,
  TASK_STATE_RUNNING,
  TASK_STATE_FINISHED
} task_state_t;

/* a unit of work: function pointer + its argument + current state */
typedef struct task_t {
  void (* func)(void * ptr);
  void * var;
  int state;
} task_t;

/* per-thread task queue: tasks[first..last) are waiting; `offset` is the
 * virtual index of slot 0 (grows when finished tasks are compacted away) */
typedef struct taskq_t {
  size_t maxtasks;
  size_t offset;
  size_t first;
  size_t last;
  task_t * tasks;
  pthread_mutex_t lock;
} taskq_t;

/* the global pool: one queue per thread plus a shared wait/wake channel */
typedef struct dlthread_pool_t {
  size_t nthreads;
  size_t nwaiting;
  int schedule;
  taskq_t * queues;
  pthread_mutex_t lock;
  pthread_cond_t cond;
} dlthread_pool_t;

/* bit masks extracted from the schedule enum: which end of the queue to
 * take from locally vs. when stealing remotely.
 * NOTE(review): REMOTE_FIRST is derived as LLRF & LFRF rather than a
 * pairing symmetric with LOCAL_FIRST — verify against the enum's bit
 * layout in dlthread_pool.h that this selects the intended bit. */
typedef enum task_bit_t {
  LOCAL_FIRST = DLTHREAD_POOL_TS_LFRF & DLTHREAD_POOL_TS_LFRL,
  REMOTE_FIRST = DLTHREAD_POOL_TS_LLRF & DLTHREAD_POOL_TS_LFRF
} task_bit_t;

/******************************************************************************
* STRINGS *********************************************************************
******************************************************************************/

#define DLTHREAD_POOL_STR_TS_LFRF "lfrf"
#define DLTHREAD_POOL_STR_TS_LFRL "lfrl"
#define DLTHREAD_POOL_STR_TS_LLRF "llrf"
#define DLTHREAD_POOL_STR_TS_LLRL "llrl"

/* environment variable consulted for the schedule */
#define DLTHREAD_POOL_STR_SCHEDULE "DLTHREAD_POOL_SCHEDULE"

/******************************************************************************
* CONSTANTS *******************************************************************
******************************************************************************/

/* schedule enum value -> its environment-string spelling */
static char const * trans_table_schedule[] = {
  [DLTHREAD_POOL_TS_LFRF] = DLTHREAD_POOL_STR_TS_LFRF,
  [DLTHREAD_POOL_TS_LFRL] = DLTHREAD_POOL_STR_TS_LFRL,
  [DLTHREAD_POOL_TS_LLRF] = DLTHREAD_POOL_STR_TS_LLRF,
  [DLTHREAD_POOL_TS_LLRL] = DLTHREAD_POOL_STR_TS_LLRL
};

/******************************************************************************
* GLOBAL VARIABLES ************************************************************
******************************************************************************/

/* set to 1 once init completes; spun on by non-initializing threads.
 * NOTE(review): `volatile` is not a C11 synchronization primitive; this
 * relies on the initializing thread's store becoming visible — verify the
 * intended memory-model guarantees on the target platforms. */
volatile int pool_alive = 0;

/* the single process-wide pool */
dlthread_pool_t pool;

/******************************************************************************
* CONSTANTS *******************************************************************
******************************************************************************/

static size_t const DEFAULT_MAXTASKS = 4096;
static size_t const NULL_TASK = (size_t)-1;

/******************************************************************************
* PRIVATE FUNCTIONS ***********************************************************
******************************************************************************/

/* Double the calling thread's queue capacity, compacting away the leading
 * run of FINISHED tasks and advancing `offset` so task ids stay valid.
 * Caller must hold the queue's lock. */
static void __expand_pool(void)
{
  size_t i, nactive;
  taskq_t * q;
  task_t * tasks;

  size_t const myid = omp_get_thread_num();

  q = pool.queues+myid;

  /* calculate the new offset: skip the leading finished prefix */
  for (i=0;i<q->last;++i) {
    if (q->tasks[i].state != TASK_STATE_FINISHED) {
      break;
    }
  }
  q->offset += i;
  q->first = 0;
  q->maxtasks *= 2;
  tasks = malloc(q->maxtasks*sizeof(task_t));
  nactive = q->last - i;
  memcpy(tasks,q->tasks+i,sizeof(task_t)*nactive);
  q->last = nactive;
  dl_free(q->tasks);
  q->tasks = tasks;
}

/* Pop a waiting task from the BACK (LIFO end) of q; returns its virtual
 * task id, or NULL_TASK if q is empty. Caller must hold q->lock. */
static size_t __get_task_back(
    taskq_t * const q)
{
  size_t tid;
  task_t * task;

  if (q->last > q->first) {
    /* perform a local task */
    --q->last;
    task = q->tasks+q->last;
    DL_ASSERT_EQUALS(task->state,TASK_STATE_WAITING,"%d");
    task->state = TASK_STATE_RUNNING;
    /* calculate the task id */
    tid = q->last + q->offset;
  } else {
    tid = NULL_TASK;
  }

  return tid;
}

/* Pop a waiting task from the FRONT (FIFO end) of q; returns its virtual
 * task id, or NULL_TASK if q is empty. Caller must hold q->lock. */
static size_t __get_task_front(
    taskq_t * const q)
{
  size_t tid;
  task_t * task;

  if (q->last > q->first) {
    task = q->tasks+q->first;
    /* calculate the task id */
    tid = q->first + q->offset;
    ++q->first;
    DL_ASSERT_EQUALS(task->state,TASK_STATE_WAITING,"%d");
    task->state = TASK_STATE_RUNNING;
  } else {
    tid = NULL_TASK;
  }

  return tid;
}

/* Execute one task: try the local queue first (end chosen by schedule),
 * then steal round-robin from other queues. Returns 1 if a task was run,
 * 0 if no work was found anywhere. */
static int __perform_task(void)
{
  size_t t, tid;
  taskq_t * q;
  task_t * task;
  task_t ltask;

  size_t const myid = omp_get_thread_num();
  size_t const nthreads = pool.nthreads;

  q = pool.queues+myid;

  /* check my pool for tasks */
  pthread_mutex_lock(&q->lock);
  if (pool.schedule & LOCAL_FIRST) {
    tid = __get_task_front(q);
  } else {
    tid = __get_task_back(q);
  }
  if (tid != NULL_TASK) {
    /* for debugging */
    t = myid;
  } else {
    /* perform a remote task */
    /* unlock my queue */
    pthread_mutex_unlock(&q->lock);

    /* this currently searches for a task in rr order, but it would be best
     * to do it in hypercube order */
    for (t=(myid+1)%nthreads;t!=myid;t=(t+1)%nthreads) {
      q = pool.queues+t;
      pthread_mutex_lock(&q->lock);
      if (pool.schedule & REMOTE_FIRST) {
        tid = __get_task_front(q);
      } else {
        tid = __get_task_back(q);
      }
      if (tid != NULL_TASK) {
        /* the lock will be released below the loop */
        break;
      }
      /* release the current lock */
      pthread_mutex_unlock(&q->lock);
    }
    if (t == myid) {
      /* there is no work to be done */
      return 0;
    }
  }

  /* the location of the task may change, so make a copy */
  ltask = q->tasks[tid-q->offset];

  pthread_mutex_unlock(&q->lock);

  /* the location of the task may change */

  /* perform task — outside the lock so other threads can keep working */
  ltask.func(ltask.var);

  pthread_mutex_lock(&q->lock);

  /* change the state of the task (re-indexed: offset may have changed) */
  task = q->tasks+(tid-q->offset);
  task->state = TASK_STATE_FINISHED;

  pthread_mutex_unlock(&q->lock);

  return 1;
}

/******************************************************************************
* PUBLIC FUNCTIONS ************************************************************
******************************************************************************/

/* Initialize the global pool for `nthreads` threads. Must be called from
 * inside an OpenMP parallel region: one thread does the setup, the rest
 * spin until pool_alive is raised. Schedule is read from the environment. */
void dlthread_pool_init(
    size_t const nthreads)
{
  int i;
  size_t t;
  taskq_t * q;
  const char * str;

  #pragma omp single
  {
    pool.nthreads = nthreads;
    pool.nwaiting = 0;
    pthread_mutex_init(&pool.lock,NULL);
    pthread_cond_init(&pool.cond,NULL);
    pool.queues = malloc(nthreads*sizeof(taskq_t));

    /* figure out the schedule to use */
    str = dl_get_env_string(DLTHREAD_POOL_STR_SCHEDULE, DLTHREAD_POOL_STR_TS_LLRF);
    for (i=0;i<__DLTHREAD_POOL_TS_TERM;++i) {
      if (strcmp(trans_table_schedule[i],str) == 0) {
        break;
      }
    }
    if (i == __DLTHREAD_POOL_TS_TERM) {
      wprintf("Invalid schedule '%s', using default\n",str);
      i = DLTHREAD_POOL_TS_LLRF;
    }
    pool.schedule = i;

    for (t=0;t<nthreads;++t) {
      q = pool.queues+t;
      q->first = 0;
      q->last = 0;
      q->offset = 0;
      q->maxtasks = DEFAULT_MAXTASKS;
      q->tasks = malloc(q->maxtasks*sizeof(task_t));
      pthread_mutex_init(&q->lock,NULL);
    }

    pool_alive = 1;
  }
  /* busy-wait for the initializing thread (omp single has an implicit
   * barrier, so in practice this loop should rarely spin) */
  while (pool_alive == 0) {
    /* do nothing */
  }
}

/* Drain all queues and tear the pool down. Every pool thread must call
 * this: threads keep executing tasks until all of them are simultaneously
 * idle (nwaiting == nthreads), then the master frees everything. */
void dlthread_pool_finalize(void)
{
  int work;
  size_t t;
  taskq_t * q;

  size_t const nthreads = pool.nthreads;

  do {
    work = __perform_task();
    pthread_mutex_lock(&pool.lock);
    if (!work) {
      ++pool.nwaiting;
      if (pool.nwaiting < pool.nthreads) {
        /* sleep until new work arrives or everyone is idle */
        pthread_cond_wait(&pool.cond,&pool.lock);
      } else {
        /* we are the last idle thread: wake everyone to exit */
        pthread_cond_broadcast(&pool.cond);
      }
      if (pool.nwaiting < pool.nthreads) {
        --pool.nwaiting;
      }
    }
    pthread_mutex_unlock(&pool.lock);
  } while(pool.nwaiting < pool.nthreads);

  #pragma omp barrier
  #pragma omp master
  {
    for (t=0;t<nthreads;++t) {
      q = pool.queues+t;
      dl_free(q->tasks);
      pthread_mutex_destroy(&q->lock);
    }
    dl_free(pool.queues);
    pthread_mutex_destroy(&pool.lock);
    pthread_cond_destroy(&pool.cond);
    pool_alive = 0;
  }
  #pragma omp barrier
}

/* Override the task-selection schedule (one of DLTHREAD_POOL_TS_*).
 * NOTE(review): unsynchronized write to shared state — presumably only
 * called before tasks are added; confirm callers. */
void dlthread_pool_set_schedule(
    int schedule)
{
  pool.schedule = schedule;
}

/* Enqueue func(var) on the calling thread's queue and wake one sleeper.
 * Returns the task's stable id for use with dlthread_pool_wait(). */
size_t dlthread_pool_add(
    void (* func)(void*ptr),
    void * var)
{
  size_t id;
  task_t task;
  taskq_t * q;

  size_t const myid = omp_get_thread_num();

  DL_ASSERT(myid < pool.nthreads,"Invalid thread id of %zu/%zu\n",myid, pool.nthreads);

  q = pool.queues+myid;

  pthread_mutex_lock(&q->lock);

  if (q->last == q->maxtasks) {
    __expand_pool();
  }

  id = q->last + q->offset;

  task.func = func;
  task.var = var;
  task.state = TASK_STATE_WAITING;

  q->tasks[q->last++] = task;

  pthread_mutex_unlock(&q->lock);

  pthread_cond_signal(&pool.cond);

  return id;
}

/* Block until task `tid` (from this thread's own queue) has finished,
 * executing other tasks while waiting to avoid deadlock.
 * NOTE(review): q->offset is read without q->lock in the first check —
 * presumably benign because only this thread compacts its own queue;
 * verify __expand_pool is never run on this queue by a stealer. */
void dlthread_pool_wait(
    size_t tid)
{
  int finished;
  taskq_t * q;

  size_t const myid = omp_get_thread_num();

  finished = 0;

  q = pool.queues+myid;

  do {
    if (tid < q->offset) {
      /* the task has been pushed out the end as finished */
      break;
    }
    pthread_mutex_lock(&q->lock);
    if (q->tasks[tid-q->offset].state == TASK_STATE_FINISHED) {
      finished = 1;
    }
    pthread_mutex_unlock(&q->lock);
    if (finished) {
      break;
    }
    /* tackle a task while we wait for ours */
    __perform_task();
  } while(1);
}

#endif
GB_binop__pow_uint8.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__pow_uint8) // A.*B function (eWiseMult): GB (_AemultB_08__pow_uint8) // A.*B function (eWiseMult): GB (_AemultB_02__pow_uint8) // A.*B function (eWiseMult): GB (_AemultB_04__pow_uint8) // A.*B function (eWiseMult): GB (_AemultB_bitmap__pow_uint8) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((none)) // C+=B function (dense accum): GB (_Cdense_accumB__pow_uint8) // C+=b function (dense accum): GB (_Cdense_accumb__pow_uint8) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__pow_uint8) // C=scalar+B GB (_bind1st__pow_uint8) // C=scalar+B' GB (_bind1st_tran__pow_uint8) // C=A+scalar GB (_bind2nd__pow_uint8) // C=A'+scalar GB (_bind2nd_tran__pow_uint8) // C type: uint8_t // A type: uint8_t // B,b type: uint8_t // BinaryOp: cij = GB_pow_uint8 (aij, bij) #define GB_ATYPE \ uint8_t #define GB_BTYPE \ uint8_t #define GB_CTYPE \ uint8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define 
GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint8_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint8_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_pow_uint8 (x, y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 1 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_POW || GxB_NO_UINT8 || GxB_NO_POW_UINT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__pow_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__pow_uint8) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__pow_uint8) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint8_t uint8_t bwork = (*((uint8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t 
*A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *restrict Cx = (uint8_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *restrict Cx = (uint8_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__pow_uint8) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__pow_uint8) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, 
const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__pow_uint8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__pow_uint8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__pow_uint8) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__pow_uint8) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t x = (*((uint8_t *) x_input)) ; uint8_t *Bx = (uint8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) 
for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint8_t bij = GBX (Bx, p, false) ; Cx [p] = GB_pow_uint8 (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__pow_uint8) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t *Ax = (uint8_t *) Ax_input ; uint8_t y = (*((uint8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint8_t aij = GBX (Ax, p, false) ; Cx [p] = GB_pow_uint8 (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_pow_uint8 (x, aij) ; \ } GrB_Info GB (_bind1st_tran__pow_uint8) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ uint8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t x = (*((const uint8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_pow_uint8 (aij, y) ; \ } GrB_Info GB (_bind2nd_tran__pow_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t y = (*((const uint8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
parallel_for_sum.c
/* Counts n_iter increments in parallel and prints the total. */
#include <stdio.h>

int main(void){
  int total = 0;
  int n_iter = 1000000;
  /* FIX: `total++` from every thread was an unsynchronized read-modify-write
   * (a data race) that typically prints far less than n_iter. The reduction
   * clause gives each thread a private copy of `total` and sums the copies
   * after the loop, so the result is exactly n_iter. */
  #pragma omp parallel for reduction(+:total)
  for(int i=0; i<n_iter; i++){
    total++;
  }
  /* FIX: terminate the output line with a newline. */
  printf("Value: %d\n", total);
  return 0;
}
3d25pt.c
/* * Order-2, 3D 25 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) #ifndef min #define min(x,y) ((x) < (y)? (x) : (y)) #endif /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); double ***roc2 = (double ***) malloc(sizeof(double**)); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); roc2 = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); roc2[i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); roc2[i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 16; tile_size[1] = 16; tile_size[2] = 24; tile_size[3] = 512; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); roc2[i][j][k] = 2.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif const double coef0 = -0.28472; const double coef1 = 0.16000; const double coef2 = -0.02000; const double coef3 = 0.00254; const double coef4 = -0.00018; for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial 
execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt; t++) { for (i = 4; i < Nz-4; i++) { for (j = 4; j < Ny-4; j++) { for (k = 4; k < Nx-4; k++) { A[(t+1)%2][i][j][k] = 2.0*A[t%2][i][j][k] - A[(t+1)%2][i][j][k] + roc2[i][j][k]*( coef0* A[t%2][i ][j ][k ] + coef1*(A[t%2][i-1][j ][k ] + A[t%2][i+1][j ][k ] + A[t%2][i ][j-1][k ] + A[t%2][i ][j+1][k ] + A[t%2][i ][j ][k-1] + A[t%2][i ][j ][k+1]) + coef2*(A[t%2][i-2][j ][k ] + A[t%2][i+2][j ][k ] + A[t%2][i ][j-2][k ] + A[t%2][i ][j+2][k ] + A[t%2][i ][j ][k-2] + A[t%2][i ][j ][k+2]) + coef3*(A[t%2][i-3][j ][k ] + A[t%2][i+3][j ][k ] + A[t%2][i ][j-3][k ] + A[t%2][i ][j+3][k ] + A[t%2][i ][j ][k-3] + A[t%2][i ][j ][k+3]) + coef4*(A[t%2][i-4][j ][k ] + A[t%2][i+4][j ][k ] + A[t%2][i ][j-4][k ] + A[t%2][i ][j+4][k ] + A[t%2][i ][j ][k-4] + A[t%2][i ][j ][k+4]) ); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = MIN(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); free(roc2[i][j]); } free(A[0][i]); free(A[1][i]); free(roc2[i]); } free(A[0]); free(A[1]); free(roc2); return 0; }
hoNDArray_utils.h
#pragma once

#include <boost/make_shared.hpp>
#include <boost/range/combine.hpp>
#include <numeric>
#include "hoNDArray.h"
#include "hoNDArray_iterators.h"
#include "vector_td_utilities.h"

#include <boost/version.hpp>
#if (BOOST_VERSION < 107200)
#include <boost/math/interpolators/cubic_b_spline.hpp>
// Compatibility shim: Boost < 1.72 names the interpolator cubic_b_spline;
// expose it under the newer cardinal_cubic_b_spline name used below.
namespace boost::math::interpolators { auto cardinal_cubic_b_spline = [](auto ... args){return boost::math::cubic_b_spline(args...);}; }
#else
#include <boost/math/interpolators/cardinal_cubic_b_spline.hpp>
#endif
#include <boost/math/special_functions/trunc.hpp>
#include <boost/range/adaptor/strided.hpp>
#include <range/v3/numeric.hpp>
#include <range/v3/view.hpp>
#include <range/v3/action.hpp>

#ifdef USE_OMP
#include <omp.h>
#endif

// Undo Windows' min/max macros, which would break std::min/std::max below.
#ifdef max
#undef max
#endif
#ifdef min
#undef min
#endif

namespace Gadgetron {

  // Walks a multi-dimensional index space in a caller-specified dimension
  // order, maintaining the corresponding linear index (using per-dimension
  // block sizes computed from `dimensions`). Used by permute() for the
  // element-by-element copy path.
  class ArrayIterator {
  public:

    ArrayIterator(std::vector<size_t> *dimensions, std::vector<size_t> *order)
    {
      block_sizes_.push_back(1);
      for (size_t i = 0; i < order->size(); i++) {
        dimensions_.push_back((*dimensions)[i]);
        order_.push_back((*order)[i]);
        current_.push_back(0);
        if (i > 0) {
          block_sizes_.push_back(block_sizes_[i-1]*dimensions_[i-1]);
        }
      }
      current_idx_ = 0;
    }

    // Step to the next position (odometer-style carry over the ordered
    // dimensions) and return the new linear index.
    inline size_t advance()
    {
      size_t order_index = 0;
      current_[order_[order_index]]++;
      while (current_[order_[order_index]] >= dimensions_[order_[order_index]]) {
        current_[order_[order_index]] = 0;
        order_index = (order_index+1)%dimensions_.size();
        current_[order_[order_index]]++;
      }
      current_idx_ = 0;
      for (size_t i = 0; i < dimensions_.size(); i++) {
        current_idx_ += current_[i]*block_sizes_[i];
      }
      return current_idx_;
    }

    inline size_t get_current_idx() const {
      return current_idx_;
    }

    std::vector<size_t> get_current_sub() {
      return current_;
    }

  protected:
    std::vector<size_t> dimensions_;
    std::vector<size_t> order_;
    std::vector<size_t> current_;
    std::vector<size_t> block_sizes_;
    size_t current_idx_;
  };

  // Cyclically rotate the dimension order by `shift` and return the permuted
  // array. NOTE(review): a negative `shift` makes (i+shift) % n negative for
  // small i (int modulo) — presumably callers pass non-negative shifts; verify.
  template<class T> hoNDArray<T> shift_dim( const hoNDArray<T>& in, int shift )
  {
    std::vector<size_t> order;
    for (size_t i = 0; i < in.get_number_of_dimensions(); i++) {
      order.push_back(static_cast<size_t>((i+shift)%in.get_number_of_dimensions()));
    }
    return permute(in,order);
  }

  // In-place-output variant of shift_dim(); `out` must already be shaped
  // to match the rotated dimension order.
  template<class T> void shift_dim(const hoNDArray<T>& in, hoNDArray<T>& out, int shift )
  {
    std::vector<size_t> order;
    for (size_t i = 0; i < in.get_number_of_dimensions(); i++) {
      order.push_back(static_cast<size_t>((i+shift)%in.get_number_of_dimensions()));
    }
    permute(in,out,order);
  }

  // Returning variant of permute(): allocates the output with the reordered
  // dimensions and delegates to the in/out overload below.
  template<class T> hoNDArray<T>
  permute( const hoNDArray<T>& in, const std::vector<size_t>& dim_order)
  {
    std::vector<size_t> dims;
    for (size_t i = 0; i < dim_order.size(); i++)
      dims.push_back(in.get_dimensions()->at(dim_order[i]));
    hoNDArray<T> out(dims);
    permute( in, out, dim_order);
    return out;
  }

  // Reorder the dimensions of `in` into `out` according to `dim_order`
  // (a possibly-partial permutation; unmentioned dimensions keep their
  // relative order at the end). Validates the order array, then copies
  // either element-by-element or, when a leading run of dimensions is
  // unchanged, in contiguous memcpy chunks of that run's size.
  template<class T> void permute(const hoNDArray<T>& in, hoNDArray<T>& out, const std::vector<size_t>& dim_order)
  {
    // Check ordering array
    if (dim_order.size() > in.get_number_of_dimensions()) {
      throw std::runtime_error("hoNDArray::permute - Invalid length of dimension ordering array");;
    }

    std::vector<size_t> dim_count(in.get_number_of_dimensions(),0);
    for (size_t i = 0; i < dim_order.size(); i++) {
      if (dim_order[i] >= in.get_number_of_dimensions()) {
        throw std::runtime_error("hoNDArray::permute - Invalid dimension order array");;
      }
      dim_count[dim_order[i]]++;
    }

    // Create an internal array to store the dimensions
    std::vector<size_t> dim_order_int;

    // Check that there are no duplicate dimensions
    for (size_t i = 0; i < dim_order.size(); i++) {
      if (dim_count[dim_order[i]] != 1) {
        throw std::runtime_error("hoNDArray::permute - Invalid dimension order array (duplicates)");;
      }
      dim_order_int.push_back(dim_order[i]);
    }

    for (size_t i = 0; i < dim_order_int.size(); i++) {
      if ((*in.get_dimensions())[dim_order_int[i]] != out.get_size(i)) {
        throw std::runtime_error("permute(): dimensions of output array do not match the input array");;
      }
    }

    // Pad dimension order array with dimension not mentioned in order array
    if (dim_order_int.size() < in.get_number_of_dimensions()) {
      for (size_t i = 0; i < dim_count.size(); i++) {
        if (dim_count[i] == 0) {
          dim_order_int.push_back(i);
        }
      }
    }

    T* o = out.get_data_ptr();

    // if memcpy can be used during permute: find the longest leading run of
    // dimensions that the permutation leaves in place; `stride` elements can
    // then be copied per memcpy.
    size_t stride = 1;
    size_t num_dim_memcpy = 0;
    for (size_t i = 0; i < dim_order_int.size(); i++) {
      if (dim_order_int[i]==i){
        stride *= in.get_size(i);
        num_dim_memcpy = i;
      }
      else{
        break;
      }
    }

    if (stride == 1) {
      // point by point assignment is needed
      ArrayIterator it(in.get_dimensions().get(), &dim_order_int);
      for (size_t i = 0; i < in.get_number_of_elements(); i++) {
        o[i] = in.get_data_ptr()[it.get_current_idx()];
        it.advance();
      }
    }
    else {
      // memcpy can be used
      size_t nDim = in.get_number_of_dimensions();
      size_t num_memcpy = in.get_number_of_elements() / stride;

      if (num_dim_memcpy == nDim - 1){
        // identity permutation: a single bulk copy suffices
        memcpy(out.begin(), in.begin(), in.get_number_of_bytes());
        return;
      }

      // for the array index calculation
      std::vector<size_t> dim_permute(nDim-num_dim_memcpy-1);
      for (size_t i = num_dim_memcpy+1; i < dim_order_int.size(); i++) {
        dim_permute[i - num_dim_memcpy - 1] = in.get_size(i);
      }

      size_t n;
      // non-owning view over `in` shaped by the non-copied dimensions,
      // used only for index arithmetic
      const hoNDArray<T> permuteArray(dim_permute, const_cast<T*>(in.get_data_ptr()), false);

      // starting index for in and out array for every permute memcpy operation
      std::vector<size_t> ind_permute_in(dim_permute.size(), 0), ind_in(nDim, 0), ind_out(nDim, 0);

      for (n = 0; n < num_memcpy; n++) {
        permuteArray.calculate_index(n, ind_permute_in);
        memcpy(&ind_in[0] + num_dim_memcpy + 1, &ind_permute_in[0], sizeof(size_t)*ind_permute_in.size());

        // permute the indexes
        for (size_t i = 0; i < nDim; i++) {
          ind_out[i] = ind_in[dim_order_int[i]];
        }

        size_t offset_in = in.calculate_offset(ind_in);
        size_t offset_out = out.calculate_offset(ind_out);

        memcpy(o + offset_out, in.begin() + offset_in, sizeof(T)*stride);
      }
    }
  }

  // Expand array to new dimension: append a trailing dimension of size
  // `new_dim_size` and replicate the input into every slice along it.
  template<class T> hoNDArray<T>
  expand(const hoNDArray<T>& in, size_t new_dim_size )
  {
    const size_t number_of_elements_in = in.get_number_of_elements();

    std::vector<size_t> dims = in.dimensions();
    dims.push_back(new_dim_size);

    auto out = hoNDArray<T>(dims);

#ifdef USE_OMP
#pragma omp parallel for
#endif
    // NOTE(review): signed loop index compared against an unsigned product;
    // overflows for very large arrays — presumably sizes stay well below
    // LLONG_MAX in practice.
    for( long long int idx=0; idx<number_of_elements_in*new_dim_size; idx++ ){
      out[idx] = in[idx%number_of_elements_in];
    }
    return out;
  }

  namespace {
    // Reduce `in` along dimension `dim` with the binary functor `acc`
    // (shared implementation behind sum/max/min). The output has the same
    // dimensions as the input minus `dim`.
    template<class T, class ACCUMULATOR> hoNDArray<T> accumulate(const hoNDArray<T>& in, size_t dim, ACCUMULATOR acc )
    {
      if( !(in.get_number_of_dimensions()>1) ){
        throw std::runtime_error("sum(): underdimensioned.");;
      }

      if( dim > in.get_number_of_dimensions()-1 ){
        throw std::runtime_error( "sum(): dimension out of range.");;
      }

      size_t number_of_batches = in.get_size(dim);
      // NOTE(review): number_of_elements is computed but never used below.
      size_t number_of_elements = in.get_number_of_elements()/number_of_batches;
      std::vector<size_t> dims;
      for (auto i = 0; i < in.get_number_of_dimensions(); i++){
        if (i != dim) dims.push_back(in.get_size(i));
      }

      auto out = hoNDArray<T>(dims);
      auto orig_dims = *in.get_dimensions();
      // stride between consecutive entries along `dim` = product of the
      // sizes of all dimensions before it
      auto stride = std::accumulate(orig_dims.begin(),orig_dims.begin()+dim,1,std::multiplies<size_t>());

      size_t inner_elements = stride;
      size_t outer_elements = out.get_number_of_elements()/inner_elements;

      //#ifdef USE_OMP
      //#pragma omp parallel for schedule(dynamic,1) collapse(2)
      //#endif
      for (size_t outer_idx = 0; outer_idx < outer_elements; outer_idx++) {
        for (size_t idx = 0; idx < inner_elements; idx++) {
          size_t offset = outer_idx*inner_elements;
          size_t old_offset = offset*number_of_batches;
          T val = in.at(idx+old_offset);
          for (size_t j = 1; j < number_of_batches; j++) {
            size_t in_idx = j * stride + idx+ old_offset;
            val = acc(val,in.at(in_idx));
          }
          out.at(idx + offset) = val;
        }
      }
      return out;
    }
  }

  // Sum over dimension
  template<class T> hoNDArray<T> sum(const hoNDArray<T>& in, size_t dim ) {
    return accumulate(in, dim, std::plus<T>());
  }

  // Pointer-based legacy overload of sum(); returns a shared_ptr result.
  template<class T> boost::shared_ptr<hoNDArray<T>> sum(const hoNDArray<T>* in, size_t dim ) {
    return boost::make_shared<hoNDArray<T>>(accumulate(*in, dim, std::plus<T>()));
  }

  // Element-wise maximum over dimension `dim`.
  template<class T> hoNDArray<T> max(const hoNDArray<T>& in, size_t dim ) {
    return accumulate(in, dim, [](auto v1, auto v2){ return std::max(v1,v2);});
  }

  // Element-wise minimum over dimension `dim`.
  template<class T> hoNDArray<T> min(const hoNDArray<T>& in, size_t dim ) {
    return accumulate(in, dim, [](auto v1, auto v2){ return std::min(v1,v2);});
  }

  /**
  * @param[in] crop_offset starting position to crop
  * @param[in] crop_size Size of cropped array
  * @param[in] in input array
  * @param[out] out Output array after cropping (re-created if its dimensions do not match)
  */
  template<class T, unsigned int D> void
  crop(const vector_td<size_t, D>& crop_offset, const vector_td<size_t, D>& crop_size, const hoNDArray<T>& in, hoNDArray<T>& out)
  {
    if (in.get_number_of_dimensions() < D){
      std::stringstream ss;
      ss << "crop: number of image dimensions should be at least " << D;
      throw std::runtime_error(ss.str());;
    }

    std::vector<size_t> dims = to_std_vector(crop_size);
    // dimensions beyond D are carried through unchanged (batch dimensions)
    for (unsigned int d = D; d<in.get_number_of_dimensions(); d++){
      dims.push_back(in.get_size(d));
    }

    if (!out.dimensions_equal(&dims)){
      out.create(dims);
    }

    typename uint64d<D>::Type matrix_size_in = from_std_vector<size_t, D>(*in.get_dimensions());
    typename uint64d<D>::Type matrix_size_out = from_std_vector<size_t, D>(*out.get_dimensions());

    if (weak_greater(crop_offset + matrix_size_out, matrix_size_in)){
      throw std::runtime_error("crop: cropping size mismatch");;
    }

    size_t len = out.get_size(0);          // one contiguous row per memcpy
    size_t num = out.get_number_of_elements() / len;

    long long k;

    const T *in_ptr = in.get_data_ptr();
    T *out_ptr = out.get_data_ptr();

#pragma omp parallel default(none) private(k) shared(in_ptr, out_ptr, num, len, in, out, crop_offset)
    {
      std::vector<size_t> ind;
#pragma omp for
      for (k = 0; k < (long long)num; k++){
        ind = out.calculate_index(k*len);

        for (unsigned int d = 0; d < D; d++){
          ind[d] += crop_offset[d];
        }

        const T* in_ptr_curr = in_ptr + in.calculate_offset(ind);
        memcpy(out_ptr + k*len, in_ptr_curr, sizeof(T)*len);
      }
    }
  }

  /**
  * @param[in] crop_size
  * @param[in] in input array
  * Crop the input array around its center N/2; that is, the center pixel of in array is the center pixel of out array
  */
  template<class T, unsigned int D> hoNDArray<T> crop(const vector_td<size_t, D>& crop_size, const hoNDArray<T>& in)
  {
    // compute crop offset, preserving the center
    hoNDArray<T> out;
    auto crop_offset = (from_std_vector<size_t,D>(*in.get_dimensions())-crop_size)/size_t(2);
    crop(crop_offset, crop_size, in, out);
    return out;
  }

  // 1-D centered crop convenience overload.
  template<class T> void crop(size_t x, const hoNDArray<T>& in, hoNDArray<T>& out)
  {
    vector_td<size_t, 1> crop_size(x);
    auto crop_offset = (from_std_vector<size_t,1>(*in.get_dimensions())-crop_size)/size_t(2);
    crop(crop_offset, crop_size, in, out);
  }

  // 2-D centered crop convenience overload.
  template<class T> void crop(size_t x, size_t y, const hoNDArray<T>& in, hoNDArray<T>& out)
  {
    vector_td<size_t, 2> crop_size(x, y);
    auto crop_offset = (from_std_vector<size_t,2>(*in.get_dimensions())-crop_size)/size_t(2);
    crop(crop_offset,crop_size, in, out);
  }

  // 3-D centered crop convenience overload.
  template<class T> void crop(size_t x, size_t y, size_t z, const hoNDArray<T>& in, hoNDArray<T>& out)
  {
    vector_td<size_t, 3> crop_size(x, y, z);
    auto crop_offset = (from_std_vector<size_t,3>(*in.get_dimensions())-crop_size)/size_t(2);
    crop(crop_offset, crop_size, in, out);
  }

  // Returning variant of the offset+size crop.
  template<class T, unsigned int D> hoNDArray<T>
  crop( const vector_td<size_t, D>& crop_offset, const vector_td<size_t, D>& crop_size, const hoNDArray<T>& in )
  {
    auto out = hoNDArray<T>();
    crop(crop_offset, crop_size, in, out);
    return out;
  }

  /**
  * @param[in] offset_src starting position in src array
  * @param[in] size Size of subarray to be replaced
  * @param[in] src Src array to read in replaced content
  * @param[in] offset_dst starting position in dst array
  * @param[out] dst array to be replaced; other part outside the offset+size region will be unchanged
  */
  template<class T, unsigned int D> void
  fill(const vector_td<size_t, D>& offset_src, const vector_td<size_t, D>& size, hoNDArray<T> *src, const vector_td<size_t, D>& offset_dst, hoNDArray<T> *dst)
  {
    if (src == 0x0) {
      throw std::runtime_error("replace: 0x0 src array provided");;
    }
    if (src->get_number_of_dimensions() < D) {
      std::stringstream ss;
      ss << "fill: number of src image dimensions should be at least " << D;
      throw std::runtime_error(ss.str());;
    }
    if (dst == 0x0) {
      throw std::runtime_error("replace: 0x0 dst array provided");;
    }
    if (dst->get_number_of_dimensions() < D) {
      std::stringstream ss;
      ss << "fill: number of dst image dimensions should be at least " << D;
      throw std::runtime_error(ss.str());;
    }
    if (src->get_number_of_dimensions() != dst->get_number_of_dimensions()) {
      std::stringstream ss;
      ss << "fill: src and dst array have different number of dimensions " << D;
      throw std::runtime_error(ss.str());;
    }

    std::vector<size_t> src_dim;
    src->get_dimensions(src_dim);

    std::vector<size_t> dst_dim;
    dst->get_dimensions(dst_dim);

    size_t d;
    // bounds check: requested region must fit inside both arrays
    // NOTE(review): the "-1" makes the check off-by-one lenient (allows
    // offset+size to exceed the dimension by one) — confirm intended.
    for (d = 0; d < D; d++) {
      if (src_dim[d] < offset_src[d]+size[d]-1) {
        throw std::runtime_error("fill: src array is too small for provided offset and size");;
      }
      if (dst_dim[d] < offset_dst[d] + size[d] - 1) {
        throw std::runtime_error("fill: dst array is too small for provided offset and size");;
      }
    }

    size_t len = size[0];      // contiguous run copied per memcpy
    size_t num = 1;
    for (d = 1; d < D; d++) num *= size[d];

    long long k;

    T *src_ptr = src->get_data_ptr();
    T *dst_ptr = dst->get_data_ptr();

    std::vector<size_t> size_dim = to_std_vector(size);
    // non-owning view shaped like the copied region, used for index math only
    hoNDArray<T> array_size;
    array_size.create(size_dim, src->begin());

    {
      std::vector<size_t> ind_src = src->calculate_index(0);
      std::vector<size_t> ind_dst = dst->calculate_index(0);

      std::vector<size_t> ind_size(D, 0);

      for (k = 0; k < (long long)num; k++) {
        ind_size = array_size.calculate_index(k*len);

        for (unsigned int d = 0; d < D; d++) {
          ind_src[d] = offset_src[d] + ind_size[d];
          ind_dst[d] = offset_dst[d] + ind_size[d];
        }

        T* src_ptr_curr = src_ptr + src->calculate_offset(ind_src);
        T* dst_ptr_curr = dst_ptr + dst->calculate_offset(ind_dst);

        memcpy(dst_ptr_curr, src_ptr_curr, sizeof(T)*len);
      }
    }
  }

  // Convenience overload: copies the whole of `src` (size taken from its
  // dimensions) starting at offset_src, into dst at offset_dst.
  template<class T, unsigned int D> void
  fill(const vector_td<size_t, D>& offset_src, hoNDArray<T>& src, const vector_td<size_t, D>& offset_dst, hoNDArray<T>& dst)
  {
    std::vector<size_t> dim;
    src.get_dimensions(dim);

    vector_td<size_t, D> size;

    if (dim.size() < D) {
      std::stringstream ss;
      ss << "fill: number of src image dimensions should be at least " << D;
      throw std::runtime_error(ss.str());;
    }

    size_t d;
    for (d = 0; d < D; d++) size[d] = dim[d];

    Gadgetron::fill(offset_src, size, &src, offset_dst, &dst);
  }

  // Convenience overload: copies all of `src` (offset 0) into dst at offset_dst.
  template<class T, unsigned int D> void
  fill(hoNDArray<T>& src, const vector_td<size_t, D>& offset_dst, hoNDArray<T>& dst)
  {
    std::vector<size_t> dim;
    src.get_dimensions(dim);

    vector_td<size_t, D> offset_src, size;

    if (dim.size() < D) {
      std::stringstream ss;
      ss << "fill: number of src image dimensions should be at least " << D;
      throw std::runtime_error(ss.str());;
    }

    size_t d;
    for (d = 0; d < D; d++) {
      offset_src[d] = 0;
      size[d] = dim[d];
    }

    Gadgetron::fill(offset_src, size, &src, offset_dst, &dst);
  }

  /**
  * @param[in] size Size of the output array
  * @param[in] in Input array
  * @param[out] out Output array after padding
  * @param[in] preset_out_with_val if true, out array will be filled with val before padding
  * @param[in] val Value to use for padding
  * The padding operations keep the center of array unchanged, e.g. the center is always N/2
  */
  template<class T, unsigned int D> void
  pad(const typename uint64d<D>::Type& size, const hoNDArray<T>& in, hoNDArray<T>& out, bool preset_out_with_val = true, T val = T(0))
  {
    if (in.get_number_of_dimensions() < D){
      std::stringstream ss;
      ss << "pad: number of image dimensions should be at least " << D;
      throw std::runtime_error(ss.str());;
    }

    unsigned int d;

    std::vector<size_t> dims = to_std_vector(size);
    // dimensions beyond D pass through unchanged (batch dimensions)
    for (d = D; d<in.get_number_of_dimensions(); d++){
      dims.push_back(in.get_size(d));
    }

    if (!out.dimensions_equal(&dims)){
      out.create(dims);
    }

    // nothing to pad: same size means a plain copy
    if (in.dimensions_equal(&dims)){
      memcpy(out.begin(), in.begin(), in.get_number_of_bytes());
      return;
    }

    const T *in_ptr = in.get_data_ptr();
    T *out_ptr = out.get_data_ptr();

    if (preset_out_with_val){
      if (val == T(0)){
        memset(out_ptr, 0, out.get_number_of_bytes());
      }
      else{
        size_t N = out.get_number_of_elements();

        long long n;
#pragma omp parallel for default(none) private(n) shared(N, out_ptr, val)
        for (n = 0; n<(long long)N; n++) {
          out_ptr[n] = val;
        }
      }
    }

    typename uint64d<D>::Type matrix_size_in = from_std_vector<size_t, D>(*in.get_dimensions());
    typename uint64d<D>::Type matrix_size_out = from_std_vector<size_t, D>(*out.get_dimensions());

    if (weak_greater(matrix_size_in, matrix_size_out)){
      throw std::runtime_error("pad: size mismatch, cannot expand");
    }

    // center-preserving offset of the input inside the output
    typename uint64d<D>::Type offset(D);
    for (d = 0; d<D; d++){
      offset[d] = matrix_size_out[d]/2 - matrix_size_in[d]/2;
    }

    size_t len = in.get_size(0);     // one contiguous input row per memcpy
    size_t num = in.get_number_of_elements() / len;

    long long k;

#pragma omp parallel default(none) private(k, d) shared(in_ptr, out_ptr, num, len, in, out, offset)
    {
      std::vector<size_t> ind;
#pragma omp for
      for (k = 0; k < (long long)num; k++){
        ind = in.calculate_index(k*len);

        for (d = 0; d < D; d++){
          ind[d] += offset[d];
        }

        T* out_ptr_curr = out_ptr + out.calculate_offset(ind);
        memcpy(out_ptr_curr, in_ptr + k*len, sizeof(T)*len);
      }
    }
  }

  // Pad `in` into a pre-sized `out` (size taken from out's dimensions).
  template<class T, unsigned int D> void pad(const hoNDArray<T>& in, hoNDArray<T>& out, T val = T(0)){
    vector_td<size_t,D> dims = from_std_vector<size_t,D>(*out.get_dimensions());
    pad<T,D>(dims,in,out,true, val);
  }

  // 1-D pad convenience overload.
  template<class T> void pad(size_t x, const hoNDArray<T>& in, hoNDArray<T>& out, bool preset_out_with_val = true, T val = T(0))
  {
    typename uint64d<1>::Type padSize(x);
    pad<T, 1>(padSize, in, out, preset_out_with_val, val);
  }

  // 2-D pad convenience overload.
  template<class T> void pad(size_t x, size_t y, const hoNDArray<T>& in, hoNDArray<T>& out, bool preset_out_with_val = true, T val = T(0))
  {
    typename uint64d<2>::Type padSize(x, y);
    pad<T, 2>(padSize, in, out, preset_out_with_val, val);
  }

  // 3-D pad convenience overload.
  template<class T> void pad(size_t x, size_t y, size_t z, const hoNDArray<T> &in, hoNDArray<T>& out, bool preset_out_with_val = true, T val = T(0))
  {
    typename uint64d<3>::Type padSize(x, y, z);
    pad<T, 3>(padSize, in, out, preset_out_with_val, val);
  }

  /**
  * @param[in] size Size of the output array
  * @param[in] in Input array
  * @param[in] val Value to use for padding
  * @returns New array of the specified size, containing the original input array in the center and val outside.
  */
  template<class T, unsigned int D> hoNDArray<T>
  pad(const typename uint64d<D>::Type& size, const hoNDArray<T> & in, T val = T(0))
  {
    auto out = hoNDArray<T>();
    pad<T,D>(size, in, out, true, val);
    return out;
  }

  /// copy the sub array x(:, indLastDim) to all other places of the last dimensions
  /// Returns false (after logging) on any failure instead of throwing.
  template<typename T>
  bool repmatLastDimension(hoNDArray<T>& x, size_t indLastDim)
  {
    try
    {
      size_t NDim = x.get_number_of_dimensions();
      size_t lastDim = x.get_size(NDim-1);
      GADGET_CHECK_RETURN_FALSE( indLastDim < lastDim );

      std::vector<size_t> ind(NDim, 0);
      ind[NDim-1] = indLastDim;
      size_t offsetIndLastDim = x.calculate_offset(ind);

      size_t N = x.get_number_of_elements() / lastDim;   // elements per last-dim slice

      long long l;
#pragma omp parallel default(none) private(l) shared(lastDim, offsetIndLastDim, x, ind, indLastDim, N, NDim)
      {
        std::vector<size_t> indLocal(ind);
#pragma omp for
        for ( l=0; l<(long long)lastDim; l++ )
        {
          if ( l==indLastDim ) continue;
          indLocal[NDim-1] = l;
          size_t offsetInd = x.calculate_offset(indLocal);
          memcpy(x.begin()+offsetInd, x.begin()+offsetIndLastDim, sizeof(T)*N);
        }
      }
    }
    catch (...)
    {
      GERROR_STREAM("Errors in repmatLastDimension(hoNDArray<T>& x, size_t indLastDim) ... ");
      return false;
    }
    return true;
  }

  // Utility to check if all neighbors required for the linear interpolation exists
  // (dimensions of size 1 are not considered)
  template<class REAL, unsigned int D> inline bool
  is_border_pixel( vector_td<size_t,D> co, vector_td<size_t,D> dims )
  {
    for( size_t dim=0; dim<D; dim++ ){
      if( dims[dim] > 1 && ( co[dim] == 0 || co[dim] == (dims[dim]-1) ) )
        return true;
    }
    return false;
  }

  // Downsample: halve the first D dimensions by averaging each 2^D-cell
  // neighborhood; dimensions beyond D are treated as batches.
  template<class REAL, unsigned int D> hoNDArray<REAL>
  downsample(const hoNDArray<REAL>& _in )
  {
    // A few sanity checks
    if( _in.get_number_of_dimensions() < D ){
      throw std::runtime_error( "downsample(): the number of array dimensions should be at least D");
    }

    for( size_t d=0; d<D; d++ ){
      if( (_in.get_size(d)%2) == 1 && _in.get_size(d) != 1 ){
        throw std::runtime_error( "downsample(): uneven array dimensions larger than one not accepted");
      }
    }

    typename uint64d<D>::Type matrix_size_in = from_std_vector<size_t,D>( *_in.get_dimensions() );
    typename uint64d<D>::Type matrix_size_out = matrix_size_in >> 1;

    for( size_t d=0; d<D; d++ ){
      if( matrix_size_out[d] == 0 )
        matrix_size_out[d] = 1;
    }

    size_t num_elements = prod(matrix_size_out);
    size_t num_batches = 1;

    for( size_t d=D; d<_in.get_number_of_dimensions(); d++ ){
      num_batches *= _in.get_size(d);
    }

    std::vector<size_t> dims = to_std_vector(matrix_size_out);
    for( size_t d=D; d<_in.get_number_of_dimensions(); d++ ){
      dims.push_back(_in.get_size(d));
    }

    const REAL *in = _in.get_data_ptr();

    hoNDArray<REAL> _out( dims );
    REAL *out = _out.get_data_ptr();

    typedef vector_td<size_t,D> uint64d;

#ifdef USE_OMP
#pragma omp parallel for
#endif
    for( int64_t idx=0; idx < num_elements*num_batches; idx++ ){

      const size_t frame_offset = idx/num_elements;
      const uint64d co_out = idx_to_co<uint64_t,D>( idx-frame_offset*num_elements, matrix_size_out );
      const uint64d co_in = co_out << 1;
      const uint64d twos(2);
      const size_t num_adds = 1 << D;

      size_t actual_adds = 0;
      REAL res = REAL(0);

      for( size_t i=0; i<num_adds; i++ ){
        const uint64d local_co = idx_to_co( i, twos );
        if( weak_greater_equal( local_co, matrix_size_out ) ) continue; // To allow array dimensions of size 1
        const size_t in_idx = co_to_idx(co_in+local_co, matrix_size_in)+frame_offset*prod(matrix_size_in);
        actual_adds++;
        res += in[in_idx];
      }
      out[idx] = res/REAL(actual_adds);
    }

    return _out;
  }

  namespace {
    // Double the size of dimension `dim`: even output samples copy the input,
    // odd output samples are "interpolated".
    // NOTE(review): the interpolation term averages input_ptr[i*stride+k]
    // with ITSELF, i.e. (a+a)/2 == a — presumably it was meant to average
    // with the next sample input_ptr[(i+1)*stride+k]; as written this is
    // nearest-neighbor, not linear, interpolation. Confirm against callers.
    template<class T> hoNDArray<T> upsample_along_dimension(const hoNDArray<T>& array,int dim){
      auto new_dims = *array.get_dimensions();
      auto old_dim = new_dims[dim];
      new_dims[dim] *= 2;

      hoNDArray<T> result(new_dims);

      size_t stride = std::accumulate(new_dims.begin(),new_dims.begin()+dim,size_t(1),std::multiplies<size_t>());
      size_t nbatches = result.get_number_of_elements()/stride/new_dims[dim];
      size_t batch_size = stride*new_dims[dim];
      size_t old_batch_size = batch_size/2;
#pragma omp parallel for
      for (int batch = 0; batch < nbatches; batch++){
        T* result_ptr = result.get_data_ptr()+batch_size*batch;
        const T* input_ptr = array.get_data_ptr()+batch*old_batch_size;
        for (size_t i = 0; i < old_dim-1; i++){
          for (size_t k = 0; k < stride; k++){
            result_ptr[2*i*stride+k] = input_ptr[i*stride+k];
            result_ptr[(2*i+1)*stride+k] = (input_ptr[i*stride+k]+input_ptr[i*stride+k])/2;
          }
        }
        // last input sample: replicate (no next neighbor to interpolate with)
        size_t i = old_dim-1;
        for (size_t k = 0; k < stride; k++){
          result_ptr[2*i*stride+k] = input_ptr[i*stride+k];
          result_ptr[(2*i+1)*stride+k] = input_ptr[i*stride+k];
        }
      }
      return result;
    }

    // Double the size of dimension `dim` using a cardinal cubic B-spline
    // fitted along that dimension (one spline per 1-D fiber).
    template<class T> hoNDArray<T> upsample_spline_along_dimension(const hoNDArray<T>& array,int dim,int scale){
      namespace ba = boost::adaptors;
      namespace bm = boost::math;
      auto new_dims = *array.get_dimensions();
      // NOTE(review): old_dim is unused in this overload.
      auto old_dim = new_dims[dim];
      new_dims[dim] *= 2;

      hoNDArray<T> result(new_dims);

      size_t stride = std::accumulate(new_dims.begin(),new_dims.begin()+dim,size_t(1),std::multiplies<size_t>());
      size_t nbatches = result.get_number_of_elements()/stride/new_dims[dim];
      size_t batch_size = stride*new_dims[dim];
      size_t old_batch_size = batch_size/2;
#pragma omp parallel for
      for (int batch = 0; batch < (int)nbatches; batch++){
        T* result_ptr = result.get_data_ptr()+batch_size*batch;
        const T* input_ptr = array.get_data_ptr()+batch*old_batch_size;
        for (size_t k = 0; k < stride; k++){
          // view of one fiber along `dim` (elements spaced `stride` apart)
          auto strided_iterator = std::make_pair(input_ptr+k,input_ptr+k+old_batch_size) | ba::strided(stride);
          auto spline = bm::interpolators::cardinal_cubic_b_spline( boost::begin(strided_iterator), boost::end(strided_iterator), T(0.25)*scale, T(scale), T(0), T(0) );
          for (int i = 0; i < new_dims[dim]; i++){
            result_ptr[k+i*stride] = spline(i);
          }
        }
      }
      return result;
    }
  }

  // Linear interpolation upsampling: doubles the first D dimensions by
  // applying upsample_along_dimension from the innermost D-1 down to 0.
  template<class T, unsigned int D> hoNDArray<T> upsample( const hoNDArray<T>& in )
  {
    if( in.get_number_of_dimensions() < D ){
      throw std::runtime_error( "upsample(): the number of array dimensions should be at least D");
    }

    hoNDArray<T> result = in;
    for (int i = D-1; i >= 0; i--){
      result = upsample_along_dimension<T>(result,i);
    }
    return result;
  }

  // Cubic-B-spline upsampling of the first D dimensions (each doubled).
  template<class T, unsigned int D> hoNDArray<T> upsample_spline( const hoNDArray<T>& in, int scale = 2 )
  {
    if( in.get_number_of_dimensions() < D ){
      throw std::runtime_error( "upsample(): the number of array dimensions should be at least D");
    }

    hoNDArray<T> result = in;
    for (int i = D-1; i >= 0; i--){
      result = upsample_spline_along_dimension<T>(result,i,scale);
    }
    return result;
  }

  // Nearest-neighbor upsampling: each output coordinate maps back to
  // coordinate/2 in the input. (Header comment said "linear interpolation"
  // in the original; the code is nearest-neighbor, as the name says.)
  template<class T, unsigned int D> hoNDArray<T> upsample_nearest( const hoNDArray<T>& in )
  {
    // A few sanity checks
    if( in.get_number_of_dimensions() < D ){
      throw std::runtime_error( "upsample(): the number of array dimensions should be at least D");
    }

    typename uint64d<D>::Type matrix_size_in = from_std_vector<size_t,D>( *in.get_dimensions() );
    typename uint64d<D>::Type matrix_size_out = matrix_size_in << 1;

    for( size_t d=0; d<D; d++ ){
      if( matrix_size_in[d] == 1 )
        matrix_size_out[d] = 1;
    }

    size_t num_elements = prod(matrix_size_out);
    size_t num_batches = 1;

    for( size_t d=D; d<in.get_number_of_dimensions(); d++ ){
      num_batches *= in.get_size(d);
    }

    std::vector<size_t> dims = to_std_vector(matrix_size_out);
    for( size_t d=D; d<in.get_number_of_dimensions(); d++ ){
      dims.push_back(in.get_size(d));
    }

    const T *in_ptr = in.get_data_ptr();

    hoNDArray<T> out(&dims);
    T *out_ptr = out.get_data_ptr();

    typedef vector_td<size_t,D> uint64d;

#ifdef USE_OMP
#pragma omp parallel for
#endif
    for( long long idx=0; idx < num_elements*num_batches; idx++ ){

      const size_t frame_idx = idx/num_elements;
      const uint64d co_out = idx_to_co<D>( idx-frame_idx*num_elements, matrix_size_out );
      uint64d co_in = co_out/uint64_t(2);
      const size_t in_idx = co_to_idx<D>(co_in, matrix_size_in)+frame_idx*prod(matrix_size_in);
      out_ptr[idx] = in_ptr[in_idx];
    }

    return out;
  }

  // Append a trailing dimension of size `repeats` and copy the input into
  // each slice along it.
  template<class T> hoNDArray<T> repeat(const hoNDArray<T>& array,unsigned int repeats){
    auto dims = array.dimensions();
    dims.push_back(repeats);
    hoNDArray<T> output(dims);

    for (auto span : spans(output, array.get_number_of_dimensions())) {
      span = array;
    }
    return output;
  }

  /**
  * This functions takes a collection of hoNDArrays and concatenates them along the specified dimension
  * @tparam COLL Collection of hoNDArray such as std::vector<hoNDArray<float>>.
  * @param arrays The hoNDArrays. Must be of equal size, except along the concat dimension
  * @param dimension Dimension along which to concatenate
  * @return The concatenated arrays.
  */
  template <class COLL> auto concat_along_dimension(const COLL& arrays, size_t dimension)
  {
    using namespace ranges;
    using T = std::decay_t<decltype(*std::begin(*std::begin(arrays)))>;
    if (arrays.empty())
      return hoNDArray<T>();

    const hoNDArray<T>& first = *std::begin(arrays);

    std::vector dims = first.dimensions();
    // total extent of the concat dimension across all inputs
    size_t count = ranges::accumulate(arrays | view::transform([dimension](const auto& array) { return array.dimensions().at(dimension); }), size_t(0));
    dims[dimension] = count;

    // every input must match `dims` except along `dimension`
    auto dimensions_valid = [&dims,dimension](const auto& array) {
      bool result = true;
      const auto& d = array.dimensions();
      for (size_t i = 0; i < d.size(); i++) {
        if (i == dimension) continue;
        result &= d[i] == dims[i];
      }
      return result && (d.size() == dims.size());
    };

    bool all_dimensions_valid = ranges::accumulate(arrays | view::transform(dimensions_valid), true, std::logical_and() );

    if (!all_dimensions_valid)
      throw std::runtime_error("The dimensions of all provided arrays must be equal except along the concatenate dimension");

    auto result = hoNDArray<T>(dims);

    // elements per slice below the concat dimension / per full output slab
    const size_t inner_stride = ranges::accumulate(dims | views::slice(size_t(0), dimension), size_t(1), std::multiplies());
    const size_t outer_stride = inner_stride * count;

    size_t current_slice = 0;
    for (const auto& array : arrays) {
      size_t slice_count = array.dimensions()[dimension];
      auto array_inner_stride = slice_count * inner_stride;
      auto repetitions = array.size() / array_inner_stride;

      for (int i = 0; i < repetitions; i++) {
        std::copy_n(array.begin() + i * array_inner_stride, array_inner_stride, result.begin() + current_slice * inner_stride + outer_stride * i);
      }
      current_slice += slice_count;
    }

    return result;
  }

  // Stack a collection of identically-shaped arrays along a new trailing
  // dimension of size arrays.size().
  template<class COLL> auto concat(const COLL &arrays)
  {
    using T = std::decay_t<decltype(*std::begin(*std::begin(arrays)))>;
    if (arrays.empty())
      return hoNDArray<T>();

    const hoNDArray<T> &first = *std::begin(arrays);
    auto dims = first.dimensions();
    auto size = first.size();

    using std::begin;
    using std::end;
    if (!std::all_of(begin(arrays), end(arrays), [&](const auto &array) { return dims == array.dimensions(); }) || !std::all_of(begin(arrays), end(arrays), [&](const auto &array) { return size == array.size(); })) {
      throw std::runtime_error("Array size or dimensions do not match.");
    }

    dims.push_back(arrays.size());
    hoNDArray<T> output(dims);

    auto output_iterator = spans(output, first.get_number_of_dimensions()).begin();
    for (const auto& array : arrays) {
      *output_iterator = array;
      ++output_iterator;
    }
    return output;
  }

  // Variadic convenience: concat(a, b, c, ...) with all arguments of the
  // same hoNDArray<T> type.
  template<class T, class... ARRAYS> hoNDArray<T> concat(const hoNDArray<T>& first_array, const ARRAYS& ... arrays){
    static_assert((std::is_same_v<hoNDArray<T>,std::decay_t<ARRAYS>> && ...));
    using namespace ranges;
    return concat(view::concat(view::single(first_array),view::single(arrays)...));
  }
}
PR44893.c
// RUN: %clang -fopenmp -O -g -x c %s -c -disable-output -o %t // Do not crash ;) void foo() { #pragma omp critical ; } void bar() { foo(); foo(); }
ast-dump-openmp-target-parallel-for-simd.c
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s void test_one(int x) { #pragma omp target parallel for simd for (int i = 0; i < x; i++) ; } void test_two(int x, int y) { #pragma omp target parallel for simd for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) ; } void test_three(int x, int y) { #pragma omp target parallel for simd collapse(1) for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) ; } void test_four(int x, int y) { #pragma omp target parallel for simd collapse(2) for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) ; } void test_five(int x, int y, int z) { #pragma omp target parallel for simd collapse(2) for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) for (int i = 0; i < z; i++) ; } // CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK: |-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-target-parallel-for-simd.c:3:1, line:7:1> line:3:6 test_one 'void (int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:22, line:7:1> // CHECK-NEXT: | `-OMPTargetParallelForSimdDirective {{.*}} <line:4:1, col:37> // CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit> // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:5:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-CapturedStmt {{.*}} <col:3, line:6:5> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:5:3, line:6:5> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:5:3, line:6:5> // CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:5:3, line:6:5> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:5:8, col:17> // CHECK-NEXT: | | | | | | | | 
`-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:6:5> // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:4:1) *const restrict' // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:4:1) *const restrict' // CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:5:23> col:23 implicit 'int' // CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <col:3, line:6:5> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:5:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | 
`-NullStmt {{.*}} <line:6:5> // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:4:1) *const restrict' // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 
'void *const' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:4:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:5:23> col:23 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <col:3, line:6:5> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:5:3, line:6:5> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:5:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:6:5> // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:4:1) *const restrict' // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:4:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:5:23> col:23 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-ForStmt {{.*}} <col:3, line:6:5> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:5:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:6:5> // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit 
.global_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:4:1) *const restrict' // CHECK-NEXT: | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: |-FunctionDecl {{.*}} <line:9:1, line:14:1> line:9:6 test_two 'void (int, int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:22, col:26> col:26 used y 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:29, line:14:1> // CHECK-NEXT: | `-OMPTargetParallelForSimdDirective {{.*}} <line:10:1, col:37> // CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit> // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:11:8, col:17> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | 
|-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:12:10, col:19> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:13:7> // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:10:1) *const restrict' // CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:10:1) *const restrict' // CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:11:23> col:23 implicit 'int' // CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int' // CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:11:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | 
`-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:12:10, col:19> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:13:7> // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:10:1) *const restrict' // CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 
'void *const' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:10:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:11:23> col:23 implicit 'int' // CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:11:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:12:10, col:19> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, 
col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:13:7> // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:10:1) *const restrict' // CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:10:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:11:23> col:23 implicit 'int' // CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-ForStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:11:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | 
`-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ForStmt {{.*}} <line:12:5, line:13:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:12:10, col:19> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:13:7> // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:10:1) *const restrict' // CHECK-NEXT: | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: |-FunctionDecl {{.*}} <line:16:1, line:21:1> line:16:6 test_three 'void (int, int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:17, col:21> col:21 used x 'int' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:24, col:28> col:28 used y 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:31, line:21:1> // CHECK-NEXT: | `-OMPTargetParallelForSimdDirective {{.*}} <line:17:1, col:49> // CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:38, col:48> // CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:47> 'int' // CHECK-NEXT: | | |-value: Int 1 // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:47> 'int' 1 // CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit> // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | 
|-ForStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:18:8, col:17> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:19:10, col:19> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:20:7> // 
CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:17:1) *const restrict' // CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:17:1) *const restrict' // CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:18:23> col:23 implicit 'int' // CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int' // CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:18:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | 
|-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:19:10, col:19> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:20:7> // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:17:1) *const restrict' // CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 
'void *const' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:17:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:18:23> col:23 implicit 'int' // CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:18:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:19:10, col:19> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, 
col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:20:7> // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:17:1) *const restrict' // CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:17:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:18:23> col:23 implicit 'int' // CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-ForStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:18:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | 
`-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ForStmt {{.*}} <line:19:5, line:20:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:19:10, col:19> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:20:7> // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:17:1) *const restrict' // CHECK-NEXT: | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: |-FunctionDecl {{.*}} <line:23:1, line:28:1> line:23:6 test_four 'void (int, int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:30, line:28:1> // CHECK-NEXT: | `-OMPTargetParallelForSimdDirective {{.*}} <line:24:1, col:49> // CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:38, col:48> // CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:47> 'int' // CHECK-NEXT: | | |-value: Int 2 // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:47> 'int' 2 // CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit> // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-ForStmt 
{{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:25:8, col:17> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:26:10, col:19> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:27:7> // CHECK-NEXT: | | | | 
| | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:24:1) *const restrict' // CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:24:1) *const restrict' // CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:25:23> col:23 implicit 'int' // CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:26:25> col:25 implicit 'int' // CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:25:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: 
| | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:26:10, col:19> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:27:7> // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:24:1) *const restrict' // CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 
'void *const' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:24:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:25:23> col:23 implicit 'int' // CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:26:25> col:25 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:25:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:26:10, col:19> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, 
col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:27:7> // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:24:1) *const restrict' // CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:24:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:25:23> col:23 implicit 'int' // CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:26:25> col:25 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-ForStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:25:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | 
`-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ForStmt {{.*}} <line:26:5, line:27:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:26:10, col:19> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:27:7> // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:24:1) *const restrict' // CHECK-NEXT: | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: `-FunctionDecl {{.*}} <line:30:1, line:36:1> line:30:6 test_five 'void (int, int, int)' // CHECK-NEXT: |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int' // CHECK-NEXT: |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int' // CHECK-NEXT: |-ParmVarDecl {{.*}} <col:30, col:34> col:34 used z 'int' // CHECK-NEXT: `-CompoundStmt {{.*}} <col:37, line:36:1> // CHECK-NEXT: `-OMPTargetParallelForSimdDirective {{.*}} <line:31:1, col:49> // CHECK-NEXT: |-OMPCollapseClause {{.*}} <col:38, col:48> // CHECK-NEXT: | `-ConstantExpr {{.*}} <col:47> 'int' // CHECK-NEXT: | |-value: Int 2 // CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:47> 'int' 2 // CHECK-NEXT: |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit> // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: `-CapturedStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | |-CapturedStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | |-CapturedStmt {{.*}} <line:32:3, 
line:35:9> // CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:32:8, col:17> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:33:5, line:35:9> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:33:10, col:19> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 
'int' // CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:34:7, line:35:9> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:34:12, col:21> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:35:9> // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:31:1) *const restrict' // CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:31:1) *const restrict' // CHECK-NEXT: | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:32:23> col:23 implicit 'int' // CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:33:25> col:25 implicit 'int' // CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int' // CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | |-ForStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:32:8, col:17> // 
CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:33:5, line:35:9> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:33:10, col:19> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:34:7, line:35:9> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:34:12, col:21> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // 
CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:35:9> // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:31:1) *const restrict' // CHECK-NEXT: | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 
'const int' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:31:1) *const restrict' // CHECK-NEXT: | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | |-FieldDecl {{.*}} <line:32:23> col:23 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | |-FieldDecl {{.*}} <line:33:25> col:25 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int' // CHECK-NEXT: | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | |-CapturedStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | |-ForStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:32:8, col:17> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' 
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:33:5, line:35:9> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:33:10, col:19> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:34:7, line:35:9> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:34:12, col:21> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // 
CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:35:9> // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:31:1) *const restrict' // CHECK-NEXT: | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:31:1) *const restrict' // CHECK-NEXT: | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | |-FieldDecl {{.*}} <line:32:23> col:23 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | |-FieldDecl {{.*}} <line:33:25> col:25 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | 
`-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int' // CHECK-NEXT: | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | |-ForStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: | | |-DeclStmt {{.*}} <line:32:8, col:17> // CHECK-NEXT: | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | |-<<<NULL>>> // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | `-ForStmt {{.*}} <line:33:5, line:35:9> // CHECK-NEXT: | | |-DeclStmt {{.*}} <line:33:10, col:19> // CHECK-NEXT: | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | |-<<<NULL>>> // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | `-ForStmt {{.*}} <line:34:7, line:35:9> // 
CHECK-NEXT: | | |-DeclStmt {{.*}} <line:34:12, col:21> // CHECK-NEXT: | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | |-<<<NULL>>> // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<' // CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | `-NullStmt {{.*}} <line:35:9> // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:31:1) *const restrict' // CHECK-NEXT: | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
random_read_openmp.c
// an example programme that uses slow5lib to randmly access records in a SLOW5 file using multiple threads (openMP) #include <omp.h> #include <stdio.h> #include <stdlib.h> #include <slow5/slow5.h> #define FILE_PATH "examples/example.slow5" #define READ_LIST_SIZE 4 char * read_id_list[READ_LIST_SIZE] = {"r4", "r1", "r3", "r4"}; int main(){ slow5_file_t *sp = slow5_open(FILE_PATH,"r"); if(sp==NULL){ fprintf(stderr,"Error in opening file\n"); exit(EXIT_FAILURE); } int ret=0; ret = slow5_idx_load(sp); if (ret < 0) { fprintf(stderr, "Error in loading index\n"); exit(EXIT_FAILURE); } #pragma omp parallel for for(int32_t i=0; i<READ_LIST_SIZE; i++) { slow5_rec_t *rec = NULL; ret = slow5_get(read_id_list[i], &rec, sp); if (ret < 0) { fprintf(stderr, "Error in when fetching the read %s\n",read_id_list[i]); } else { fprintf(stderr, "Successfully fetched the read %s with %ld raw signal samples\n", rec->read_id, rec->len_raw_signal); } slow5_rec_free(rec); } slow5_idx_unload(sp); slow5_close(sp); }
mpy_math.h
#ifndef _MPY_MATH_HELPER_ #define _MPY_MATH_HELPER_ #include "halffloat.h" #ifndef INFINITY static const union { npy_uint32 __i; float __f;} __binff = {0x7f800000UL}; #define INFINITY (__binff.__f) #endif #ifndef NAN static const union { npy_uint32 __i; float __f;} __bnanf = {0x7fc00000UL}; #define NAN (__bnanf.__f) #endif #ifndef PZERO static const union { npy_uint32 __i; float __f;} __bpzerof = {0x00000000UL}; #define PZERO (__bpzerof.__f) #endif #ifndef NZERO static const union { npy_uint32 __i; float __f;} __bnzerof = {0x80000000UL}; #define NZERO (__bnzerof.__f) #endif #define MPY_INFINITYF INFINITY #define MPY_NANF NAN #define MPY_PZEROF PZERO #define MPY_NZEROF NZERO #define MPY_INFINITY ((npy_double)MPY_INFINITYF) #define MPY_NAN ((npy_double)MPY_NANF) #define MPY_PZERO ((npy_double)MPY_PZEROF) #define MPY_NZERO ((npy_double)MPY_NZEROF) #define MPY_INFINITYL ((npy_longdouble)MPY_INFINITYF) #define MPY_NANL ((npy_longdouble)MPY_NANF) #define MPY_PZEROL ((npy_longdouble)MPY_PZEROF) #define MPY_NZEROL ((npy_longdouble)MPY_NZEROF) #pragma omp declare target /* * C99 double math funcs */ inline double mpy_sin(double x){ return sin(x); } inline double mpy_cos(double x){ return cos(x); } inline double mpy_tan(double x){ return tan(x); } inline double mpy_sinh(double x){ return sinh(x); } inline double mpy_cosh(double x){ return cosh(x); } inline double mpy_tanh(double x){ return tanh(x); } inline double mpy_asin(double x){ return asin(x); } inline double mpy_acos(double x){ return acos(x); } inline double mpy_atan(double x){ return atan(x); } inline double mpy_log(double x){ return log(x); } inline double mpy_log10(double x){ return log10(x); } inline double mpy_exp(double x){ return exp(x); } inline double mpy_sqrt(double x){ return sqrt(x); } inline double mpy_cbrt(double x){ return cbrt(x); } inline double mpy_fabs(double x){ return fabs(x); } inline double mpy_ceil(double x){ return ceil(x); } inline double mpy_fmod(double x, double y){ return fmod(x, y); } 
inline double mpy_floor(double x){ return floor(x); }
inline double mpy_expm1(double x){ return expm1(x); }
inline double mpy_log1p(double x){ return log1p(x); }
inline double mpy_logb(double x){ return logb(x); }
inline double mpy_hypot(double x, double y){ return hypot(x, y); }
inline double mpy_acosh(double x){ return acosh(x); }
inline double mpy_asinh(double x){ return asinh(x); }
inline double mpy_atanh(double x){ return atanh(x); }
inline double mpy_rint(double x){ return rint(x); }
inline double mpy_trunc(double x){ return trunc(x); }
inline double mpy_exp2(double x){ return exp2(x); }
inline double mpy_log2(double x){ return log2(x); }
inline double mpy_atan2(double x, double y){ return atan2(x, y); }
inline double mpy_pow(double x, double y){ return pow(x, y); }
inline double mpy_modf(double x, double* y){ return modf(x, y); }
inline double mpy_frexp(double x, int* y){ return frexp(x, y); }
inline double mpy_ldexp(double n, int y){ return ldexp(n, y); }
inline double mpy_copysign(double x, double y){ return copysign(x, y); }
inline double mpy_nextafter(double x, double y){ return nextafter(x, y); }

/*
 * float C99 math functions
 */
inline float mpy_sinf(float x){ return sinf(x); }
inline float mpy_cosf(float x){ return cosf(x); }
inline float mpy_tanf(float x){ return tanf(x); }
inline float mpy_sinhf(float x){ return sinhf(x); }
inline float mpy_coshf(float x){ return coshf(x); }
inline float mpy_tanhf(float x){ return tanhf(x); }
inline float mpy_fabsf(float x){ return fabsf(x); }
inline float mpy_floorf(float x){ return floorf(x); }
inline float mpy_ceilf(float x){ return ceilf(x); }
inline float mpy_rintf(float x){ return rintf(x); }
inline float mpy_truncf(float x){ return truncf(x); }
inline float mpy_sqrtf(float x){ return sqrtf(x); }
inline float mpy_cbrtf(float x){ return cbrtf(x); }
inline float mpy_log10f(float x){ return log10f(x); }
inline float mpy_logf(float x){ return logf(x); }
inline float mpy_expf(float x){ return expf(x); }
inline float mpy_expm1f(float x){ return expm1f(x); }
inline float mpy_asinf(float x){ return asinf(x); }
inline float mpy_acosf(float x){ return acosf(x); }
inline float mpy_atanf(float x){ return atanf(x); }
inline float mpy_asinhf(float x){ return asinhf(x); }
inline float mpy_acoshf(float x){ return acoshf(x); }
inline float mpy_atanhf(float x){ return atanhf(x); }
inline float mpy_log1pf(float x){ return log1pf(x); }
inline float mpy_logbf(float x){ return logbf(x); }
inline float mpy_exp2f(float x){ return exp2f(x); }
inline float mpy_log2f(float x){ return log2f(x); }
inline float mpy_atan2f(float x, float y){ return atan2f(x, y); }
inline float mpy_hypotf(float x, float y){ return hypotf(x, y); }
inline float mpy_powf(float x, float y){ return powf(x, y); }
inline float mpy_fmodf(float x, float y){ return fmodf(x, y); }
inline float mpy_modff(float x, float* y){ return modff(x, y); }
inline float mpy_frexpf(float x, int* y){ return frexpf(x, y); }
inline float mpy_ldexpf(float x, int y){ return ldexpf(x, y); }
inline float mpy_copysignf(float x, float y){ return copysignf(x, y); }
inline float mpy_nextafterf(float x, float y){ return nextafterf(x, y); }

/*
 * long double C99 math functions
 */
/*
 * Complex declarations
 */
inline npy_longdouble mpy_sinl(npy_longdouble x){ return sinl(x); }
inline npy_longdouble mpy_cosl(npy_longdouble x){ return cosl(x); }
inline npy_longdouble mpy_tanl(npy_longdouble x){ return tanl(x); }
inline npy_longdouble mpy_sinhl(npy_longdouble x){ return sinhl(x); }
inline npy_longdouble mpy_coshl(npy_longdouble x){ return coshl(x); }
inline npy_longdouble mpy_tanhl(npy_longdouble x){ return tanhl(x); }
inline npy_longdouble mpy_fabsl(npy_longdouble x){ return fabsl(x); }
inline npy_longdouble mpy_floorl(npy_longdouble x){ return floorl(x); }
inline npy_longdouble mpy_ceill(npy_longdouble x){ return ceill(x); }
inline npy_longdouble mpy_rintl(npy_longdouble x){ return rintl(x); }
inline npy_longdouble mpy_truncl(npy_longdouble x){ return truncl(x); }
inline npy_longdouble mpy_sqrtl(npy_longdouble x){ return sqrtl(x); }
inline npy_longdouble mpy_cbrtl(npy_longdouble x){ return cbrtl(x); }
inline npy_longdouble mpy_log10l(npy_longdouble x){ return log10l(x); }
inline npy_longdouble mpy_logl(npy_longdouble x){ return logl(x); }
inline npy_longdouble mpy_expl(npy_longdouble x){ return expl(x); }
inline npy_longdouble mpy_expm1l(npy_longdouble x){ return expm1l(x); }
inline npy_longdouble mpy_asinl(npy_longdouble x){ return asinl(x); }
inline npy_longdouble mpy_acosl(npy_longdouble x){ return acosl(x); }
inline npy_longdouble mpy_atanl(npy_longdouble x){ return atanl(x); }
inline npy_longdouble mpy_asinhl(npy_longdouble x){ return asinhl(x); }
inline npy_longdouble mpy_acoshl(npy_longdouble x){ return acoshl(x); }
inline npy_longdouble mpy_atanhl(npy_longdouble x){ return atanhl(x); }
inline npy_longdouble mpy_log1pl(npy_longdouble x){ return log1pl(x); }
inline npy_longdouble mpy_logbl(npy_longdouble x){ return logbl(x); }
inline npy_longdouble mpy_exp2l(npy_longdouble x){ return exp2l(x); }
inline npy_longdouble mpy_log2l(npy_longdouble x){ return log2l(x); }
inline npy_longdouble mpy_atan2l(npy_longdouble x, npy_longdouble y){ return atan2l(x, y); }
inline npy_longdouble mpy_hypotl(npy_longdouble x, npy_longdouble y){ return hypotl(x, y); }
inline npy_longdouble mpy_powl(npy_longdouble x, npy_longdouble y){ return powl(x, y); }
inline npy_longdouble mpy_fmodl(npy_longdouble x, npy_longdouble y){ return fmodl(x, y); }
inline npy_longdouble mpy_modfl(npy_longdouble x, npy_longdouble* y){ return modfl(x, y); }
inline npy_longdouble mpy_frexpl(npy_longdouble x, int* y){ return frexpl(x, y); }
inline npy_longdouble mpy_ldexpl(npy_longdouble x, int y){ return ldexpl(x, y); }
inline npy_longdouble mpy_copysignl(npy_longdouble x, npy_longdouble y){ return copysignl(x, y); }
inline npy_longdouble mpy_nextafterl(npy_longdouble x, npy_longdouble y){ return nextafterl(x, y); }

/*
 * C99 specifies that complex numbers have the same representation as
 * an array of two elements, where the first element is the real part
 * and the second element is the imaginary part.
 */
/* Pack real part x and imaginary part y into a ctype whose element type is
 * `type`. (The original had a stray empty statement -- `z1;;` -- after the
 * union declaration; removed.) */
#define __NPY_CPACK_IMP(x, y, type, ctype)   \
    union {                                  \
        ctype z;                             \
        type a[2];                           \
    } z1;                                    \
                                             \
    z1.a[0] = (x);                           \
    z1.a[1] = (y);                           \
                                             \
    return z1.z;

static NPY_INLINE npy_cdouble mpy_cpack(double x, double y)
{
    __NPY_CPACK_IMP(x, y, double, npy_cdouble);
}

static NPY_INLINE npy_cfloat mpy_cpackf(float x, float y)
{
    __NPY_CPACK_IMP(x, y, float, npy_cfloat);
}

static NPY_INLINE npy_clongdouble mpy_cpackl(npy_longdouble x, npy_longdouble y)
{
    __NPY_CPACK_IMP(x, y, npy_longdouble, npy_clongdouble);
}
#undef __NPY_CPACK_IMP

/*
 * Same remark as above, but in the other direction: extract first/second
 * member of complex number, assuming a C99-compatible representation
 *
 * Those are defined as static inline, and such as a reasonable compiler would
 * most likely compile this to one or two instructions (on CISC at least)
 */
/* NOTE(review): the parameter was originally named `z`, which collided with
 * the union member `z` -- `__z_repr.z` was macro-substituted and only worked
 * because every call site happened to pass a variable literally named `z`.
 * Renamed to `zval` so the macro is robust for any argument. */
#define __NPY_CEXTRACT_IMP(zval, index, type, ctype) \
    union {                                          \
        ctype z;                                     \
        type a[2];                                   \
    } __z_repr;                                      \
    __z_repr.z = zval;                               \
                                                     \
    return __z_repr.a[index];

static NPY_INLINE double mpy_creal(npy_cdouble z)
{
    __NPY_CEXTRACT_IMP(z, 0, double, npy_cdouble);
}

static NPY_INLINE double mpy_cimag(npy_cdouble z)
{
    __NPY_CEXTRACT_IMP(z, 1, double, npy_cdouble);
}

static NPY_INLINE float mpy_crealf(npy_cfloat z)
{
    __NPY_CEXTRACT_IMP(z, 0, float, npy_cfloat);
}

static NPY_INLINE float mpy_cimagf(npy_cfloat z)
{
    __NPY_CEXTRACT_IMP(z, 1, float, npy_cfloat);
}

static NPY_INLINE npy_longdouble mpy_creall(npy_clongdouble z)
{
    __NPY_CEXTRACT_IMP(z, 0, npy_longdouble, npy_clongdouble);
}

static NPY_INLINE npy_longdouble mpy_cimagl(npy_clongdouble z)
{
    __NPY_CEXTRACT_IMP(z, 1, npy_longdouble, npy_clongdouble);
}
#undef __NPY_CEXTRACT_IMP

/*
 * Double precision complex functions
 */
double mpy_cabs(npy_cdouble z);
double mpy_carg(npy_cdouble z);

npy_cdouble mpy_cexp(npy_cdouble z);
npy_cdouble mpy_clog(npy_cdouble z);
npy_cdouble mpy_cpow(npy_cdouble x, npy_cdouble y);

npy_cdouble mpy_csqrt(npy_cdouble z);

npy_cdouble mpy_ccos(npy_cdouble z);
npy_cdouble mpy_csin(npy_cdouble z);
npy_cdouble mpy_ctan(npy_cdouble z);

npy_cdouble mpy_ccosh(npy_cdouble z);
npy_cdouble mpy_csinh(npy_cdouble z);
npy_cdouble mpy_ctanh(npy_cdouble z);

npy_cdouble mpy_cacos(npy_cdouble z);
npy_cdouble mpy_casin(npy_cdouble z);
npy_cdouble mpy_catan(npy_cdouble z);

npy_cdouble mpy_cacosh(npy_cdouble z);
npy_cdouble mpy_casinh(npy_cdouble z);
npy_cdouble mpy_catanh(npy_cdouble z);

/*
 * Single precision complex functions
 */
float mpy_cabsf(npy_cfloat z);
float mpy_cargf(npy_cfloat z);

npy_cfloat mpy_cexpf(npy_cfloat z);
npy_cfloat mpy_clogf(npy_cfloat z);
npy_cfloat mpy_cpowf(npy_cfloat x, npy_cfloat y);

npy_cfloat mpy_csqrtf(npy_cfloat z);

npy_cfloat mpy_ccosf(npy_cfloat z);
npy_cfloat mpy_csinf(npy_cfloat z);
npy_cfloat mpy_ctanf(npy_cfloat z);

npy_cfloat mpy_ccoshf(npy_cfloat z);
npy_cfloat mpy_csinhf(npy_cfloat z);
npy_cfloat mpy_ctanhf(npy_cfloat z);

npy_cfloat mpy_cacosf(npy_cfloat z);
npy_cfloat mpy_casinf(npy_cfloat z);
npy_cfloat mpy_catanf(npy_cfloat z);

npy_cfloat mpy_cacoshf(npy_cfloat z);
npy_cfloat mpy_casinhf(npy_cfloat z);
npy_cfloat mpy_catanhf(npy_cfloat z);

/*
 * Extended precision complex functions
 */
npy_longdouble mpy_cabsl(npy_clongdouble z);
npy_longdouble mpy_cargl(npy_clongdouble z);

npy_clongdouble mpy_cexpl(npy_clongdouble z);
npy_clongdouble mpy_clogl(npy_clongdouble z);
npy_clongdouble mpy_cpowl(npy_clongdouble x, npy_clongdouble y);

npy_clongdouble mpy_csqrtl(npy_clongdouble z);

npy_clongdouble mpy_ccosl(npy_clongdouble z);
npy_clongdouble mpy_csinl(npy_clongdouble z);
npy_clongdouble mpy_ctanl(npy_clongdouble z);

npy_clongdouble mpy_ccoshl(npy_clongdouble z);
npy_clongdouble mpy_csinhl(npy_clongdouble z);
npy_clongdouble mpy_ctanhl(npy_clongdouble z);

npy_clongdouble mpy_cacosl(npy_clongdouble z);
npy_clongdouble mpy_casinl(npy_clongdouble z);
npy_clongdouble mpy_catanl(npy_clongdouble z);

npy_clongdouble mpy_cacoshl(npy_clongdouble z);
npy_clongdouble mpy_casinhl(npy_clongdouble z);
npy_clongdouble mpy_catanhl(npy_clongdouble z);

/*
 * platform-dependent code translates floating point
 * status to an integer sum of these values
 */
int mpy_get_floatstatus(void);
int mpy_clear_floatstatus(void);
void mpy_set_floatstatus_divbyzero(void);
void mpy_set_floatstatus_overflow(void);
void mpy_set_floatstatus_underflow(void);
void mpy_set_floatstatus_invalid(void);

/* Spacing (ulp) of x for each precision. */
npy_float mpy_spacingf(npy_float x);
npy_double mpy_spacing(npy_double x);
npy_longdouble mpy_spacingl(npy_longdouble x);

#pragma omp end declare target
#endif
mbf.c
////////////////////////////////////// // Cunren Liang, NASA JPL/Caltech // Copyright 2017 ////////////////////////////////////// #include "resamp.h" #include <fftw3.h> int mbf(char *inputfile, char *outputfile, int nrg, float prf, float prf_frac, float nb, float nbg, float nboff, float bsl, float *kacoeff, float *dopcoeff1, float *dopcoeff2){ /* inputfile: input file outputfile: output file nrg: file width prf: PRF prf_frac: fraction of PRF processed (represents azimuth bandwidth) nb: number of lines in a burst (float, in terms of 1/PRF) nbg: number of lines in a burst gap (float, in terms of 1/PRF) nboff: number of unsynchronized lines in a burst (float, in terms of 1/PRF, with sign, see burst_sync.py for rules of sign) (the image to be processed is always considered to be master) bsl: start line number of a burst (float, the line number of the first line of the full-aperture SLC is zero) (no need to be first burst, any one is OK) kacoeff[0-2]: FM rate coefficients (three coefficients of a quadratic polynomial with regard to) (range sample number. range sample number starts with zero) dopcoeff1[0-3]: Doppler centroid frequency coefficients of this image (four coefficients of a third order polynomial with regard to) (range sample number. range sample number starts with zero) dopcoeff2[0-3]: Doppler centroid frequency coefficients of the other image (four coefficients of a third order polynomial with regard to) (range sample number. range sample number starts with zero) */ FILE *infp; FILE *outfp; fcomplex **in; //data read in fcomplex *out; //data written to output file fcomplex *filter; //multi-band bandpass filter fcomplex *filter_j; fcomplex *deramp; //deramp signal fcomplex *reramp; //reramp signal fcomplex *data; //data to be filtered. //int nrg; //file width int naz; //file length //float prf; //assume prf are the same //float prf_frac; // azimuth processed bandwidth = prf_frac * prf //float nb; //burst length in terms of pri. 
number of lines //float nbg; //burst gap length in terms of pri. number of lines float nbc; //burst cycle length in terms of pri. number of lines //float nboff; //number of unsynchronized lines in a burst with sign //see burst_sync.py for rules of sign. //the image to be processed is always considered to be master //and the other image is always considered to be slave //float bsl; //burst start line, input float //float kacoeff[3]; //FM rate along range (experessed by quadratic polynomial //as a function of range sample number) //float dopcoeff1[4]; //doppler centroid frequency along range (expressed by quadratic polynomial //as a function of range sample number). this image //float dopcoeff2[4]; //doppler centroid frequency along range (expressed by quadratic polynomial //as a function of range sample number). the other image //ATTENTION: MAKE RANGE NUMBER THE SAME ACCORDING RANGE OFFSET!!! float pri; // 1.0/prf float *ka; float *dop1; float *dop2; float *nfa; //full aperture length in terms of pri. 
number of lines float *freqs; //burst starting doppler centroid frequency float *freqe; //burst ending doppler centroid frequency float *bis; //burst imaged area start line numbers float *bie; //burst imaged area ending line numbers float *bic; //burst imaged area center line number, corresponding to the center of raw burst, //rather than the actual center of imaged area float *bica; //burst imaged area center line number, corresponding to the actual center of imaged area float deramp_center; //line number where center frequency is zero Hz after deramping float bis_min; float bis_max; float bie_min; float bie_max; int bis_out; //starting line number of the data block written out int bie_out; //ending line number of the data block written out int bis_in; //start line number of the data block read in int bie_in; //ending line number of the data block read in int bis_out2; //starting line number of the data block written out int bie_out2; //ending line number of the data block written out int bis_in2; //start line number of the data block read in int bie_in2; //ending line number of the data block read in float nb_new; float nbg_new; float nbc_new; float bsl_new; int nbc_new_int; int nburst_new; //number of bursts in a burst cycle float bfw; //bandwidth of burst in Hz float bfc; //center frequency of burst in Hz int nfft; //fft length int nfilter; //filter length, MUST BE ODD int hnfilter; //half filter length int edgl; //number of lines on the starting and ending edges float beta; //kaiser window beta float sc; //constant to scale the data read in to avoid large values //during fft and ifft int edgl_flag; //flag to indicate how many lines to keep on the starting and ending edges //0: do not remove data on the edges //1: remove data less than half convolution //2: remove all data of incomplete convolution int deramp_center_flag; //flag to indicate the location with zero center frequency after //deramping //0: center (raw burst center) of the burst whose ending/start 
line number is used //1: center of the burst cycle being processed //2: center (raw burst center) of the center burst in the burst cycle being processed float tmp1, tmp2, tmp3; int i, j, k; fftwf_plan p_forward; fftwf_plan p_backward; fftwf_plan p_forward_filter; /*****************************************************************************/ //I just put these parametes which can be set here. These can also be set via //arguments before running the programs if modifying the code to accept these //arguments. beta = 1.0; nfilter = 257; //MUST BE ODD sc = 10000.0; edgl_flag = 0; deramp_center_flag = 0; /*****************************************************************************/ //open files infp = openfile(inputfile, "rb"); outfp = openfile(outputfile, "wb"); printf("\n\ninput parameters:\n"); printf("input file: %s\n", inputfile); printf("output file: %s\n", outputfile); printf("nrg: %d\n", nrg); printf("prf: %f\n", prf); printf("prf_frac: %f\n", prf_frac); printf("nb: %f\n", nb); printf("nbg: %f\n", nbg); printf("nboff: %f\n", nboff); printf("bsl: %f\n", bsl); printf("kacoeff: %f, %f, %f\n", kacoeff[0], kacoeff[1], kacoeff[2]); printf("dopcoeff1: %f, %f, %f, %f\n", dopcoeff1[0], dopcoeff1[1], dopcoeff1[2], dopcoeff1[3]); printf("dopcoeff2: %f, %f, %f, %f\n", dopcoeff2[0], dopcoeff2[1], dopcoeff2[2], dopcoeff2[3]); if(nfilter % 2 != 1){ fprintf(stderr, "filter length must be odd!\n"); exit(1); } naz = file_length(infp, nrg, sizeof(fcomplex)); printf("file width: %d, file length: %d\n\n", nrg, naz); ka = array1d_float(nrg); dop1 = array1d_float(nrg); dop2 = array1d_float(nrg); nfa = array1d_float(nrg); freqs = array1d_float(nrg); freqe = array1d_float(nrg); bis = array1d_float(nrg); bie = array1d_float(nrg); bic = array1d_float(nrg); bica = array1d_float(nrg); in = array2d_fcomplex(naz, nrg); out = array1d_fcomplex(naz); pri = 1.0/prf; nbc = nb + nbg; hnfilter = (nfilter - 1) / 2; //find burst starting line closest to first line and after first line for(i = 
-100000; i < 100000; i++){ tmp1 = bsl + (nb + nbg) * i; if(tmp1 >= 0){ bsl = tmp1; break; } } //calculate something for(i = 0; i < nrg; i++){ //azimuth FM rate. we follow the convention ka > 0 ka[i] = kacoeff[2] * i * i + kacoeff[1] * i + kacoeff[0]; ka[i] = -ka[i]; //doppler centroid frequency dop1[i] = dopcoeff1[0] + dopcoeff1[1] * i + dopcoeff1[2] * i * i + dopcoeff1[3] * i * i * i; dop1[i] *= prf; dop2[i] = dopcoeff2[0] + dopcoeff2[1] * i + dopcoeff2[2] * i * i + dopcoeff2[3] * i * i * i; dop2[i] *= prf; //full aperture length nfa[i] = prf * prf_frac / ka[i] / pri; //consider burst synchronization //these are the same for all columns if(fabs(nboff) >= 0.8 * nb){ fprintf(stderr, "burst synchronization is too small!\n\n"); exit(1); } if(nboff < 0){ bsl_new = bsl - nboff; } else{ bsl_new = bsl; } nb_new = nb - fabs(nboff); nbg_new = nbg + fabs(nboff); nbc_new = nbc; nbc_new_int = (int)(nbc_new + 0.5); //starting and ending doppler centroid frequency of the burst //if the overall doppler centroid frequency = 0 freqs[i] = -(prf * prf_frac - nb_new * pri * ka[i]) / 2.0; freqe[i] = (prf * prf_frac - nb_new * pri * ka[i]) / 2.0; //consider doppler centroid frequency freqs[i] += dop1[i]; freqe[i] += dop1[i]; //consider doppler centroid frequency of the other image tmp1 = dop2[i] - dop1[i]; if(tmp1 > 0){ freqs[i] += tmp1; } else{ freqe[i] += tmp1; } //check if doppler centroid frequency difference too big if(freqe[i] - freqs[i] < nbc_new * pri * ka[i]){ fprintf(stderr, "Doppler centroid frequency difference too large!\n\n"); exit(1); } //starting and ending index of imaged area by the burst bic[i] = bsl_new + (nb_new - 1.0) / 2.0; //this should be the same for all columns bis[i] = freqs[i] / ka[i] / pri + bic[i]; bie[i] = freqe[i] / ka[i] / pri + bic[i]; bica[i] = (bis[i] + bie[i]) / 2.0; } //find the max and min of starting and ending index bis_min = bis[0]; bis_max = bis[0]; bie_min = bie[0]; bie_max = bie[0]; for(i = 0; i < nrg; i++){ if(bis[i] < bis_min){ bis_min = 
bis[i]; } if(bis[i] > bis_max){ bis_max = bis[i]; } if(bie[i] < bie_min){ bie_min = bie[i]; } if(bie[i] > bie_max){ bie_max = bie[i]; } } //read in data readdata((fcomplex *)in[0], (size_t)naz * (size_t)nrg * sizeof(fcomplex), infp); //initialize output data //for(j = 0; j < naz; j++){ // for(k = 0; k < nrg; k++){ // out[j][k].re = 0.0; // out[j][k].im = 0.0; // } //} for(i = 0; i < nrg; i++){ if((i + 1) % 100 == 0 || (i+1) == nrg) fprintf(stderr,"processing: %6d of %6d\r", i+1, nrg); if((i+1) == nrg) fprintf(stderr,"\n"); //initialize output data memset((void *)out, 0, (size_t)naz*sizeof(fcomplex)); //initialize start and ending line number if(dop1[i] > dop2[i]){ bis_out = roundfi(bie[i]) + 1; //bie_out = roundfi(bie[i]) + 1 + (nbc_new - 1); //changed to use nbc_new_int. 27-JAN-2015 bie_out = roundfi(bie[i]) + 1 + (nbc_new_int - 1); } else{ bis_out = roundfi(bis[i]); //bie_out = roundfi(bis[i]) + (nbc_new - 1); //changed to use nbc_new_int. 27-JAN-2015 bie_out = roundfi(bis[i]) + (nbc_new_int - 1); } //consider the filter length bis_in = bis_out - (nfilter - 1) / 2; bie_in = bie_out + (nfilter - 1) / 2; //to make circular convolution equivalent to linear convolution nfft = next_pow2(bie_in - bis_in + 1 + nfilter - 1); //initialize filter filter = array1d_fcomplex(nfft); filter_j = array1d_fcomplex(nfft); //create plans before initializing data, because FFTW_MEASURE overwrites the in/out arrays. 
p_forward_filter = fftwf_plan_dft_1d(nfft, (fftwf_complex*)filter, (fftwf_complex*)filter, FFTW_FORWARD, FFTW_ESTIMATE); //for(j = 0; j < nfft; j++){ // filter[j].re = 0.0; // filter[j].im = 0.0; //} //initialize output data memset((void *)filter, 0, (size_t)nfft*sizeof(fcomplex)); nburst_new = (int)ceil( fabs(freqe[i]-freqs[i]) / (nbc_new * pri * ka[i]) ); //choose deramp center if(dop1[i] > dop2[i]){ if(deramp_center_flag == 0){ deramp_center = bic[i]; } else if(deramp_center_flag == 1){ deramp_center = (bica[i] + nbc_new); } else{ deramp_center = bic[i] + (int)((nburst_new+1) / 2) * nbc_new; } } else{ if(deramp_center_flag == 0){ deramp_center = bic[i]; } else if(deramp_center_flag == 1){ deramp_center = bica[i]; } else{ deramp_center = bic[i] + (int)(nburst_new / 2) * nbc_new; } } //create filters for(j = 0; j <= nburst_new; j++){ //center frequency of bandpass filter //determined by distance of raw burst center and deramp center if(dop1[i] > dop2[i]){ bfc = (deramp_center - (bic[i] + j*nbc_new)) * pri * ka[i]; //do not include first burst in this case if(j == 0){ continue; } } else{ bfc = (deramp_center - (bic[i] - j*nbc_new)) * pri * ka[i]; //do not include last burst in this case if(j == nburst_new){ break; } } //bandwidth of bandpass filter bfw = nb_new * pri * ka[i]; //create filter: first sample corresponding to first fully convolution sample bandpass_filter(bfw/prf, bfc/prf, nfilter, nfft, nfilter-1, beta, filter_j); //add the filters to form the filter to be used for(k = 0; k < nfft; k++){ filter[k].re += filter_j[k].re; filter[k].im += filter_j[k].im; } } //forward fft //four1((float *)filter - 1, nfft, -1); fftwf_execute(p_forward_filter); //create deramp signal: this applies no matter whether dop1[i] is larger, //and no matter bic is on the left or right. 
deramp = array1d_fcomplex(nfft); for(j = 0; j < nfft; j++){ //distance between fft center and deramp center //tmp1 = bis_in + (nfft - 1.0) / 2.0 - bic[i]; tmp1 = bis_in + (nfft - 1.0) / 2.0 - deramp_center; //if(tmp1 <= 0){ // fprintf(stderr, "WARNING: very large doppler centroid frequnecy\n\n"); //} //index used in deramp signal tmp2 = j - (nfft - 1.0) / 2.0 + tmp1; //deramp signal tmp3 = - PI * ka[i] * (tmp2 * pri) * (tmp2 * pri); deramp[j].re = cos(tmp3); deramp[j].im = sin(tmp3); } //rereamp signal reramp = array1d_fcomplex(nfft); for(j = 0; j < nfft; j++){ reramp[j].re = deramp[j].re; reramp[j].im = -deramp[j].im; } //circ_shift(reramp, nfft, -abs(nfilter-1)); circ_shift(reramp, nfft, -abs( (nfilter-1)/2 )); /**********************************************/ /* do the filtering */ /**********************************************/ //filter the data data = array1d_fcomplex(nfft); //create plans before initializing data, because FFTW_MEASURE overwrites the in/out arrays. p_forward = fftwf_plan_dft_1d(nfft, (fftwf_complex*)data, (fftwf_complex*)data, FFTW_FORWARD, FFTW_ESTIMATE); p_backward = fftwf_plan_dft_1d(nfft, (fftwf_complex*)data, (fftwf_complex*)data, FFTW_BACKWARD, FFTW_ESTIMATE); for(j = -10000; j < 10000; j++){ //bis_out2 = bis_out + j * nbc_new; //bie_out2 = bie_out + j * nbc_new; //bis_in2 = bis_in + j * nbc_new; //bie_in2 = bie_in + j * nbc_new; //changed to use nbc_new_int. 
27-JAN-2015 bis_out2 = bis_out + j * nbc_new_int; bie_out2 = bie_out + j * nbc_new_int; bis_in2 = bis_in + j * nbc_new_int; bie_in2 = bie_in + j * nbc_new_int; //find data to be filtered if(bie_in2 <= -1){ continue; } else if(bis_in2 >= naz){ break; } else{ //first zero the data //for(k = 0; k < nfft; k++){ // data[k].re = 0.0; // data[k].im = 0.0; //} memset((void *)data, 0, (size_t)nfft*sizeof(fcomplex)); //get data for(k = bis_in2; k <= bie_in2; k++){ if(k <= -1 || k >= naz){ data[k-bis_in2].re = 0.0; data[k-bis_in2].im = 0.0; } else{ data[k-bis_in2].re = in[k][i].re / sc; data[k-bis_in2].im = in[k][i].im / sc; } } } //deramp the data #pragma omp parallel for private(k) shared(nfft, data, deramp) for(k = 0; k < nfft; k++){ data[k] = cmul(data[k], deramp[k]); } //forward fft //four1((float *)data - 1, nfft, -1); fftwf_execute(p_forward); //multiplication in the frequency domain #pragma omp parallel for private(k) shared(nfft, data, filter) for(k = 0; k < nfft; k++) data[k] = cmul(data[k], filter[k]); //backward fft //four1((float *)data - 1, nfft, 1); fftwf_execute(p_backward); //reramp #pragma omp parallel for private(k) shared(nfft, data, reramp) for(k = 0; k < nfft; k++){ data[k] = cmul(data[k], reramp[k]); } //get the filtered data for(k = bis_out2; k <= bie_out2; k++){ if(edgl_flag == 0){ //do not remove data on the edges edgl = 0; } else if(edgl_flag == 1){ //remove data less than half convolution edgl = (nfft - 1) / 2; } else{ //remove data of incomplete convolution edgl = nfft - 1; } if((k >= (0+edgl)) && (k <= naz-1-edgl)){ out[k].re = data[k-bis_out2].re * sc / nfft; out[k].im = data[k-bis_out2].im * sc / nfft; } } }//j: block of data of each column fftwf_destroy_plan(p_forward); fftwf_destroy_plan(p_backward); fftwf_destroy_plan(p_forward_filter); free_array1d_fcomplex(filter); free_array1d_fcomplex(filter_j); free_array1d_fcomplex(deramp); free_array1d_fcomplex(reramp); free_array1d_fcomplex(data); //overwrite original data for(j = 0; j < naz; j++){ 
in[j][i].re = out[j].re; in[j][i].im = out[j].im; } }//i: each column writedata((fcomplex *)in[0], (size_t)naz * (size_t)nrg * sizeof(fcomplex), outfp); //free arrays free_array1d_float(ka); free_array1d_float(dop1); free_array1d_float(dop2); free_array1d_float(nfa); free_array1d_float(freqs); free_array1d_float(freqe); free_array1d_float(bis); free_array1d_float(bie); free_array1d_float(bic); free_array1d_float(bica); free_array2d_fcomplex(in); free_array1d_fcomplex(out); //close files fclose(infp); fclose(outfp); return 0; }//end main() //////////////////////////////////////////////////////////////////////////////////////////////////////
taskwait-depend.c
// RUN: %libomp-compile-and-run | %sort-threads | FileCheck %s
// REQUIRES: ompt
// taskwait with depend clause was introduced with gcc-9
// UNSUPPORTED: gcc-4, gcc-5, gcc-6, gcc-7, gcc-8
// clang does not yet support taskwait with depend clause
// clang-12 introduced parsing, but no codegen
// update expected result when codegen in clang was added
// XFAIL: clang

// OMPT test for `taskwait depend`: an explicit task with depend(out: x) is
// followed by `#pragma omp taskwait depend(in: x)`.  The runtime is expected
// to report the taskwait as a second task-create/dependences event pair
// (explicit|undeferred|mergeable, one `in` dependence on x).  The expected
// callback trace is pinned by the FileCheck directives at the bottom.

#include "callback.h"
#include <omp.h>

int main() {
  int x = 0;
#pragma omp parallel num_threads(2)
  {
#pragma omp master
    {
      // Record the implicit task's ids and frame pointers
      // (matched below as "task level 0").
      print_ids(0);
      // Print &x so FileCheck can capture it ([[ADDRX]]) and compare it
      // with the addresses reported by ompt_callback_dependences.
      printf("%" PRIu64 ": address of x: %p\n", ompt_get_thread_data()->value,
             &x);
      // First task-create + dependences events: explicit task with an
      // out-dependence on x.
#pragma omp task depend(out : x)
      { x++; }
      print_fuzzy_address(1);
      // Second task-create + dependences events: the taskwait itself,
      // reported with an in-dependence on x.
#pragma omp taskwait depend(in: x)
      print_fuzzy_address(2);
    }
  }
  return 0;
}

// Check if libomp supports the callbacks for this test.
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_task_create'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_dependences'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_task_depende

// CHECK: {{^}}0: NULL_POINTER=[[NULL:.*$]]

// make sure initial data pointers are null
// CHECK-NOT: 0: new_task_data initially not null

// CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_implicit_task_begin:
// CHECK-SAME: parallel_id=[[PARALLEL_ID:[0-9]+]],
// CHECK-SAME: task_id=[[IMPLICIT_TASK_ID:[0-9]+]]

// CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]],
// CHECK-SAME: task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT:0x[0-f]+]],
// CHECK-SAME: reenter_frame=[[NULL]]

// CHECK: {{^}}[[MASTER_ID]]: address of x: [[ADDRX:0x[0-f]+]]

// CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_create:
// CHECK-SAME: parent_task_id={{[0-9]+}}, parent_task_frame.exit=[[EXIT]],
// CHECK-SAME: parent_task_frame.reenter={{0x[0-f]+}},
// CHECK-SAME: new_task_id=[[FIRST_TASK:[0-f]+]],
// CHECK-SAME: codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}},
// CHECK-SAME: task_type=ompt_task_explicit=4, has_dependences=yes

// CHECK: {{^}}[[MASTER_ID]]: ompt_event_dependences:
// CHECK-SAME: task_id=[[FIRST_TASK]], deps=[([[ADDRX]],
// CHECK-SAME: ompt_dependence_type_inout)], ndeps=1

// CHECK: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[RETURN_ADDRESS]]

// CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_create:
// CHECK-SAME: parent_task_id={{[0-9]+}}, parent_task_frame.exit=[[EXIT]],
// CHECK-SAME: parent_task_frame.reenter={{0x[0-f]+}},
// CHECK-SAME: new_task_id=[[SECOND_TASK:[0-f]+]],
// CHECK-SAME: codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}},
// CHECK-SAME: task_type=ompt_task_explicit|ompt_task_undeferred|
// CHECK-SAME: ompt_task_mergeable=1207959556, has_dependences=yes

// CHECK: {{^}}[[MASTER_ID]]: ompt_event_dependences:
// CHECK-SAME: task_id=[[SECOND_TASK]], deps=[([[ADDRX]],
// CHECK-SAME: ompt_dependence_type_in)], ndeps=1

// CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_end: task_id=[[SECOND_TASK]]

// CHECK: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[RETURN_ADDRESS]]
yada.c
/* ============================================================================= * * yada.c * * ============================================================================= * * Copyright (C) Stanford University, 2006. All Rights Reserved. * Author: Chi Cao Minh * * ============================================================================= * * For the license of bayes/sort.h and bayes/sort.c, please see the header * of the files. * * ------------------------------------------------------------------------ * * For the license of kmeans, please see kmeans/LICENSE.kmeans * * ------------------------------------------------------------------------ * * For the license of ssca2, please see ssca2/COPYRIGHT * * ------------------------------------------------------------------------ * * For the license of lib/mt19937ar.c and lib/mt19937ar.h, please see the * header of the files. * * ------------------------------------------------------------------------ * * For the license of lib/rbtree.h and lib/rbtree.c, please see * lib/LEGALNOTICE.rbtree and lib/LICENSE.rbtree * * ------------------------------------------------------------------------ * * Unless otherwise noted, the following license applies to STAMP files: * * Copyright (c) 2007, Stanford University * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of Stanford University nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. 
 *
 * THIS SOFTWARE IS PROVIDED BY STANFORD UNIVERSITY ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL STANFORD UNIVERSITY BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * =============================================================================
 */


#include <assert.h>
#include <getopt.h>
#include <stdio.h>
#include <stdlib.h>
#include "region.h"
#include "list.h"
#include "mesh.h"
#include "heap.h"
#include "thread.h"
#include "timer.h"
#include "tm.h"


/* Command-line defaults (see parseArgs / displayUsage). */
#define PARAM_DEFAULT_INPUTPREFIX ("")
#define PARAM_DEFAULT_NUMTHREAD   (1L)
#define PARAM_DEFAULT_ANGLE       (20.0)

/* Run-time configuration, overwritten by parseArgs(). */
const char* global_inputPrefix = PARAM_DEFAULT_INPUTPREFIX;
long global_numThread = PARAM_DEFAULT_NUMTHREAD;
double global_angleConstraint = PARAM_DEFAULT_ANGLE;

/* Shared benchmark state: the mesh being refined and the work heap of
 * bad elements still to be processed. */
mesh_t* global_meshPtr;
heap_t* global_workHeapPtr;

/* Result counters. Allocated via sitemalloc() in MAIN and updated
 * transactionally (TM_SHARED_WRITE_L) by every worker in process(). */
long* global_totalNumAdded;
long* global_numProcess = 0;


/* =============================================================================
 * displayUsage
 * -- Print the option summary for this benchmark and exit with status 1.
 * =============================================================================
 */
static void
displayUsage (const char* appName)
{
    printf("Usage: %s [options]\n", appName);
    puts("\nOptions: (defaults)\n");
    printf(" a <FLT> Min [a]ngle constraint (%lf)\n", PARAM_DEFAULT_ANGLE);
    printf(" i <STR> [i]nput name prefix (%s)\n", PARAM_DEFAULT_INPUTPREFIX);
    printf(" t <UINT> Number of [t]hreads (%li)\n", PARAM_DEFAULT_NUMTHREAD);
    exit(1);
}


/* =============================================================================
 * parseArgs
 * -- Parse the command line into the global_* configuration variables.
 *    Any unknown option or stray non-option argument triggers displayUsage(),
 *    which exits the program.
 * =============================================================================
 */
static void
parseArgs (long argc, char* const argv[])
{
    long i;
    long opt;

    opterr = 0;

    /* NOTE(review): argc is declared `long` but getopt() expects an `int`
     * first argument; relies on an implicit narrowing conversion. */
    while ((opt = getopt(argc, argv, "a:i:t:")) != -1) {
        switch (opt) {
            case 'a':
                global_angleConstraint = atof(optarg);
                break;
            case 'i':
                global_inputPrefix = optarg;
                break;
            case 't':
                global_numThread = atol(optarg);
                break;
            case '?':
            default:
                opterr++;
                break;
        }
    }

    /* Reject leftover positional arguments. */
    for (i = optind; i < argc; i++) {
        fprintf(stderr, "Non-option argument: %s\n", argv[i]);
        opterr++;
    }

    if (opterr) {
        displayUsage(argv[0]);
    }
}


/* =============================================================================
 * initializeWork
 * -- Seed the work heap with every "bad" element of the freshly read mesh
 *    (in shuffled order, for a deterministic but non-pathological schedule).
 *    Returns the number of bad elements inserted.
 * =============================================================================
 */
static long
initializeWork (heap_t* workHeapPtr, mesh_t* meshPtr)
{
    /* Fixed seed => deterministic shuffle across runs. */
    random_t* randomPtr = random_alloc();
    random_seed(randomPtr, 0);
    mesh_shuffleBad(meshPtr, randomPtr);
    random_free(randomPtr);

    long numBad = 0;

    /* Drain the mesh's bad-element list into the heap. */
    while (1) {
        element_t* elementPtr = mesh_getBad(meshPtr);
        if (!elementPtr) {
            break;
        }
        numBad++;
        bool_t status = heap_insert(workHeapPtr, (void*)elementPtr);
        assert(status);
        /* Mark as referenced so concurrent refinement won't free it
         * while it is still queued. */
        element_setIsReferenced(elementPtr, TRUE);
    }

    return numBad;
}


/* =============================================================================
 * process
 * -- Worker thread body. Repeatedly pops a bad element from the shared work
 *    heap, refines the surrounding region, and pushes any newly created bad
 *    elements back onto the heap. All shared-state accesses happen inside
 *    TM_BEGIN/TM_END transactions; the transaction boundaries and statement
 *    order are load-bearing and must not be rearranged.
 * =============================================================================
 */
void
process ()
{
    TM_THREAD_ENTER();

    heap_t* workHeapPtr = global_workHeapPtr;
    mesh_t* meshPtr = global_meshPtr;
    region_t* regionPtr;
    long totalNumAdded = 0;   /* elements this thread added to the mesh */
    long numProcess = 0;      /* bad elements this thread consumed      */

    regionPtr = PREGION_ALLOC();
    assert(regionPtr);

    while (1) {

        element_t* elementPtr;

        /* Txn 1: pop the next bad element; NULL means the heap is empty. */
        TM_BEGIN();
        elementPtr = (element_t*)TMHEAP_REMOVE(workHeapPtr);
        TM_END();
        if (elementPtr == NULL) {
            break;
        }

        /* Txn 2: another thread may have already garbage-marked this
         * element; if so, perform its delayed deallocation and move on. */
        bool_t isGarbage;
        TM_BEGIN();
        isGarbage = TMELEMENT_ISGARBAGE(elementPtr);
        TM_END();
        if (isGarbage) {
            /*
             * Handle delayed deallocation
             */
            PELEMENT_FREE(elementPtr);
            continue;
        }

        /* Txn 3: refine the region around the element. */
        long numAdded;
        TM_BEGIN();
        PREGION_CLEARBAD(regionPtr);
        numAdded = TMREGION_REFINE(regionPtr, elementPtr, meshPtr);
        TM_END();

        /* Txn 4: drop our reference; re-check garbage status afterwards
         * since refinement may have retired the element. */
        TM_BEGIN();
        TMELEMENT_SETISREFERENCED(elementPtr, FALSE);
        isGarbage = TMELEMENT_ISGARBAGE(elementPtr);
        TM_END();
        if (isGarbage) {
            /*
             * Handle delayed deallocation
             */
            PELEMENT_FREE(elementPtr);
        }

        totalNumAdded += numAdded;

        /* Txn 5: publish the region's new bad elements to the shared heap. */
        TM_BEGIN();
        TMREGION_TRANSFERBAD(regionPtr, workHeapPtr);
        TM_END();

        numProcess++;
    }

    /* Fold this thread's tallies into the shared counters. */
    // printf("yada\n");
    TM_BEGIN();
    TM_SHARED_WRITE_L(*global_totalNumAdded,
                      TM_SHARED_READ_L(*global_totalNumAdded) + totalNumAdded);
    TM_SHARED_WRITE_L(*global_numProcess,
                      TM_SHARED_READ_L(*global_numProcess) + numProcess);
    TM_END();
    // printf("yadaEnd\n");

    PREGION_FREE(regionPtr);

    TM_THREAD_EXIT();
}


/* Heap comparator pairing the sequential and transactional element-compare
 * routines. NOTE(review): constructor-style initialization -- this variant
 * of STAMP appears to build as C++ (or comparator_t is a macro); verify
 * against the build configuration. */
comparator_t yada_heapcompare(&element_heapCompare, &TMelement_heapCompare);


/* =============================================================================
 * main
 * -- Read the mesh, seed the work heap, run the parallel refinement under
 *    the TM/simulator harness, then report timings and validate the mesh.
 * =============================================================================
 */
MAIN(argc, argv)
{
    /*
     * Initialization
     */
    global_numProcess = (long*)sitemalloc(sizeof(long));
    global_totalNumAdded = (long*)sitemalloc(sizeof(long));
    parseArgs(argc, (char** const)argv);
    SIM_GET_NUM_CPU(global_numThread);
    TM_STARTUP(global_numThread);
    P_MEMORY_STARTUP(global_numThread);
    thread_startup(global_numThread);
    global_meshPtr = mesh_alloc();
    assert(global_meshPtr);
    printf("Angle constraint = %lf\n", global_angleConstraint);
    printf("Reading input... ");
    long initNumElement = mesh_read(global_meshPtr, (char*)global_inputPrefix);
    puts("done.");
    global_workHeapPtr = heap_alloc(1, &yada_heapcompare);
    assert(global_workHeapPtr);
    long initNumBadElement = initializeWork(global_workHeapPtr, global_meshPtr);

    printf("Initial number of mesh elements = %li\n", initNumElement);
    printf("Initial number of bad elements = %li\n", initNumBadElement);
    printf("Starting triangulation...");
    fflush(stdout);

    /*
     * Run benchmark
     */
    // NB: Since ASF/PTLSim "REAL" is native execution, and since we are using
    //     wallclock time, we want to be sure we read time inside the
    //     simulator, or else we report native cycles spent on the benchmark
    //     instead of simulator cycles.
    GOTO_SIM();
    TIMER_T start;
    TIMER_READ(start);
#ifdef OTM
#pragma omp parallel
    {
        process();
    }
#else
    thread_start((void(*)(void*))process, NULL);
#endif
    TIMER_T stop;
    TIMER_READ(stop);
    // NB: As above, timer reads must be done inside of the simulated region
    //     for PTLSim/ASF
    GOTO_REAL();
    puts(" done.");
    printf("Elapsed time = %0.3lf\n", TIMER_DIFF_SECONDS(start, stop));
    fflush(stdout);

    /*
     * Check solution
     */
    long finalNumElement = initNumElement + *global_totalNumAdded;
    printf("Final mesh size = %li\n", finalNumElement);
    printf("Number of elements processed = %li\n", *global_numProcess);
    fflush(stdout);
#if 1
    bool_t isSuccess = mesh_check(global_meshPtr, finalNumElement);
#else
    bool_t isSuccess = TRUE;
#endif
    printf("Final mesh is %s\n", (isSuccess ? "valid." : "INVALID!"));
    fflush(stdout);
    //assert(isSuccess);

    /*
     * TODO: deallocate mesh and work heap
     */

    TM_SHUTDOWN();
    P_MEMORY_SHUTDOWN();

    GOTO_SIM();

    thread_shutdown();

    MAIN_RETURN(0);
}


/* =============================================================================
 *
 * End of ruppert.c
 *
 * =============================================================================
 */
GB_ewise_slice.c
//------------------------------------------------------------------------------ // GB_ewise_slice: slice the entries and vectors for an ewise operation //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // Constructs a set of tasks to compute C, for an element-wise operation // (GB_add, GB_emult, and GB_mask) that operates on two input matrices, // C=op(A,B). The mask is ignored for computing where to slice the work, but // it is sliced once the location has been found. #define GB_FREE_WORK \ { \ GB_FREE (Coarse) ; \ GB_FREE (Cwork) ; \ } #define GB_FREE_ALL \ { \ GB_FREE_WORK ; \ GB_FREE (TaskList) ; \ } #include "GB.h" //------------------------------------------------------------------------------ // GB_ewise_slice //------------------------------------------------------------------------------ GrB_Info GB_ewise_slice ( // output: GB_task_struct **p_TaskList, // array of structs, of size max_ntasks int *p_max_ntasks, // size of TaskList int *p_ntasks, // # of tasks constructed int *p_nthreads, // # of threads for eWise operation // input: const int64_t Cnvec, // # of vectors of C const int64_t *GB_RESTRICT Ch, // vectors of C, if hypersparse const int64_t *GB_RESTRICT C_to_M, // mapping of C to M const int64_t *GB_RESTRICT C_to_A, // mapping of C to A const int64_t *GB_RESTRICT C_to_B, // mapping of C to B bool Ch_is_Mh, // if true, then Ch == Mh; GB_add only const GrB_Matrix M, // mask matrix to slice (optional) const GrB_Matrix A, // matrix to slice const GrB_Matrix B, // matrix to slice GB_Context Context ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- ASSERT (p_TaskList != NULL) 
; ASSERT (p_max_ntasks != NULL) ; ASSERT (p_ntasks != NULL) ; ASSERT (p_nthreads != NULL) ; ASSERT_MATRIX_OK (A, "A for ewise_slice", GB0) ; ASSERT_MATRIX_OK (B, "B for ewise_slice", GB0) ; (*p_TaskList ) = NULL ; (*p_max_ntasks) = 0 ; (*p_ntasks ) = 0 ; (*p_nthreads ) = 1 ; int64_t *GB_RESTRICT Cwork = NULL ; int64_t *GB_RESTRICT Coarse = NULL ; // size ntasks1+1 int ntasks1 = 0 ; //-------------------------------------------------------------------------- // determine # of threads to use //-------------------------------------------------------------------------- GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ; //-------------------------------------------------------------------------- // allocate the initial TaskList //-------------------------------------------------------------------------- // Allocate the TaskList to hold at least 2*ntask0 tasks. It will grow // later, if needed. Usually, 64*nthreads_max is enough, but in a few cases // fine tasks can cause this number to be exceeded. If that occurs, // TaskList is reallocated. // When the mask is present, it is often fastest to break the work up // into tasks, even when nthreads_max is 1. GB_task_struct *GB_RESTRICT TaskList = NULL ; int max_ntasks = 0 ; int ntasks0 = (M == NULL && nthreads_max == 1) ? 1 : (32 * nthreads_max) ; GB_REALLOC_TASK_LIST (TaskList, ntasks0, max_ntasks) ; //-------------------------------------------------------------------------- // check for quick return for a single task //-------------------------------------------------------------------------- if (Cnvec == 0 || ntasks0 == 1) { // construct a single coarse task that computes all of C TaskList [0].kfirst = 0 ; TaskList [0].klast = Cnvec-1 ; (*p_TaskList ) = TaskList ; (*p_max_ntasks) = max_ntasks ; (*p_ntasks ) = (Cnvec == 0) ? 
0 : 1 ; (*p_nthreads ) = 1 ; return (GrB_SUCCESS) ; } //-------------------------------------------------------------------------- // get A, B, and M //-------------------------------------------------------------------------- const int64_t vlen = A->vlen ; const int64_t *GB_RESTRICT Ap = A->p ; const int64_t *GB_RESTRICT Ai = A->i ; const int64_t *GB_RESTRICT Bp = B->p ; const int64_t *GB_RESTRICT Bi = B->i ; bool Ch_is_Ah = (Ch != NULL && A->h != NULL && Ch == A->h) ; bool Ch_is_Bh = (Ch != NULL && B->h != NULL && Ch == B->h) ; const int64_t *GB_RESTRICT Mp = NULL ; const int64_t *GB_RESTRICT Mi = NULL ; if (M != NULL) { Mp = M->p ; Mi = M->i ; // Ch_is_Mh is true if either true on input (for GB_add, which denotes // that Ch is a deep copy of M->h), or if Ch is a shallow copy of M->h. Ch_is_Mh = Ch_is_Mh || (Ch != NULL && M->h != NULL && Ch == M->h) ; } //-------------------------------------------------------------------------- // allocate workspace //-------------------------------------------------------------------------- Cwork = GB_MALLOC (Cnvec+1, int64_t) ; if (Cwork == NULL) { // out of memory GB_FREE_ALL ; return (GB_OUT_OF_MEMORY) ; } //-------------------------------------------------------------------------- // compute an estimate of the work for each vector of C //-------------------------------------------------------------------------- int nthreads_for_Cwork = GB_nthreads (Cnvec, chunk, nthreads_max) ; int64_t k ; #pragma omp parallel for num_threads(nthreads_for_Cwork) schedule(static) for (k = 0 ; k < Cnvec ; k++) { //---------------------------------------------------------------------- // get the C(:,j) vector //---------------------------------------------------------------------- int64_t j = (Ch == NULL) ? 
k : Ch [k] ; //---------------------------------------------------------------------- // get the corresponding vector of A //---------------------------------------------------------------------- int64_t kA ; if (C_to_A != NULL) { // A is hypersparse and the C_to_A mapping has been created ASSERT (A->is_hyper || A->is_slice) ; kA = C_to_A [k] ; ASSERT (kA >= -1 && kA < A->nvec) ; if (kA >= 0) { ASSERT (j == ((A->is_hyper) ? A->h [kA] : (A->hfirst + kA))) ; } } else if (Ch_is_Ah) { // A is hypersparse, but Ch is a shallow copy of A->h kA = k ; ASSERT (j == A->h [kA]) ; } else { // A is standard ASSERT (!A->is_hyper) ; ASSERT (!A->is_slice) ; ASSERT (A->h == NULL) ; kA = j ; } //---------------------------------------------------------------------- // get the corresponding vector of B //---------------------------------------------------------------------- int64_t kB ; if (C_to_B != NULL) { // B is hypersparse and the C_to_B mapping has been created ASSERT (B->is_hyper || B->is_slice) ; kB = C_to_B [k] ; ASSERT (kB >= -1 && kB < B->nvec) ; if (kB >= 0) { ASSERT (j == ((B->is_hyper) ? B->h [kB] : (B->hfirst + kB))) ; } } else if (Ch_is_Bh) { // B is hypersparse, but Ch is a shallow copy of B->h kB = k ; ASSERT (j == B->h [kB]) ; } else { // B is standard ASSERT (!B->is_hyper) ; ASSERT (!B->is_slice) ; ASSERT (B->h == NULL) ; kB = j ; } //---------------------------------------------------------------------- // estimate the work for C(:,j) //---------------------------------------------------------------------- ASSERT (kA >= -1 && kA < A->nvec) ; ASSERT (kB >= -1 && kB < B->nvec) ; int64_t aknz = (kA < 0) ? 0 : (Ap [kA+1] - Ap [kA]) ; int64_t bknz = (kB < 0) ? 
0 : (Bp [kB+1] - Bp [kB]) ; Cwork [k] = aknz + bknz + 1 ; } //-------------------------------------------------------------------------- // replace Cwork with its cumulative sum //-------------------------------------------------------------------------- GB_cumsum (Cwork, Cnvec, NULL, nthreads_for_Cwork) ; double cwork = (double) Cwork [Cnvec] ; //-------------------------------------------------------------------------- // determine # of threads and tasks for the eWise operation //-------------------------------------------------------------------------- int nthreads = GB_nthreads (cwork, chunk, nthreads_max) ; ntasks0 = (M == NULL && nthreads == 1) ? 1 : (32 * nthreads) ; double target_task_size = cwork / (double) (ntasks0) ; target_task_size = GB_IMAX (target_task_size, chunk) ; ntasks1 = cwork / target_task_size ; ntasks1 = GB_IMAX (ntasks1, 1) ; //-------------------------------------------------------------------------- // slice the work into coarse tasks //-------------------------------------------------------------------------- if (!GB_pslice (&Coarse, Cwork, Cnvec, ntasks1)) { // out of memory GB_FREE_ALL ; return (GB_OUT_OF_MEMORY) ; } //-------------------------------------------------------------------------- // construct all tasks, both coarse and fine //-------------------------------------------------------------------------- int ntasks = 0 ; for (int t = 0 ; t < ntasks1 ; t++) { //---------------------------------------------------------------------- // coarse task computes C (:,k:klast) //---------------------------------------------------------------------- int64_t k = Coarse [t] ; int64_t klast = Coarse [t+1] - 1 ; if (k >= Cnvec) { //------------------------------------------------------------------ // all tasks have been constructed //------------------------------------------------------------------ break ; } else if (k < klast) { //------------------------------------------------------------------ // coarse task has 2 or more vectors 
//------------------------------------------------------------------ // This is a non-empty coarse-grain task that does two or more // entire vectors of C, vectors k:klast, inclusive. GB_REALLOC_TASK_LIST (TaskList, ntasks + 1, max_ntasks) ; TaskList [ntasks].kfirst = k ; TaskList [ntasks].klast = klast ; ntasks++ ; } else { //------------------------------------------------------------------ // coarse task has 0 or 1 vectors //------------------------------------------------------------------ // As a coarse-grain task, this task is empty or does a single // vector, k. Vector k must be removed from the work done by this // and any other coarse-grain task, and split into one or more // fine-grain tasks. for (int tt = t ; tt < ntasks1 ; tt++) { // remove k from the initial slice tt if (Coarse [tt] == k) { // remove k from task tt Coarse [tt] = k+1 ; } else { // break, k not in task tt break ; } } //------------------------------------------------------------------ // get the vector of C //------------------------------------------------------------------ int64_t j = (Ch == NULL) ? k : Ch [k] ; //------------------------------------------------------------------ // get the corresponding vector of A //------------------------------------------------------------------ int64_t kA ; if (C_to_A != NULL) { // A is hypersparse and the C_to_A mapping has been created kA = C_to_A [k] ; } else if (Ch_is_Ah) { // A is hypersparse, but Ch is a shallow copy of A->h kA = k ; } else { // A is standard kA = j ; } int64_t pA_start = (kA < 0) ? -1 : Ap [kA] ; int64_t pA_end = (kA < 0) ? 
-1 : Ap [kA+1] ; bool a_empty = (pA_end == pA_start) ; //------------------------------------------------------------------ // get the corresponding vector of B //------------------------------------------------------------------ int64_t kB ; if (C_to_B != NULL) { // B is hypersparse and the C_to_B mapping has been created kB = C_to_B [k] ; } else if (Ch_is_Bh) { // B is hypersparse, but Ch is a shallow copy of B->h kB = k ; } else { // B is standard kB = j ; } int64_t pB_start = (kB < 0) ? -1 : Bp [kB] ; int64_t pB_end = (kB < 0) ? -1 : Bp [kB+1] ; bool b_empty = (pB_end == pB_start) ; //------------------------------------------------------------------ // get the corresponding vector of M, if present //------------------------------------------------------------------ int64_t pM_start = -1 ; int64_t pM_end = -1 ; if (M != NULL) { int64_t kM ; if (C_to_M != NULL) { // M is hypersparse and the C_to_M mapping has been created kM = C_to_M [k] ; } else if (Ch_is_Mh) { // Ch is a deep or shallow copy of Mh kM = k ; } else { // M is standard kM = j ; } pM_start = (kM < 0) ? -1 : Mp [kM] ; pM_end = (kM < 0) ? 
-1 : Mp [kM+1] ; } bool m_empty = (pM_end == pM_start) ; //------------------------------------------------------------------ // determine the # of fine-grain tasks to create for vector k //------------------------------------------------------------------ double ckwork = Cwork [k+1] - Cwork [k] ; int nfine = ckwork / target_task_size ; nfine = GB_IMAX (nfine, 1) ; // make the TaskList bigger, if needed GB_REALLOC_TASK_LIST (TaskList, ntasks + nfine, max_ntasks) ; //------------------------------------------------------------------ // create the fine-grain tasks //------------------------------------------------------------------ if (nfine == 1) { //-------------------------------------------------------------- // this is a single coarse task for all of vector k //-------------------------------------------------------------- TaskList [ntasks].kfirst = k ; TaskList [ntasks].klast = k ; ntasks++ ; } else { //-------------------------------------------------------------- // slice vector k into nfine fine tasks //-------------------------------------------------------------- // first fine task starts at the top of vector k ASSERT (ntasks < max_ntasks) ; TaskList [ntasks].kfirst = k ; TaskList [ntasks].klast = -1 ; // this is a fine task TaskList [ntasks].pM = (m_empty) ? -1 : pM_start ; TaskList [ntasks].pA = (a_empty) ? -1 : pA_start ; TaskList [ntasks].pB = (b_empty) ? 
-1 : pB_start ; TaskList [ntasks].len = 0 ; // to be determined below ntasks++ ; int64_t ilast = 0, i = 0 ; for (int tfine = 1 ; tfine < nfine ; tfine++) { double target_work = ((nfine-tfine) * ckwork) / nfine ; int64_t pM, pA, pB ; GB_slice_vector (&i, &pM, &pA, &pB, pM_start, pM_end, Mi, // Mi NULL if M not present pA_start, pA_end, Ai, 0, // Ai always explicit list pB_start, pB_end, Bi, // Bi always explicit list vlen, target_work) ; // prior task ends at pM-1, pA-1, and pB-1 TaskList [ntasks-1].pM_end = pM ; TaskList [ntasks-1].pA_end = pA ; TaskList [ntasks-1].pB_end = pB ; // prior task handles indices ilast:i-1 TaskList [ntasks-1].len = i - ilast ; // this task starts at pM, pA, and pB ASSERT (ntasks < max_ntasks) ; TaskList [ntasks].kfirst = k ; TaskList [ntasks].klast = -1 ; // this is a fine task TaskList [ntasks].pM = pM ; TaskList [ntasks].pA = pA ; TaskList [ntasks].pB = pB ; // advance to the next task ntasks++ ; ilast = i ; } // Terminate the last fine task. ASSERT (ntasks <= max_ntasks) ; TaskList [ntasks-1].pM_end = (m_empty) ? -1 : pM_end ; TaskList [ntasks-1].pA_end = (a_empty) ? -1 : pA_end ; TaskList [ntasks-1].pB_end = (b_empty) ? -1 : pB_end ; TaskList [ntasks-1].len = vlen - i ; } } } ASSERT (ntasks <= max_ntasks) ; //-------------------------------------------------------------------------- // free workspace and return result //-------------------------------------------------------------------------- GB_FREE_WORK ; (*p_TaskList ) = TaskList ; (*p_max_ntasks) = max_ntasks ; (*p_ntasks ) = ntasks ; (*p_nthreads ) = nthreads ; return (GrB_SUCCESS) ; }
valid.res12.src.h
#pragma once
#include "ukr.h"
#include "omp.h"
#include "transpose.h"
#include "gen_ukr_A6B2gemm_1_512_7_7_512_3_3.h"
#include "gen_ukr_A1B2gemm_1_512_7_7_512_3_3.h"

// Auto-generated ("push button") convolution driver for one fixed problem:
// batch 1, 512 input channels, 7x7 spatial, 512 filters, 3x3 kernel (see the
// gen_ukr_*gemm_1_512_7_7_512_3_3.h micro-kernel headers).  Meant to be called
// from inside an OpenMP parallel region: every thread first packs a slice of
// the filter tensor oriB into B, all threads hit the barrier, then each thread
// runs the generated loop nest.
//   A    - packed input activations
//   B    - packed/transposed filters (written by the packing phase below)
//   C    - output tensor, updated by the scatter micro-kernels
//   oriB - original (unpacked) filter layout, read-only here
// Tile sizes Tc1/Tf2/Txy3 and extents uNf/uNc/uNw/uNh are assumed to be
// defined in ukr.h — TODO confirm.
void testrun(float* A ,float*B, float*C, float*oriB ){
    int tid = omp_get_thread_num();
    // Nx/Ny/Nh mirror the 7x7 output and 3x3 kernel extents; they are unused
    // below because the generator constant-folded them into the offsets.
    int Nx = 7;
    int Ny = 7;
    int Nh = 3;
    // Per-row strides passed to the scatter micro-kernel; temporarily bumped
    // by 2 for output rows that wrap to the next x line (else-if branch).
    long long Astrides[6] = {0,1,2,3,4,5};
    int b1 = 0; // single-batch problem, batch index fixed at 0
    // Packing phase: transpose filters oriB -> B in 16-filter panels made of
    // two 8x8 AVX tiles.  The (tid%1)/(tid/1) split factors are degenerate
    // (1-way) in this generated instance, so every thread packs the full
    // range; presumably non-trivial factors are emitted for other thread
    // decompositions — TODO confirm.
    for (int fpck = (tid%1)*16; fpck < uNf; fpck+=1*16){
        for(int cwh = (tid/1)*8; cwh < uNc*uNw*uNh/8*8; cwh+=8*1){
            transpose8x8_avx(oriB+ (fpck+0)*uNc*uNw*uNh + cwh, B + fpck*uNc*uNw*uNh + cwh* 16 + 0, uNc*uNw*uNh, 16);
            transpose8x8_avx(oriB+ (fpck+8)*uNc*uNw*uNh + cwh, B + fpck*uNc*uNw*uNh + cwh* 16 + 8, uNc*uNw*uNh, 16);
        }
    }
    // All packing must complete before any thread consumes B below.
#pragma omp barrier
// begin push button generated block
    // Generated tiling hierarchy over channels (c5..c1), flattened output
    // pixels (xy5..xy1, 0..48 for the 7x7 output) and filters (f5..f1).
    // The innermost unit of work is 6 output pixels x 16 filters (2 vectors).
    for(int c5=0;c5<512+0;c5+=512) {
    for(int xy5=0;xy5<49+0;xy5+=49) {
    for(int f5=0;f5<512+0;f5+=512) {
    for(int c4=c5;c4<min(512, 512+c5);c4+=512) {
    for(int xy4=xy5;xy4<min(49, 49+xy5);xy4+=49) {
    for(int f4=f5;f4<min(512, 512+f5);f4+=512) {
    for(int c3=c4;c3<min(512, 512+c4);c3+=Tc1) {
    for(int f3=f4;f3<min(512, 512+f4);f3+=Tf2) {
    for(int xy3=xy4;xy3<min(49, 49+xy4);xy3+=Txy3) {
    for(int xy2=xy3;xy2<min(49, Txy3+xy3);xy2+=6) {
    for(int f2=f3;f2<min(512, Tf2+f3);f2+=16) {
    for(int c2=c3;c2<min(512, Tc1+c3);c2+=Tc1) {
    for(int c1=c2;c1<min(512, Tc1+c2);c1+=Tc1) {
    for(int xy1=xy2;xy1<min(49, 6+xy2);xy1+=6) {
    for(int f1=f2;f1<min(512, 16+f2);f1+=16) {
        // Remaining channels for this tile (last tile may be short).
        int ctile=min(Tc1, 512-c1);
        // Decompose the flattened pixel index into (x1, y1) of the 7x7 grid.
        int x1=xy1/7;
        int y1=xy1%7/1;
        // Generator-emitted index splits; the /1 and %1 factors are
        // degenerate in this instance.
        int c1_1=c1/1;
        int c1_2=c1%1/1;
        int kf1_1=f1/16;
        int kf1_2=f1%16/1;
        int of1_1=f1/1;
        int of1_2=f1%1/1;
        // Linearized base offsets into A (input), B (packed filters) and C
        // (output); the magic constants encode the fixed tensor strides.
        int offsetA=0+b1*41472+c1_1*81+1*x1*9+1*y1*1+c1_2*1;
        int offsetB=0+kf1_1*73728+c1*144+0*48+0*16+kf1_2*1;
        int offsetC=0+b1*25088+of1_1*49+x1*7+y1*1+of1_2*1;
        if(7-y1>=6){
            // All 6 pixels fit inside the current output row.
            cnn_ukr_float_scatter_6x2v_cxycgemm(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
        }
        else if(7*7-xy1>=6){
            // 6 pixels remain overall but they wrap into the next row:
            // shift the wrapped rows' strides by 2, run the kernel, undo.
            for(int sti=7-y1;sti<6;sti+=1) {
                Astrides[sti]+=2;
            }
            cnn_ukr_float_scatter_6x2v_cxycgemm(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
            for(int sti=7-y1;sti<6;sti+=1) {
                Astrides[sti]-=2;
            }
        }
        else{
            // Fewer than 6 pixels left in the image: 1-pixel tail kernel.
            cnn_ukr_float_scatter_1x2v_cxycgemm(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
        }
    }
    }
    }
    }
    }
    }
    }
    }
    }
    }
    }
    }
    }
    }
    }
// end push button generated block
}
HelloMP.c
#include <omp.h> // Nur für Hilfsfunktionen #include <stdio.h> #include <stdlib.h> int main(void) { int i; omp_set_num_threads(4); #pragma omp parallel for for (i = 0; i < 4; ++i) { const int id = omp_get_thread_num(); printf("Hello World from thread %d\n", id); if (id == 0) /* Nur im Masterthread ausführen */ printf("There are %d threads\n", omp_get_num_threads()); } return EXIT_SUCCESS; }
convolution_packnto1_fp16s.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// fp16 storage convolution, packn-channel input -> 1-channel output, with
// fp32 (widened) accumulation.  One output channel per OpenMP task; for each
// output pixel the packn input lanes of every kernel tap are widened to fp32,
// multiply-accumulated, then horizontally reduced into a scalar.
static void convolution_packnto1_fp16s_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_fp16, const Mat& bias_data, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt)
{
    // Number of fp16 lanes per vector register: VLEN in bytes / 2 bytes each.
    const int packn = csrr_vlenb() / 2;
    const word_type vl = vsetvl_e16m1(packn);

    int w = bottom_blob.w;
    int channels = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const int maxk = kernel_w * kernel_h;

    // kernel offsets: for each of the maxk taps, the element offset from the
    // top-left input sample, with dilation folded in (row gap handles the
    // jump from the end of one kernel row to the start of the next).
    std::vector<int> _space_ofs(maxk);
    int* space_ofs = &_space_ofs[0];
    {
        int p1 = 0;
        int p2 = 0;
        int gap = w * dilation_h - kernel_w * dilation_w;
        for (int i = 0; i < kernel_h; i++)
        {
            for (int j = 0; j < kernel_w; j++)
            {
                space_ofs[p1] = p2;
                p1++;
                p2 += dilation_w;
            }
            p2 += gap;
        }
    }

    const float* bias_data_ptr = bias_data;

    // num_output
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        __fp16* outptr = top_blob.channel(p);

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                // Scalar part of the sum starts at the bias (if any); the
                // vector accumulator starts at zero and is reduced into it.
                float sum = 0.f;
                if (bias_data_ptr)
                {
                    sum = bias_data_ptr[p];
                }

                vfloat32m2_t _sum = vfmv_v_f_f32m2(0.f, vl);

                const __fp16* kptr = weight_data_fp16.channel(p);

                // channels
                for (int q = 0; q < channels; q++)
                {
                    // presumably a cheap header copy, not a data copy — confirm ncnn Mat semantics
                    const Mat m = bottom_blob.channel(q);
                    const __fp16* sptr = m.row<const __fp16>(i * stride_h) + j * stride_w * packn;

                    for (int k = 0; k < maxk; k++)
                    {
                        // Widen fp16 input and weight to fp32, then fused
                        // multiply-accumulate across the packn lanes.
                        vfloat32m2_t _val = vfwcvt_f_f_v_f32m2(vle16_v_f16m1(sptr + space_ofs[k] * packn, vl), vl);
                        vfloat32m2_t _w = vfwcvt_f_f_v_f32m2(vle16_v_f16m1(kptr, vl), vl);
                        _sum = vfmacc_vv_f32m2(_sum, _val, _w, vl);

                        kptr += packn;
                    }
                }

                // Horizontal reduction of the vector accumulator, seeded with
                // the scalar bias via the vfredsum scalar operand.
                sum = vfmv_f_s_f32m1_f32(vfredsum_vs_f32m2_f32m1(vfloat32m1_t(), _sum, vfmv_s_f_f32m1(vfloat32m1_t(), sum, vl), vl));

                sum = activation_ss(sum, activation_type, activation_params);

                outptr[j] = (__fp16)sum;
            }
            outptr += outw;
        }
    }
}

// Same layout transform as above but with fp16 arithmetic end-to-end
// ("fp16sa" = fp16 storage + fp16 accumulation): no widening conversions,
// fp16 bias, fp16 reduction.  Faster but lower precision than the fp16s
// variant.
static void convolution_packnto1_fp16sa_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_fp16, const Mat& bias_data_fp16, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt)
{
    // Number of fp16 lanes per vector register: VLEN in bytes / 2 bytes each.
    const int packn = csrr_vlenb() / 2;
    const word_type vl = vsetvl_e16m1(packn);

    int w = bottom_blob.w;
    int channels = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const int maxk = kernel_w * kernel_h;

    // kernel offsets (same dilation-aware tap table as the fp16s variant)
    std::vector<int> _space_ofs(maxk);
    int* space_ofs = &_space_ofs[0];
    {
        int p1 = 0;
        int p2 = 0;
        int gap = w * dilation_h - kernel_w * dilation_w;
        for (int i = 0; i < kernel_h; i++)
        {
            for (int j = 0; j < kernel_w; j++)
            {
                space_ofs[p1] = p2;
                p1++;
                p2 += dilation_w;
            }
            p2 += gap;
        }
    }

    const __fp16* bias_data_ptr = bias_data_fp16;

    // num_output
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        __fp16* outptr = top_blob.channel(p);

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                __fp16 sum = 0.f;
                if (bias_data_ptr)
                {
                    sum = bias_data_ptr[p];
                }

                vfloat16m1_t _sum = vfmv_v_f_f16m1(0.f, vl);

                const __fp16* kptr = weight_data_fp16.channel(p);

                // channels
                for (int q = 0; q < channels; q++)
                {
                    const Mat m = bottom_blob.channel(q);
                    const __fp16* sptr = m.row<const __fp16>(i * stride_h) + j * stride_w * packn;

                    for (int k = 0; k < maxk; k++)
                    {
                        // fp16 multiply-accumulate, no widening.
                        vfloat16m1_t _val = vle16_v_f16m1(sptr + space_ofs[k] * packn, vl);
                        vfloat16m1_t _w = vle16_v_f16m1(kptr, vl);
                        _sum = vfmacc_vv_f16m1(_sum, _val, _w, vl);

                        kptr += packn;
                    }
                }

                // Horizontal fp16 reduction seeded with the scalar bias.
                sum = vfmv_f_s_f16m1_f16(vfredsum_vs_f16m1_f16m1(vfloat16m1_t(), _sum, vfmv_s_f_f16m1(vfloat16m1_t(), sum, vl), vl));

                sum = activation_ss(sum, activation_type, activation_params);

                outptr[j] = sum;
            }
            outptr += outw;
        }
    }
}
metadirective_ast_print.c
// RUN: %clang_cc1 -verify -fopenmp -triple x86_64-unknown-linux-gnu -x c -std=c99 -ast-print %s -o - | FileCheck %s // RUN: %clang_cc1 -verify -fopenmp-simd -triple x86_64-unknown-linux-gnu -x c -std=c99 -ast-print %s -o - | FileCheck %s // expected-no-diagnostics #ifndef HEADER #define HEADER void bar(void); #define N 10 void foo(void) { #pragma omp metadirective when(device = {kind(cpu)} \ : parallel) default() bar(); #pragma omp metadirective when(implementation = {vendor(score(0) \ : llvm)}, \ device = {kind(cpu)} \ : parallel) default(target teams) bar(); #pragma omp metadirective when(device = {kind(gpu)} \ : target teams) when(implementation = {vendor(llvm)} \ : parallel) default() bar(); #pragma omp metadirective default(target) when(implementation = {vendor(score(5) \ : llvm)}, \ device = {kind(cpu, host)} \ : parallel) bar(); #pragma omp metadirective when(user = {condition(N > 10)} \ : target) when(user = {condition(N == 10)} \ : parallel) bar(); #pragma omp metadirective when(device = {kind(host)} \ : parallel for) for (int i = 0; i < 100; i++) ; #pragma omp metadirective when(implementation = {extension(match_all)} \ : parallel) default(parallel for) for (int i = 0; i < 100; i++) ; #pragma omp metadirective when(implementation = {extension(match_any)} \ : parallel) default(parallel for) for (int i = 0; i < 100; i++) ; #pragma omp metadirective when(implementation = {extension(match_none)} \ : parallel) default(parallel for) for (int i = 0; i < 100; i++) ; } // CHECK: void bar(); // CHECK: void foo() // CHECK-NEXT: #pragma omp parallel // CHECK-NEXT: bar() // CHECK-NEXT: #pragma omp parallel // CHECK-NEXT: bar() // CHECK-NEXT: #pragma omp parallel // CHECK-NEXT: bar() // CHECK-NEXT: #pragma omp parallel // CHECK-NEXT: bar() // CHECK-NEXT: #pragma omp parallel // CHECK-NEXT: bar() // CHECK-NEXT: #pragma omp parallel for // CHECK-NEXT: for (int i = 0; i < 100; i++) // CHECK: #pragma omp parallel // CHECK-NEXT: for (int i = 0; i < 100; i++) // CHECK: 
#pragma omp parallel for // CHECK-NEXT: for (int i = 0; i < 100; i++) // CHECK: #pragma omp parallel // CHECK-NEXT: for (int i = 0; i < 100; i++) #endif
cf72ae4_ac_icc_so4.c
#define _POSIX_C_SOURCE 200809L
#include "stdlib.h"
#include "math.h"
#include "sys/time.h"
#include "xmmintrin.h"
#include "pmmintrin.h"
#include <stdio.h>
#include "omp.h"

#define min(a, b) (((a) < (b)) ? (a) : (b))
#define max(a, b) (((a) > (b)) ? (a) : (b))

/* Devito-style generated code: opaque array wrapper carrying the data
 * pointer plus shape/padding metadata for each tensor argument. */
struct dataobj
{
  void *restrict data;
  int *size;
  int *npsize;
  int *dsize;
  int *hsize;
  int *hofs;
  int *oofs;
};

/* Wall-clock accumulators for the two generated code sections. */
struct profiler
{
  double section0;
  double section1;
};

void bf0(struct dataobj *restrict damp_vec, const float dt, struct dataobj *restrict u_vec, struct dataobj *restrict vp_vec, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict save_src_u_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, const int t0, const int t1, const int t2, const int x0_blk0_size, const int x_M, const int x_m, const int y0_blk0_size, const int y_M, const int y_m, const int z_M, const int z_m, const int sp_zi_m, const int nthreads, const int xb, const int yb, const int xb_size, const int yb_size, const int time, const int tw);

/* Time-tiled ("temporal blocking", skewing factor sf=2) forward wave
 * propagation driver.  Iterates skewed space-time tiles (t_blk, xb, yb) and
 * calls bf0 for each timestep of each tile; u rotates over 3 time buffers
 * via the t0/t1/t2 indices.  Returns 0; timing is accumulated in *timers. */
int Forward(struct dataobj *restrict block_sizes_vec, struct dataobj *restrict damp_vec, const float dt, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict save_src_u_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict u_vec, struct dataobj *restrict vp_vec, const int sp_zi_m, const int time_M, const int time_m, struct profiler *timers, const int x_M, const int x_m, const int y_M, const int y_m, const int z_M, const int z_m, const int nthreads, const int nthreads_nonaffine)
{
  /* Reinterpret the flat dataobj payloads as variably-modified C arrays so
   * the loops below can use natural multi-dimensional indexing. */
  int(*restrict block_sizes) __attribute__((aligned(64))) = (int(*))block_sizes_vec->data;
  int(*restrict nnz_sp_source_mask)[nnz_sp_source_mask_vec->size[1]] __attribute__((aligned(64))) = (int(*)[nnz_sp_source_mask_vec->size[1]])nnz_sp_source_mask_vec->data;
  float(*restrict save_src_u)[save_src_u_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_u_vec->size[1]])save_src_u_vec->data;
  int(*restrict source_id)[source_id_vec->size[1]][source_id_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_id_vec->size[1]][source_id_vec->size[2]])source_id_vec->data;
  int(*restrict source_mask)[source_mask_vec->size[1]][source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_mask_vec->size[1]][source_mask_vec->size[2]])source_mask_vec->data;
  int(*restrict sp_source_mask)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]])sp_source_mask_vec->data;
  /* NOTE(review): u is cast here but never read in Forward itself — only the
   * *_vec handles are forwarded to bf0.  Leftover from the generator. */
  float(*restrict u)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]] __attribute__((aligned(64))) = (float(*)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]])u_vec->data;

  /* Flush denormal numbers to zero in hardware */
  _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON);
  _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON);

  /* Runtime-tunable tile (xb/yb) and cache-block (x0/y0) sizes. */
  int xb_size = block_sizes[0];
  int y0_blk0_size = block_sizes[3];
  int x0_blk0_size = block_sizes[2];
  int yb_size = block_sizes[1];

  printf(" Tiles: %d, %d ::: Blocks %d, %d \n", xb_size, yb_size, x0_blk0_size, y0_blk0_size);

  //for (int time = time_m, t0 = (time + 2)%(3), t1 = (time)%(3), t2 = (time + 1)%(3); time <= time_M; time += 1, t0 = (time + 2)%(3), t1 = (time)%(3), t2 = (time + 1)%(3))
  //{

  /* sf is the time-skewing factor; spatial tiles are shifted by sf per
   * timestep, hence the "+ sf * (time_M - time_m)" loop extensions below. */
  int sf = 2;
  int t_blk_size = 2 * sf * (time_M - time_m);

  struct timeval start_section0, end_section0;
  gettimeofday(&start_section0, NULL);
  for (int t_blk = time_m; t_blk < sf * (time_M - time_m); t_blk += sf * t_blk_size) // for each t block
  {
    for (int xb = x_m; xb <= (x_M + sf * (time_M - time_m)); xb += xb_size + 1)
    {
      for (int yb = y_m; yb <= (y_M + sf * (time_M - time_m)); yb += yb_size + 1)
      {
        /* Walk the timesteps of this tile in skewed coordinates; t0/t1/t2
         * select the previous/current/next buffer in the 3-deep rotation. */
        for (int time = t_blk, t0 = (time + 2) % (3), t1 = (time) % (3), t2 = (time + 1) % (3); time <= 1 + min(t_blk + t_blk_size - 1, sf * (time_M - time_m)); time += sf, t0 = (((time / sf) % (time_M - time_m + 1)) + 2) % (3), t1 = (((time / sf) % (time_M - time_m + 1))) % (3), t2 = (((time / sf) % (time_M - time_m + 1)) + 1) % (3))
        {
          /* De-skewed timestep index, used to address save_src_u. */
          int tw = ((time / sf) % (time_M - time_m + 1));
          bf0(damp_vec, dt, u_vec, vp_vec, nnz_sp_source_mask_vec, sp_source_mask_vec, save_src_u_vec, source_id_vec, source_mask_vec, t0, t1, t2, x0_blk0_size, x_M - (x_M - x_m + 1) % (x0_blk0_size), x_m, y0_blk0_size, y_M - (y_M - y_m + 1) % (y0_blk0_size), y_m, z_M, z_m, sp_zi_m, nthreads, xb, yb, xb_size, yb_size, time, tw);
          //bf0(damp_vec, dt, u_vec, vp_vec, t0, t1, t2, x0_blk0_size, x_M - (x_M - x_m + 1) % (x0_blk0_size), x_m, (y_M - y_m + 1) % (y0_blk0_size), y_M, y_M - (y_M - y_m + 1) % (y0_blk0_size) + 1, z_M, z_m, nthreads);
          //bf0(damp_vec, dt, u_vec, vp_vec, t0, t1, t2, (x_M - x_m + 1) % (x0_blk0_size), x_M, x_M - (x_M - x_m + 1) % (x0_blk0_size) + 1, y0_blk0_size, y_M - (y_M - y_m + 1) % (y0_blk0_size), y_m, z_M, z_m, nthreads);
          //bf0(damp_vec, dt, u_vec, vp_vec, t0, t1, t2, (x_M - x_m + 1) % (x0_blk0_size), x_M, x_M - (x_M - x_m + 1) % (x0_blk0_size) + 1, (y_M - y_m + 1) % (y0_blk0_size), y_M, y_M - (y_M - y_m + 1) % (y0_blk0_size) + 1, z_M, z_m, nthreads);
        }
      }
    }
    /* End section0 */
    gettimeofday(&end_section0, NULL);
    timers->section0 += (double)(end_section0.tv_sec - start_section0.tv_sec) + (double)(end_section0.tv_usec - start_section0.tv_usec) / 1000000;
  }
  /* Section1 is empty in this generated variant; only its timer is kept. */
  for (int time = time_m, t2 = (time + 1) % (3); time <= time_M; time += 1, t2 = (time + 1) % (3))
  {
    struct timeval start_section1, end_section1;
    gettimeofday(&start_section1, NULL);
    /* Begin section1 */
    /* End section1 */
    gettimeofday(&end_section1, NULL);
    timers->section1 += (double)(end_section1.tv_sec - start_section1.tv_sec) + (double)(end_section1.tv_usec - start_section1.tv_usec) / 1000000;
  }
  return 0;
}

/* One skewed space tile of one timestep: a cache-blocked, OpenMP-parallel
 * 3D acoustic wave update (2nd order in time, 4th order in space) writing
 * u[t2] from u[t0]/u[t1], followed by sparse source injection driven by the
 * precomputed source masks.  The "- time" offsets de-skew the x/y indices. */
void bf0(struct dataobj *restrict damp_vec, const float dt, struct dataobj *restrict u_vec, struct dataobj *restrict vp_vec, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict save_src_u_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, const int t0, const int t1, const int t2, const int x0_blk0_size, const int x_M, const int x_m, const int y0_blk0_size, const int y_M, const int y_m, const int z_M, const int z_m, const int sp_zi_m, const int nthreads, const int xb, const int yb, const int xb_size, const int yb_size, const int time, const int tw)
{
  float(*restrict damp)[damp_vec->size[1]][damp_vec->size[2]] __attribute__((aligned(64))) = (float(*)[damp_vec->size[1]][damp_vec->size[2]])damp_vec->data;
  float(*restrict u)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]] __attribute__((aligned(64))) = (float(*)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]])u_vec->data;
  float(*restrict vp)[vp_vec->size[1]][vp_vec->size[2]] __attribute__((aligned(64))) = (float(*)[vp_vec->size[1]][vp_vec->size[2]])vp_vec->data;
  int(*restrict nnz_sp_source_mask)[nnz_sp_source_mask_vec->size[1]] __attribute__((aligned(64))) = (int(*)[nnz_sp_source_mask_vec->size[1]])nnz_sp_source_mask_vec->data;
  float(*restrict save_src_u)[save_src_u_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_u_vec->size[1]])save_src_u_vec->data;
  int(*restrict source_id)[source_id_vec->size[1]][source_id_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_id_vec->size[1]][source_id_vec->size[2]])source_id_vec->data;
  int(*restrict source_mask)[source_mask_vec->size[1]][source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_mask_vec->size[1]][source_mask_vec->size[2]])source_mask_vec->data;
  int(*restrict sp_source_mask)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]])sp_source_mask_vec->data;

  /* Degenerate block sizes would make the loops below ill-formed. */
  if (x0_blk0_size == 0 || y0_blk0_size == 0)
  {
    return;
  }

#pragma omp parallel num_threads(nthreads)
  {
    /* Dynamic schedule: blocks clipped against the tile edge have uneven cost. */
#pragma omp for collapse(2) schedule(dynamic, 1)
    for (int x0_blk0 = max((x_m + time), xb); x0_blk0 <= min((x_M + time), (xb + xb_size)); x0_blk0 += x0_blk0_size)
    {
      for (int y0_blk0 = max((y_m + time), yb); y0_blk0 <= min((y_M + time), (yb + yb_size)); y0_blk0 += y0_blk0_size)
      {
        for (int x = x0_blk0; x <= min(min((x_M + time), (xb + xb_size)), (x0_blk0 + x0_blk0_size - 1)); x++)
        {
          for (int y = y0_blk0; y <= min(min((y_M + time), (yb + yb_size)), (y0_blk0 + y0_blk0_size - 1)); y++)
          {
            /* Vectorized inner z sweep: damped wave-equation update.  The
             * +4 offsets skip the halo padding; +1 for damp's padding. */
#pragma omp simd aligned(damp, u, vp : 32)
            for (int z = z_m; z <= z_M; z += 1)
            {
              float r8 = 1.0/dt;
              float r7 = 1.0/(dt*dt);
              float r6 = 1.0/(vp[x - time + 4][y - time + 4][z + 4]*vp[x - time + 4][y - time + 4][z + 4]);
              u[t2][x - time + 4][y - time + 4][z + 4] = (r6*(-r7*(u[t0][x - time + 4][y - time + 4][z + 4] - 2.0F*u[t1][x - time + 4][y - time + 4][z + 4])) + r8*(damp[x - time + 1][y - time + 1][z + 1]*u[t1][x - time + 4][y - time + 4][z + 4]) - 3.70370379e-4F*(u[t1][x - time + 2][y - time + 4][z + 4] + u[t1][x - time + 4][y - time + 2][z + 4] + u[t1][x - time + 4][y - time + 4][z + 2] + u[t1][x - time + 4][y - time + 4][z + 6] + u[t1][x - time + 4][y - time + 6][z + 4] + u[t1][x - time + 6][y - time + 4][z + 4]) + 5.92592607e-3F*(u[t1][x - time + 3][y - time + 4][z + 4] + u[t1][x - time + 4][y - time + 3][z + 4] + u[t1][x - time + 4][y - time + 4][z + 3] + u[t1][x - time + 4][y - time + 4][z + 5] + u[t1][x - time + 4][y - time + 5][z + 4] + u[t1][x - time + 5][y - time + 4][z + 4]) - 3.33333341e-2F*u[t1][x - time + 4][y - time + 4][z + 4])/(r6*r7 + r8*damp[x - time + 1][y - time + 1][z + 1]);
            }
            /* Sparse source injection: only the nnz entries of this (x,y)
             * column carry sources; zind selects the z location, and the
             * saved wavelet sample (per de-skewed step tw) is masked in. */
#pragma omp simd aligned(damp, u, vp : 32)
            for (int sp_zi = sp_zi_m; sp_zi <= nnz_sp_source_mask[x - time][y - time] - 1; sp_zi += 1)
            {
              int zind = sp_source_mask[x - time][y - time][sp_zi];
              float r0 = save_src_u[tw][source_id[x - time][y - time][zind]] * source_mask[x - time][y - time][zind];
              u[t2][x - time + 4][y - time + 4][zind + 4] += r0;
            }
          }
        }
      }
    }
  }
}
blake2bp.c
/*
   BLAKE2 reference source code package - optimized C implementations

   Written in 2012 by Samuel Neves <sneves@dei.uc.pt>

   To the extent possible under law, the author(s) have dedicated all copyright
   and related and neighboring rights to this software to the public domain
   worldwide. This software is distributed without any warranty.

   You should have received a copy of the CC0 Public Domain Dedication along with
   this software. If not, see <http://creativecommons.org/publicdomain/zero/1.0/>.
*/

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

#if defined(_OPENMP)
#include <omp.h>
#endif

#include "blake2.h"
#include "blake2-impl.h"

/* BLAKE2bp = 4-way parallel BLAKE2b tree hash: input is striped across 4
 * leaf states in BLAKE2B_BLOCKBYTES blocks, leaf digests are then hashed by
 * a single root state. */
#define PARALLELISM_DEGREE 4

/* Initialize leaf state `S` number `offset` of the 4-leaf tree (fanout 4,
 * depth 2, node_depth 0).  Returns blake2b_init_param's result (0 on ok). */
static inline int blake2bp_init_leaf( blake2b_state *S, uint8_t outlen, uint8_t keylen, uint64_t offset )
{
  blake2b_param P[1];
  P->digest_length = outlen;
  P->key_length = keylen;
  P->fanout = PARALLELISM_DEGREE;
  P->depth = 2;
  P->leaf_length = 0;
  P->node_offset = offset;
  P->node_depth = 0;
  P->inner_length = BLAKE2B_OUTBYTES;
  memset( P->reserved, 0, sizeof( P->reserved ) );
  memset( P->salt, 0, sizeof( P->salt ) );
  memset( P->personal, 0, sizeof( P->personal ) );
  return blake2b_init_param( S, P );
}

/* Initialize the root state (node_depth 1, node_offset 0); it hashes the
 * four leaf digests into the final output. */
static inline int blake2bp_init_root( blake2b_state *S, uint8_t outlen, uint8_t keylen )
{
  blake2b_param P[1];
  P->digest_length = outlen;
  P->key_length = keylen;
  P->fanout = PARALLELISM_DEGREE;
  P->depth = 2;
  P->leaf_length = 0;
  P->node_offset = 0;
  P->node_depth = 1;
  P->inner_length = BLAKE2B_OUTBYTES;
  memset( P->reserved, 0, sizeof( P->reserved ) );
  memset( P->salt, 0, sizeof( P->salt ) );
  memset( P->personal, 0, sizeof( P->personal ) );
  return blake2b_init_param( S, P );
}

/* Streaming init, unkeyed.  Returns 0 on success, -1 on bad outlen. */
int blake2bp_init( blake2bp_state *S, const uint8_t outlen )
{
  if( !outlen || outlen > BLAKE2B_OUTBYTES ) return -1;

  memset( S->buf, 0, sizeof( S->buf ) );
  S->buflen = 0;

  if( blake2bp_init_root( S->R, outlen, 0 ) < 0 )
    return -1;

  for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
    if( blake2bp_init_leaf( S->S[i], outlen, 0, i ) < 0 ) return -1;

  /* Per the BLAKE2 tree mode, the root and the last leaf are "last node"s. */
  S->R->last_node = 1;
  S->S[PARALLELISM_DEGREE - 1]->last_node = 1;
  return 0;
}

/* Streaming init, keyed: each leaf absorbs one zero-padded key block first.
 * Returns 0 on success, -1 on bad outlen/key. */
int blake2bp_init_key( blake2bp_state *S, const uint8_t outlen, const void *key, const uint8_t keylen )
{
  if( !outlen || outlen > BLAKE2B_OUTBYTES ) return -1;

  if( !key || !keylen || keylen > BLAKE2B_KEYBYTES ) return -1;

  memset( S->buf, 0, sizeof( S->buf ) );
  S->buflen = 0;

  if( blake2bp_init_root( S->R, outlen, keylen ) < 0 )
    return -1;

  for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
    if( blake2bp_init_leaf( S->S[i], outlen, keylen, i ) < 0 ) return -1;

  S->R->last_node = 1;
  S->S[PARALLELISM_DEGREE - 1]->last_node = 1;
  {
    uint8_t block[BLAKE2B_BLOCKBYTES];
    memset( block, 0, BLAKE2B_BLOCKBYTES );
    memcpy( block, key, keylen );

    for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
      blake2b_update( S->S[i], block, BLAKE2B_BLOCKBYTES );

    secure_zero_memory( block, BLAKE2B_BLOCKBYTES ); /* Burn the key from stack */
  }
  return 0;
}

/* Absorb `inlen` bytes: drain the carry buffer first, then stripe full
 * 4-block groups across the leaves (optionally with OpenMP), and buffer any
 * remainder for the next call / finalization.  Always returns 0. */
int blake2bp_update( blake2bp_state *S, const uint8_t *in, uint64_t inlen )
{
  size_t left = S->buflen;
  size_t fill = sizeof( S->buf ) - left;

  /* Complete a previously buffered 4-block group, one block per leaf. */
  if( left && inlen >= fill )
  {
    memcpy( S->buf + left, in, fill );

    for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
      blake2b_update( S->S[i], S->buf + i * BLAKE2B_BLOCKBYTES, BLAKE2B_BLOCKBYTES );

    in += fill;
    inlen -= fill;
    left = 0;
  }

#if defined(_OPENMP)
  /* NOTE(review): each thread takes the stripe matching its thread id, which
   * assumes the team has exactly PARALLELISM_DEGREE threads — confirm the
   * num_threads clause is honored in the build environment. */
  #pragma omp parallel shared(S), num_threads(PARALLELISM_DEGREE)
#else
  for( size_t id__ = 0; id__ < PARALLELISM_DEGREE; ++id__ )
#endif
  {
#if defined(_OPENMP)
    size_t id__ = omp_get_thread_num();
#endif
    uint64_t inlen__ = inlen;
    const uint8_t *in__ = ( const uint8_t * )in;
    in__ += id__ * BLAKE2B_BLOCKBYTES;

    /* Consume only complete 4-block groups; the tail is buffered below. */
    while( inlen__ >= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES )
    {
      blake2b_update( S->S[id__], in__, BLAKE2B_BLOCKBYTES );
      in__ += PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES;
      inlen__ -= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES;
    }
  }

  in += inlen - inlen % ( PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES );
  inlen %= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES;

  if( inlen > 0 )
    memcpy( S->buf + left, in, inlen );

  S->buflen = left + inlen;
  return 0;
}

/* Finalize: flush each leaf's share of the carry buffer, collect the four
 * leaf digests, and hash them through the root into `out`. */
int blake2bp_final( blake2bp_state *S, uint8_t *out, const uint8_t outlen )
{
  uint8_t hash[PARALLELISM_DEGREE][BLAKE2B_OUTBYTES];

  for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
  {
    if( S->buflen > i * BLAKE2B_BLOCKBYTES )
    {
      size_t left = S->buflen - i * BLAKE2B_BLOCKBYTES;

      if( left > BLAKE2B_BLOCKBYTES ) left = BLAKE2B_BLOCKBYTES;

      blake2b_update( S->S[i], S->buf + i * BLAKE2B_BLOCKBYTES, left );
    }

    blake2b_final( S->S[i], hash[i], BLAKE2B_OUTBYTES );
  }

  for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
    blake2b_update( S->R, hash[i], BLAKE2B_OUTBYTES );

  return blake2b_final( S->R, out, outlen );
}

/* One-shot BLAKE2bp over a complete buffer (optionally keyed).  Unlike the
 * streaming path, each stripe also absorbs its own partial tail block here.
 * Returns 0 on success, -1 on bad parameters. */
int blake2bp( uint8_t *out, const void *in, const void *key, uint8_t outlen, uint64_t inlen, uint8_t keylen )
{
  uint8_t hash[PARALLELISM_DEGREE][BLAKE2B_OUTBYTES];
  blake2b_state S[PARALLELISM_DEGREE][1];
  blake2b_state FS[1];

  /* Verify parameters */
  if ( NULL == in && inlen > 0 ) return -1;

  if ( NULL == out ) return -1;

  if( NULL == key && keylen > 0 ) return -1;

  if( !outlen || outlen > BLAKE2B_OUTBYTES ) return -1;

  if( keylen > BLAKE2B_KEYBYTES ) return -1;

  for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
    if( blake2bp_init_leaf( S[i], outlen, keylen, i ) < 0 ) return -1;

  S[PARALLELISM_DEGREE - 1]->last_node = 1; // mark last node

  if( keylen > 0 )
  {
    uint8_t block[BLAKE2B_BLOCKBYTES];
    memset( block, 0, BLAKE2B_BLOCKBYTES );
    memcpy( block, key, keylen );

    for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
      blake2b_update( S[i], block, BLAKE2B_BLOCKBYTES );

    secure_zero_memory( block, BLAKE2B_BLOCKBYTES ); /* Burn the key from stack */
  }

#if defined(_OPENMP)
  #pragma omp parallel shared(S,hash), num_threads(PARALLELISM_DEGREE)
#else
  for( size_t id__ = 0; id__ < PARALLELISM_DEGREE; ++id__ )
#endif
  {
#if defined(_OPENMP)
    size_t id__ = omp_get_thread_num();
#endif
    uint64_t inlen__ = inlen;
    const uint8_t *in__ = ( const uint8_t * )in;
    in__ += id__ * BLAKE2B_BLOCKBYTES;

    while( inlen__ >= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES )
    {
      blake2b_update( S[id__], in__, BLAKE2B_BLOCKBYTES );
      in__ += PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES;
      inlen__ -= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES;
    }

    /* Absorb this stripe's final (possibly partial) block, if any. */
    if( inlen__ > id__ * BLAKE2B_BLOCKBYTES )
    {
      const size_t left = inlen__ - id__ * BLAKE2B_BLOCKBYTES;
      const size_t len = left <= BLAKE2B_BLOCKBYTES ? left : BLAKE2B_BLOCKBYTES;
      blake2b_update( S[id__], in__, len );
    }

    blake2b_final( S[id__], hash[id__], BLAKE2B_OUTBYTES );
  }

  if( blake2bp_init_root( FS, outlen, keylen ) < 0 )
    return -1;

  FS->last_node = 1; // Mark as last node

  for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
    blake2b_update( FS, hash[i], BLAKE2B_OUTBYTES );

  return blake2b_final( FS, out, outlen );
}

#if defined(BLAKE2BP_SELFTEST)
#include <string.h>
#include "blake2-kat.h"
/* Known-answer self-test: hash every prefix of a fixed pattern with a fixed
 * key via the streaming API and compare against the keyed KAT vectors. */
int main( int argc, char **argv )
{
  uint8_t key[BLAKE2B_KEYBYTES];
  uint8_t buf[KAT_LENGTH];

  for( size_t i = 0; i < BLAKE2B_KEYBYTES; ++i )
    key[i] = ( uint8_t )i;

  for( size_t i = 0; i < KAT_LENGTH; ++i )
    buf[i] = ( uint8_t )i;

  for( size_t i = 0; i < KAT_LENGTH; ++i )
  {
    uint8_t hash[BLAKE2B_OUTBYTES];
    //blake2bp( hash, buf, key, BLAKE2B_OUTBYTES, i, BLAKE2B_KEYBYTES );
    blake2bp_state S[1];
    blake2bp_init_key( S, BLAKE2B_OUTBYTES, key, BLAKE2B_KEYBYTES );
    blake2bp_update( S, buf, i );
    blake2bp_final( S, hash, BLAKE2B_OUTBYTES );

    if( 0 != memcmp( hash, blake2bp_keyed_kat[i], BLAKE2B_OUTBYTES ) )
    {
      puts( "error" );
      return -1;
    }
  }

  puts( "ok" );
  return 0;
}
#endif
iwork_fmt_plug.c
/* JtR format to crack iWork '09, and '13 / '14 files. * * This software is Copyright (c) 2015, Dhiru Kholia <kholia at kth.se> and * Maxime Hulliger <hulliger at kth.se>, and it is hereby released to the * general public under the following terms: * * Redistribution and use in source and binary forms, with or without * modification, are permitted. * * This code may be freely used and modified for any purpose. * * Big thanks to Sean Patrick O'Brien for making this format possible. */ #if FMT_EXTERNS_H extern struct fmt_main fmt_iwork; #elif FMT_REGISTERS_H john_register_one(&fmt_iwork); #else #include <string.h> #include <assert.h> #include <errno.h> #include <openssl/des.h> #ifdef _OPENMP #include <omp.h> #ifndef OMP_SCALE #define OMP_SCALE 64 #endif #endif #include "arch.h" #include "misc.h" #include "common.h" #include "formats.h" #include "params.h" #include "options.h" #include "johnswap.h" #include "pbkdf2_hmac_sha1.h" #include "jumbo.h" #include "memdbg.h" #include "iwork_common.h" #define FORMAT_LABEL "iwork" #define FORMAT_NAME "Apple iWork '09 / '13 / '14" #ifdef SIMD_COEF_32 #define ALGORITHM_NAME "PBKDF2-SHA1 AES " SHA1_ALGORITHM_NAME #else #define ALGORITHM_NAME "PBKDF2-SHA1 AES 32/" ARCH_BITS_STR #endif #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define BINARY_SIZE 0 #define PLAINTEXT_LENGTH 125 #define SALT_SIZE sizeof(*fctx) #define BINARY_ALIGN 1 #define SALT_ALIGN sizeof(int) #ifdef SIMD_COEF_32 #define MIN_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1 #define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1 #else #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #endif static struct fmt_tests iwork_tests[] = { {"$iwork$1$2$1$100000$d77ce46a68697e08b76ac91de9117541$e7b72b2848dc27efed883963b00b1ac7$e794144cd2f04bd50e23957b30affb2898554a99a3accb7506c17132654e09c04bbeff45dc4f8a8a1db5fd1592f699eeff2f9a8c31b503e9631a25a344b517f7" ,"12345678"}, {FORMAT_TAG 
"1$2$1$100000$c773f06bcd580e4afa35618a7d0bee39$8b241504af92416f226d0eea4bf26443$18358e736a0401061f2dca103fceb29e88606d3ec80d09841360cbb8b9dc1d2908c270d3ff4c05cf7a46591e02ff3c9d75f4582f631721a3257dc087f98f523e", "password"}, // iWork '09 Keynote file {"$iwork$2$1$1$4000$736f6d6553616c74$a9d975f8b3e1bf0c388944b457127df4$09eb5d093584376001d4c94e9d0a41eb8a2993132849c5aed8e56e7bd0e8ed50ba38aced793e3480675990c828c01d25fe245cc6aa603c6cb1a0425988f1d3dc", "openwall"}, {NULL} }; #if defined (_OPENMP) static int omp_t = 1; #endif static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static int *cracked, cracked_count; static struct format_context *fctx; static void init(struct fmt_main *self) { #if defined (_OPENMP) omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif saved_key = mem_calloc(sizeof(*saved_key), self->params.max_keys_per_crypt); cracked = mem_calloc(sizeof(*cracked), self->params.max_keys_per_crypt); cracked_count = self->params.max_keys_per_crypt; } static void done(void) { MEM_FREE(cracked); MEM_FREE(saved_key); } static void set_salt(void *salt) { fctx = (struct format_context *)salt; } static void iwork_set_key(char *key, int index) { int saved_len = strlen(key); if (saved_len > PLAINTEXT_LENGTH) saved_len = PLAINTEXT_LENGTH; memcpy(saved_key[index], key, saved_len); saved_key[index][saved_len] = 0; } static char *get_key(int index) { return saved_key[index]; } static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int index = 0; memset(cracked, 0, sizeof(cracked[0])*cracked_count); #ifdef _OPENMP #pragma omp parallel for for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT) #endif { unsigned char master[MAX_KEYS_PER_CRYPT][16]; int i; #ifdef SIMD_COEF_32 int lens[MAX_KEYS_PER_CRYPT]; unsigned char *pin[MAX_KEYS_PER_CRYPT], *pout[MAX_KEYS_PER_CRYPT]; for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) { lens[i] = strlen(saved_key[index+i]); pin[i] = 
(unsigned char*)saved_key[index+i]; pout[i] = master[i]; } pbkdf2_sha1_sse((const unsigned char**)pin, lens, fctx->salt, fctx->salt_length, fctx->iterations, pout, 16, 0); #else pbkdf2_sha1((unsigned char *)saved_key[index], strlen(saved_key[index]), fctx->salt, fctx->salt_length, fctx->iterations, master[0], 16, 0); #endif for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) { cracked[index+i] = iwork_common_decrypt(fctx, master[i], fctx->iv, fctx->blob); } } return count; } static int cmp_all(void *binary, int count) { int index; for (index = 0; index < count; index++) if (cracked[index]) return 1; return 0; } static int cmp_one(void *binary, int index) { return cracked[index]; } static int cmp_exact(char *source, int index) { return 1; } struct fmt_main fmt_iwork = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP, { "iteration count", }, { FORMAT_TAG }, iwork_tests }, { init, done, fmt_default_reset, fmt_default_prepare, iwork_common_valid, fmt_default_split, fmt_default_binary, iwork_common_get_salt, { iwork_common_iteration_count, }, fmt_default_source, { fmt_default_binary_hash }, fmt_default_salt_hash, NULL, set_salt, iwork_set_key, get_key, fmt_default_clear_keys, crypt_all, { fmt_default_get_hash }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
util.c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <math.h>
#include <gsl/gsl_matrix.h>
#include <gsl/gsl_blas.h>
#include <gsl/gsl_rng.h>
#include <gsl/gsl_randist.h>

/* Read a keyword index row and an N_kw x N_kw integer co-occurrence matrix
 * from a comma-separated file, scaling every matrix entry by `scaling`.
 * Layout expected: one header line, N_kw index values, another header line,
 * then N_kw rows of N_kw values.
 * NOTE(review): fopen() and fscanf() results are unchecked — a missing or
 * malformed file dereferences a NULL FILE* / leaves entries unset; confirm
 * callers guarantee the file exists. */
void read_matrix(int** index, int** matrix, double scaling, int N_kw, char* input_fileName)
{
    FILE *fp = fopen(input_fileName, "r");
    fscanf(fp, "%*[^\n]\n");   /* skip header line */
    for (int ii = 0; ii < N_kw; ii++)
        fscanf(fp, "%d,", &((*index)[ii]));
    fscanf(fp, "%*[^\n]\n");   /* skip to start of matrix rows */
    int tmp;
    for (int ii = 0; ii < N_kw; ii++) {
        for (int jj = 0; jj < N_kw; jj++) {
            fscanf(fp, "%d,", &tmp);
            (*matrix)[ii*N_kw + jj] = (int) (tmp * scaling);
        }
        fscanf(fp, "%*[^\n]\n");
    }
    fclose(fp);
}

/* Pad the co-occurrence matrix to hide true response volumes:
 * each diagonal (per-keyword count) is inflated to lambda*max + uniform
 * noise, and every off-diagonal entry is re-sampled with hypergeometric
 * draws consistent with the padded diagonals (symmetric result).
 * NOTE(review): r1/r2 are plain GSL RNG states used inside omp parallel for
 * loops; GSL RNGs are not documented as thread-safe, so concurrent draws
 * likely race — confirm whether per-thread RNG states were intended.
 * NOTE(review): the first loop reads (*matrix_padded) diagonals written by
 * the previous loop; correctness relies on the loops being separate. */
void pad_matrix(int** matrix_padded, int** matrix, double lambda, int N_kw, int N_doc)
{
    // Initialising RNG
    const gsl_rng_type * T;
    gsl_rng *r1, *r2;
    gsl_rng_env_setup();
    r1 = gsl_rng_alloc(gsl_rng_default);
    r2 = gsl_rng_alloc (gsl_rng_taus);

    // perform padding
    int ii, jj;
    int max_resp_len = 0;
    /* largest true per-keyword response length (matrix diagonal) */
    for (ii = 0; ii < N_kw; ii++)
        if ((*matrix)[ii*N_kw + ii] > max_resp_len)
            max_resp_len = (*matrix)[ii*N_kw + ii];

    /* padded diagonal: lambda*max plus uniform noise in [0, max] */
    #pragma omp parallel for private(ii)
    for (ii = 0; ii < N_kw; ii++)
        (*matrix_padded)[ii*N_kw + ii] = (int) (lambda * (max_resp_len)) + gsl_rng_uniform_int(r2, max_resp_len+1);

    /* off-diagonals: sample a joint count consistent with both padded
     * diagonals (lower triangle computed, mirrored to upper) */
    #pragma omp parallel for private(ii, jj)
    for (ii = 0; ii < N_kw; ii++) {
        for (jj = 0; jj < N_kw; jj++) {
            if (ii > jj) {
                int n1, n2, n3;
                n3 = 0;
                if ((*matrix_padded)[ii*N_kw + ii] > (*matrix)[ii*N_kw + ii]) {
                    n1 = (*matrix)[ii*N_kw + jj];
                    n3 += gsl_ran_hypergeometric(r1, (*matrix_padded)[ii*N_kw + ii] - (*matrix)[ii*N_kw + ii], N_doc, (*matrix_padded)[jj*N_kw + jj]);
                }
                else
                    n1 = gsl_ran_hypergeometric(r1, (*matrix)[ii*N_kw + jj], (*matrix)[ii*N_kw + ii] - (*matrix)[ii*N_kw + jj], (*matrix_padded)[ii*N_kw + ii] - (*matrix)[ii*N_kw + jj]);
                if ((*matrix_padded)[jj*N_kw + jj] > (*matrix)[jj*N_kw + jj]) {
                    n2 = n1;
                    n3 += gsl_ran_hypergeometric(r1, (*matrix_padded)[jj*N_kw + jj] - (*matrix)[jj*N_kw + jj], N_doc, (*matrix_padded)[ii*N_kw + ii] - (*matrix)[ii*N_kw + jj]);
                }
                else
                    n2 = gsl_ran_hypergeometric(r1, n1, (*matrix)[jj*N_kw + jj] - n1, (*matrix_padded)[jj*N_kw + jj]);
                (*matrix_padded)[ii*N_kw + jj] = n2 + n3;
                (*matrix_padded)[jj*N_kw + ii] = n2 + n3;
            }
        }
    }
    gsl_rng_free(r1);
    gsl_rng_free(r2);
}

/* Copy the padded integer matrix into a gsl_matrix of doubles (the
 * "observed" counts used by downstream linear algebra). */
void observe_matrix(gsl_matrix* matrix_obs, int** matrix_padded, int N_kw)
{
    // perform observed count generation
    for (int ii = 0; ii < N_kw; ii++)
        for (int jj = 0; jj < N_kw; jj++)
            gsl_matrix_set(matrix_obs, ii, jj, (double) ((*matrix_padded)[ii*N_kw + jj]));
}

/* Propose a neighboring permutation (for annealing-style search): pick a
 * random observed position idx1, reassign it to a random keyword idx_new in
 * permutation_tmp; if idx_new was already assigned (tracked via
 * permutation_inv), swap the old assignment to that position (idx2).
 * Outputs: *idx1 always set; *idx2 set to the swapped position or -1.
 * NOTE(review): `count` is unused; uses rand(), so seeding is the caller's
 * responsibility. */
void permutation_generation(int* idx1, int* idx2, int** permutation_tmp, int** permutation, int** permutation_inv, int N_kw, int N_obs)
{
    int count = 0;
    *idx1 = rand() % N_obs;
    *idx2 = -1;
    int idx_old = (*permutation)[*idx1];
    int idx_new = rand() % N_kw;
    (*permutation_tmp)[*idx1] = idx_new;
    if ((*permutation_inv)[idx_new] >= 0) {
        *idx2 = (*permutation_inv)[idx_new];
        (*permutation_tmp)[*idx2] = idx_old;
    }
}
convolution_pack4.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Generic convolution for elempack=4 blobs using SSE intrinsics.
// Each input/output element is a group of 4 floats; for every output pixel the
// kernel accumulates, over all input channels and kernel taps, a 4x4
// matrix-vector product: the 4 input lanes are each broadcast and multiplied
// against 4 packed weight vectors (16 floats per tap in weight_data_packed).
// Output channels are processed in parallel via OpenMP.
static void convolution_pack4_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_packed, const Mat& bias_data, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt)
{
    int w = bottom_blob.w;
    int channels = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const int maxk = kernel_w * kernel_h;

    // kernel offsets: element offsets (in elempack units) of each kernel tap
    // relative to the window's top-left element, accounting for dilation
    std::vector<int> _space_ofs(maxk);
    int* space_ofs = &_space_ofs[0];
    {
        int p1 = 0;
        int p2 = 0;
        int gap = w * dilation_h - kernel_w * dilation_w; // jump to next kernel row
        for (int i = 0; i < kernel_h; i++)
        {
            for (int j = 0; j < kernel_w; j++)
            {
                space_ofs[p1] = p2;
                p1++;
                p2 += dilation_w;
            }
            p2 += gap;
        }
    }

    const float* bias_data_ptr = bias_data;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        float* outptr = top_blob.channel(p);

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                // start from the per-output-channel bias (4 lanes) or zero
                __m128 _sum = _mm_setzero_ps();

                if (bias_data_ptr)
                {
                    _sum = _mm_loadu_ps(bias_data_ptr + p * 4);
                }

                const float* kptr = weight_data_packed.channel(p);

                // channels
                for (int q = 0; q < channels; q++)
                {
                    const Mat m = bottom_blob.channel(q);
                    // top-left element of the receptive field for this pixel
                    const float* sptr = m.row(i * stride_h) + j * stride_w * 4;

                    for (int k = 0; k < maxk; k++)
                    {
                        const float* slptr = sptr + space_ofs[k] * 4;

                        // broadcast each of the 4 input lanes...
                        __m128 _val0 = _mm_load1_ps(slptr);
                        __m128 _val1 = _mm_load1_ps(slptr + 1);
                        __m128 _val2 = _mm_load1_ps(slptr + 2);
                        __m128 _val3 = _mm_load1_ps(slptr + 3);

                        // ...and multiply-accumulate against the 4 weight rows
                        __m128 _w0 = _mm_load_ps(kptr);
                        __m128 _w1 = _mm_load_ps(kptr + 4);
                        __m128 _w2 = _mm_load_ps(kptr + 8);
                        __m128 _w3 = _mm_load_ps(kptr + 12);

                        _sum = _mm_comp_fmadd_ps(_val0, _w0, _sum);
                        _sum = _mm_comp_fmadd_ps(_val1, _w1, _sum);
                        _sum = _mm_comp_fmadd_ps(_val2, _w2, _sum);
                        _sum = _mm_comp_fmadd_ps(_val3, _w3, _sum);

                        kptr += 16; // 4x4 weights consumed per tap
                    }
                }

                _sum = activation_sse(_sum, activation_type, activation_params);

                _mm_storeu_ps(outptr + j * 4, _sum);
            }

            outptr += outw * 4;
        }
    }
}
3d25pt.lbpar.c
#include <omp.h>
#include <math.h>
#define ceild(n,d)  ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y)    ((x) > (y)? (x) : (y))
#define min(x,y)    ((x) < (y)? (x) : (y))

/*
 * Order-2, 3D 25 point stencil
 * Adapted from PLUTO and Pochoir test bench
 *
 * Tareq Malas
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif

#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y)    ((x) < (y)? (x) : (y))
#endif

/* Subtract the `struct timeval' values X and Y,
 * storing the result in RESULT.
 *
 * Return 1 if the difference is negative, otherwise 0.
 * Note: normalizes by mutating Y's fields (classic glibc idiom). */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Perform the carry for the later subtraction by updating y. */
  if (x->tv_usec < y->tv_usec) {
    int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;

    y->tv_usec -= 1000000 * nsec;
    y->tv_sec += nsec;
  }

  if (x->tv_usec - y->tv_usec > 1000000) {
    int nsec = (x->tv_usec - y->tv_usec) / 1000000;

    y->tv_usec += 1000000 * nsec;
    y->tv_sec -= nsec;
  }

  /* Compute the time remaining to wait.
   * tv_usec is certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  /* Return 1 if result is negative. */
  return x->tv_sec < y->tv_sec;
}

/* Benchmark driver: allocates two time-planes of a (Nz,Ny,Nx) grid plus a
 * coefficient field roc2, then runs a diamond-tiled (PLUTO/CLooG-generated)
 * 25-point order-2 wave stencil Nt times, TESTS times over, reporting the
 * wall time of each run.
 * Usage: prog Nx Ny Nz [Nt]  (each spatial size gets +8 halo).
 * NOTE(review): Nx/Ny/Nz (and Nt) stay uninitialized when too few arguments
 * are given; the first malloc'd roc2 (sizeof(double**)) is leaked by the
 * immediate reassignment; and the init loops start at 1 so plane index 0 is
 * read uninitialized by the stencil — harmless for timing, not for values. */
int main(int argc, char *argv[])
{
  int t, i, j, k, test;
  int Nx, Ny, Nz, Nt;
  if (argc > 3) {
    Nx = atoi(argv[1])+8;
    Ny = atoi(argv[2])+8;
    Nz = atoi(argv[3])+8;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  double ****A = (double ****) malloc(sizeof(double***)*2);
  double ***roc2 = (double ***) malloc(sizeof(double**));

  A[0] = (double ***) malloc(sizeof(double**)*Nz);
  A[1] = (double ***) malloc(sizeof(double**)*Nz);
  roc2 = (double ***) malloc(sizeof(double**)*Nz);

  for(i=0; i<Nz; i++){
    A[0][i] = (double**) malloc(sizeof(double*)*Ny);
    A[1][i] = (double**) malloc(sizeof(double*)*Ny);
    roc2[i] = (double**) malloc(sizeof(double*)*Ny);
    for(j=0;j<Ny;j++){
      A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
      A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
      roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 4;
  tile_size[1] = 4;
  tile_size[2] = 24;
  tile_size[3] = 64;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;

  const int BASE = 1024;

  // initialize variables
  // srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
        roc2[i][j][k] = 2.0 * (rand() % BASE);
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  /* 25-point stencil coefficients: center plus 4 shells at distance 1..4
   * along each axis */
  const double coef0 = -0.28472;
  const double coef1 = 0.16000;
  const double coef2 = -0.02000;
  const double coef3 = 0.00254;
  const double coef4 = -0.00018;

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);

    // serial execution - Addition: 6 && Multiplication: 2

/* Copyright (C) 1991-2014 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
/* This header is separate from features.h so that the compiler can
   include it implicitly at the start of every compilation.  It must
   not itself include <features.h> or any other header that includes
   <features.h> because the implicit include comes before any feature
   test macros that may be defined in a source file before it first
   explicitly includes a system header.  GCC knows the name of this
   header in order to preinclude it.  */
/* glibc's intent is to support the IEC 559 math functionality, real
   and complex.  If the GCC (4.9 and later) predefined macros
   specifying compiler intent are available, use them to determine
   whether the overall intent is to support these features; otherwise,
   presume an older compiler has intent to support these features and
   define these macros by default.  */
/* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
   Unicode 6.0.  */
/* We do not support C11 <threads.h>.  */

    /* Diamond-tiled time-space loop nest generated by PLUTO/CLooG; t1..t8
     * are tile and point coordinates, with the spatial point recovered as
     * (-4*t5 + t6/t7/t8) and time plane alternating via t5 % 2. Generated
     * code — do not hand-edit the bounds. */
    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lb, ub, lbp, ubp, lb2, ub2;
    register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
  for (t1=-1;t1<=2*Nt-2;t1++) {
    lbp=ceild(t1+2,2);
    ubp=min(floord(4*Nt+Nz-9,4),floord(2*t1+Nz-4,4));
    /* parallelize over the second tile dimension (wavefront-safe) */
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
    for (t2=lbp;t2<=ubp;t2++) {
      for (t3=max(ceild(t1-8,12),ceild(4*t2-Nz-11,24));t3<=min(min(floord(4*Nt+Ny-9,24),floord(2*t1+Ny-3,24)),floord(4*t2+Ny-9,24));t3++) {
        for (t4=max(max(ceild(t1-28,32),ceild(4*t2-Nz-51,64)),ceild(24*t3-Ny-51,64));t4<=min(min(min(floord(4*Nt+Nx-9,64),floord(2*t1+Nx-3,64)),floord(4*t2+Nx-9,64)),floord(24*t3+Nx+11,64));t4++) {
          for (t5=max(max(max(ceild(t1,2),ceild(4*t2-Nz+5,4)),ceild(24*t3-Ny+5,4)),ceild(64*t4-Nx+5,4));t5<=floord(t1+1,2);t5++) {
            for (t6=max(4*t2,-4*t1+4*t2+8*t5-3);t6<=min(min(4*t2+3,-4*t1+4*t2+8*t5),4*t5+Nz-5);t6++) {
              for (t7=max(24*t3,4*t5+4);t7<=min(24*t3+23,4*t5+Ny-5);t7++) {
                lbv=max(64*t4,4*t5+4);
                ubv=min(64*t4+63,4*t5+Nx-5);
#pragma ivdep
#pragma vector always
                for (t8=lbv;t8<=ubv;t8++) {
                  /* leapfrog update: next = 2*curr - prev + roc2 * (25-pt Laplacian) */
                  A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((2.0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) - A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (roc2[ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (((((coef0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef1 * (((((A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef2 * (((((A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef3 * (((((A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef4 * (((((A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])))));;
                }
              }
            }
          }
        }
      }
    }
  }
}
/* End of CLooG code */

    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(4, "constant")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
      free(roc2[i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
    free(roc2[i]);
  }
  free(A[0]);
  free(A[1]);
  free(roc2);
  return 0;
}
BSConstantSumWots.h
#pragma once

#include "wots/ConstantSumWots.h"
#include <iostream>

/*
 * Binary-search variant of constant-sum WOTS encoding.
 *
 * Template parameters: D = digest primitive, W = per-block maximum,
 * T = number of blocks, S = the constant sum the blocks must reach.
 * Replaces the base class's unranking with a rank-based binary search, and
 * offers a signature check (check_encoding / fast_verify) that validates a
 * block vector against the message digest without re-running the encoding.
 */
template <class D, int W, int T, int S>
class BSConstantSumWots : public virtual ConstantSumWots<D, W, T, S> {
public:
  BSConstantSumWots() noexcept {};
  BSConstantSumWots(const ByteArray& seed) noexcept : ConstantSumWots<D,W,T,S>(seed) {};

  // Public test hook for the protected rank() below.
  mpz_class prank(int blocks, int max, int sum, int j) {
    return this->rank(blocks, max, sum, j);
  };

  // Public test hook for the base class's constantSumLen().
  mpz_class pconst(int blocks, int max, int sum) {
    return this->constantSumLen(blocks, max, sum);
  };

  /*
   * Verify that `blocks` is exactly the constant-sum encoding of the digest
   * of `data`: walk the blocks, subtracting each block's rank interval from
   * the big integer I; I must fall inside [rank(j-1), rank(j)] at every step.
   * NOTE(review): when blocks[r-1] == 0, rank() is called with j == -1 —
   * presumably the inclusion-exclusion sum degrades gracefully; confirm.
   */
  virtual bool check_encoding(ByteArray& data, std::vector<unsigned int>& blocks) {
    ByteArray aux = this->digest(data);
    mpz_class I;
    I.set_str(std::to_string(aux), 16);  // digest hex -> big integer
    int s = S;                           // remaining sum to distribute
    mpz_class keep;
    mpz_class keep2;
    for(int r = 1; r <=T; r++) {
      keep = rank(T-r, W, s, blocks[r-1]-1);
      if( I < keep ) {
        return false;
      }
      if( I > rank(T-r, W, s, blocks[r-1]) ) {
        return false;
      }
      s -=blocks[r-1];
      I -= keep;
    }
    return true;
  };

  /*
   * Verify a signature when the caller already supplies the block vector:
   * confirm the blocks encode the digest (check_encoding), then advance each
   * signature element down its hash chain by its block value and compare the
   * digest of the concatenation against the loaded public key.
   */
  virtual bool fast_verify(ByteArray& data, std::vector<ByteArray>& signature, std::vector<unsigned int>& blocks) {
    if(not this->pubKeyIsLoaded())
      return false;
    if(! check_encoding(data, blocks) )
      return false;
    ByteArray check;
    //#pragma omp parallel for
    for(long unsigned int i = 0; i < blocks.size(); i++) {
      check += this->digestChain(signature[i], blocks[i]);
    }
    check = this->digest(check);
    //TODO: We can improve this using xor and a vector iterator
    if( std::to_string(this->public_key).compare(std::to_string(check)) == 0 )
      return true;
    return false;
  };

protected:
  /*
   * Rank of value j for the first block when `blocks` further blocks must
   * each take values in [0, max] and together reach `sum`: the number of
   * constant-sum vectors whose leading block is <= j, computed by
   * inclusion-exclusion over the cap `max` (alternating-sign binomial sum).
   * NOTE(review): std::pow(-1,k) yields a double that multiplies mpz_class
   * terms — exact for +/-1, but an integer sign flip would be cleaner.
   */
  virtual mpz_class rank(int blocks, int max, int sum, int j) {
    mpz_class ret = 0;
    int aux = floor(float(sum)/float(max+1));
    int kmax = std::min(blocks, aux);
    for(int k = 0; k <= kmax; k++ ) {
      ret += std::pow(-1,k) * this->binomial(blocks, k) *
             ( this->binomial(sum - (max+1)*k + blocks, blocks)
             - this->binomial(sum - (max+1)*k + blocks -1 -j, blocks) );
    }
    return ret;
  };

  /*
   * Unrank I into a constant-sum block vector using a binary search over
   * rank() per block.
   *
   * std::upper_bound (C++20) implementation for the binary search
   * https://en.cppreference.com/w/cpp/algorithm/lower_bound
   */
  virtual std::vector<unsigned int> toConstantSum(mpz_class& I, int blocks, int max, int sum) {
    // Suppress unused-parameter warnings (interface fixed by the base class;
    // this variant uses the compile-time T/W/S instead).
    (void)blocks;
    (void)max;
    (void)sum;
    unsigned int count, k, it, step;
    std::vector<unsigned int> ret;
    unsigned int s = S;
    for (unsigned int b = 1; b <= T; b++ ) {
      count = ( W < s)?W:s;   // search space: block value in [0, min(W, s)]
      k = 0;
      mpz_class keep;
      while ( count > 0 ) {
        it = k;
        step = count/2;
        it += step;
        keep = this->rank(T-b, W, s, it);
        if ( keep < I ) {     // upper_bound: first k with rank(k) >= I
          k = ++it;
          count -= step + 1;
        }
        else {
          count = step;
        }
      }
      ret.push_back(k);
      if(k>0) {
        I -= this->rank(T-b, W, s, k-1);  // strip this block's rank interval
        s -= k;                           // remaining sum for later blocks
      }
    }
    return ret;
  }
};
graph.c
/******************************************************************************
 * INCLUDES
 *****************************************************************************/
#include "base.h"
#include "graph.h"
#include "csf.h"
#include "sort.h"
#include "util.h"

#ifdef SPLATT_USE_PATOH
#include <patoh.h>
#endif

#ifdef SPLATT_USE_ASHADO
#include <ashado.h>
#endif

#ifdef SPLATT_USE_METIS
/* don't let metis types conflict with splatt */
#define idx_t metis_idx_t
#include <metis.h>
#undef idx_t
#endif

/* use multi-constraint balancing for m-partite graphs */
#ifndef SPLATT_USE_VTX_WGTS
#define SPLATT_USE_VTX_WGTS 0
#endif

/******************************************************************************
 * TYPES
 *****************************************************************************/

/**
* @brief Represents a set with a known (and reasonable) maximum value.
*/
typedef struct
{
  wgt_t * counts; /** The number of times an element was updated. */
  vtx_t * seen;   /** The (unsorted) list of elements that have been seen. */
  vtx_t nseen;    /** The length of seen[]. */
} adj_set;

/******************************************************************************
 * PRIVATE FUNCTIONS
 *****************************************************************************/

/**
* @brief Allocate/initialize a set.
*
* @param set The set to allocate.
* @param max_size The maximum element in the set. 2x this memory is allocated.
*/
static void p_set_init(
    adj_set * set,
    vtx_t max_size)
{
  /* calloc so counts[] starts at 0 for every possible element */
  set->counts = calloc(max_size, sizeof(*(set->counts)));
  set->seen = calloc(max_size, sizeof(*(set->seen)));
  set->nseen = 0;
}

/**
* @brief Free all memory allocated for a set.
*
* @param set The set to free.
*/
static void p_set_free(
    adj_set * set)
{
  set->nseen = 0;
  free(set->counts);
  free(set->seen);
}

/**
* @brief Remove (but do not free) all elements from a set. This runs in
*        O(nseen) time.
*
* @param set the set to clear.
*/
static void p_set_clear(
    adj_set * set)
{
  wgt_t * const counts = set->counts;
  vtx_t * const seen = set->seen;
  /* only reset the slots that were actually touched */
  for(vtx_t i=0; i < set->nseen; ++i) {
    counts[seen[i]] = 0;
    seen[i] = 0;
  }

  set->nseen = 0;
}

/**
* @brief Add a new element to the set or update its count.
*
* @param set The set to modify.
* @param vid The id of the element.
* @param upd How much to modify counts[] by.
*/
static void p_set_update(
    adj_set * set,
    vtx_t vid,
    wgt_t upd)
{
  /* add to set if necessary */
  if(set->counts[vid] == 0) {
    set->seen[set->nseen] = vid;
    set->nseen += 1;
  }

  /* update count */
  set->counts[vid] += upd;
}

/**
* @brief Count the number of edges (i.e., the size of adjacency list) of a
*        sparse tensor converted to m-partite graph.
*
* @param csf The tensor to convert.
*
* @return The number of edges.
*/
static adj_t p_count_adj_size(
    splatt_csf * const csf)
{
  adj_t ncon = 0;

  assert(csf->ntiles == 1);
  csf_sparsity * pt = csf->pt;
  vtx_t const nvtxs = pt->nfibs[0];

  /* type better be big enough */
  assert((idx_t) nvtxs == (vtx_t) nvtxs);

  /* the set must hold any index of the largest mode */
  adj_set set;
  p_set_init(&set, csf->dims[argmax_elem(csf->dims, csf->nmodes)]);

  idx_t parent_start = 0;
  idx_t parent_end = 0;
  for(vtx_t v=0; v < nvtxs; ++v) {
    parent_start = v;
    parent_end = v+1;

    /* walk down the CSF tree, counting distinct indices per depth */
    for(idx_t d=1; d < csf->nmodes; ++d) {
      idx_t const start = pt->fptr[d-1][parent_start];
      idx_t const end = pt->fptr[d-1][parent_end];

      idx_t const * const fids = pt->fids[d];
      for(idx_t f=start; f < end; ++f) {
        p_set_update(&set, fids[f], 1);
      }

      /* each distinct index seen at this depth is one adjacency entry */
      ncon += set.nseen;

      /* prepare for next level in the tree */
      parent_start = start;
      parent_end = end;
      p_set_clear(&set);
    }
  }

  p_set_free(&set);
  return ncon;
}

/**
* @brief Compute the offset of a certain CSF tree depth (when all indices are
*        mapped to vertices). This accounts for CSF mode permutation.
*
*        For example, with no permutation and depth=2, this returns
*        csf->dims[0] + csf->dims[1].
*
* @param csf The tensor to use for calculation.
* @param depth The depth to work on.
*
* @return The offset.
*/
static idx_t p_calc_offset(
    splatt_csf const * const csf,
    idx_t const depth)
{
  idx_t const mode = csf_depth_to_mode(csf, depth);
  idx_t offset = 0;
  /* sum the (original, unpermuted) dimensions of all earlier modes */
  for(idx_t m=0; m < mode; ++m) {
    offset += csf->dims[m];
  }
  return offset;
}

/**
* @brief Count the nonzeros below a given node in a CSF tensor.
*
* @param fptr The adjacency pointer of the CSF tensor.
* @param nmodes The number of modes in the tensor.
* @param depth The depth of the node
* @param fiber The id of the node.
*
* @return The nonzeros below fptr[depth][fiber].
*/
static wgt_t p_count_nnz(
    idx_t * * fptr,
    idx_t const nmodes,
    idx_t depth,
    idx_t const fiber)
{
  /* leaf level: a single nonzero */
  if(depth == nmodes-1) {
    return 1;
  }

  /* follow the [left, right) span down to the leaf level */
  idx_t left = fptr[depth][fiber];
  idx_t right = fptr[depth][fiber+1];
  ++depth;

  for(; depth < nmodes-1; ++depth) {
    left = fptr[depth][left];
    right = fptr[depth][right];
  }

  return right - left;
}

/**
* @brief Fill the contents of a splatt_graph. The graph must already be
*        allocated!
*
* @param csf The tensor to convert.
* @param graph The graph to fill, ALREADY ALLOCATED!
*/
static void p_fill_ijk_graph(
    splatt_csf const * const csf,
    splatt_graph * graph)
{
  csf_sparsity * pt = csf->pt;
  vtx_t const nvtxs = graph->nvtxs;

  adj_set set;
  p_set_init(&set, csf->dims[argmax_elem(csf->dims, csf->nmodes)]);

  /* pointing into eind */
  adj_t ncon = 0;

  /* start/end of my subtree */
  idx_t parent_start;
  idx_t parent_end;

  for(vtx_t v=0; v < nvtxs; ++v) {
    parent_start = v;
    parent_end = v+1;

    graph->eptr[v] = ncon;

    for(idx_t d=1; d < csf->nmodes; ++d) {
      idx_t const start = pt->fptr[d-1][parent_start];
      idx_t const end = pt->fptr[d-1][parent_end];

      /* compute adjacency info: edge weight is the nnz under each index */
      idx_t const * const fids = pt->fids[d];
      for(idx_t f=start; f < end; ++f) {
        p_set_update(&set, fids[f], p_count_nnz(pt->fptr, csf->nmodes, d, f));
      }

      /* things break if vtx size isn't our sorting size... */
      if(sizeof(*(set.seen)) == sizeof(splatt_idx_t)) {
        quicksort((idx_t *) set.seen, set.nseen);
      }

      /* fill in graph->eind */
      idx_t const id_offset = p_calc_offset(csf, d);
      for(vtx_t e=0; e < set.nseen; ++e) {
        graph->eind[ncon] = set.seen[e] + id_offset;
        if(graph->ewgts != NULL) {
          graph->ewgts[ncon] = set.counts[set.seen[e]];
        }
        ++ncon;
      }

      /* prepare for next level in the tree */
      parent_start = start;
      parent_end = end;
      p_set_clear(&set);
    }
  }

  p_set_free(&set);

  graph->eptr[nvtxs] = graph->nedges;
}

/**
* @brief Fill the multi-constraint vertex weights with the #nnz that appear in
*        each index.
*
* @param graph The graph to fill.
* @param tt The tensor we are converting.
*/
static void p_fill_graph_vwgts(
    splatt_graph * const graph,
    sptensor_t const * const tt)
{
  idx_t const nnz = tt->nnz;

  assert(graph->nvwgts == tt->nmodes);

  wgt_t * const vwgts = graph->vwgts;
  memset(vwgts, 0, graph->nvtxs * graph->nvwgts * sizeof(*vwgts));

  idx_t offset = 0;
  for(idx_t m=0; m < tt->nmodes; ++m) {
    idx_t const * const inds = tt->ind[m];

    /* each nnz appearance is 1 weight (constraint m for mode-m vertices) */
    for(idx_t x=0; x < nnz; ++x) {
      idx_t const v = inds[x] + offset;
      vwgts[m + (v * graph->nvwgts)] += 1;
    }

    offset += tt->dims[m];
  }
}

/**
* @brief Takes a list of graphs and returns them stacked on top of each other.
*        No adjacency lists are altered, only vertices added.
*
* @param graphs The graphs to merge.
* @param ngraphs The number of graphs.
*
* @return All graphs stacked.
*/
static splatt_graph * p_merge_graphs(
    splatt_graph * * graphs,
    idx_t const ngraphs)
{
  /* count total size */
  vtx_t nvtxs = 0;
  adj_t ncon = 0;
  for(idx_t m=0; m < ngraphs; ++m) {
    nvtxs += graphs[m]->nvtxs;
    ncon += graphs[m]->nedges;
  }

  splatt_graph * ret = graph_alloc(nvtxs, ncon, graphs[0]->nvwgts, graphs[0]->ewgts != NULL);

  /* fill in ret: shift each graph's eptr by the edges already copied;
   * eind values are kept as-is (they are already global vertex ids) */
  vtx_t voffset = 0;
  adj_t eoffset = 0;
  for(idx_t m=0; m < ngraphs; ++m) {
    for(vtx_t v=0; v < graphs[m]->nvtxs; ++v) {
      vtx_t const * const eptr = graphs[m]->eptr;
      adj_t const * const eind = graphs[m]->eind;
      wgt_t const * const ewgts = graphs[m]->ewgts;

      ret->eptr[v + voffset] = eptr[v] + eoffset;
      for(adj_t e=eptr[v]; e < eptr[v+1]; ++e) {
        ret->eind[e + eoffset] = eind[e];
        if(ret->ewgts != NULL) {
          ret->ewgts[e + eoffset] = ewgts[e];
        }
      }
    }

    voffset += graphs[m]->nvtxs;
    eoffset += graphs[m]->nedges;
  }

  return ret;
}

/**
* @brief Fill the vertex weights array.
*
* @param ft The CSF tensor to derive vertex weights from.
* @param hg The hypegraph structure to modify.
* @param which Vertex weight model to follow, see graph.h.
*/
static void p_fill_vwts(
    ftensor_t const * const ft,
    hgraph_t * const hg,
    hgraph_vwt_type const which)
{
  switch(which) {
  case VTX_WT_NONE:
    hg->vwts = NULL;
    break;

  /* weight based on nnz in fiber */
  case VTX_WT_FIB_NNZ:
    hg->vwts = (idx_t *) splatt_malloc(hg->nvtxs * sizeof(idx_t));
    #pragma omp parallel for
    for(idx_t v=0; v < hg->nvtxs; ++v) {
      hg->vwts[v] = ft->fptr[v+1] - ft->fptr[v];
    }
  }
}

/**
* @brief Maps an index in a mode of a permuted CSF tensor to a global vertex
*        index. This accounts for the mode permutation using the CSF dim-perm.
*
* @param id The index we are converting (local to the mode).
* @param mode The mode the index lies in (LOCAL TO THE CSF TENSOR).
*             EXAMPLE: a 3 mode tensor would use mode-0 to represent slices,
*             mode-1 to represent fids, and mode-2 to represent the fiber nnz.
* @param ft The CSF tensor with dim_perm.
*
* @return 'id', converted to global vertex indices. EXAMPLE: k -> (I+J+k).
*/
static idx_t p_map_idx(
    idx_t id,
    idx_t const mode,
    ftensor_t const * const ft)
{
  idx_t m = 0;
  /* accumulate the dims of all original modes that precede this one */
  while(m != ft->dim_perm[mode]) {
    id += ft->dims[m++];
  }
  return id;
}

/******************************************************************************
 * PUBLIC FUNCTIONS
 *****************************************************************************/

/* Build a hypergraph whose vertices are the tensor nonzeros and whose
 * hyperedges are the tensor indices (one edge per slice of every mode). */
hgraph_t * hgraph_nnz_alloc(
    sptensor_t const * const tt)
{
  hgraph_t * hg = (hgraph_t *) splatt_malloc(sizeof(hgraph_t));
  hg->nvtxs = tt->nnz;
  p_fill_vwts(NULL, hg, VTX_WT_NONE);

  /* # hyper-edges = I + J + K + ... */
  hg->hewts = NULL;
  hg->nhedges = 0;
  for(idx_t m=0; m < tt->nmodes; ++m) {
    hg->nhedges += tt->dims[m];
  }

  /* fill in eptr shifted by 1 index (counting-sort style histogram). */
  hg->eptr = (idx_t *) calloc(hg->nhedges+1, sizeof(idx_t));
  idx_t * const restrict eptr = hg->eptr;
  idx_t offset = 1;
  for(idx_t m=0; m < tt->nmodes; ++m) {
    idx_t const * const restrict ind = tt->ind[m];
    for(idx_t n=0; n < tt->nnz; ++n) {
      eptr[offset+ind[n]] += 1;
    }
    offset += tt->dims[m];
  }

  /* do a shifted prefix sum to get eptr */
  idx_t saved = eptr[1];
  eptr[1] = 0;
  for(idx_t i=2; i <= hg->nhedges; ++i) {
    idx_t tmp = eptr[i];
    eptr[i] = eptr[i-1] + saved;
    saved = tmp;
  }

  /* each nnz causes 'nmodes' connections */
  hg->eind = (idx_t *) splatt_malloc(tt->nnz * tt->nmodes * sizeof(idx_t));
  idx_t * const restrict eind = hg->eind;
  offset = 1;
  for(idx_t m=0; m < tt->nmodes; ++m) {
    idx_t const * const restrict ind = tt->ind[m];
    for(idx_t n=0; n < tt->nnz; ++n) {
      /* the shifted eptr doubles as a write cursor here */
      eind[eptr[offset+ind[n]]++] = n;
    }
    offset += tt->dims[m];
  }

  assert(eptr[hg->nhedges] == tt->nnz * tt->nmodes);

  return hg;
}

/* Build a hypergraph whose vertices are the mode-`mode` fibers of ft and
 * whose hyperedges are the tensor indices of all modes. */
hgraph_t * hgraph_fib_alloc(
    ftensor_t const * const ft,
    idx_t const mode)
{
  hgraph_t * hg = (hgraph_t *) splatt_malloc(sizeof(hgraph_t));

  /* vertex weights are nnz per fiber */
  hg->nvtxs = ft->nfibs;
  p_fill_vwts(ft, hg, VTX_WT_FIB_NNZ);

  /* # hyper-edges = I + J + K + ... */
  hg->hewts = NULL;
  hg->nhedges = 0;
  for(idx_t m=0; m < ft->nmodes; ++m) {
    hg->nhedges += ft->dims[m];
  }

  /* fill in eptr shifted by 1 idx:
   *   a) each nnz induces a hyperedge connection
   *   b) each non-fiber mode accounts for a hyperedge connection */
  hg->eptr = (idx_t *) calloc(hg->nhedges+1, sizeof(idx_t));
  idx_t * const restrict eptr = hg->eptr;
  for(idx_t s=0; s < ft->nslcs; ++s) {
    /* the slice hyperedge has nfibers more connections */
    eptr[1+p_map_idx(s, 0, ft)] += ft->sptr[s+1] - ft->sptr[s];

    for(idx_t f=ft->sptr[s]; f < ft->sptr[s+1]; ++f) {
      /* fiber makes another connection with fid */
      eptr[1+p_map_idx(ft->fids[f], 1, ft)] += 1;

      /* each nnz now has a contribution too */
      for(idx_t jj=ft->fptr[f]; jj < ft->fptr[f+1]; ++jj) {
        eptr[1+p_map_idx(ft->inds[jj], 2, ft)] += 1;
      }
    }
  }

  /* do a shifted prefix sum to get eptr */
  idx_t ncon = eptr[1];
  idx_t saved = eptr[1];
  eptr[1] = 0;
  for(idx_t i=2; i <= hg->nhedges; ++i) {
    ncon += eptr[i];
    idx_t tmp = eptr[i];
    eptr[i] = eptr[i-1] + saved;
    saved = tmp;
  }

  hg->eind = (idx_t *) splatt_malloc(ncon * sizeof(idx_t));
  idx_t * const restrict eind = hg->eind;

  /* now fill in eind while using eptr as a marker */
  for(idx_t s=0; s < ft->nslcs; ++s) {
    idx_t const sid = p_map_idx(s, 0, ft);
    for(idx_t f = ft->sptr[s]; f < ft->sptr[s+1]; ++f) {
      idx_t const fid = p_map_idx(ft->fids[f], 1, ft);
      eind[eptr[1+sid]++] = f;
      eind[eptr[1+fid]++] = f;
      for(idx_t jj=ft->fptr[f]; jj < ft->fptr[f+1]; ++jj) {
        idx_t const nid = p_map_idx(ft->inds[jj], 2, ft);
        eind[eptr[1+nid]++] = f;
      }
    }
  }

  return hg;
}

/* Return the ids of all hyperedges that are NOT cut by the partitioning
 * `parts` (i.e., all of the edge's vertices are in the same part), and write
 * their count to *ret_nnotcut.
 * NOTE(review): the local `ncut`/`iscut` names are inverted relative to what
 * they hold — ncut counts UNcut edges, matching ret_nnotcut. */
idx_t * hgraph_uncut(
    hgraph_t const * const hg,
    idx_t const * const parts,
    idx_t * const ret_nnotcut)
{
  idx_t const nhedges = (idx_t) hg->nhedges;
  idx_t const nvtxs = (idx_t)hg->nvtxs;
  idx_t const * const eptr = hg->eptr;
  idx_t const * const eind = hg->eind;

  /* first pass: count uncut edges so we can size the output exactly */
  idx_t ncut = 0;
  for(idx_t h=0; h < nhedges; ++h) {
    int iscut = 0;
    idx_t const firstpart = parts[eind[eptr[h]]];
    for(idx_t e=eptr[h]+1; e < eptr[h+1]; ++e) {
      idx_t const vtx = eind[e];
      if(parts[vtx] != firstpart) {
        iscut = 1;
        break;
      }
    }
    if(iscut == 0) {
      ++ncut;
    }
  }
  *ret_nnotcut = ncut;

  /* go back and fill in uncut edges */
  idx_t * cut = (idx_t *) splatt_malloc(ncut * sizeof(idx_t));
  idx_t ptr = 0;
  for(idx_t h=0; h < nhedges; ++h) {
    int iscut = 0;
    idx_t const firstpart = parts[eind[eptr[h]]];
    for(idx_t e=eptr[h]+1; e < eptr[h+1]; ++e) {
      idx_t const vtx = eind[e];
      if(parts[vtx] != firstpart) {
        iscut = 1;
        break;
      }
    }
    if(iscut == 0) {
      cut[ptr++] = h;
    }
  }

  return cut;
}

void hgraph_free(
    hgraph_t * hg)
{
  free(hg->eptr);
  free(hg->eind);
  free(hg->vwts);
  free(hg->hewts);
  free(hg);
}

/* Convert a sparse tensor to an m-partite graph: build one per-mode graph
 * from an untiled CSF of each mode, then stack them into a single graph
 * (vertex ids are offset per mode; edge weights carry nnz counts). */
splatt_graph * graph_convert(
    sptensor_t * const tt)
{
  double * opts = splatt_default_opts();
  opts[SPLATT_OPTION_TILE] = SPLATT_NOTILE;
  splatt_graph * graphs[MAX_NMODES];
  splatt_csf csf;

  for(idx_t m=0; m < tt->nmodes; ++m) {
    csf_alloc_mode(tt, CSF_INORDER_MINUSONE, m, &csf, opts);

    /* count size of adjacency list */
    adj_t const ncon = p_count_adj_size(&csf);

#if SPLATT_USE_VTX_WGTS == 0
    graphs[m] = graph_alloc(tt->dims[m], ncon, 0, 1);
#else
    graphs[m] = graph_alloc(tt->dims[m], ncon, tt->nmodes, 1);
#endif
    p_fill_ijk_graph(&csf, graphs[m]);

    csf_free_mode(&csf);
  }

  /* merge graphs and write */
  splatt_graph * full_graph = p_merge_graphs(graphs, tt->nmodes);

  /* cleanup */
  splatt_free_opts(opts);
  for(idx_t m=0; m < tt->nmodes; ++m) {
    graph_free(graphs[m]);
  }

  /* handle vertex weights */
  if(full_graph->nvwgts > 0) {
    p_fill_graph_vwgts(full_graph, tt);
  }

  return full_graph;
}

splatt_graph * graph_alloc(
    vtx_t nvtxs,
    adj_t nedges,
    int num_vtx_wgts,
    int use_edge_wgts)
{
  splatt_graph * ret = splatt_malloc(sizeof(*ret));

  ret->nvtxs = nvtxs;
  ret->nedges = nedges;
  ret->eptr = splatt_malloc((nvtxs+1) * sizeof(*(ret->eptr)));
  ret->eind = splatt_malloc(nedges * sizeof(*(ret->eind)));

  ret->eptr[nvtxs] = nedges;

  ret->nvwgts = num_vtx_wgts;
  if(num_vtx_wgts) {
    ret->vwgts = splatt_malloc(nvtxs * ret->nvwgts * sizeof(*(ret->vwgts)));
  } else {
    ret->vwgts = NULL;
  }

  if(use_edge_wgts) {
    ret->ewgts = splatt_malloc(nedges *
sizeof(*(ret->ewgts))); } else { ret->ewgts = NULL; } return ret; } void graph_free( splatt_graph * graph) { free(graph->eptr); free(graph->eind); free(graph->vwgts); free(graph->ewgts); free(graph); } #ifdef SPLATT_USE_PATOH idx_t * patoh_part( hgraph_t const * const hg, idx_t const nparts) { PaToH_Parameters args; PaToH_Initialize_Parameters(&args, PATOH_CUTPART, PATOH_SUGPARAM_SPEED); int const nvtxs = hg->nvtxs; int const nnets = hg->nhedges; int const ncon = 1; /* vertex weights */ int * vwts = (int *) splatt_malloc(nvtxs * sizeof(int)); if(hg->vwts != NULL) { for(int v=0; v < nvtxs; ++v) { vwts[v] = (int) hg->vwts[v]; } } else { for(int v=0; v < nvtxs; ++v) { vwts[v] = 1; } } /* edge weights */ int * hwts = NULL; if(hg->hewts != NULL) { hwts = (int *) splatt_malloc(nnets * sizeof(int)); for(int h=0; h < nnets; ++h) { hwts[h] = (int) hg->hewts[h]; } } /* net start/end */ int * eptr = (int *) splatt_malloc((nnets+1) * sizeof(int)); for(int v=0; v <= nnets; ++v) { eptr[v] = (int) hg->eptr[v]; } /* netted vertices */ int * eind = (int *) splatt_malloc(eptr[nnets] * sizeof(int)); for(int v=0; v < eptr[nnets]; ++v) { eind[v] = (int) hg->eind[v]; } int * pvec = (int *) splatt_malloc(nvtxs * sizeof(int)); int * pwts = (int *) splatt_malloc(nparts * sizeof(int)); int cut; args._k = (int) nparts; PaToH_Alloc(&args, nvtxs, nnets, ncon, vwts, hwts, eptr, eind); /* do the partitioning! 
*/ PaToH_Part(&args, nvtxs, nnets, ncon, 0, vwts, hwts, eptr, eind, NULL, pvec, pwts, &cut); /* copy patoh output to idx_t */ idx_t * parts = (idx_t *) splatt_malloc(nvtxs * sizeof(idx_t)); for(idx_t p=0; p < hg->nvtxs; ++p) { parts[p] = (idx_t) pvec[p]; } PaToH_Free(); free(vwts); free(hwts); free(eptr); free(eind); free(pvec); free(pwts); return parts; } #endif #ifdef SPLATT_USE_ASHADO idx_t * ashado_part( hgraph_t const * const hg, idx_t const nparts) { double * opts = ashado_default_opts(); idx_t * part = (idx_t *) splatt_malloc(hg->nvtxs * sizeof(idx_t)); ashado_partition(nparts, hg->nvtxs, hg->nhedges, hg->eptr, hg->eind, hg->vwts, hg->hewts, opts, 5, part); free(opts); return part; } #endif #ifdef SPLATT_USE_METIS splatt_idx_t * metis_part( splatt_graph * graph, splatt_idx_t const num_partitions, splatt_idx_t * edgecut) { metis_idx_t nvtxs = graph->nvtxs; metis_idx_t ncon = 1; metis_idx_t nparts = num_partitions; metis_idx_t cut = 0; /* copy the adj structure */ metis_idx_t * xadj = splatt_malloc((nvtxs+1) * sizeof(*xadj)); for(metis_idx_t v=0; v <= nvtxs; ++v) { xadj[v] = graph->eptr[v]; } metis_idx_t * adjncy = splatt_malloc(xadj[nvtxs] * sizeof(*adjncy)); for(metis_idx_t e=0; e < xadj[nvtxs]; ++e) { adjncy[e] = graph->eind[e]; } /* weights */ metis_idx_t * vwgt = NULL; metis_idx_t * ewgt = NULL; if(graph->vwgts != NULL) { /* graph number of vertex weights */ ncon = graph->nvwgts; vwgt = splatt_malloc(nvtxs * ncon * sizeof(*vwgt)); for(metis_idx_t v=0; v < nvtxs * ncon; ++v) { vwgt[v] = graph->vwgts[v]; } } if(graph->ewgts != NULL) { ewgt = splatt_malloc(xadj[nvtxs] * sizeof(*ewgt)); for(metis_idx_t e=0; e < xadj[nvtxs]; ++e) { ewgt[e] = graph->ewgts[e]; } } /* allocate partitioning info */ metis_idx_t * metis_parts = splatt_malloc(nvtxs * sizeof(*metis_parts)); /* do the partitioning! 
*/ int ret = METIS_PartGraphRecursive(&nvtxs, &ncon, xadj, adjncy, vwgt, NULL, ewgt, &nparts, NULL, NULL, NULL /* opts */, &cut, metis_parts); if(ret != METIS_OK) { fprintf(stderr, "METIS_PartGraphRecursive returned %d\n", ret); } *edgecut = cut; /* cleanup */ splatt_free(xadj); splatt_free(adjncy); if(graph->vwgts != NULL) { splatt_free(vwgt); } if(graph->ewgts != NULL) { splatt_free(ewgt); } /* copy into splatt_idx_t */ splatt_idx_t * parts = splatt_malloc(nvtxs * sizeof(*parts)); for(metis_idx_t v=0; v < nvtxs; ++v) { parts[v] = metis_parts[v]; } splatt_free(metis_parts); return parts; } #endif
atomic.c
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

/*
 * Demonstrates '#pragma omp atomic': x is updated through the reversing
 * permutation index[], so the atomic protects the scattered writes; y is
 * indexed by the loop variable itself, so each iteration writes a distinct
 * element and needs no protection.
 *
 * Fixes vs. the original:
 *  - index was allocated with malloc(10*sizeof(float)): wrong element type
 *    and a hard-coded length; now n*sizeof(int).
 *  - implicit-int 'main()' (invalid since C99) is now 'int main(void)'.
 *  - allocations are checked and freed before exit.
 */
int main(void)
{
  float *x, *y, *work1, *work2;
  int *index;
  int n, i;

  n = 10;
  x     = (float*)malloc(n*sizeof(float));
  y     = (float*)malloc(n*sizeof(float));
  work1 = (float*)malloc(n*sizeof(float));
  work2 = (float*)malloc(n*sizeof(float));
  index = (int*)malloc(n*sizeof(int));   /* was 10*sizeof(float) */
  if (x == NULL || y == NULL || work1 == NULL || work2 == NULL ||
      index == NULL) {
    fprintf(stderr, "out of memory\n");
    free(x); free(y); free(work1); free(work2); free(index);
    return 1;
  }

  for (i = 0; i < n; i++) {
    index[i] = (n-i)-1;  /* reversing permutation */
    x[i] = 0.0;
    y[i] = 0.0;
    work1[i] = i;
    work2[i] = i*i;
  }

  #pragma omp parallel for shared(x,y,index,n)
  for (i = 0; i < n; i++) {
    /* atomic guards the indirect (scattered) update of x */
    #pragma omp atomic
    x[index[i]] += work1[i];
    y[i] += work2[i];  /* distinct y[i] per iteration: no race */
  }

  for (i = 0; i < n; i++)
    printf("%d %g %g\n", i, x[i], y[i]);

  free(x);
  free(y);
  free(work1);
  free(work2);
  free(index);
  return 0;
}
dd_linalg.c
/* Double-double linear algebra library
 *
 * Implementations were partly inspired by LAPACK, partly from Fredrik
 * Johansson's excellent MPMATH library.
 *
 * Copyright (C) 2021 Markus Wallerberger and others
 * SPDX-License-Identifier: MIT
 */
#include "dd_linalg.h"

// 2**500 and 2**(-500);
static const double LARGE = 3.273390607896142e+150;
static const double INV_LARGE = 3.054936363499605e-151;

/* 2-norm of a strided ddouble vector, with every element pre-scaled by
 * 'scaling' (a power of two, so mul_pwr2 is exact) to dodge overflow or
 * underflow in the squared sum. nn = element count, sxn = stride. */
static ddouble normq_scaled(const ddouble *x, long nn, long sxn, double scaling)
{
    ddouble sum = Q_ZERO;
    for (long n = 0; n < nn; ++n, x += sxn) {
        ddouble curr = mul_pwr2(*x, scaling);
        sum = addqq(sum, sqrq(curr));
    };
    /* undo the scaling: sqrt(sum * s^2) / s */
    return mul_pwr2(sqrtq(sum), 1.0/scaling);
}

/* 2-norm of a strided ddouble vector. Tries the unscaled sum first and
 * re-runs with a compensating power-of-two scale on over/underflow. */
ddouble normq(const ddouble *x, long nn, long sxn)
{
    ddouble sum = normq_scaled(x, nn, sxn, 1.0);

    // fall back to other routines in case of over/underflow
    if (sum.hi > LARGE)
        return normq_scaled(x, nn, sxn, INV_LARGE);
    else if (sum.hi < INV_LARGE)
        return normq_scaled(x, nn, sxn, LARGE);
    else
        return sum;
}

/* Compute a Householder reflector for the nn-element strided vector x.
 * On return v holds the reflector (v[0] == 1) and the return value is tau;
 * both are zero when the trailing part of x is already zero.
 * sx/sv are the strides of x and v. (LAPACK xLARFG analogue.) */
ddouble householderq(const ddouble *x, ddouble *v, long nn, long sx, long sv)
{
    if (nn == 0)
        return Q_ZERO;

    ddouble norm_x = normq(x + sx, nn - 1, sx);
    if (iszeroq(norm_x))
        return Q_ZERO;

    ddouble alpha = *x;
    /* beta = -sign(alpha) * ||(alpha, norm_x)||  — NOTE(review): here the
     * sign is copied from alpha, i.e. beta has alpha's sign; confirm against
     * callers' convention. */
    ddouble beta = copysignqq(hypotqq(alpha, norm_x), alpha);
    ddouble diff = subqq(beta, alpha);
    ddouble tau = divqq(diff, beta);
    ddouble scale = reciprocalq(negq(diff));

    v[0] = Q_ONE;
    for (long n = 1; n != nn; ++n)
        v[n * sv] = mulqq(scale, x[n * sx]);
    return tau;
}

/* a[i,j] += v[i] * w[j] for an ii-by-jj matrix with strides (ais, ajs);
 * v and w are strided vectors. Iterations are independent, hence the
 * collapsed parallel for. */
void rank1updateq(ddouble *a, long ais, long ajs, const ddouble *v, long vs,
                  const ddouble *w, long ws, long ii, long jj)
{
    #pragma omp parallel for collapse(2)
    for (long i = 0; i < ii; ++i) {
        for (long j = 0; j < jj; ++j) {
            ddouble tmp = mulqq(v[i * vs], w[j * ws]);
            a[i * ais + j * ajs] = addqq(a[i * ais + j * ajs], tmp);
        }
    }
}

/* Compute a Givens rotation (c, s) and radius r such that
 *   [ c  s ] [ f ]   [ r ]
 *   [ -s c ] [ g ] = [ 0 ]
 */
void givensq(ddouble f, ddouble g, ddouble *c, ddouble *s, ddouble *r)
{
    /* ACM Trans. Math. Softw. 28(2), 206, Alg 1 */
    if (iszeroq(g)) {
        *c = Q_ONE;
        *s = Q_ZERO;
        *r = f;
    } else if (iszeroq(f)) {
        *c = Q_ZERO;
        /* s = +/-1 taken from the sign bit of g */
        *s = (ddouble) {signbitq(g), 0.0};
        *r = absq(g);
    } else {
        *r = copysignqq(hypotqq(f, g), f);

        /* This may come at a slight loss of precision, however, we should
         * not really have to care ... */
        ddouble inv_r = reciprocalq(*r);
        *c = mulqq(f, inv_r);
        *s = mulqq(g, inv_r);
    }
}

/* Singular values (and optionally the left/right rotations) of the upper
 * triangular 2x2 matrix [ f g ; 0 h ]. Pass cv == NULL to skip the
 * rotation outputs. (LAPACK xLASV2 analogue.) */
static void svd_tri2x2(
        ddouble f, ddouble g, ddouble h, ddouble *smin, ddouble *smax,
        ddouble *cv, ddouble *sv, ddouble *cu, ddouble *su)
{
    ddouble fa = absq(f);
    ddouble ga = absq(g);
    ddouble ha = absq(h);
    bool compute_uv = cv != NULL;

    if (lessqq(fa, ha)) {
        // switch h <-> f, cu <-> sv, cv <-> su
        svd_tri2x2(h, g, f, smin, smax, su, cu, sv, cv);
        return;
    }
    if (iszeroq(ga)) {
        // already diagonal
        *smin = ha;
        *smax = fa;
        if (compute_uv) {
            *cu = Q_ONE;
            *su = Q_ZERO;
            *cv = Q_ONE;
            *sv = Q_ZERO;
        }
        return;
    }
    if (fa.hi < Q_EPS.hi * ga.hi) {
        // ga is very large
        *smax = ga;
        /* guard against overflow in fa*ha/ga */
        if (ha.hi > 1.0)
            *smin = divqq(fa, divqq(ga, ha));
        else
            *smin = mulqq(divqq(fa, ga), ha);
        if (compute_uv) {
            *cu = Q_ONE;
            *su = divqq(h, g);
            *cv = Q_ONE;
            *sv = divqq(f, g);
        }
        return;
    }

    // normal case
    ddouble fmh = subqq(fa, ha);
    ddouble d = divqq(fmh, fa);
    ddouble q = divqq(g, f);
    ddouble s = subdq(2.0, d);
    ddouble spq = hypotqq(q, s);
    ddouble dpq = hypotqq(d, q);
    ddouble a = mul_pwr2(addqq(spq, dpq), 0.5);
    *smin = absq(divqq(ha, a));
    *smax = absq(mulqq(fa, a));
    if (compute_uv) {
        ddouble tmp = addqq(divqq(q, addqq(spq, s)),
                            divqq(q, addqq(dpq, d)));
        tmp = mulqq(tmp, adddq(1.0, a));
        ddouble tt = hypotqd(tmp, 2.0);
        *cv = divdq(2.0, tt);
        *sv = divqq(tmp, tt);
        *cu = divqq(addqq(*cv, mulqq(*sv, q)), a);
        *su = divqq(mulqq(divqq(h, f), *sv), a);
    }
}

/* Singular values (and optionally rotations) of a general 2x2 matrix
 * [ a11 a12 ; a21 a22 ]: first zero a21 with a Givens rotation, then defer
 * to the triangular routine. Pass cv == NULL to skip rotation outputs. */
void svd_2x2(ddouble a11, ddouble a12, ddouble a21, ddouble a22, ddouble *smin,
             ddouble *smax, ddouble *cv, ddouble *sv, ddouble *cu, ddouble *su)
{
    bool compute_uv = cv != NULL;
    if(iszeroq(a21))
        return svd_tri2x2(a11, a12, a22, smin, smax, cv, sv, cu, su);

    /* First, we use a givens rotation Rx:
     *   [  cx  sx ] [ a11 a12 ]   [ rx  a12' ]
     *   [ -sx  cx ] [ a21 a22 ] = [ 0   a22' ]
     */
    ddouble cx, sx, rx;
    givensq(a11, a21, &cx, &sx, &rx);
    a11 = rx;
    a21 = Q_ZERO;
    lmul_givensq(&a12, &a22, cx, sx, a12, a22);

    /* Next, use the triangular routine:
     *   [ f g ]   [ cu -su ] [ smax  0   ] [  cv sv ]
     *   [ 0 h ] = [ su  cu ] [ 0     smin] [ -sv cv ]
     */
    svd_tri2x2(a11, a12, a22, smin, smax, cv, sv, cu, su);

    /* Finally, update the LHS (U) transform as follows:
     *   [ cx -sx ] [ cu -su ]   [ cu' -su' ]
     *   [ sx  cx ] [ su  cu ] = [ su'  cu' ]
     */
    if (compute_uv)
        lmul_givensq(cu, su, cx, negq(sx), *cu, *su);
}

/* One sweep of one-sided Jacobi SVD over the jj-by-jj square part of the
 * ii-by-jj matrix u (strides sui/suj), accumulating the rotations into vt
 * (strides svi/svj). Returns the Frobenius norm of the off-diagonal of
 * H = u^T u before rotation (a convergence measure), or NaN if ii < jj. */
ddouble jacobi_sweep(ddouble *u, long sui, long suj, ddouble *vt, long svi,
                     long svj, long ii, long jj)
{
    ddouble _cu, _su, cv, sv, _smin, _smax;
    ddouble offd = Q_ZERO;

    if (ii < jj)
        return nanq();

    // Note that the inner loop only runs over the square portion!
    for (long i = 0; i < jj - 1; ++i) {
        for (long j = i + 1; j < jj; ++j) {
            // Construct the matrix to be diagonalized
            ddouble Hii = Q_ZERO, Hij = Q_ZERO, Hjj = Q_ZERO;
            for (long k = 0; k != ii; ++k) {
                ddouble u_ki = u[k * sui + i * suj];
                ddouble u_kj = u[k * sui + j * suj];
                Hii = addqq(Hii, mulqq(u_ki, u_ki));
                Hij = addqq(Hij, mulqq(u_ki, u_kj));
                Hjj = addqq(Hjj, mulqq(u_kj, u_kj));
            }
            offd = addqq(offd, sqrq(Hij));

            // diagonalize
            svd_2x2(Hii, Hij, Hij, Hjj, &_smin, &_smax, &cv, &sv, &_cu, &_su);

            // apply rotation to VT
            for (long k = 0; k < jj; ++k) {
                ddouble *vt_ik = &vt[i * svi + k * svj];
                ddouble *vt_jk = &vt[j * svi + k * svj];
                lmul_givensq(vt_ik, vt_jk, cv, sv, *vt_ik, *vt_jk);
            }
            // apply transposed rotation to U
            for (long k = 0; k < ii; ++k) {
                ddouble *u_ki = &u[k * sui + i * suj];
                ddouble *u_kj = &u[k * sui + j * suj];
                lmul_givensq(u_ki, u_kj, cv, sv, *u_ki, *u_kj);
            }
        }
    }
    offd = sqrtq(offd);
    return offd;
}

/* Wilkinson-style shift for the Golub-Kahan QR step: the singular value of
 * the trailing 2x2 block closest to d2. */
static ddouble gk_shift(ddouble d1, ddouble e1, ddouble d2)
{
    /* Get singular values of 2x2 triangular matrix formed from the lower
     * right corner in the array:
     *
     *    [ d[ii-2]  e[ii-2] ]
     *    [ 0        d[ii-1] ]
     */
    ddouble smin, smax;
    svd_tri2x2(d1, e1, d2, &smin, &smax, NULL, NULL, NULL, NULL);

    ddouble smin_dist = absq(subqq(smin, d2));
    ddouble smax_dist = absq(subqq(smax, d2));
    return lessqq(smin_dist, smax_dist) ? smin : smax;
}

/* One shifted QR "bulge chase" on an upper bidiagonal matrix given by
 * diagonal d (stride sd, ii entries) and superdiagonal e (stride se,
 * ii-1 entries). The 2(ii-1) right and left Givens rotations applied are
 * streamed out into 'rot' as (cosr, sinr, cosl, sinl) per column so callers
 * can accumulate them into U/V. No-op when ii < 2. */
void golub_kahan_chaseq(ddouble *d, long sd, ddouble *e, long se, long ii,
                        ddouble *rot)
{
    if (ii < 2)
        return;

    ddouble shift = gk_shift(d[(ii-2)*sd], e[(ii-2)*se], d[(ii-1)*sd]);
    ddouble g = e[0];
    ddouble f = addqq(copysigndq(1.0, d[0]), divqq(shift, d[0]));
    f = mulqq(f, subqq(absq(d[0]), shift));
    for (long i = 0; i < (ii - 1); ++i) {
        /* right rotation: chase the bulge out of the superdiagonal */
        ddouble r, cosr, sinr;
        givensq(f, g, &cosr, &sinr, &r);
        if (i != 0)
            e[(i-1)*se] = r;
        lmul_givensq(&f, &e[i*se], cosr, sinr, d[i*sd], e[i*se]);
        lmul_givensq(&g, &d[(i+1)*sd], cosr, sinr, Q_ZERO, d[(i+1)*sd]);
        *(rot++) = cosr;
        *(rot++) = sinr;

        /* left rotation: restore bidiagonal form */
        ddouble cosl, sinl;
        givensq(f, g, &cosl, &sinl, &r);
        d[i*sd] = r;
        lmul_givensq(&f, &d[(i+1)*sd], cosl, sinl, e[i*se], d[(i+1)*sd]);
        if (i < ii - 2) {
            lmul_givensq(&g, &e[(i+1)*se], cosl, sinl, Q_ZERO, e[(i+1)*se]);
        }
        *(rot++) = cosl;
        *(rot++) = sinl;
    }
    e[(ii-2)*se] = f;
}
DRACC_OMP_032_MxV_outdated_Data_yes.c
/* Matrix Vector multiplication without copying back the result c. */
/*
 * DRACC benchmark: this program DELIBERATELY contains an OpenMP
 * target-mapping defect ("outdated data"). The result vector c is mapped
 * 'to' the device only and never copied back, so the host keeps its stale
 * zero-initialized copy. check() reports whether the issue is observable on
 * the current runtime. Do NOT "fix" the mapping — the bug is the test case.
 */
#include <stdio.h>
#include <stdbool.h>
#include <stdlib.h>
#define C 512

int *a;  // input vector, length C
int *b;  // input matrix, C x C, row-major
int *c;  // result vector, length C (host copy stays 0 — see Mult)

// Fill b with ones, a with ones, c with zeros. Always returns 0.
int init(){
	for(int i=0; i<C; i++){
		for(int j=0; j<C; j++){
			b[j+i*C]=1;
		}
		a[i]=1;
		c[i]=0;
	}
	return 0;
}

// Compute c = b * a on the target device. INTENTIONAL BUG: c is mapped
// 'to' only; the device-side result is never transferred back to the host
// (a correct version would use map(tofrom:c[0:C]) or map(from:c[0:C])).
int Mult(){
	
	#pragma omp target map(to:a[0:C],b[0:C*C],c[0:C]) device(0)
	{
		#pragma omp teams distribute parallel for
		for(int i=0; i<C; i++){
			for(int j=0; j<C; j++){
				c[i]+=b[j+i*C]*a[j];
			}
		}
	}
	return 0;
}

// Every c[i] should equal C if the result had been copied back; prints
// whether the stale-data issue is visible. Always returns 0.
int check(){
	bool test = false;
	for(int i=0; i<C; i++){
		if(c[i]!=C){
			test = true;
		}
	}
	printf("Memory Access Issue visible: %s\n",test ? "true" : "false");
	return 0;
}

int main(){
	a = malloc(C*sizeof(int));
	b = malloc(C*C*sizeof(int));
	c = malloc(C*sizeof(int));
	init();
	Mult();
	check();
	free(a);
	free(b);
	free(c);
	return 0;
}
GB_unaryop__identity_int32_int32.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__identity_int32_int32
// op(A') function:  GB_tran__identity_int32_int32

// C type:   int32_t
// A type:   int32_t
// cast:     int32_t cij = (int32_t) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    int32_t

#define GB_CTYPE \
    int32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    int32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x)   \
    z = x ;

// casting
#define GB_CASTING(z, x)   \
    int32_t z = (int32_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */          \
    GB_GETA (aij, Ax, pA) ;      \
    /* Cx [pC] = op (cast (aij)) */     \
    GB_CASTING (x, aij) ;        \
    GB_OP (GB_CX (pC), x) ;      \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_INT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Elementwise identity over a dense value array: Cx [p] = (int32_t) Ax [p].
// Returns GrB_NO_VALUE when the operator is compile-time disabled.
GrB_Info GB_unop__identity_int32_int32
(
    int32_t *restrict Cx,
    const int32_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // entries are independent, so a static schedule partitions them evenly
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The loop body lives in the shared template GB_unaryop_transpose.c, which
// expands the GB_* macros defined above (phase 2 of the transpose).
GrB_Info GB_tran__identity_int32_int32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
mandelbrot.c
/* To compile: gcc -O3 -o mandelbrot mandelbrot.c png_util.c -I. -lpng -lm -fopenmp Or just type: module load gcc make To create an image with 4096 x 4096 pixels (last argument will be used to set number of threads): ./mandelbrot 4096 4096 1 */ #include <math.h> #include <stdio.h> #include <stdlib.h> #include "png_util.h" #include <omp.h> #define MXITER 1000 typedef struct { double r; double i; }complex_t; // return iterations before z leaves mandelbrot set for given c int testpoint(complex_t c){ int iter; complex_t z; double temp; z = c; for(iter=0; iter<MXITER; iter++){ temp = (z.r*z.r) - (z.i*z.i) + c.r; z.i = z.r*z.i*2. + c.i; z.r = temp; if((z.r*z.r+z.i*z.i)>4.0){ return iter; } } return iter; } // perform Mandelbrot iteration on a grid of numbers in the complex plane // record the iteration counts in the count array void mandelbrot(int Nre, int Nim, complex_t cmin, complex_t cmax, float *count){ int n,m; complex_t c; double dr = (cmax.r-cmin.r)/(Nre-1); double di = (cmax.i-cmin.i)/(Nim-1);; // Q2c: add a compiler directive to split the outer for loop amongst threads here #pragma omp parallel for for(n=0;n<Nim;++n){ for(m=0;m<Nre;++m){ c.r = cmin.r + dr*m; c.i = cmin.i + di*n; count[m+n*Nre] = testpoint(c); } } } int main(int argc, char **argv){ // to create a 4096x4096 pixel image [ last argument is placeholder for number of threads ] // usage: ./mandelbrot 4096 4096 1 int Nre = atoi(argv[1]); int Nim = atoi(argv[2]); int Nthreads = atoi(argv[3]); // Q2b: set the number of OpenMP threads to be Nthreads here: Nthreads = atoi(argv[argc-1]); omp_set_num_threads(Nthreads); // storage for the iteration counts float *count = (float*) malloc(Nre*Nim*sizeof(float)); // Parameters for a bounding box for "c" that generates an interesting image const float centRe = -.759856, centIm= .125547; const float diam = 0.151579; complex_t cmin; complex_t cmax; cmin.r = centRe - 0.5*diam; cmax.r = centRe + 0.5*diam; cmin.i = centIm - 0.5*diam; cmax.i = centIm + 0.5*diam; // Q2d: 
complete this to read time before calling mandelbrot with OpenMP API wall clock time double start = omp_get_wtime(); // compute mandelbrot set mandelbrot(Nre, Nim, cmin, cmax, count); // Q2d: complete this to read time after calling mandelbrot using OpenMP wall clock time double end = omp_get_wtime(); // print elapsed time printf("elapsed = %g\n", end-start); // output mandelbrot to png format image FILE *fp = fopen("mandelbrot.png", "w"); write_hot_png(fp, Nre, Nim, count, 0, 80); exit(0); return 0; }
gooseberry_bin.c
/** * @file bowstring_bin.c * @brief Command line interface for gooseberry * @author Dominique LaSalle <lasalle@cs.umn.edu> * Copyright 2014 * @version 1 * @date 2014-04-28 */ #ifndef GOOSEBERRY_C #define GOOSEBERRY_C #include "base.h" #include "matrix.h" #include "io.h" #include "blas.h" #include "cgd.h" #include "permute.h" #include "analyze.h" /****************************************************************************** * MACROS ********************************************************************** ******************************************************************************/ #define ARRAY_SIZE(a) \ (sizeof(a) > 0 ? (sizeof(a) / sizeof((a)[0])) : 0) #ifndef NO_OMP #define DEFAULT_NUMTHREADS omp_get_max_threads() #else #define DEFAULT_NUMTHREADS 1 #endif /****************************************************************************** * TYPES *********************************************************************** ******************************************************************************/ /* COMMANDS ******************************************************************/ typedef enum command_t { COMMAND_HELP, COMMAND_ANALYSIS, COMMAND_PERMUTE, COMMAND_TRANSFORM, COMMAND_GENERATE, COMMAND_BLAS, COMMAND_CGD, COMMAND_SGD, COMMAND_PAGERANK } command_t; /* ANALYSIS ******************************************************************/ typedef enum analysis_t { ANALYSIS_MATRIXSTATS, ANALYSIS_CHOLESKY } analysis_t; typedef enum analysis_option_t { ANALYSIS_OPTION_HELP, ANALYSIS_OPTION_INFILE, ANALYSIS_OPTION_TIME, ANALYSIS_OPTION_TYPE, ANALYSIS_OPTION_PERMFILE } analysis_option_t; /* PERMUTE *******************************************************************/ typedef enum permute_option_t { PERMUTE_OPTION_HELP, PERMUTE_OPTION_INFILE, PERMUTE_OPTION_OUTFILE, PERMUTE_OPTION_PERMUTATION, PERMUTE_OPTION_TIME, PERMUTE_OPTION_ROWPERM, PERMUTE_OPTION_COLPERM } permute_option_t; typedef enum permute_permutation_t { PERMUTE_PERMUTATION_FILE, PERMUTE_PERMUTATION_RANDOM, 
PERMUTE_PERMUTATION_ROWRANDOM, PERMUTE_PERMUTATION_COLRANDOM, PERMUTE_PERMUTATION_BANDWIDTH } permute_permutation_t; /* TRANSFORM *****************************************************************/ typedef enum transform_option_t { TRANSFORM_OPTION_HELP, TRANSFORM_OPTION_INFILE, TRANSFORM_OPTION_OUTFILE, TRANSFORM_OPTION_PARTFILE, TRANSFORM_OPTION_TIME, TRANSFORM_OPTION_OPERATION } transform_option_t; typedef enum transform_operation_t { TRANSFORM_OPERATION_CONVERT, TRANSFORM_OPERATION_SYMMETRIFY, TRANSFORM_OPERATION_DEBIPARTIFY, TRANSFORM_OPERATION_ROWSPLIT, TRANSFORM_OPERATION_COLSPLIT, TRANSFORM_OPERATION_ROWJOIN, TRANSFORM_OPERATION_COLJOIN, TRANSFORM_OPERATION_TRANSPOSE } transform_operation_t; /* GENERATE ******************************************************************/ typedef enum generate_option_t { GENERATE_OPTION_HELP, GENERATE_OPTION_OUTFILE, GENERATE_OPTION_TYPE, GENERATE_OPTION_SIZE, GENERATE_OPTION_TIME } generate_option_t; typedef enum generate_type_t { GENERATE_TYPE_NULL, GENERATE_TYPE_DENSE_VECTOR } generate_type_t; /* BLAS **********************************************************************/ typedef enum blas_option_t { BLAS_OPTION_HELP, BLAS_OPTION_OPERATION, BLAS_OPTION_INFILE, BLAS_OPTION_OUTFILE, BLAS_OPTION_TIME, BLAS_OPTION_RUNS, BLAS_OPTION_THREADS, BLAS_OPTION_ROWPERM, BLAS_OPTION_COLPERM, BLAS_OPTION_REDUCEBANDWIDTH } blas_option_t; typedef enum blas_operation_t { BLAS_OPERATION_NOOP, BLAS_OPERATION_MULTIPLY } blas_operation_t; /* CGD ***********************************************************************/ typedef enum cgd_option_t { CGD_OPTION_HELP, CGD_OPTION_INFILE, CGD_OPTION_OUTFILE, CGD_OPTION_ERROR, CGD_OPTION_NITER, CGD_OPTION_TIME, CGD_OPTION_RUNS, CGD_OPTION_THREADS, CGD_OPTION_ROWPERM, CGD_OPTION_COLPERM } cgd_option_t; /* PAGERANK ******************************************************************/ typedef enum pagerank_option_t { PAGERANK_OPTION_HELP, PAGERANK_OPTION_INFILE, PAGERANK_OPTION_OUTFILE, PAGERANK_OPTION_ERROR, 
PAGERANK_OPTION_DAMPING, PAGERANK_OPTION_NITER, PAGERANK_OPTION_TIME, PAGERANK_OPTION_RUNS, PAGERANK_OPTION_THREADS, PAGERANK_OPTION_PERM } pagerank_option_t; /****************************************************************************** * OPTION ARRAYS *************************************************************** ******************************************************************************/ /* COMMANDS ******************************************************************/ static const cmd_opt_pair_t COMMANDS[] = { [COMMAND_HELP] = {"help","Display list of available commands.", \ COMMAND_HELP}, [COMMAND_ANALYSIS] = {"analysis","Perform an analysis on a matrix/vector.", \ COMMAND_ANALYSIS}, [COMMAND_TRANSFORM] = {"transform","Transform a matrix/vector.", \ COMMAND_TRANSFORM}, [COMMAND_PERMUTE] = {"permute","Permute a matrix/vector.",COMMAND_PERMUTE}, [COMMAND_GENERATE] = {"generate","Generate a matrix/vector.", \ COMMAND_GENERATE}, [COMMAND_BLAS] = {"blas","Perform a blas operation.",COMMAND_BLAS}, [COMMAND_CGD] = {"cgd","Perform conjugate gradient descent.",COMMAND_CGD}, [COMMAND_SGD] = {"sgd","Perform stocastic gradient descent.",COMMAND_SGD}, [COMMAND_PAGERANK] = {"pagerank","Perform a pagerank on a square matrix.", \ COMMAND_PAGERANK} }; static const size_t NCOMMANDS = ARRAY_SIZE(COMMANDS); /* ANALYSIS ******************************************************************/ static const cmd_opt_pair_t ANALYSIS[] = { {"matrixstats","Calculate statistics of a matrix/vector", \ ANALYSIS_MATRIXSTATS}, {"cholesky","Calculate the stats associated with a cholesky decomposition", \ ANALYSIS_CHOLESKY} }; static const cmd_opt_t ANALYSIS_OPTS[] = { {ANALYSIS_OPTION_HELP,'h',"help","Display this help page.",CMD_OPT_FLAG, \ NULL,0}, {ANALYSIS_OPTION_INFILE,'i',"infile","The input matrix.",CMD_OPT_STRING, \ NULL,0}, {ANALYSIS_OPTION_PERMFILE,'p',"permfile","The permutation vector.", \ CMD_OPT_STRING,NULL,0}, {ANALYSIS_OPTION_TYPE,'a',"type","The type of analysis to perform.", \ 
CMD_OPT_CHOICE,ANALYSIS,ARRAY_SIZE(ANALYSIS)}, {PERMUTE_OPTION_TIME,'t',"times","Print timing of the analysis.", \ CMD_OPT_FLAG,NULL,0} }; static const size_t NANALYSIS_OPTS = ARRAY_SIZE(ANALYSIS_OPTS); /* PERMUTE *******************************************************************/ static const cmd_opt_pair_t PERMUTE_PERMUTATIONS[] = { {"random","Perform a random permutation on the rows and columns.", \ PERMUTE_PERMUTATION_RANDOM}, {"file","Perform a permuation based on input files (specified with -R " \ "and/or -C, can be permutations or partitions).", \ PERMUTE_PERMUTATION_FILE}, {"bandwidth","Perform a bandwidth reducing permutation.", \ PERMUTE_PERMUTATION_BANDWIDTH}, }; static const cmd_opt_t PERMUTE_OPTS[] = { {PERMUTE_OPTION_HELP,'h',"help","Display this help page.",CMD_OPT_FLAG, \ NULL,0}, {PERMUTE_OPTION_INFILE,'i',"infile","An input matrix/vector file.", \ CMD_OPT_STRING,NULL,0}, {PERMUTE_OPTION_OUTFILE,'o',"outfile","The output vector file.", \ CMD_OPT_STRING,NULL,0}, {PERMUTE_OPTION_PERMUTATION,'p',"permutation","The type of permutation to " \ "perform.",CMD_OPT_CHOICE,PERMUTE_PERMUTATIONS, \ ARRAY_SIZE(PERMUTE_PERMUTATIONS)}, {PERMUTE_OPTION_TIME,'t',"times","Print timing of the permutation.", \ CMD_OPT_FLAG,NULL,0}, {PERMUTE_OPTION_ROWPERM,'R',"rowperm","Row permutation/partition file.", \ CMD_OPT_STRING,NULL,0}, {PERMUTE_OPTION_COLPERM,'C',"colperm","Column permutation/partition file.", \ CMD_OPT_STRING,NULL,0} }; static const size_t NPERMUTE_OPTS = ARRAY_SIZE(PERMUTE_OPTS); /* TRANSFROM *****************************************************************/ static const cmd_opt_pair_t TRANSFORM_OPERATIONS[] = { {"convert","Convert from one matrix/vector format to another.", \ TRANSFORM_OPERATION_CONVERT}, {"symmetrify","Transform to a symmetric matrix: B = A + A^T.", \ TRANSFORM_OPERATION_SYMMETRIFY}, {"debipartify","Transform to a symmetric matrix: B = [ 0 , A ; A^T , 0].", \ TRANSFORM_OPERATION_DEBIPARTIFY}, {"rowsplit","Split a matrix row-wise into 
submatrices: " \ "[ B.0 ; B.1 ; ... ] = A.",TRANSFORM_OPERATION_ROWSPLIT}, {"colsplit","Split a matrix column-wise into submatrices: " \ "[ B.0 , B.1 , ... ] = A.",TRANSFORM_OPERATION_COLSPLIT}, #ifdef XXX {"rowjoin","Join submatrices row-wise into a single matrix: " \ "B = [ A.0 ; A.1 ; ... ].",TRANSFORM_OPERATION_ROWJOIN}, {"coljoin","Join submatrices column-wise into a single matrix: " \ "B = [ A.0 , A.1 , ... ].",TRANSFORM_OPERATION_COLJOIN}, #endif {"transpose","Transpose a matrix: " \ "B = A^T.",TRANSFORM_OPERATION_TRANSPOSE} }; static const cmd_opt_t TRANSFORM_OPTS[] = { {TRANSFORM_OPTION_HELP,'h',"help","Display this help page.",CMD_OPT_FLAG, \ NULL,0}, {TRANSFORM_OPTION_INFILE,'i',"infile","An input matrix/vector file.", \ CMD_OPT_STRING,NULL,0}, {TRANSFORM_OPTION_OUTFILE,'o',"outfile","The output vector file.", \ CMD_OPT_STRING,NULL,0}, {TRANSFORM_OPTION_PARTFILE,'p',"partfile","The partition vector file.", \ CMD_OPT_STRING,NULL,0}, {TRANSFORM_OPTION_OPERATION,'x',"operation","The type of permutation to " \ "perform.",CMD_OPT_CHOICE,TRANSFORM_OPERATIONS, \ ARRAY_SIZE(TRANSFORM_OPERATIONS)}, {TRANSFORM_OPTION_TIME,'t',"times","Print timing of the permutation.", \ CMD_OPT_FLAG,NULL,0} }; static const size_t NTRANSFORM_OPTS = ARRAY_SIZE(TRANSFORM_OPTS); /* TRANSFROM *****************************************************************/ static const cmd_opt_pair_t GENERATE_OPERATIONS[] = { {"vector","Generate a dense vector.",GENERATE_TYPE_DENSE_VECTOR} }; static const cmd_opt_t GENERATE_OPTS[] = { {GENERATE_OPTION_HELP,'h',"help","Display this help page.",CMD_OPT_FLAG, \ NULL,0}, {GENERATE_OPTION_OUTFILE,'o',"outfile","The output vector file.", \ CMD_OPT_STRING,NULL,0}, {GENERATE_OPTION_TYPE,'g',"type","The type of permutation to " \ "perform.",CMD_OPT_CHOICE,GENERATE_OPERATIONS, \ ARRAY_SIZE(GENERATE_OPERATIONS)}, {GENERATE_OPTION_SIZE,'s',"size","Size of the generate matrix/vector", \ CMD_OPT_INT,NULL,0}, {GENERATE_OPTION_TIME,'t',"times","Print timing of the 
permutation.", \ CMD_OPT_FLAG,NULL,0} }; static const size_t NGENERATE_OPTS = ARRAY_SIZE(GENERATE_OPTS); /* BLAS **********************************************************************/ static const cmd_opt_pair_t BLAS_OPERATIONS[] = { {"noop","Perform no operation, just copy the input matrix/vector.", \ BLAS_OPERATION_NOOP}, {"multiply","Multiply a matrix/vector with a matrix/vector.", \ BLAS_OPERATION_MULTIPLY} }; static const cmd_opt_t BLAS_OPTS[] = { {BLAS_OPTION_HELP,'h',"help","Display this help page.",CMD_OPT_FLAG, \ NULL,0}, {BLAS_OPTION_OPERATION,'x',"operation","The type of operation to perform.", \ CMD_OPT_CHOICE,BLAS_OPERATIONS,ARRAY_SIZE(BLAS_OPERATIONS)}, {BLAS_OPTION_INFILE,'i',"infile","An input matrix/vector file.", \ CMD_OPT_STRING,NULL,0}, {BLAS_OPTION_OUTFILE,'o',"outfile","The output matrix/vector file.", \ CMD_OPT_STRING,NULL,0}, {BLAS_OPTION_TIME,'t',"times","Print timing of the blas routines.", \ CMD_OPT_FLAG,NULL,0}, {BLAS_OPTION_RUNS,'r',"runs","Number of repeated runs (only useful for " \ "timing purposes).",CMD_OPT_INT,NULL,0}, #ifndef NO_OMP {BLAS_OPTION_THREADS,'T',"threads","Number of threads.",CMD_OPT_INT,NULL,0}, #endif {BLAS_OPTION_ROWPERM,'R',"rowperm","Row permutation file.",CMD_OPT_STRING, \ NULL,0}, {BLAS_OPTION_COLPERM,'C',"colperm","Column permutation file.", \ CMD_OPT_STRING,NULL,0}, {BLAS_OPTION_REDUCEBANDWIDTH,'b',"bandwidthreduce","Re-order the matrix " \ "to reduce bandwidth.",CMD_OPT_FLAG, NULL,0} }; static const size_t NBLAS_OPTS = ARRAY_SIZE(BLAS_OPTS); /* CGD ****************************************************************/ static const cmd_opt_t CGD_OPTS[] = { {CGD_OPTION_HELP,'h',"help","Display this help page.",CMD_OPT_FLAG, \ NULL,0}, {CGD_OPTION_INFILE,'i',"infile","An input matrix/vector file.", \ CMD_OPT_STRING,NULL,0}, {CGD_OPTION_OUTFILE,'o',"outfile","The output vector file.", \ CMD_OPT_STRING,NULL,0}, {CGD_OPTION_ERROR,'e',"error","The RMSE to achieve before exiting.", \ CMD_OPT_FLOAT,NULL,0}, 
{CGD_OPTION_NITER,'I',"iter","The number of iterations to run before " \ "exiting.",CMD_OPT_INT,NULL,0}, {CGD_OPTION_TIME,'t',"times","Print timing of the cgd routines.", \ CMD_OPT_FLAG,NULL,0}, {CGD_OPTION_RUNS,'r',"runs","Number of repeated runs (only useful for " \ "timing purposes).",CMD_OPT_INT,NULL,0}, #ifndef NO_OMP {CGD_OPTION_THREADS,'T',"threads","Number of threads.",CMD_OPT_INT,NULL,0}, #endif {CGD_OPTION_ROWPERM,'R',"rowperm","Row permutation file.",CMD_OPT_STRING, \ NULL,0}, {CGD_OPTION_COLPERM,'C',"colperm","Column permutation file.", \ CMD_OPT_STRING,NULL,0} }; static const size_t NCGD_OPTS = ARRAY_SIZE(CGD_OPTS); /* PAGERANK ******************************************************************/ static const cmd_opt_t PAGERANK_OPTS[] = { {PAGERANK_OPTION_HELP,'h',"help","Display this help page.",CMD_OPT_FLAG, \ NULL,0}, {PAGERANK_OPTION_INFILE,'i',"infile","An input matrix file.", \ CMD_OPT_STRING,NULL,0}, {PAGERANK_OPTION_OUTFILE,'o',"outfile","The output matrix/vector file.", \ CMD_OPT_STRING,NULL,0}, {PAGERANK_OPTION_TIME,'t',"times","Print timing of the pagerank " \ "calcuation.", CMD_OPT_FLAG,NULL,0}, {PAGERANK_OPTION_RUNS,'r',"runs","Number of repeated runs (only useful " \ "for timing purposes).",CMD_OPT_INT,NULL,0}, {PAGERANK_OPTION_NITER,'I',"iterations","Maximum number of iterations.", \ CMD_OPT_INT,NULL,0}, {PAGERANK_OPTION_ERROR,'e',"error","Error threshold for stopping.", \ CMD_OPT_FLOAT,NULL,0}, {PAGERANK_OPTION_DAMPING,'d',"damping","Damping factor to use.", \ CMD_OPT_FLOAT,NULL,0}, #ifndef NO_OMP {PAGERANK_OPTION_THREADS,'T',"threads","Number of threads.",CMD_OPT_INT, \ NULL,0}, #endif {PAGERANK_OPTION_PERM,'p',"perm","Row and column permutation file.", \ CMD_OPT_STRING,NULL,0} }; static const size_t NPAGERANK_OPTS = ARRAY_SIZE(PAGERANK_OPTS); /* FILE TYPES ****************************************************************/ static const char * RAW_EXTENSIONS[] = {"raw",NULL}; static const char * GRID_EXTENSIONS[] = 
{"mat","grid","vec","txt",NULL}; static const char * CSR_EXTENSIONS[] = {"csr",NULL}; static const char * SVM_EXTENSIONS[] = {"svm","libfm",NULL}; static const char * POINT_EXTENSIONS[] = {"ij","point",NULL}; static const char * GRAPH_EXTENSIONS[] = {"metis","chaco","graph",NULL}; static const char * CLU_EXTENSIONS[] = {"clu",NULL}; static const char * MATRIXMARKET_EXTENSIONS[] = {"mm","mtx",NULL}; static const char * const * const FILE_TYPES[] = { [GOOSEBERRY_FORMAT_RAW] = RAW_EXTENSIONS, [GOOSEBERRY_FORMAT_GRID] = GRID_EXTENSIONS, [GOOSEBERRY_FORMAT_CSR] = CSR_EXTENSIONS, [GOOSEBERRY_FORMAT_SVM] = SVM_EXTENSIONS, [GOOSEBERRY_FORMAT_POINT] = POINT_EXTENSIONS, [GOOSEBERRY_FORMAT_GRAPH] = GRAPH_EXTENSIONS, [GOOSEBERRY_FORMAT_CLU] = CLU_EXTENSIONS, [GOOSEBERRY_FORMAT_MATRIXMARKET] = MATRIXMARKET_EXTENSIONS }; /****************************************************************************** * PRIVATE FUNCTIONS *********************************************************** ******************************************************************************/ static int __is_dense( int type) { switch (type) { case GOOSEBERRY_FORMAT_RAW: case GOOSEBERRY_FORMAT_GRID: return 1; default: return 0; } } static int __get_file_type( const char * const name) { size_t i,j; for (i=0;i<ARRAY_SIZE(FILE_TYPES);++i) { for (j=0;FILE_TYPES[i][j] != NULL;++j) { if (dl_string_endswith(name,FILE_TYPES[i][j])) { return i; } } } return -1; } static int __usage( const char * const name, FILE * fout) { size_t i; fprintf(fout,"USAGE:\n"); fprintf(fout,"%s <command> [options]\n",name); fprintf(fout,"\n"); fprintf(fout,"Commands:\n"); for (i=0;i<NCOMMANDS;++i) { fprintf(fout,"\t%s : %s\n",COMMANDS[i].str,COMMANDS[i].desc); } return 1; } static int __command_usage( const char * const name, const char * const cmd, const cmd_opt_t * const opts, const size_t nopts, FILE * fout) { fprintf(stdout,"USAGE:\n"); fprintf(stdout,"%s %s [options]\n",name,cmd); fprintf(stdout,"\n"); fprint_cmd_opts(fout,opts,nopts); 
return 1;
}

/**
 * @brief Handle the 'help' command: print the top-level usage to stdout.
 *
 * @param argc The number of command line arguments (unused).
 * @param argv The command line arguments (argv[0] is the executable name).
 *
 * @return GOOSEBERRY_SUCCESS.
 */
static int __help(
    int argc,
    char ** argv)
{
  __usage(argv[0],stdout);
  return GOOSEBERRY_SUCCESS;
}

/**
 * @brief Handle the 'analyze' command: read a matrix, optionally group its
 * rows and columns by a label (partition) file, and print either per-matrix
 * statistics or a symbolic Cholesky analysis.
 *
 * @param argc The number of command line arguments.
 * @param argv The command line arguments.
 *
 * @return GOOSEBERRY_SUCCESS on success, an error code otherwise.
 */
static int __analyze(
    int argc,
    char ** argv)
{
  dl_timer_t io_tmr, op_tmr;
  size_t nargs, i;
  dim_t k, nec, ner, prows;
  ind_t j, nnz;
  real_t v, minvalue, maxvalue;
  dim_t * rowsize, * colsize, * perm, * order, * pk = NULL;
  int times, err, type, analysis;
  cmd_arg_t * args = NULL;
  matrix_t * mat = NULL;
  char const * matfile = NULL, * pfile = NULL;
  double nops;

  /* set defaults */
  times = 0;
  analysis = ANALYSIS_MATRIXSTATS;

  err = cmd_parse_args(argc-2,argv+2,ANALYSIS_OPTS,NANALYSIS_OPTS,&args,
      &nargs);
  if (err != DL_CMDLINE_SUCCESS) {
    return GOOSEBERRY_ERROR_INVALIDINPUT;
  }
  err = GOOSEBERRY_SUCCESS;

  if (nargs < 2) {
    __command_usage(argv[0],argv[1],ANALYSIS_OPTS,NANALYSIS_OPTS,stderr);
    goto END;
  }

  /* process the command line options */
  for (i=0;i<nargs;++i) {
    switch (args[i].id) {
      case ANALYSIS_OPTION_HELP:
        __command_usage(argv[0],argv[1],ANALYSIS_OPTS,NANALYSIS_OPTS,stdout);
        goto END;
        break;
      case ANALYSIS_OPTION_TYPE:
        analysis = (analysis_t)args[i].val.o;
        break;
      case ANALYSIS_OPTION_INFILE:
        if (matfile == NULL) {
          matfile = args[i].val.s;
        } else {
          eprintf("Too many input files specified\n");
          err = GOOSEBERRY_ERROR_INVALIDINPUT;
        }
        break;
      case ANALYSIS_OPTION_PERMFILE:
        pfile = args[i].val.s;
        break;
      case ANALYSIS_OPTION_TIME:
        times = 1;
        break;
      default:
        eprintf("Unknown argument '%s'\n",args[i].val.s);
        err = GOOSEBERRY_ERROR_INVALIDINPUT;
        break;
    }
  }
  if (matfile == NULL) {
    eprintf("You must specify a matrix/vector input file.\n");
    err = GOOSEBERRY_ERROR_INVALIDINPUT;
  }
  if (err != GOOSEBERRY_SUCCESS) {
    goto END;
  }

  if (times) {
    dl_init_timer(&io_tmr);
    dl_init_timer(&op_tmr);
    dl_start_timer(&io_tmr);
  }

  /* read in input files */
  type = __get_file_type(matfile);
  if (type < 0) {
    eprintf("Unknown file format of '%s'\n",matfile);
    err = GOOSEBERRY_ERROR_INVALIDINPUT;
    goto END;
  } else {
    /* read in the matrix */
    mat = matrix_alloc(1);
    memset(mat,0,sizeof(matrix_t));
    if (__is_dense(type)) {
      err = gooseberry_read_dense_matrix(type,matfile,&(mat->nrows),
          &(mat->ncols),&(mat->rowval));
      /* single-row/column inputs are treated as vectors */
      if (mat->ncols == 1 || mat->nrows == 1) {
        mat->type = MATRIX_TYPE_DENSE_VECTOR;
      } else {
        mat->type = MATRIX_TYPE_DENSE_MATRIX;
      }
    } else {
      err = gooseberry_read_sparse_matrix(type,matfile,&(mat->nrows),
          &(mat->ncols),&(mat->rowptr),&(mat->rowind),&(mat->rowval));
      if (mat->ncols == 1 || mat->nrows == 1) {
        mat->type = MATRIX_TYPE_SPARSE_VECTOR;
      } else {
        mat->type = MATRIX_TYPE_SPARSE_MATRIX;
      }
    }
    if (err != GOOSEBERRY_SUCCESS) {
      goto END;
    }
  }

  /* read the optional row-label vector and validate its length */
  if (pfile) {
    prows = mat->nrows;
    err = gooseberry_read_labels(pfile,&prows,&pk);
    if (err != GOOSEBERRY_SUCCESS) {
      goto END;
    }
    if (prows != mat->nrows) {
      eprintf("Matrix is "PF_DIM_T"x"PF_DIM_T" but permutation file has "
          PF_DIM_T" rows.\n",mat->nrows,mat->ncols,prows);
      err = GOOSEBERRY_ERROR_INVALIDINPUT;
      goto END;
    }
  }

  if (times) {
    dl_stop_timer(&io_tmr);
    dl_start_timer(&op_tmr);
  }

  /* symmetrically re-order the matrix so rows with equal labels become
   * adjacent (stable counting sort of the identity ordering) */
  if (pk) {
    perm = dim_alloc(mat->nrows);
    order = dim_alloc(mat->nrows);
    dim_incset(order,0,1,mat->nrows);
    dd_countingsort_kv(pk,order,0,mat->nrows,mat->nrows,perm,NULL);
    matrix_permute(mat,perm,perm);
    dl_free(order);
    dl_free(perm);
  }

  /* NOTE(review): rowptr is only populated by the sparse reader above -- a
   * dense input would dereference a NULL rowptr here; confirm 'analyze' is
   * only used with sparse formats. */
  nnz = mat->rowptr[mat->nrows];

  switch (analysis) {
    case ANALYSIS_MATRIXSTATS:
      if (nnz > 0) {
        minvalue = maxvalue = mat->rowval[0];
      } else {
        minvalue = maxvalue = 0;
      }
      rowsize = dim_alloc(mat->nrows);
      colsize = dim_init_alloc(0,mat->ncols);
      /* count non-zeros per row and the number of empty rows */
      ner = 0;
      for (i=0;i<mat->nrows;++i) {
        rowsize[i] = mat->rowptr[i+1] - mat->rowptr[i];
        if (rowsize[i] == 0) {
          ++ner;
        }
      }
      /* accumulate per-column counts and track min/max stored values */
      for (i=0;i<mat->nrows;++i) {
        for (j=mat->rowptr[i];j<mat->rowptr[i+1];++j) {
          k = mat->rowind[j];
          v = mat->rowval[j];
          ++colsize[k];
          if (v < minvalue) {
            minvalue = v;
          }
          if (v > maxvalue) {
            maxvalue = v;
          }
        }
      }
      /* count empty columns */
      nec = 0;
      for (i=0;i<mat->ncols;++i) {
        if (colsize[i] == 0) {
          ++nec;
        }
      }
      printf("Number of rows = %16zu\n",(size_t)mat->nrows);
      printf("Number of columns = %16zu\n",(size_t)mat->ncols);
      printf("Number of non-zeros = %16zu\n",(size_t)mat->rowptr[mat->nrows]);
      printf("Median nnz / row = %16zu\n",
          (size_t)dim_median(rowsize,mat->nrows));
      printf("Mean nnz / row = %16.3lf\n",
          dim_arithmetic_mean(rowsize,mat->nrows));
      printf("Median nnz / column = %16zu\n",
          (size_t)dim_median(colsize,mat->ncols));
      printf("Mean nnz / column = %16.3lf\n",
          dim_arithmetic_mean(colsize,mat->ncols));
      printf("Maximum value = %16.3lf\n",maxvalue);
      printf("Minimum value = %16.3lf\n",minvalue);
      printf("Number of empty rows = %16zu\n",(size_t)ner);
      printf("Number of empty columns = %16zu\n",(size_t)nec);
      dl_free(rowsize);
      dl_free(colsize);
      break;
    case ANALYSIS_CHOLESKY:
      /* symbolic analysis: non-zeros and operation count of the factor */
      analyze_cholesky(mat->nrows,mat->rowptr,mat->rowind,&nnz,&nops);
      printf("Number of non-zeroes = "PF_IND_T"\n",nnz);
      printf("Number of operations = %g\n",nops);
      break;
    default:
      eprintf("Unknown analysis '%d'\n",analysis);
      err = GOOSEBERRY_ERROR_INVALIDINPUT;
      goto END;
  }

  if (times) {
    dl_stop_timer(&op_tmr);
  }

  if (times) {
    dl_stop_timer(&io_tmr);
    dl_print_header("Times",'#');
    printf(" I/O: %0.04lf\n",dl_poll_timer(&io_tmr));
    printf(" Compute: %0.04lf\n",dl_poll_timer(&op_tmr));
    dl_print_footer('#');
  }

  END:

  if (pk) {
    dl_free(pk);
  }
  if (mat) {
    matrix_free(mat);
  }
  if (args) {
    dl_free(args);
  }

  return err;
}

/**
 * @brief Handle the 'permute' command: read a matrix, build a row and/or
 * column permutation (random, file based, or bandwidth reducing), apply it,
 * and write the result.
 *
 * @param argc The number of command line arguments.
 * @param argv The command line arguments.
 *
 * @return GOOSEBERRY_SUCCESS on success, an error code otherwise.
 */
static int __permute(
    int argc,
    char ** argv)
{
  dl_timer_t io_tmr, op_tmr;
  size_t nargs,i;
  int times, j, err, permutation;
  dim_t prows;
  cmd_arg_t * args = NULL;
  dim_t * rpk = NULL, * cpk = NULL, *rperm = NULL, *cperm = NULL, *order;
  matrix_t * mat = NULL;
  const char * matfile = NULL, * outfile = NULL, * rpf = NULL,
      * cpf = NULL;

  /* set defaults */
  times = 0;
  permutation = PERMUTE_PERMUTATION_RANDOM;

  err = cmd_parse_args(argc-2,argv+2,PERMUTE_OPTS,NPERMUTE_OPTS,&args,&nargs);
  if (err != DL_CMDLINE_SUCCESS) {
    return GOOSEBERRY_ERROR_INVALIDINPUT;
  }
  err = GOOSEBERRY_SUCCESS;

  if (nargs < 2) {
    __command_usage(argv[0],argv[1],PERMUTE_OPTS,NPERMUTE_OPTS,stderr);
    goto END;
  }

  /* process the command line options */
  for (i=0;i<nargs;++i) {
    switch (args[i].id) {
      case PERMUTE_OPTION_HELP:
__command_usage(argv[0],argv[1],PERMUTE_OPTS,NPERMUTE_OPTS,stdout);
        goto END;
        break;
      case PERMUTE_OPTION_INFILE:
        if (matfile == NULL) {
          matfile = args[i].val.s;
        } else {
          eprintf("Too many input files specified\n");
          err = GOOSEBERRY_ERROR_INVALIDINPUT;
        }
        break;
      case PERMUTE_OPTION_OUTFILE:
        outfile = args[i].val.s;
        break;
      case PERMUTE_OPTION_TIME:
        times = 1;
        break;
      case PERMUTE_OPTION_ROWPERM:
        rpf = args[i].val.s;
        break;
      case PERMUTE_OPTION_COLPERM:
        cpf = args[i].val.s;
        break;
      case PERMUTE_OPTION_PERMUTATION:
        permutation = (permute_permutation_t)args[i].val.o;
        break;
      default:
        eprintf("Unknown argument '%s'\n",args[i].val.s);
        err = GOOSEBERRY_ERROR_INVALIDINPUT;
        break;
    }
  }
  if (matfile == NULL) {
    eprintf("You must specify a matrix/vector input file.\n");
    err = GOOSEBERRY_ERROR_INVALIDINPUT;
  }
  if (outfile == NULL) {
    eprintf("You must specify an output matrix/vector file.\n");
    err = GOOSEBERRY_ERROR_INVALIDINPUT;
  }
  /* permutation files are required for -- and only valid with -- the
   * file based permutation type */
  if (permutation == PERMUTE_PERMUTATION_FILE) {
    if (rpf == NULL && cpf == NULL) {
      eprintf("You must specify a row permutation and/or a column permutation "
          "to permute from a file.\n");
      err = GOOSEBERRY_ERROR_INVALIDINPUT;
    }
  } else {
    if (rpf || cpf) {
      eprintf("Input row and column permutation files are only for use with "
          "file based permutations.\n");
      err = GOOSEBERRY_ERROR_INVALIDINPUT;
    }
  }
  if (err != GOOSEBERRY_SUCCESS) {
    goto END;
  }

  if (times) {
    dl_init_timer(&io_tmr);
    dl_init_timer(&op_tmr);
    dl_start_timer(&io_tmr);
  }

  /* read in input files */
  j = __get_file_type(matfile);
  if (j < 0) {
    eprintf("Unknown file format of '%s'\n",matfile);
    err = GOOSEBERRY_ERROR_INVALIDINPUT;
    goto END;
  } else {
    /* read in the matrix */
    mat = matrix_calloc(1);
    if (__is_dense(j)) {
      err = gooseberry_read_dense_matrix(j,matfile,&(mat->nrows),
          &(mat->ncols),&(mat->rowval));
      if (mat->ncols == 1 || mat->nrows == 1) {
        mat->type = MATRIX_TYPE_DENSE_VECTOR;
      } else {
        mat->type = MATRIX_TYPE_DENSE_MATRIX;
      }
    } else {
      err = gooseberry_read_sparse_matrix(j,matfile,&(mat->nrows),
          &(mat->ncols),&(mat->rowptr),&(mat->rowind),&(mat->rowval));
      if (mat->ncols == 1 || mat->nrows == 1) {
        mat->type = MATRIX_TYPE_SPARSE_VECTOR;
      } else {
        mat->type = MATRIX_TYPE_SPARSE_MATRIX;
      }
    }
    if (err != GOOSEBERRY_SUCCESS) {
      goto END;
    }
  }

  /* a single random permutation is applied to both sides, so the matrix
   * must be square */
  if (permutation == PERMUTE_PERMUTATION_RANDOM) {
    if (mat->nrows != mat->ncols) {
      eprintf("Cannot apply a single permutation to columns and rows of a "
          "non-square matrix\n");
      err = GOOSEBERRY_ERROR_INVALIDINPUT;
      goto END;
    }
  }

  /* read in permutation files if provided */
  if (permutation == PERMUTE_PERMUTATION_FILE) {
    if (rpf) {
      prows = mat->nrows;
      err = gooseberry_read_labels(rpf,&prows,&rpk);
      if (err != GOOSEBERRY_SUCCESS) {
        goto END;
      }
    }
    if (cpf) {
      prows = mat->ncols;
      err = gooseberry_read_labels(cpf,&prows,&cpk);
      if (err != GOOSEBERRY_SUCCESS) {
        goto END;
      }
    }
  }

  if (times) {
    dl_stop_timer(&io_tmr);
    dl_start_timer(&op_tmr);
  }

  /* permute the input matrices */
  switch (permutation) {
    case PERMUTE_PERMUTATION_FILE:
      /* derive permutations from the label vectors via a stable counting
       * sort of the identity ordering */
      if (rpk) {
        rperm = dim_alloc(mat->nrows);
        order = dim_alloc(mat->nrows);
        dim_incset(order,0,1,mat->nrows);
        dd_countingsort_kv(rpk,order,0,mat->nrows,mat->nrows,rperm,
            &mat->rdist);
        dl_free(order);
        dl_free(rpk);
        rpk = NULL;
      }
      if (cpk) {
        cperm = dim_alloc(mat->ncols);
        order = dim_alloc(mat->ncols);
        dim_incset(order,0,1,mat->ncols);
        dd_countingsort_kv(cpk,order,0,mat->ncols,mat->ncols,cperm,NULL);
        dl_free(order);
        dl_free(cpk);
        cpk = NULL;
      }
      break;
    case PERMUTE_PERMUTATION_RANDOM:
      /* the same shuffle is used for both rows and columns */
      rperm = dim_alloc(mat->nrows);
      dim_incset(rperm,0,1,mat->nrows);
      dim_pseudo_shuffle(rperm,mat->nrows/4,mat->nrows);
      cperm = dim_duplicate(rperm,mat->nrows);
      break;
    case PERMUTE_PERMUTATION_ROWRANDOM:
      rperm = dim_alloc(mat->nrows);
      dim_incset(rperm,0,1,mat->nrows);
      dim_pseudo_shuffle(rperm,mat->nrows/4,mat->nrows);
      break;
    case PERMUTE_PERMUTATION_COLRANDOM:
      cperm = dim_alloc(mat->ncols);
      dim_incset(cperm,0,1,mat->ncols);
      dim_pseudo_shuffle(cperm,mat->ncols/4,mat->ncols);
      break;
    case PERMUTE_PERMUTATION_BANDWIDTH:
      /* Cuthill-McKee style ordering, applied symmetrically */
      rperm = dim_alloc(mat->nrows);
      if
((err = permute_cuthillmckee(mat->nrows,mat->ncols,mat->rowptr,
          mat->rowind,mat->rowval,NULL,0,rperm)) != GOOSEBERRY_SUCCESS) {
        goto END;
      }
      cperm = dim_duplicate(rperm,mat->nrows);
      break;
  }

  matrix_permute(mat,rperm,cperm);

  if (times) {
    dl_stop_timer(&op_tmr);
    dl_start_timer(&io_tmr);
  }

  /* save the output */
  j = __get_file_type(outfile);
  if (__is_dense(j)) {
    /* convert storage if required by the output format */
    if (mat->type != MATRIX_TYPE_DENSE_VECTOR &&
        mat->type != MATRIX_TYPE_DENSE_MATRIX) {
      matrix_densify(mat);
    }
    err = gooseberry_write_dense_matrix(j,outfile,mat->nrows,mat->ncols,
        mat->rowval);
  } else {
    if (mat->type != MATRIX_TYPE_SPARSE_VECTOR &&
        mat->type != MATRIX_TYPE_SPARSE_MATRIX) {
      matrix_sparsify(mat);
    }
    err = gooseberry_write_sparse_matrix(j,outfile,mat->nrows,mat->ncols,
        mat->rowptr,mat->rowind,mat->rowval);
  }
  if (err != GOOSEBERRY_SUCCESS) {
    goto END;
  }

  if (times) {
    dl_stop_timer(&io_tmr);
    dl_print_header("Times",'#');
    printf(" I/O: %0.04lf\n",dl_poll_timer(&io_tmr));
    printf(" Compute: %0.04lf\n",dl_poll_timer(&op_tmr));
    dl_print_footer('#');
  }

  END:

  if (mat) {
    matrix_free(mat);
  }
  if (rperm) {
    dl_free(rperm);
  }
  if (rpk) {
    dl_free(rpk);
  }
  if (cperm) {
    dl_free(cperm);
  }
  if (cpk) {
    dl_free(cpk);
  }
  if (args) {
    dl_free(args);
  }

  return err;
}

/**
 * @brief Handle the 'transform' command: read a matrix and apply a
 * structural transformation (convert, symmetrify, debipartify, transpose,
 * or row/column split) before writing the result.
 *
 * @param argc The number of command line arguments.
 * @param argv The command line arguments.
 *
 * @return GOOSEBERRY_SUCCESS on success, an error code otherwise.
 */
static int __transform(
    int argc,
    char ** argv)
{
  dl_timer_t io_tmr, op_tmr;
  size_t nargs, i;
  int times, j, err, operation;
  ind_t offset;
  dim_t prows, pcols, nout, p, nparts;
  cmd_arg_t * args = NULL;
  matrix_t * mat = NULL, * out = NULL;
  dim_t * dist = NULL, * map = NULL;
  char * sfile;
  char const * matfile = NULL, * outfile = NULL, * partfile = NULL;

  /* set defaults */
  nparts = 0;
  nout = 0;
  times = 0;
  pcols = prows = 0;
  operation = -1;

  err = cmd_parse_args(argc-2,argv+2,TRANSFORM_OPTS,NTRANSFORM_OPTS,&args,
      &nargs);
  if (err != DL_CMDLINE_SUCCESS) {
    return GOOSEBERRY_ERROR_INVALIDINPUT;
  }
  err = GOOSEBERRY_SUCCESS;

  if (nargs < 2) {
    __command_usage(argv[0],argv[1],TRANSFORM_OPTS,NTRANSFORM_OPTS,stderr);
    goto END;
  }

  /* process the command line options */
  for (i=0;i<nargs;++i) {
    switch (args[i].id) {
      case TRANSFORM_OPTION_HELP:
        __command_usage(argv[0],argv[1],TRANSFORM_OPTS,NTRANSFORM_OPTS,stdout);
        goto END;
        break;
      case TRANSFORM_OPTION_INFILE:
        if (matfile == NULL) {
          matfile = args[i].val.s;
        } else {
          eprintf("Extra input file specified: '%s'\n",args[i].val.s);
          err = GOOSEBERRY_ERROR_INVALIDINPUT;
        }
        break;
      case TRANSFORM_OPTION_OUTFILE:
        if (outfile == NULL) {
          outfile = args[i].val.s;
        } else {
          eprintf("Extra output file specified: '%s'\n",args[i].val.s);
          err = GOOSEBERRY_ERROR_INVALIDINPUT;
        }
        break;
      case TRANSFORM_OPTION_TIME:
        times = 1;
        break;
      case TRANSFORM_OPTION_OPERATION:
        operation = (transform_operation_t)args[i].val.o;
        break;
      case TRANSFORM_OPTION_PARTFILE:
        if (partfile == NULL) {
          partfile = args[i].val.s;
        } else {
          eprintf("Extra part file specified: '%s'\n",args[i].val.s);
          err = GOOSEBERRY_ERROR_INVALIDINPUT;
        }
        break;
      default:
        eprintf("Unknown argument '%s'\n",args[i].val.s);
        err = GOOSEBERRY_ERROR_INVALIDINPUT;
        break;
    }
  }
  if (matfile == NULL) {
    eprintf("You must specify a matrix/vector input file.\n");
    err = GOOSEBERRY_ERROR_INVALIDINPUT;
  }
  if (outfile == NULL) {
    eprintf("You must specify an output matrix/vector file.\n");
    err = GOOSEBERRY_ERROR_INVALIDINPUT;
  }
  /* NOTE(review): nparts is never set from the command line in this code,
   * so in practice only the part file path can satisfy this check. */
  if ((operation == TRANSFORM_OPERATION_ROWSPLIT ||
      operation == TRANSFORM_OPERATION_COLSPLIT) &&
      (partfile == NULL && nparts == 0)) {
    eprintf("You must specify a part file for splitting on or a number of "
        "partitions.\n");
    err = GOOSEBERRY_ERROR_INVALIDINPUT;
  }
  if (err != GOOSEBERRY_SUCCESS) {
    goto END;
  }

  if (times) {
    dl_init_timer(&io_tmr);
    dl_init_timer(&op_tmr);
    dl_start_timer(&io_tmr);
  }

  /* read in input file */
  j = __get_file_type(matfile);
  if (j < 0) {
    eprintf("Unknown file format of '%s'\n",matfile);
    err = GOOSEBERRY_ERROR_INVALIDINPUT;
    goto END;
  } else {
    /* read in the matrix */
    mat = matrix_calloc(1);
    if (__is_dense(j)) {
      err = gooseberry_read_dense_matrix(j,matfile,&(mat->nrows),
          &(mat->ncols),&(mat->rowval));
      if (mat->ncols == 1 || mat->nrows == 1) {
        mat->type = MATRIX_TYPE_DENSE_VECTOR;
      } else {
        mat->type = MATRIX_TYPE_DENSE_MATRIX;
      }
    } else {
      err = gooseberry_read_sparse_matrix(j,matfile,&(mat->nrows),
          &(mat->ncols),&(mat->rowptr),&(mat->rowind),&(mat->rowval));
      if (mat->ncols == 1 || mat->nrows == 1) {
        mat->type = MATRIX_TYPE_SPARSE_VECTOR;
      } else {
        mat->type = MATRIX_TYPE_SPARSE_MATRIX;
      }
    }
    if (err != GOOSEBERRY_SUCCESS) {
      goto END;
    }
  }

  /* read and validate the partition vector for split operations */
  if (operation == TRANSFORM_OPERATION_ROWSPLIT) {
    if (partfile != NULL) {
      err = gooseberry_read_partition(partfile,&prows,&nout,&map,NULL,&dist);
      if (err != GOOSEBERRY_SUCCESS) {
        goto END;
      } else if (prows != mat->nrows) {
        eprintf("Invalid number of rows in partition file: found '"PF_DIM_T"' "
            "but matrix has '"PF_DIM_T"'\n",prows,mat->nrows);
        err = GOOSEBERRY_ERROR_INVALIDINPUT;
      }
    } else {
      dl_error("Unimplemented\n");
    }
  } else if (operation == TRANSFORM_OPERATION_COLSPLIT) {
    if (partfile) {
      err = gooseberry_read_partition(partfile,&pcols,&nout,&map,NULL,&dist);
      if (err != GOOSEBERRY_SUCCESS) {
        goto END;
      } else if (pcols != mat->ncols) {
        eprintf("Invalid number of columns in partition file: found '"PF_DIM_T
            "' but matrix has '"PF_DIM_T"'\n",pcols,mat->ncols);
        err = GOOSEBERRY_ERROR_INVALIDINPUT;
      }
    } else {
      dl_error("Unimplemented\n");
    }
  }
  if (err != GOOSEBERRY_SUCCESS) {
    goto END;
  }

  if (times) {
    dl_stop_timer(&io_tmr);
    dl_start_timer(&op_tmr);
  }

  out = matrix_calloc(1);
  out->type = MATRIX_TYPE_SPARSE_MATRIX;

  switch (operation) {
    case TRANSFORM_OPERATION_CONVERT:
      out->nrows = mat->nrows;
      out->ncols = mat->ncols;
      if (mat->type == out->type) {
        /* sparse -> sparse conversion is a plain copy */
        out->rowptr = ind_duplicate(mat->rowptr,mat->nrows+1);
        out->rowind = dim_duplicate(mat->rowind,mat->rowptr[mat->nrows]);
        out->rowval = real_duplicate(mat->rowval,mat->rowptr[mat->nrows]);
      } else {
        dl_error("Not finished yet");
      }
      break;
    case TRANSFORM_OPERATION_SYMMETRIFY:
      out->ncols = out->nrows = dl_max(mat->nrows,mat->ncols);
      out->type = MATRIX_TYPE_SPARSE_MATRIX;
      err = gooseberry_symmetrify_sparse(mat->nrows,mat->ncols,mat->rowptr,
          mat->rowind,mat->rowval,&out->rowptr,&out->rowind,&out->rowval);
      break;
    case TRANSFORM_OPERATION_DEBIPARTIFY:
      /* the result has one row/column per input row AND input column */
      out->ncols = out->nrows = mat->nrows+mat->ncols;
      out->type = MATRIX_TYPE_SPARSE_MATRIX;
      err = gooseberry_debipartify_sparse(mat->nrows,mat->ncols,mat->rowptr,
          mat->rowind,mat->rowval,&out->rowptr,&out->rowind,&out->rowval);
      break;
    case TRANSFORM_OPERATION_TRANSPOSE:
      out->ncols = mat->nrows;
      out->nrows = mat->ncols;
      err = gooseberry_transpose_sparse(mat->nrows,mat->ncols,mat->rowptr,
          mat->rowind,mat->rowval,&out->rowptr,&out->rowind,&out->rowval);
      break;
    case TRANSFORM_OPERATION_ROWSPLIT:
      out->ncols = mat->ncols;
      out->nrows = mat->nrows;
      gooseberry_rowsplit_sparse(mat->nrows,mat->ncols,mat->rowptr,mat->rowind,
          mat->rowval,nout,dist,map,&out->rowptr,&out->rowind,&out->rowval);
      break;
    case TRANSFORM_OPERATION_COLSPLIT:
      /* the widest part determines the output column count */
      out->ncols = 0;
      for (p=0;p<nout;++p) {
        if (dist[p+1] - dist[p] > out->ncols) {
          out->ncols = dist[p+1] - dist[p];
        }
      }
      out->nrows = mat->nrows;
      gooseberry_colsplit_sparse(mat->nrows,mat->ncols,mat->rowptr,mat->rowind,
          mat->rowval,nout,dist,map,&out->rowptr,&out->rowind,&out->rowval);
      break;
    default:
      eprintf("Unknown transform operation: '%d'\n",operation);
      err = GOOSEBERRY_ERROR_INVALIDINPUT;
      break;
  }
  if (err != GOOSEBERRY_SUCCESS) {
    goto END;
  }

  if (times) {
    dl_stop_timer(&op_tmr);
    dl_start_timer(&io_tmr);
  }

  j = __get_file_type(outfile);
  if (operation == TRANSFORM_OPERATION_COLSPLIT ||
      operation == TRANSFORM_OPERATION_ROWSPLIT) {
    /* long enough to append a 64 bit number and null character */
    sfile = malloc(strlen(outfile)+22);
    /* write each part to '<outfile>.<p>' */
    for (p=0;p<nout;++p) {
      sprintf(sfile,"%s."PF_DIM_T,outfile,p);
      if (operation == TRANSFORM_OPERATION_COLSPLIT) {
        prows = mat->nrows;
        pcols = dist[p+1] - dist[p];
        offset = p*mat->nrows;
      } else {
        prows = dist[p+1] - dist[p];
        pcols = mat->ncols;
        offset = dist[p];
      }
      if (__is_dense(j)) {
        /* dense parts are laid out contiguously, one full matrix per part */
        offset = mat->nrows*mat->ncols*p;
        if (out->type != MATRIX_TYPE_DENSE_VECTOR &&
            out->type != MATRIX_TYPE_DENSE_MATRIX) {
          matrix_densify(out);
        }
err = gooseberry_write_dense_matrix(j,sfile,prows,pcols,
            out->rowval+offset);
      } else {
        if (out->type != MATRIX_TYPE_SPARSE_VECTOR &&
            out->type != MATRIX_TYPE_SPARSE_MATRIX) {
          matrix_sparsify(out);
        }
        err = gooseberry_write_sparse_matrix(j,sfile,prows,pcols,
            out->rowptr+offset,out->rowind,out->rowval);
      }
    }
    dl_free(sfile);
  } else {
    /* save the output */
    if (__is_dense(j)) {
      if (out->type != MATRIX_TYPE_DENSE_VECTOR &&
          out->type != MATRIX_TYPE_DENSE_MATRIX) {
        matrix_densify(out);
      }
      err = gooseberry_write_dense_matrix(j,outfile,out->nrows,out->ncols,
          out->rowval);
    } else {
      if (out->type != MATRIX_TYPE_SPARSE_VECTOR &&
          out->type != MATRIX_TYPE_SPARSE_MATRIX) {
        matrix_sparsify(out);
      }
      err = gooseberry_write_sparse_matrix(j,outfile,out->nrows,out->ncols,
          out->rowptr,out->rowind,out->rowval);
    }
  }
  if (err != GOOSEBERRY_SUCCESS) {
    goto END;
  }

  if (times) {
    dl_stop_timer(&io_tmr);
    dl_print_header("Times",'#');
    printf(" I/O: %0.04lf\n",dl_poll_timer(&io_tmr));
    printf(" Compute: %0.04lf\n",dl_poll_timer(&op_tmr));
    dl_print_footer('#');
  }

  END:

  if (mat) {
    matrix_free(mat);
  }
  if (out) {
    matrix_free(out);
  }
  if (args) {
    dl_free(args);
  }

  return err;
}

/**
 * @brief Handle the 'generate' command: create a randomly filled dense
 * vector of a given size and write it out.
 *
 * @param argc The number of command line arguments.
 * @param argv The command line arguments.
 *
 * @return GOOSEBERRY_SUCCESS on success, an error code otherwise.
 */
static int __generate(
    int argc,
    char ** argv)
{
  dl_timer_t io_tmr, op_tmr;
  size_t nargs, i;
  int times, j, err, type;
  dim_t size;
  cmd_arg_t * args = NULL;
  matrix_t * out = NULL;
  const char * outfile = NULL;

  /* set defaults */
  size = 0;
  times = 0;
  type = GENERATE_TYPE_NULL;

  err = cmd_parse_args(argc-2,argv+2,GENERATE_OPTS,NGENERATE_OPTS,&args,
      &nargs);
  if (err != DL_CMDLINE_SUCCESS) {
    return GOOSEBERRY_ERROR_INVALIDINPUT;
  }
  err = GOOSEBERRY_SUCCESS;

  if (nargs < 2) {
    __command_usage(argv[0],argv[1],GENERATE_OPTS,NGENERATE_OPTS,stderr);
    goto END;
  }

  /* process the command line options */
  for (i=0;i<nargs;++i) {
    switch (args[i].id) {
      case GENERATE_OPTION_HELP:
        __command_usage(argv[0],argv[1],GENERATE_OPTS,NGENERATE_OPTS,stdout);
        goto END;
        break;
      case GENERATE_OPTION_OUTFILE:
        if (outfile == NULL) {
          outfile = args[i].val.s;
        } else {
          eprintf("Extra output file specified: '%s'\n",args[i].val.s);
          err = GOOSEBERRY_ERROR_INVALIDINPUT;
        }
        break;
      case GENERATE_OPTION_TIME:
        times = 1;
        break;
      case GENERATE_OPTION_TYPE:
        type = (generate_type_t)args[i].val.o;
        break;
      case GENERATE_OPTION_SIZE:
        size = (dim_t)args[i].val.i;
        break;
      default:
        eprintf("Unknown argument '%s'\n",args[i].val.s);
        err = GOOSEBERRY_ERROR_INVALIDINPUT;
        break;
    }
  }
  if (size == 0) {
    eprintf("You must specify a size greater than zero.\n");
    err = GOOSEBERRY_ERROR_INVALIDINPUT;
  }
  if (type == GENERATE_TYPE_NULL) {
    eprintf("You must specify a type to generate.\n");
    err = GOOSEBERRY_ERROR_INVALIDINPUT;
  }
  if (outfile == NULL) {
    eprintf("You must specify an output matrix/vector file.\n");
    err = GOOSEBERRY_ERROR_INVALIDINPUT;
  }
  if (err != GOOSEBERRY_SUCCESS) {
    goto END;
  }

  if (times) {
    dl_init_timer(&io_tmr);
    dl_init_timer(&op_tmr);
    dl_start_timer(&op_tmr);
  }

  out = matrix_calloc(1);
  switch (type) {
    case GENERATE_TYPE_DENSE_VECTOR:
      /* values are drawn uniformly from [-1.0,1.0] */
      out->type = MATRIX_TYPE_DENSE_VECTOR;
      out->nrows = size;
      out->ncols = 1;
      out->rowval = real_alloc(out->nrows);
      real_fill_rand(-1.0,1.0,out->rowval,out->nrows);
      break;
    default:
      eprintf("Unknown generate type: '%d'\n",type);
      err = GOOSEBERRY_ERROR_INVALIDINPUT;
      break;
  }

  if (times) {
    dl_stop_timer(&op_tmr);
    dl_start_timer(&io_tmr);
  }

  /* save the output in whatever format the extension implies */
  j = __get_file_type(outfile);
  if (__is_dense(j)) {
    if (out->type != MATRIX_TYPE_DENSE_VECTOR &&
        out->type != MATRIX_TYPE_DENSE_MATRIX) {
      matrix_densify(out);
    }
    err = gooseberry_write_dense_matrix(j,outfile,out->nrows,out->ncols,
        out->rowval);
  } else {
    if (out->type != MATRIX_TYPE_SPARSE_VECTOR &&
        out->type != MATRIX_TYPE_SPARSE_MATRIX) {
      matrix_sparsify(out);
    }
    err = gooseberry_write_sparse_matrix(j,outfile,out->nrows,out->ncols,
        out->rowptr,out->rowind,out->rowval);
  }
  if (err != GOOSEBERRY_SUCCESS) {
    goto END;
  }

  if (times) {
    dl_stop_timer(&io_tmr);
    dl_print_header("Times",'#');
    printf(" I/O: %0.04lf\n",dl_poll_timer(&io_tmr));
    printf(" Compute: %0.04lf\n",dl_poll_timer(&op_tmr));
    dl_print_footer('#');
  }

  END:

  if (out) {
matrix_free(out);
  }
  if (args) {
    dl_free(args);
  }

  return err;
}

/**
 * @brief Handle the 'blas' command: read one or more matrices/vectors,
 * optionally permute them, and perform a (possibly repeated) blas style
 * operation before writing the result.
 *
 * @param argc The number of command line arguments.
 * @param argv The command line arguments.
 *
 * @return GOOSEBERRY_SUCCESS on success, an error code otherwise.
 */
static int __blas(
    int argc,
    char ** argv)
{
  dl_timer_t io_tmr, op_tmr, aux_tmr;
  size_t nargs, runs, r, i, ninfiles = 0,nthreads;
  int times, j, err, oper, redband;
  dim_t outrows, outcols, prows;
  cmd_arg_t * args = NULL;
  dim_t *rperm = NULL, *cperm = NULL, *bperm = NULL;
  /* NOTE(review): fixed capacity of 256 input files with no bounds check
   * when filling infiles[] below; in[] is also not NULL-initialized, so a
   * failure part-way through reading can free uninitialized pointers at
   * END -- confirm. */
  matrix_t * in[256], * out = NULL;
  const char * infiles[256], * outfile = NULL, * rpf = NULL,
      * cpf = NULL;

  /* set defaults */
  redband = 0;
  times = 0;
  runs = 1;
  oper = BLAS_OPERATION_NOOP;

  err = cmd_parse_args(argc-2,argv+2,BLAS_OPTS,NBLAS_OPTS,&args,&nargs);
  if (err != DL_CMDLINE_SUCCESS) {
    return GOOSEBERRY_ERROR_INVALIDINPUT;
  }
  err = GOOSEBERRY_SUCCESS;
  nthreads = DEFAULT_NUMTHREADS;

  if (nargs < 2) {
    __command_usage(argv[0],argv[1],BLAS_OPTS,NBLAS_OPTS,stderr);
    goto END;
  }

  /* process the command line options */
  for (i=0;i<nargs;++i) {
    switch (args[i].id) {
      case BLAS_OPTION_HELP:
        __command_usage(argv[0],argv[1],BLAS_OPTS,NBLAS_OPTS,stdout);
        goto END;
        break;
      case BLAS_OPTION_OPERATION:
        oper = (blas_operation_t)args[i].val.o;
        break;
      case BLAS_OPTION_INFILE:
        infiles[ninfiles++] = args[i].val.s;
        break;
      case BLAS_OPTION_OUTFILE:
        outfile = args[i].val.s;
        break;
      case BLAS_OPTION_TIME:
        times = 1;
        break;
      case BLAS_OPTION_RUNS:
        runs = (size_t)args[i].val.i;
        break;
#ifndef NO_OMP
      case BLAS_OPTION_THREADS:
        nthreads = (size_t)args[i].val.i;
        omp_set_num_threads(nthreads);
        break;
#endif
      case BLAS_OPTION_ROWPERM:
        rpf = args[i].val.s;
        break;
      case BLAS_OPTION_COLPERM:
        cpf = args[i].val.s;
        break;
      case BLAS_OPTION_REDUCEBANDWIDTH:
        redband = 1;
        break;
      default:
        eprintf("Unknown argument '%s'\n",args[i].val.s);
        err = GOOSEBERRY_ERROR_INVALIDINPUT;
        break;
    }
  }
  if (err != GOOSEBERRY_SUCCESS) {
    goto END;
  }

  if (times) {
    dl_init_timer(&io_tmr);
    dl_init_timer(&op_tmr);
    dl_init_timer(&aux_tmr);
    dl_start_timer(&io_tmr);
  }

  /* read in input files */
  for (i=0;i<ninfiles;++i) {
    j = __get_file_type(infiles[i]);
    if (j < 0) {
      eprintf("Unknown file format of '%s'\n",infiles[i]);
      err = GOOSEBERRY_ERROR_INVALIDINPUT;
      goto END;
    } else {
      /* read in the matrix/vector */
      in[i] = matrix_calloc(1);
      if (__is_dense(j)) {
        err = gooseberry_read_dense_matrix(j,infiles[i],&(in[i]->nrows),
            &(in[i]->ncols),&(in[i]->rowval));
        if (in[i]->ncols == 1 || in[i]->nrows == 1) {
          in[i]->type = MATRIX_TYPE_DENSE_VECTOR;
        } else {
          in[i]->type = MATRIX_TYPE_DENSE_MATRIX;
        }
      } else {
        err = gooseberry_read_sparse_matrix(j,infiles[i],&(in[i]->nrows),
            &(in[i]->ncols),&(in[i]->rowptr),&(in[i]->rowind),
            &(in[i]->rowval));
        if (in[i]->ncols == 1 || in[i]->nrows == 1) {
          in[i]->type = MATRIX_TYPE_SPARSE_VECTOR;
        } else {
          in[i]->type = MATRIX_TYPE_SPARSE_MATRIX;
        }
      }
      if (err != GOOSEBERRY_SUCCESS) {
        goto END;
      }
    }
  }

  /* read in permutation files if provided */
  if (rpf) {
    prows = in[0]->nrows;
    err = gooseberry_read_partition(rpf,&prows,&in[0]->nrdist,NULL,&rperm,
        &in[0]->rdist);
    if (err != GOOSEBERRY_SUCCESS) {
      goto END;
    }
    printf("Read "PF_DIM_T"-way row partition\n",in[0]->nrdist);
  } else {
    in[0]->nrdist = 1;
  }
  if (cpf) {
    prows = in[0]->ncols;
    err = gooseberry_read_partition(cpf,&prows,&in[0]->ncdist,NULL,&cperm,
        &in[0]->cdist);
    if (err != GOOSEBERRY_SUCCESS) {
      goto END;
    }
    printf("Read "PF_DIM_T"-way column partition\n",in[0]->ncdist);
  }

  if (times) {
    dl_stop_timer(&io_tmr);
    dl_start_timer(&aux_tmr);
  }

  /* NOTE(review): in[1] is used unconditionally here and below, yet
   * nothing enforces that at least two input files were supplied --
   * confirm the expected invocation. */
  matrix_permute(in[0],rperm,cperm);
  matrix_permute(in[1],cperm,NULL);

  /* optionally re-order the system to reduce bandwidth */
  if (redband) {
    bperm = dim_alloc(in[0]->nrows);
    if ((err = permute_cuthillmckee(in[0]->nrows,in[0]->ncols,in[0]->rowptr,
        in[0]->rowind,in[0]->rowval,in[0]->rdist,in[0]->nrdist,bperm)) !=
        GOOSEBERRY_SUCCESS) {
      goto END;
    }
    /* compose the bandwidth ordering with the existing permutations so the
     * result can still be un-permuted after the operation */
    if (rperm && cperm) {
      for (i=0;i<in[0]->nrows;++i) {
        rperm[i] = bperm[rperm[i]];
      }
      for (i=0;i<in[0]->nrows;++i) {
        cperm[i] = bperm[cperm[i]];
      }
    }
    matrix_permute(in[0],bperm,bperm);
    matrix_permute(in[1],bperm,NULL);
    dl_free(bperm);
  }

  /* allocate the output matrix -- storage type is chosen from the output
   * file's extension */
  outrows = in[0]->nrows;
  outcols = in[ninfiles-1]->ncols;
  out = matrix_alloc(1);
  j = __get_file_type(outfile);
  if (__is_dense(j)) {
    if (outrows == 1 || outcols == 1) {
      matrix_init(MATRIX_TYPE_DENSE_VECTOR,outrows,outcols,0,out);
    } else {
      matrix_init(MATRIX_TYPE_DENSE_MATRIX,outrows,outcols,0,out);
    }
  } else {
    if (outrows == 1 || outcols == 1) {
      matrix_init(MATRIX_TYPE_SPARSE_VECTOR,outrows,outcols,NULL_IND,out);
    } else {
      matrix_init(MATRIX_TYPE_SPARSE_MATRIX,outrows,outcols,NULL_IND,out);
    }
  }

  if (times) {
    dl_stop_timer(&aux_tmr);
    dl_start_timer(&op_tmr);
  }

  /* repeat the operation 'runs' times (useful for timing) */
  for (r=0;r<runs;++r) {
    /* perform operation */
    switch (oper) {
      case BLAS_OPERATION_MULTIPLY:
        if (in[0]->ncols > in[1]->nrows) {
          eprintf("Matrix dimensions do not match for multiplication: "
              PF_DIM_T"x"PF_DIM_T" and "PF_DIM_T"x"PF_DIM_T"\n",in[0]->nrows,
              in[0]->ncols,in[1]->nrows,in[1]->ncols);
          err = GOOSEBERRY_ERROR_INVALIDINPUT;
          goto END;
        }
        /* dispatch on the sparse/dense storage of the two operands */
        switch(in[0]->type) {
          case MATRIX_TYPE_SPARSE_VECTOR:
          case MATRIX_TYPE_SPARSE_MATRIX:
            switch (in[1]->type) {
              case MATRIX_TYPE_DENSE_VECTOR:
              case MATRIX_TYPE_DENSE_MATRIX:
                matrix_buildindex(in[1]);
                if ((err = blas_spmult(in[0]->nrows,in[0]->ncols,in[1]->ncols,
                    in[0]->rowptr,in[0]->rowind,in[0]->rowval,
                    in[1]->colval,out->rowval,in[0]->rdist,in[0]->nrdist)) !=
                    GOOSEBERRY_SUCCESS) {
                  goto END;
                }
                break;
              case MATRIX_TYPE_SPARSE_VECTOR:
              case MATRIX_TYPE_SPARSE_MATRIX:
                /* sparse x sparse -- result storage decides the kernel */
                switch (out->type) {
                  case MATRIX_TYPE_DENSE_VECTOR:
                  case MATRIX_TYPE_DENSE_MATRIX:
                    if ((err = blas_spmultsp(in[0]->nrows,in[0]->ncols,
                        in[1]->ncols,in[0]->rowptr,in[0]->rowind,
                        in[0]->rowval,in[1]->rowptr,in[1]->rowind,
                        in[1]->rowval,out->rowval,in[0]->rdist,
                        in[0]->nrdist)) != GOOSEBERRY_SUCCESS) {
                      goto END;
                    }
                    break;
                  case MATRIX_TYPE_SPARSE_VECTOR:
                  case MATRIX_TYPE_SPARSE_MATRIX:
                    if ((err = blas_spmultsp_sp(in[0]->nrows,in[0]->ncols,
                        in[1]->ncols,in[0]->rowptr,in[0]->rowind,
                        in[0]->rowval,in[1]->rowptr,in[1]->rowind,
                        in[1]->rowval,&out->rowptr,&out->rowind,
                        &out->rowval,in[0]->rdist,in[0]->nrdist)) !=
                        GOOSEBERRY_SUCCESS) {
                      goto END;
                    }
                    break;
                  default:
                    eprintf("Unsupported output matrix type: %d\n",out->type);
                    err = GOOSEBERRY_ERROR_INVALIDINPUT;
                    break;
                }
                break;
              default:
                eprintf("Unsupported matrix combinations\n");
                err = GOOSEBERRY_ERROR_INVALIDINPUT;
                break;
            }
            break;
          case MATRIX_TYPE_DENSE_VECTOR:
          case MATRIX_TYPE_DENSE_MATRIX:
            switch (in[1]->type) {
              case MATRIX_TYPE_DENSE_VECTOR:
              case MATRIX_TYPE_DENSE_MATRIX:
                matrix_buildindex(in[1]);
                if ((err = blas_mult(in[0]->nrows,in[0]->ncols,in[1]->ncols,
                    in[0]->rowval,in[1]->colval,out->rowval,in[0]->rdist,
                    in[0]->nrdist)) != GOOSEBERRY_SUCCESS) {
                  goto END;
                }
                break;
              case MATRIX_TYPE_SPARSE_VECTOR:
              case MATRIX_TYPE_SPARSE_MATRIX:
                eprintf("The operation mmsp is unsupported at the moment.\n");
                err = GOOSEBERRY_ERROR_UNIMPLEMENTED;
                break;
              default:
                eprintf("Unsupported matrix combinations\n");
                err = GOOSEBERRY_ERROR_INVALIDINPUT;
                break;
            }
            break;
          default:
            eprintf("Unknown matrix type '%d'\n",in[0]->type);
            err = GOOSEBERRY_ERROR_INVALIDINPUT;
            break;
        }
        break;
      default:
        eprintf("Unknown operation '%d'\n",oper);
        err = GOOSEBERRY_ERROR_INVALIDINPUT;
        break;
    }
  }

  if (times) {
    dl_stop_timer(&op_tmr);
    dl_start_timer(&aux_tmr);
  }

  /* restore the original row order of the result */
  if (rperm) {
    matrix_unpermute(out,rperm,NULL);
  }

  if (times) {
    dl_stop_timer(&aux_tmr);
    dl_start_timer(&io_tmr);
  }

  /* save the output */
  j = __get_file_type(outfile);
  if (__is_dense(j)) {
    err = gooseberry_write_dense_matrix(j,outfile,out->nrows,out->ncols,
        out->rowval);
  } else {
    err = gooseberry_write_sparse_matrix(j,outfile,out->nrows,out->ncols,
        out->rowptr,out->rowind,out->rowval);
  }
  if (err != GOOSEBERRY_SUCCESS) {
    goto END;
  }

  if (times) {
    dl_stop_timer(&io_tmr);
    dl_print_header("Times",'#');
    printf(" I/O: %0.04lf\n",dl_poll_timer(&io_tmr));
    printf(" Auxillary: %0.04lf\n",dl_poll_timer(&aux_tmr));
    printf(" Compute: %0.04lf\n",dl_poll_timer(&op_tmr));
    dl_print_footer('#');
  }

  END:

  if (out) {
    matrix_free(out);
  }
  if (rperm) {
    dl_free(rperm);
  }
  if (cperm) {
    dl_free(cperm);
  }
  for (i=0;i<ninfiles;++i) {
    if (in[i]) {
      matrix_free(in[i]);
      in[i] = NULL;
    }
  }
  if (args) {
    dl_free(args);
  }

  return err;
}

/**
 * @brief Handle the 'cgd' command (definition continues beyond this
 * point): solve a linear system read from matrix/vector input files.
 *
 * @param argc The number of command line arguments.
 * @param argv The command line arguments.
 *
 * @return GOOSEBERRY_SUCCESS on success, an error code otherwise.
 */
static int __cgd(
    int argc,
    char ** argv)
{
  dl_timer_t io_tmr, op_tmr;
  real_t
error = 0; size_t nargs, runs, r, i, niter = 0; int times, j, err; dim_t outrows, outcols, prows; cmd_arg_t * args = NULL; dim_t * rpk = NULL, * cpk = NULL, *rperm = NULL, *cperm = NULL, *order; matrix_t * mat = NULL, * vec = NULL, * out = NULL; const char * matfile = NULL, * vecfile = NULL, * outfile = NULL, * rpf = NULL, * cpf = NULL; /* set defaults */ times = 0; runs = 1; err = cmd_parse_args(argc-2,argv+2,CGD_OPTS,NCGD_OPTS,&args,&nargs); if (err != DL_CMDLINE_SUCCESS) { return GOOSEBERRY_ERROR_INVALIDINPUT; } err = GOOSEBERRY_SUCCESS; if (nargs < 2) { __command_usage(argv[0],argv[1],CGD_OPTS,NCGD_OPTS,stderr); goto END; } for (i=0;i<nargs;++i) { switch (args[i].id) { case CGD_OPTION_HELP: __command_usage(argv[0],argv[1],CGD_OPTS,NCGD_OPTS,stdout); goto END; break; case CGD_OPTION_ERROR: error = (real_t)args[i].val.f; break; case CGD_OPTION_NITER: niter = (size_t)args[i].val.i; break; case CGD_OPTION_INFILE: if (matfile == NULL) { matfile = args[i].val.s; } else if (vecfile == NULL) { vecfile = args[i].val.s; } else { eprintf("Too many input files specified\n"); err = GOOSEBERRY_ERROR_INVALIDINPUT; } break; case CGD_OPTION_OUTFILE: outfile = args[i].val.s; break; case CGD_OPTION_TIME: times = 1; break; case CGD_OPTION_RUNS: runs = (size_t)args[i].val.i; break; #ifndef NO_OMP case CGD_OPTION_THREADS: omp_set_num_threads(args[i].val.i); break; #endif case CGD_OPTION_ROWPERM: rpf = args[i].val.s; break; case CGD_OPTION_COLPERM: cpf = args[i].val.s; break; default: eprintf("Unknown argument '%s'\n",args[i].val.s); err = GOOSEBERRY_ERROR_INVALIDINPUT; break; } } if (matfile == NULL || vecfile == NULL) { eprintf("You must specify both a matrix input file and a vector input " "file (in that order).\n"); err = GOOSEBERRY_ERROR_INVALIDINPUT; } if (err != GOOSEBERRY_SUCCESS) { goto END; } if (times) { dl_init_timer(&io_tmr); dl_init_timer(&op_tmr); dl_start_timer(&io_tmr); } /* read in input files */ j = __get_file_type(matfile); if (j < 0) { eprintf("Unknown file 
format of '%s'\n",matfile); err = GOOSEBERRY_ERROR_INVALIDINPUT; goto END; } else { /* read in the matrix */ mat = matrix_calloc(1); if (__is_dense(j)) { err = gooseberry_read_dense_matrix(j,matfile,&(mat->nrows), &(mat->ncols),&(mat->rowval)); if (mat->ncols == 1 || mat->nrows == 1) { mat->type = MATRIX_TYPE_DENSE_VECTOR; } else { mat->type = MATRIX_TYPE_DENSE_MATRIX; } } else { err = gooseberry_read_sparse_matrix(j,matfile,&(mat->nrows), &(mat->ncols),&(mat->rowptr),&(mat->rowind),&(mat->rowval)); if (mat->ncols == 1 || mat->nrows == 1) { mat->type = MATRIX_TYPE_SPARSE_VECTOR; } else { mat->type = MATRIX_TYPE_SPARSE_MATRIX; } } if (err != GOOSEBERRY_SUCCESS) { goto END; } } j = __get_file_type(vecfile); if (j < 0) { eprintf("Unknown file format of '%s'\n",vecfile); err = GOOSEBERRY_ERROR_INVALIDINPUT; goto END; } else { /* read in the vector */ vec = matrix_calloc(1); if (__is_dense(j)) { err = gooseberry_read_dense_matrix(j,vecfile,&(vec->nrows), &(vec->ncols),&(vec->rowval)); if (vec->ncols == 1 || vec->nrows == 1) { vec->type = MATRIX_TYPE_DENSE_VECTOR; } else { vec->type = MATRIX_TYPE_DENSE_MATRIX; } } else { err = gooseberry_read_sparse_matrix(j,vecfile,&(vec->nrows), &(vec->ncols),&(vec->rowptr),&(vec->rowind),&(vec->rowval)); if (vec->ncols == 1 || vec->nrows == 1) { vec->type = MATRIX_TYPE_SPARSE_VECTOR; } else { vec->type = MATRIX_TYPE_SPARSE_MATRIX; } } if (err != GOOSEBERRY_SUCCESS) { goto END; } } printf("matrix = "PF_DIM_T"x"PF_DIM_T" vector = "PF_DIM_T"x"PF_DIM_T"\n", mat->nrows,mat->ncols,vec->nrows,vec->ncols); /* read in permutation files if provided */ if (rpf) { prows = mat->nrows; err = gooseberry_read_labels(rpf,&prows,&rpk); if (err != GOOSEBERRY_SUCCESS) { goto END; } } if (cpf) { prows = mat->ncols; err = gooseberry_read_labels(cpf,&prows,&cpk); if (err != GOOSEBERRY_SUCCESS) { goto END; } } if (times) { dl_stop_timer(&io_tmr); } /* permute the input matrices */ if (rpk) { rperm = dim_alloc(mat->nrows); order = dim_alloc(mat->nrows); 
dim_incset(order,0,1,mat->nrows); dd_countingsort_kv(rpk,order,0,mat->nrows,mat->nrows,rperm, &mat->rdist); dl_free(order); dl_free(rpk); rpk = NULL; } if (cpk) { cperm = dim_alloc(mat->ncols); order = dim_alloc(mat->ncols); dim_incset(order,0,1,mat->ncols); dd_countingsort_kv(cpk,order,0,mat->ncols,mat->ncols,cperm,NULL); dl_free(order); dl_free(cpk); cpk = NULL; } matrix_permute(mat,rperm,cperm); matrix_permute(vec,rperm,NULL); /* allocate the output matrix */ outrows = mat->ncols; outcols = 1; out = matrix_alloc(1); j = __get_file_type(outfile); if (__is_dense(j)) { if (outrows == 1|| outcols == 1) { matrix_init(MATRIX_TYPE_DENSE_VECTOR,outrows,outcols,0,out); } else { matrix_init(MATRIX_TYPE_DENSE_MATRIX,outrows,outcols,0,out); } } else { if (outrows == 1|| outcols == 1) { matrix_init(MATRIX_TYPE_SPARSE_VECTOR,outrows,outcols,NULL_IND,out); } else { matrix_init(MATRIX_TYPE_SPARSE_MATRIX,outrows,outcols,NULL_IND,out); } } if (times) { dl_start_timer(&op_tmr); } for (r=0;r<runs;++r) { /* perform operation */ if ((err = cgd(mat,vec,out,error,niter)) != GOOSEBERRY_SUCCESS) { goto END; } } if (times) { dl_stop_timer(&op_tmr); } if (cperm) { matrix_unpermute(out,cperm,NULL); } if (times) { dl_start_timer(&io_tmr); } /* save the output */ err = gooseberry_write_dense_matrix(GOOSEBERRY_FORMAT_GRID,outfile, out->nrows,out->ncols,out->rowval); if (err != GOOSEBERRY_SUCCESS) { goto END; } if (times) { dl_stop_timer(&io_tmr); dl_print_header("Times",'#'); printf(" I/O: %0.04lf\n",dl_poll_timer(&io_tmr)); printf(" Compute: %0.04lf\n",dl_poll_timer(&op_tmr)); dl_print_footer('#'); } END: if (mat) { matrix_free(mat); } if (vec) { matrix_free(vec); } if (out) { matrix_free(out); } if (rperm) { dl_free(rperm); } if (rpk) { dl_free(rpk); } if (cperm) { dl_free(cperm); } if (cpk) { dl_free(cpk); } if (args) { dl_free(args); } return err; } static int __sgd( int argc, char ** argv) { int err = GOOSEBERRY_SUCCESS; #ifdef XXX int times, j, err; dl_timer_t io_tmr, op_tmr; real_t 
error = 0;
  size_t nargs, runs, r, i, niter = 0;
  dim_t outrows, outcols, prows;
  cmd_arg_t * args = NULL;
  dim_t * rpk = NULL, * cpk = NULL, *rperm = NULL, *cperm = NULL, *order;
  matrix_t * mat = NULL, * vec = NULL, * out = NULL;
  /* NOTE(review): this disabled (#ifdef XXX) body declares ufile/vfile but
   * then uses vecfile/outfile below, and re-declares err (already declared
   * outside the #ifdef) -- it would not compile if XXX were ever defined.
   * Confirm intent before enabling. */
  const char * matfile = NULL, * ufile = NULL, * vfile, * rpf = NULL,
      * cpf = NULL;

  /* set defaults */
  times = 0;
  runs = 1;

  err = cmd_parse_args(argc-2,argv+2,SGD_OPTS,NSGD_OPTS,&args,&nargs);
  if (err != DL_CMDLINE_SUCCESS) {
    return GOOSEBERRY_ERROR_INVALIDINPUT;
  }
  if (nargs < 2) {
    __command_usage(argv[0],argv[1],SGD_OPTS,NSGD_OPTS,stderr);
    goto END;
  }
  /* translate parsed command-line options into local configuration */
  for (i=0;i<nargs;++i) {
    switch (args[i].id) {
      case SGD_OPTION_HELP:
        __command_usage(argv[0],argv[1],SGD_OPTS,NSGD_OPTS,stdout);
        goto END;
        break;
      case SGD_OPTION_ERROR:
        error = (real_t)args[i].val.f;
        break;
      case SGD_OPTION_NITER:
        niter = (size_t)args[i].val.i;
        break;
      case SGD_OPTION_INFILE:
        /* first -i is the matrix, second is the vector */
        if (matfile == NULL) {
          matfile = args[i].val.s;
        } else if (vecfile == NULL) {
          vecfile = args[i].val.s;
        } else {
          eprintf("Too many input files specified\n");
          err = GOOSEBERRY_ERROR_INVALIDINPUT;
        }
        break;
      case SGD_OPTION_OUTFILE:
        outfile = args[i].val.s;
        break;
      case SGD_OPTION_TIME:
        times = 1;
        break;
      case SGD_OPTION_RUNS:
        runs = (size_t)args[i].val.i;
        break;
      #ifndef NO_OMP
      case SGD_OPTION_THREADS:
        omp_set_num_threads(args[i].val.i);
        break;
      #endif
      case SGD_OPTION_ROWPERM:
        rpf = args[i].val.s;
        break;
      case SGD_OPTION_COLPERM:
        cpf = args[i].val.s;
        break;
      default:
        eprintf("Unknown argument '%s'\n",args[i].val.s);
        err = GOOSEBERRY_ERROR_INVALIDINPUT;
        break;
    }
  }
  if (matfile == NULL || vecfile == NULL) {
    eprintf("You must specify both a matrix input file and a vector input "
        "file (in that order).\n");
    err = GOOSEBERRY_ERROR_INVALIDINPUT;
  }
  if (err != GOOSEBERRY_SUCCESS) {
    goto END;
  }
  if (times) {
    dl_init_timer(&io_tmr);
    dl_init_timer(&op_tmr);
    dl_start_timer(&io_tmr);
  }
  /* read in input files */
  j = __get_file_type(matfile);
  if (j < 0) {
    eprintf("Unknown file format of '%s'\n",matfile);
    err = GOOSEBERRY_ERROR_INVALIDINPUT;
    goto END;
  } else {
    /* read in the matrix -- vector vs matrix type inferred from shape */
    mat = matrix_calloc(1);
    if (__is_dense(j)) {
      err = gooseberry_read_dense_matrix(j,matfile,&(mat->nrows),
          &(mat->ncols),&(mat->rowval));
      if (mat->ncols == 1 || mat->nrows == 1) {
        mat->type = MATRIX_TYPE_DENSE_VECTOR;
      } else {
        mat->type = MATRIX_TYPE_DENSE_MATRIX;
      }
    } else {
      err = gooseberry_read_sparse_matrix(j,matfile,&(mat->nrows),
          &(mat->ncols),&(mat->rowptr),&(mat->rowind),&(mat->rowval));
      if (mat->ncols == 1 || mat->nrows == 1) {
        mat->type = MATRIX_TYPE_SPARSE_VECTOR;
      } else {
        mat->type = MATRIX_TYPE_SPARSE_MATRIX;
      }
    }
    if (err != GOOSEBERRY_SUCCESS) {
      goto END;
    }
  }
  j = __get_file_type(vecfile);
  if (j < 0) {
    eprintf("Unknown file format of '%s'\n",vecfile);
    err = GOOSEBERRY_ERROR_INVALIDINPUT;
    goto END;
  } else {
    /* read in the vector */
    vec = matrix_calloc(1);
    if (__is_dense(j)) {
      err = gooseberry_read_dense_matrix(j,vecfile,&(vec->nrows),
          &(vec->ncols),&(vec->rowval));
      if (vec->ncols == 1 || vec->nrows == 1) {
        vec->type = MATRIX_TYPE_DENSE_VECTOR;
      } else {
        vec->type = MATRIX_TYPE_DENSE_MATRIX;
      }
    } else {
      err = gooseberry_read_sparse_matrix(j,vecfile,&(vec->nrows),
          &(vec->ncols),&(vec->rowptr),&(vec->rowind),&(vec->rowval));
      if (vec->ncols == 1 || vec->nrows == 1) {
        vec->type = MATRIX_TYPE_SPARSE_VECTOR;
      } else {
        vec->type = MATRIX_TYPE_SPARSE_MATRIX;
      }
    }
    if (err != GOOSEBERRY_SUCCESS) {
      goto END;
    }
  }
  printf("matrix = "PF_DIM_T"x"PF_DIM_T" vector = "PF_DIM_T"x"PF_DIM_T"\n",
      mat->nrows,mat->ncols,vec->nrows,vec->ncols);
  /* read in permutation files if provided */
  if (rpf) {
    prows = mat->nrows;
    err = gooseberry_read_labels(rpf,&prows,&rpk);
    if (err != GOOSEBERRY_SUCCESS) {
      goto END;
    }
  }
  if (cpf) {
    prows = mat->ncols;
    err = gooseberry_read_labels(cpf,&prows,&cpk);
    if (err != GOOSEBERRY_SUCCESS) {
      goto END;
    }
  }
  if (times) {
    dl_stop_timer(&io_tmr);
  }
  /* permute the input matrices.  NOTE(review): this uses dim_countingsort_v
   * while the live __cgd path uses dd_countingsort_kv -- confirm which is
   * current before enabling this block. */
  if (rpk) {
    rperm = dim_alloc(mat->nrows);
    order = dim_alloc(mat->nrows);
    dim_incset(order,0,1,mat->nrows);
    dim_countingsort_v(rpk,order,rperm,0,mat->nrows,mat->nrows);
    dl_free(order);
    dl_free(rpk);
    rpk = NULL;
  }
  if (cpk) {
    cperm = dim_alloc(mat->ncols);
    order = dim_alloc(mat->ncols);
    dim_incset(order,0,1,mat->ncols);
    dim_countingsort_v(cpk,order,cperm,0,mat->ncols,mat->ncols);
    dl_free(order);
    dl_free(cpk);
    cpk = NULL;
  }
  matrix_permute(mat,rperm,cperm);
  matrix_permute(vec,rperm,NULL);
  /* allocate the output matrix (solution vector of length ncols) */
  outrows = mat->ncols;
  outcols = 1;
  out = matrix_alloc(1);
  j = __get_file_type(outfile);
  if (__is_dense(j)) {
    if (outrows == 1|| outcols == 1) {
      matrix_init(MATRIX_TYPE_DENSE_VECTOR,outrows,outcols,0,out);
    } else {
      matrix_init(MATRIX_TYPE_DENSE_MATRIX,outrows,outcols,0,out);
    }
  } else {
    if (outrows == 1|| outcols == 1) {
      matrix_init(MATRIX_TYPE_SPARSE_VECTOR,outrows,outcols,NULL_IND,out);
    } else {
      matrix_init(MATRIX_TYPE_SPARSE_MATRIX,outrows,outcols,NULL_IND,out);
    }
  }
  if (times) {
    dl_start_timer(&op_tmr);
  }
  for (r=0;r<runs;++r) {
    /* perform operation -- NOTE(review): still calls cgd(), not an sgd
     * solver; presumably a placeholder copied from __cgd */
    if ((err = cgd(mat,vec,out,error,niter)) != GOOSEBERRY_SUCCESS) {
      goto END;
    }
  }
  if (times) {
    dl_stop_timer(&op_tmr);
  }
  if (cperm) {
    matrix_unpermute(out,cperm,NULL);
  }
  if (times) {
    dl_start_timer(&io_tmr);
  }
  /* save the output */
  err = gooseberry_write_dense_matrix(GOOSEBERRY_FORMAT_GRID,outfile,
      out->nrows,out->ncols,out->rowval);
  if (err != GOOSEBERRY_SUCCESS) {
    goto END;
  }
  if (times) {
    dl_stop_timer(&io_tmr);
    dl_print_header("Times",'#');
    printf(" I/O: %0.04lf\n",dl_poll_timer(&io_tmr));
    printf(" Compute: %0.04lf\n",dl_poll_timer(&op_tmr));
    dl_print_footer('#');
  }
  END:
  /* single cleanup point for every exit path above */
  if (mat) {
    matrix_free(mat);
  }
  if (vec) {
    matrix_free(vec);
  }
  if (out) {
    matrix_free(out);
  }
  if (rperm) {
    dl_free(rperm);
  }
  if (rpk) {
    dl_free(rpk);
  }
  if (cperm) {
    dl_free(cperm);
  }
  if (cpk) {
    dl_free(cpk);
  }
  if (args) {
    dl_free(args);
  }
  #endif
  return err;
}


/**
 * @brief Implements the 'pagerank' command: load a (square) adjacency
 * matrix, optionally permute it, row-normalize it, run the power iteration
 * with damping until the RMSE drops below the requested error (or a fixed
 * iteration count is reached), and write the rank vector.
 *
 * @param argc Full program argc (argv[0] is the program, argv[1] the
 * command; options start at argv[2]).
 * @param argv Full program argv.
 *
 * @return GOOSEBERRY_SUCCESS on success, an error code otherwise.
 */
static int __pagerank(
    int argc,
    char ** argv)
{
  dl_timer_t io_tmr, op_tmr, pre_tmr, mul_tmr;
  size_t nargs,i,iter,runs,r;
  int times, j, err;
  ind_t l;
  dim_t prows, k, nsinks, m;
  real_t
minerror, error, damping, diff, dist, deg, wgt;
  cmd_arg_t * args = NULL;
  real_t * rank = NULL, * indeg = NULL;
  dim_t * pk = NULL, * perm = NULL, * order = NULL, * sinks = NULL;
  matrix_t * mat = NULL, * out = NULL;
  const char * matfile = NULL, * outfile = NULL, * pf = NULL;

  /* set defaults */
  times = 0;
  runs = 1;
  minerror = 0.0;  /* 0 => never converge early; rely on -niter */
  iter = 0;        /* 0 => iterate until the error criterion is met */
  damping = 0.85;  /* conventional PageRank damping factor */

  err = cmd_parse_args(argc-2,argv+2,PAGERANK_OPTS,NPAGERANK_OPTS,&args,
      &nargs);
  if (err != DL_CMDLINE_SUCCESS) {
    return GOOSEBERRY_ERROR_INVALIDINPUT;
  }
  err = GOOSEBERRY_SUCCESS;
  if (nargs < 2) {
    __command_usage(argv[0],argv[1],PAGERANK_OPTS,NPAGERANK_OPTS,stderr);
    goto END;
  }
  /* translate parsed command-line options into local configuration */
  for (i=0;i<nargs;++i) {
    switch (args[i].id) {
      case PAGERANK_OPTION_HELP:
        __command_usage(argv[0],argv[1],PAGERANK_OPTS,NPAGERANK_OPTS,stdout);
        goto END;
        break;
      case PAGERANK_OPTION_INFILE:
        if (matfile == NULL) {
          matfile = args[i].val.s;
        } else {
          eprintf("Too many input files specified\n");
          err = GOOSEBERRY_ERROR_INVALIDINPUT;
        }
        break;
      case PAGERANK_OPTION_OUTFILE:
        outfile = args[i].val.s;
        break;
      case PAGERANK_OPTION_TIME:
        times = 1;
        break;
      case PAGERANK_OPTION_PERM:
        pf = args[i].val.s;
        break;
      case PAGERANK_OPTION_RUNS:
        runs = (size_t)args[i].val.i;
        break;
      case PAGERANK_OPTION_NITER:
        iter = (size_t)args[i].val.i;
        break;
      case PAGERANK_OPTION_DAMPING:
        damping = (real_t)args[i].val.f;
        break;
      case PAGERANK_OPTION_ERROR:
        minerror = (real_t)args[i].val.f;
        break;
      #ifndef NO_OMP
      case PAGERANK_OPTION_THREADS:
        omp_set_num_threads(args[i].val.i);
        break;
      #endif
      default:
        eprintf("Unknown argument '%s'\n",args[i].val.s);
        err = GOOSEBERRY_ERROR_INVALIDINPUT;
        break;
    }
  }
  if (matfile == NULL) {
    eprintf("You must specify a matrix input file.\n");
    err = GOOSEBERRY_ERROR_INVALIDINPUT;
  }
  if (outfile == NULL) {
    eprintf("You must specify an output vector file.\n");
    err = GOOSEBERRY_ERROR_INVALIDINPUT;
  }
  if (err != GOOSEBERRY_SUCCESS) {
    goto END;
  }
  if (times) {
    dl_init_timer(&io_tmr);
    dl_init_timer(&op_tmr);
    dl_init_timer(&pre_tmr);
    dl_init_timer(&mul_tmr);
    dl_start_timer(&io_tmr);
  }
  /* read in input files */
  j = __get_file_type(matfile);
  if (j < 0) {
    eprintf("Unknown file format of '%s'\n",matfile);
    err = GOOSEBERRY_ERROR_INVALIDINPUT;
    goto END;
  } else {
    /* read in the matrix */
    mat = matrix_calloc(1);
    if (__is_dense(j)) {
      err = gooseberry_read_dense_matrix(j,matfile,&(mat->nrows),
          &(mat->ncols),&(mat->rowval));
      if (mat->ncols == 1 || mat->nrows == 1) {
        mat->type = MATRIX_TYPE_DENSE_VECTOR;
      } else {
        mat->type = MATRIX_TYPE_DENSE_MATRIX;
      }
    } else {
      err = gooseberry_read_sparse_matrix(j,matfile,&(mat->nrows),
          &(mat->ncols),&(mat->rowptr),&(mat->rowind),&(mat->rowval));
      if (mat->ncols == 1 || mat->nrows == 1) {
        mat->type = MATRIX_TYPE_SPARSE_VECTOR;
      } else {
        mat->type = MATRIX_TYPE_SPARSE_MATRIX;
      }
    }
    /* PageRank needs a square matrix: sparse inputs are padded out to
     * square (empty extra rows); dense inputs are rejected */
    if (mat->nrows != mat->ncols) {
      if (mat->type == MATRIX_TYPE_DENSE_MATRIX) {
        eprintf("PageRank requires a square matrix, but input matrix '%s' is "
            PF_DIM_T"x"PF_DIM_T".\n",matfile,mat->nrows,mat->ncols);
        err = GOOSEBERRY_ERROR_INVALIDINPUT;
      } else {
        if (mat->nrows > mat->ncols) {
          /* widening needs no storage change for CSR */
          mat->ncols = mat->nrows;
        } else if (mat->ncols > mat->nrows) {
          /* stretch the matrix: append empty rows by replicating the final
           * rowptr entry */
          mat->rowptr = ind_realloc(mat->rowptr,mat->ncols+1);
          ind_set(mat->rowptr+mat->nrows+1,mat->rowptr[mat->nrows],
              mat->ncols-mat->nrows);
          mat->nrows = mat->ncols;
        }
      }
    }
    if (err != GOOSEBERRY_SUCCESS) {
      goto END;
    }
  }
  /* read in permutation file if provided */
  if (pf) {
    prows = mat->nrows;
    err = gooseberry_read_labels(pf,&prows,&pk);
    if (err != GOOSEBERRY_SUCCESS) {
      goto END;
    }
  }
  if (times) {
    dl_stop_timer(&io_tmr);
    dl_start_timer(&pre_tmr);
  }
  /* permute the input matrix (symmetrically, rows and columns alike) */
  if (pk) {
    perm = dim_alloc(mat->nrows);
    order = dim_alloc(mat->nrows);
    dim_incset(order,0,1,mat->nrows);
    dd_countingsort_kv(pk,order,0,mat->nrows,mat->nrows,perm,
        &mat->rdist);
    dl_free(order);
    dl_free(pk);
    pk = NULL;
    matrix_permute(mat,perm,perm);
  }
  /* normalize input matrix columns by total in-weight and record sink
   * vertices (rows with no outgoing edges) */
  sinks = dim_alloc(mat->nrows);
  nsinks = 0;
  indeg = real_calloc(mat->ncols);
  switch (mat->type) {
    case MATRIX_TYPE_SPARSE_MATRIX:
      for (k=0;k<mat->nrows;++k) {
        if (mat->rowptr[k] == mat->rowptr[k+1]) {
          sinks[nsinks++] = k;
        } else {
          for (l=mat->rowptr[k];l<mat->rowptr[k+1];++l) {
            indeg[mat->rowind[l]] += mat->rowval[l];
          }
        }
      }
      for (k=0;k<mat->nrows;++k) {
        for (l=mat->rowptr[k];l<mat->rowptr[k+1];++l) {
          mat->rowval[l] /= indeg[mat->rowind[l]];
        }
      }
      break;
    case MATRIX_TYPE_DENSE_MATRIX:
      for (k=0;k<mat->nrows;++k) {
        deg = 0;
        for (m=0;m<mat->ncols;++m) {
          wgt = mat->rowval[(k*mat->ncols)+m];
          if (wgt != 0) {
            deg += wgt;
            indeg[m] += wgt;
          }
        }
        if (deg == 0) {
          sinks[nsinks++] = k;
        }
      }
      /* NOTE(review): unlike the sparse case, this divides every entry --
       * including zeros -- by indeg[m], which is 0 for a column with no
       * incoming weight, producing NaNs.  Confirm whether columns with
       * indeg[m] == 0 should be skipped. */
      for (k=0;k<mat->nrows;++k) {
        for (m=0;m<mat->ncols;++m) {
          mat->rowval[(k*mat->ncols)+m] /= indeg[m];
        }
      }
      break;
    default:
      eprintf("Unknown matrix type: %d\n",mat->type);
      err = GOOSEBERRY_ERROR_INVALIDINPUT;
      goto END;
  }
  dl_free(indeg);
  indeg = NULL;
  /* allocate output matrix (the rank vector) */
  out = matrix_calloc(1);
  matrix_init(MATRIX_TYPE_DENSE_VECTOR,mat->ncols,1,0,out);
  if (times) {
    dl_stop_timer(&pre_tmr);
    dl_start_timer(&op_tmr);
  }
  /* perform pagerank: power iteration with damping */
  rank = real_alloc(mat->ncols);
  for (r=0;r<runs;++r) {
    /* start from the uniform distribution */
    real_set(rank,1.0/out->nrows,out->nrows);
    for (i=0;iter==0||i<iter;++i) {
      if (times) {
        dl_start_timer(&mul_tmr);
      }
      /* out = M * rank */
      gooseberry_spmult(mat->nrows,mat->ncols,1,mat->rowptr,mat->rowind,
          mat->rowval,rank,out->rowval,NULL,0);
      if (times) {
        dl_stop_timer(&mul_tmr);
      }
      gooseberry_scale(out->nrows,out->rowval,damping);
      /* redistribute sunk ranks uniformly, along with the teleport term */
      dist = 0;
      for (k=0;k<nsinks;++k) {
        dist += rank[sinks[k]];
      }
      gooseberry_add_scalar(out->nrows,out->rowval,
          ((1.0-damping)/out->nrows) + dist);
      /* only check RMSE every 10 iterations */
      if (i%10 == 0) {
        #ifndef NO_OMP
        /* NOTE(review): every thread writes the shared 'error' before the
         * reduction; all write 0.0 so the value is right, but it is formally
         * a data race -- consider initializing outside the parallel region */
        #pragma omp parallel default(none) shared(out,rank,error) private(diff)
        {
          error = 0.0;
          #pragma omp for schedule(static,OMP_BIG_BLOCK) \
              reduction(+:error)
          for (k=0;k<out->nrows;++k) {
            diff = rank[k] - out->rowval[k];
            error += diff*diff;
          }
        }
        #else
        error = 0.0;
        for (k=0;k<out->nrows;++k) {
          diff = rank[k] - out->rowval[k];
          error += diff*diff;
        }
        #endif
        /* compare squared error against squared threshold (sqrt deferred) */
        if (error <= minerror*minerror) {
          ++i; /* skip recalculating RMSE */
          goto FINISH;
        }
      }
      /* new iterate becomes the input of the next round (pointer swap) */
      dl_swap(rank,out->rowval);
    }
    /* check error when finished */
    #ifndef NO_OMP
    #pragma omp parallel default(none) shared(out,rank,error) private(diff)
    {
      error = 0.0;
      #pragma omp for schedule(static,OMP_BIG_BLOCK) \
          reduction(+:error)
      for (k=0;k<out->nrows;++k) {
        diff = rank[k] - out->rowval[k];
        error += diff*diff;
      }
    }
    #else
    error = 0.0;
    for (k=0;k<out->nrows;++k) {
      diff = rank[k] - out->rowval[k];
      error += diff*diff;
    }
    #endif
    FINISH:
    error = sqrt(error);
  }
  printf("PageRank finished in %zu iterations with an RMSE of "PF_REAL_T"\n",
      i,error);
  if (times) {
    dl_stop_timer(&op_tmr);
  }
  /* undo the permutation on the rank vector before writing */
  if (perm) {
    matrix_unpermute(out,perm,NULL);
  }
  if (times) {
    dl_start_timer(&io_tmr);
  }
  /* save the output, converting storage to match the output format */
  j = __get_file_type(outfile);
  if (__is_dense(j)) {
    if (out->type != MATRIX_TYPE_DENSE_VECTOR &&
        out->type != MATRIX_TYPE_DENSE_MATRIX) {
      matrix_densify(out);
    }
    err = gooseberry_write_dense_matrix(j,outfile,out->nrows,out->ncols,
        out->rowval);
  } else {
    if (out->type != MATRIX_TYPE_SPARSE_VECTOR &&
        out->type != MATRIX_TYPE_SPARSE_MATRIX) {
      matrix_sparsify(out);
    }
    err = gooseberry_write_sparse_matrix(j,outfile,out->nrows,out->ncols,
        out->rowptr,out->rowind,out->rowval);
  }
  if (err != GOOSEBERRY_SUCCESS) {
    goto END;
  }
  if (times) {
    dl_stop_timer(&io_tmr);
    dl_print_header("Times",'#');
    printf(" I/O: %0.04lf\n",dl_poll_timer(&io_tmr));
    printf(" Preprocessing: %0.04lf\n",dl_poll_timer(&pre_tmr));
    printf(" Compute: %0.04lf\n",dl_poll_timer(&op_tmr));
    printf(" SpMV: %0.04lf\n",dl_poll_timer(&mul_tmr));
    dl_print_footer('#');
  }
  END:
  /* single cleanup point for every exit path above */
  if (indeg) {
    dl_free(indeg);
  }
  if (sinks) {
    dl_free(sinks);
  }
  if (rank) {
    dl_free(rank);
  }
  if (mat) {
    matrix_free(mat);
  }
  if (perm) {
    dl_free(perm);
  }
  if (pk) {
    dl_free(pk);
  }
  if (out) {
    matrix_free(out);
  }
  if (args) {
    dl_free(args);
  }
  return err;
}


/* signature shared by every command entry point above */
typedef int (*__cmdfuncptr_t)(int,char**);

/* command-id -> handler table; indices come from the COMMANDS enum */
static const __cmdfuncptr_t COMMAND_FUNCS[] = {
  [COMMAND_HELP] = __help,
  [COMMAND_ANALYSIS] = __analyze,
[COMMAND_PERMUTE] = __permute,
  [COMMAND_TRANSFORM] = __transform,
  [COMMAND_GENERATE] = __generate,
  [COMMAND_BLAS] = __blas,
  [COMMAND_CGD] = __cgd,
  [COMMAND_SGD] = __sgd,
  [COMMAND_PAGERANK] = __pagerank
};


/* don't get burned -- every command must have a handler */
DL_STATIC_ASSERT(ARRAY_SIZE(COMMANDS) == ARRAY_SIZE(COMMAND_FUNCS));




/******************************************************************************
* MAIN ************************************************************************
******************************************************************************/


/**
 * @brief Program entry point: match argv[1] against the COMMANDS table and
 * dispatch to the corresponding handler.
 *
 * @return 0 on success, 1 for a missing/unknown command, 2 if the command
 * itself failed.
 */
int main(
    int argc,
    char ** argv)
{
  int err;
  char * cmdstr;
  size_t i;

  dl_init_rand();

  if (argc < 2) {
    eprintf("Must supply a command.\n");
    __usage(argv[0],stderr);
    return 1;
  }

  cmdstr = argv[1];

  for (i=0;i<NCOMMANDS;++i) {
    if (COMMANDS[i].str != NULL && strcmp(cmdstr,COMMANDS[i].str) == 0) {
      err = COMMAND_FUNCS[i](argc,argv);
      break;
    }
  }
  /* i == NCOMMANDS means the loop fell through without a match */
  if (i == NCOMMANDS) {
    eprintf("Unrecognized command '%s'.\n",cmdstr);
    __usage(argv[0],stderr);
    return 1;
  }

  if (err == GOOSEBERRY_SUCCESS) {
    return 0;
  } else {
    eprintf("Operation failed.\n");
    return 2;
  }
}

#endif
GB_unaryop__ainv_uint8_int64.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__ainv_uint8_int64
// op(A') function:  GB_tran__ainv_uint8_int64

// C type:   uint8_t
// A type:   int64_t
// cast:     uint8_t cij = (uint8_t) aij
// unaryop:  cij = -aij

#define GB_ATYPE \
    int64_t

#define GB_CTYPE \
    uint8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
// (additive inverse; on the uint8_t result this wraps modulo 256)
#define GB_OP(z, x) \
    z = -x ;

// casting
#define GB_CASTING(z, x) \
    uint8_t z = (uint8_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_AINV || GxB_NO_UINT8 || GxB_NO_INT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__ainv_uint8_int64
(
    uint8_t *restrict Cx,
    const int64_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__ainv_uint8_int64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the loop body comes from the shared transpose template
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_binop__land_uint8.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB_AaddB__land_uint8
// A.*B function (eWiseMult):       GB_AemultB__land_uint8
// A*D function (colscale):         GB_AxD__land_uint8
// D*A function (rowscale):         GB_DxB__land_uint8
// C+=B function (dense accum):     GB_Cdense_accumB__land_uint8
// C+=b function (dense accum):     GB_Cdense_accumb__land_uint8
// C+=A+B function (dense ewise3):  (none)
// C=A+B function (dense ewise3):   GB_Cdense_ewise3_noaccum__land_uint8
// C=scalar+B                       GB_bind1st__land_uint8
// C=scalar+B'                      GB_bind1st_tran__land_uint8
// C=A+scalar                       GB_bind2nd__land_uint8
// C=A'+scalar                      GB_bind2nd_tran__land_uint8

// C type:   uint8_t
// A type:   uint8_t
// B,b type: uint8_t
// BinaryOp: cij = ((aij != 0) && (bij != 0))

#define GB_ATYPE \
    uint8_t

#define GB_BTYPE \
    uint8_t

#define GB_CTYPE \
    uint8_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint8_t aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    uint8_t bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint8_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator (logical AND on nonzero-ness of the operands)
#define GB_BINOP(z, x, y) \
    z = ((x != 0) && (y != 0)) ;

// op is second
#define GB_OP_IS_SECOND \
    0

// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0

// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0

// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
    (none)

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LAND || GxB_NO_UINT8 || GxB_NO_LAND_UINT8)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// LAND has no dense-accum kernel, so this variant is compiled out (#if 0).
#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void (none)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__land_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumB__land_uint8
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__land_uint8
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint8_t
        uint8_t bwork = (*((uint8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable: the block above always returns (generated-code artifact)
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_AxD__land_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    bool A_is_pattern,
    const GrB_Matrix D,
    bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *GB_RESTRICT Cx = (uint8_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_DxB__land_uint8
(
    GrB_Matrix C,
    const GrB_Matrix D,
    bool D_is_pattern,
    const GrB_Matrix B,
    bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *GB_RESTRICT Cx = (uint8_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB_AaddB__land_uint8
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_add_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__land_uint8
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB_bind1st__land_uint8
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t   x = (*((uint8_t *) x_input)) ;
    uint8_t *Bx = (uint8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        uint8_t bij = Bx [p] ;
        Cx [p] = ((x != 0) && (bij != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB_bind2nd__land_uint8
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t *Ax = (uint8_t *) Ax_input ;
    uint8_t   y = (*((uint8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        uint8_t aij = Ax [p] ;
        Cx [p] = ((aij != 0) && (y != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)
\ { \ uint8_t aij = Ax [pA] ; \ Cx [pC] = ((x != 0) && (aij != 0)) ; \ } GrB_Info GB_bind1st_tran__land_uint8 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t x = (*((const uint8_t *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = Ax [pA] ; \ Cx [pC] = ((aij != 0) && (y != 0)) ; \ } GrB_Info GB_bind2nd_tran__land_uint8 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t y = (*((const uint8_t *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
GB_binop__first_int8.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

//      A+B function (eWiseAdd):         GB (_AaddB__first_int8)
//      A.*B function (eWiseMult):       GB (_AemultB_08__first_int8)
//      A.*B function (eWiseMult):       GB (_AemultB_02__first_int8)
//      A.*B function (eWiseMult):       GB (_AemultB_04__first_int8)
//      A.*B function (eWiseMult):       GB (_AemultB_bitmap__first_int8)
//      A*D function (colscale):         GB (_AxD__first_int8)
//      D*A function (rowscale):         GB (_DxB__first_int8)
//      C+=B function (dense accum):     GB (_Cdense_accumB__first_int8)
//      C+=b function (dense accum):     GB (_Cdense_accumb__first_int8)
//      C+=A+B function (dense ewise3):  GB ((none))
//      C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__first_int8)
//      C=scalar+B                       GB ((none))
//      C=scalar+B'                      GB ((none))
//      C=A+scalar                       GB ((none))
//      C=A'+scalar                      GB ((none))

// C type:     int8_t
// A type:     int8_t
// A pattern?  0
// B type:     int8_t
// B pattern?  1

// BinaryOp: cij = aij
// (FIRST operator: the result is the A entry; B's values are never read,
// which is why the bind1st/scalar variants below are compiled out as "(none)")

#define GB_ATYPE \
    int8_t

#define GB_BTYPE \
    int8_t

#define GB_CTYPE \
    int8_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    int8_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    ;

// true if values of B are not used
#define GB_B_IS_PATTERN \
    1 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int8_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = x ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_FIRST || GxB_NO_INT8 || GxB_NO_FIRST_INT8)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__first_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__first_int8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // template disabled for FIRST: C+=B would require reading B's values
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__first_int8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        // get the scalar b for C += b, of type int8_t
        int8_t bwork = (*((int8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__first_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *restrict Cx = (int8_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__first_int8)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *restrict Cx = (int8_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__first_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    int8_t alpha_scalar ;
    int8_t beta_scalar ;
    if (is_eWiseUnion)
    {
        // eWiseUnion supplies scalars used in place of missing entries
        alpha_scalar = (*((int8_t *) alpha_scalar_in)) ;
        beta_scalar = (*((int8_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__first_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__first_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__first_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__first_int8)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// compiled out: FIRST(x,bij) == x for every entry, so this kernel is unused
#if 0

GrB_Info GB ((none))
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t x = (*((int8_t *) x_input)) ;
    int8_t *Bx = (int8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        ;
        ;
        Cx [p] = x ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t *Ax = (int8_t *) Ax_input ;
    int8_t y = (*((int8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        int8_t aij = GBX (Ax, p, false) ;
        Cx [p] = aij ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

#if 0

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)       \
{                               \
    ;  ;                        \
    Cx [pC] = x ;               \
}

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t x = (*((const int8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int8_t
}

#endif

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

#if 0

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int8_t aij = GBX (Ax, pA, false) ;          \
    Cx [pC] = aij ;                             \
}

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t y = (*((const int8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

#endif
DRB033-truedeplinear-orig-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

/*
 * A linear expression is used as array subscription.
 * Data race pair: a[2*i+1]@64:5 vs. a[i]@64:14
 *
 * NOTE(review): this is a DataRaceBench benchmark whose filename suffix
 * "orig-yes" marks the race as intentional ground truth.  Do NOT "fix"
 * the race — detection tools are expected to report it.
 */
#include "omprace.h"
#include <omp.h>
#include <stdlib.h>
#include <stdio.h>

/* Entry point: initializes the array, runs the (intentionally racy)
 * parallel loop, and prints one element so the result is observable.
 * omprace_init()/omprace_fini() bracket the region for the race-detection
 * runtime. */
int main(int argc, char* argv[])
{
  omprace_init();

  int i;
  int a[2000];

  /* serial initialization: a[i] = i */
  for (i=0; i<2000; i++)
    a[i]=i;

  /* Intentional data race: iteration i writes a[2*i+1] while iteration j
   * reads a[j]; for every odd j = 2*i+1 < 1000 the write and the read
   * touch the same element from different threads with no ordering. */
#pragma omp parallel for
  for (i=0;i<1000;i++)
    a[2*i+1]=a[i]+1;

  printf("a[1001]=%d\n", a[1001]);

  omprace_fini();
  return 0;
}
prefilter.h
#pragma once #if !defined(__PREFILTER_H) #define __PREFILTER_H #include <mitsuba/core/plugin.h> #include <mitsuba/core/bitmap.h> #include <mitsuba/core/fstream.h> #include <mitsuba/render/util.h> #include <mitsuba/render/bsdf.h> #include <mitsuba/render/sampler.h> #include <boost/lexical_cast.hpp> #include <Eigen/Dense> #include <vector> #include <string> /** * NOTE ABOUT CUBEMAP COORDINATE SYSTEMS: * * This code uses a right-handed system everywhere (X right, Y up, -Z forward). * * There are some serious sign issues with cube maps in general. Within this code * base, the cube assumes a right-handed system where each cube face is, starting * from looking down -Z with +X right and +Y up: * +X: turn right 90 degrees * -X: turn left 90 degrees * +Y: turn up 90 degrees * -Y: turn down 90 degrees * +Z: turn around 180 degrees about Y * -Z: no change * * HOWEVER, OpenGL seems to have a left-handed system for cube maps with +Z forward * and with Y inverted (pointing down). Further, libraries that wrap OpenGL * (e.g. three.js) seem to flip things around even more, sometimes inconsistently. * Therefore, I am going to work within the above right-handed system for this code * base, and worry about translating to a particular rendering library separately. 
*/ MTS_NAMESPACE_BEGIN typedef Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic, Eigen::ColMajor> matX; typedef Eigen::Matrix<int, Eigen::Dynamic, Eigen::Dynamic, Eigen::ColMajor> matXi; typedef Eigen::Matrix<float, Eigen::Dynamic, 1, Eigen::ColMajor> vecX; typedef Eigen::Matrix<float, 3, 3, Eigen::ColMajor> mat3; typedef Eigen::Matrix<float, 4, 4, Eigen::ColMajor> mat4; typedef Eigen::Matrix<Spectrum, 4, 4, Eigen::ColMajor> mat4s; typedef Eigen::Matrix<float, 2, 1, Eigen::ColMajor> vec2; typedef Eigen::Matrix<float, 3, 1, Eigen::ColMajor> vec3; typedef Eigen::Array<float, 9, 1, Eigen::ColMajor> arr9; typedef Eigen::Array<Spectrum, 9, 1, Eigen::ColMajor> arr9s; typedef Eigen::Array<float, 3, 1, Eigen::ColMajor> arr3; typedef Eigen::AlignedBox<int, 2> ibbox2; typedef Eigen::Matrix<int, 2, 1> ivec2; using std::vector; using std::string; using std::abs; enum ECubeSide { POSX, NEGX, POSY, NEGY, POSZ, NEGZ }; /** * Returns true if two vectors are approximately equal */ template <typename A, typename B> inline bool approx_equals(A a , B b, float epsilon = 1e-5) { if (a.size() != b.size()) return false; for (int i = 0; i < a.size(); ++i) { if (abs(a.coeff(i) - b.coeff(i)) > epsilon) { return false; } } return true; } /** * Compute the homography between mappings of points a --> b, returning M such that * <constant> * [ b' 1 ]' = M * [ a' 1 ]' */ mat3 compute_homography(const vec2 *a, const vec2 *b, int n = 4); bool test_compute_homography(); /** * Applies a homography and projects back to cartesian coordinates */ inline vec2 apply_homography(const mat3& M, const vec2& a) { vec3 b = M * vec3(a[0], a[1], 1.0); return vec2(b[0] / b[2], b[1] / b[2]); } /** * Wrapper around envmap lookup */ template <typename V3> inline Spectrum lookup_envmap(const Emitter* env, const V3& v) { return env->evalEnvironment(RayDifferential( Point3(0,0,0), Vector3(v[0], v[1], v[2]), 0.0)); } /** * Converts a single image into an entire cube map as follows: * * /-------------------\ ^ * | \ +Y / 
| | margin * height * | --------------- | v * | | | | * |-X| -Z |+X| * | | | | * | --------------- | ^ * | / -Y \ | | margin * height * \-------------------/ v * <--> margin * width * <--> margin * width */ class HomographyEnvmap : public Emitter { public: HomographyEnvmap(ref<Bitmap> img_, float margin_); HomographyEnvmap(Stream *stream, InstanceManager *manager) : Emitter(stream, manager) { Log(EError, "TODO"); } inline Spectrum evalEnvironment(const RayDifferential& ray) const { const Vector3& v = ray.d; int idx; arr3(v[0], v[1], v[2]).abs().maxCoeff(&idx); const float x = v[0], y = v[1], z = v[2]; vec2 uv; switch (idx) { case 0: if (x > 0) { // +X // map right half of cubeface to left half of image uv = apply_homography(M[0], vec2( 0.5 * (-abs(z) / x) + 0.5, 0.5 * (y / x) + 0.5) ); } else { // -X // map left half of cubeface to right half of image uv = apply_homography(M[1], vec2( 0.5 * (abs(z) / (-x)) + 0.5, 0.5 * (y / (-x)) + 0.5) ); } break; case 1: if (y > 0) { // +Y uv = apply_homography(M[2], vec2( // map to bottom half 0.5 * (x / y) + 0.5, 0.5 * (-abs(z) / y) + 0.5) ); } else { // -Y uv = apply_homography(M[3], vec2( // map to top half 0.5 * (x / (-y)) + 0.5, 0.5 * (abs(z) / (-y)) + 0.5) ); } break; case 2: // +/- Z uv = apply_homography(M[4], vec2( // map to entire square 0.5 * (x / abs(z)) + 0.5, 0.5 * (y / abs(z)) + 0.5) ); break; } // fetch pixel, clamping to edge return img->getPixel(Point2i( clamp<int>(uv[0] * img->getWidth(), 0, img->getWidth() - 1), clamp<int>(uv[1] * img->getHeight(), 0, img->getHeight() - 1))); } AABB getAABB() const { return AABB(Point(0,0,0)); } private: mat3 M[5]; // +X, -X, +Y, -Y, +/- Z ref<Bitmap> img; }; /** * Functional test for HomographyEnvmap -- outputs images into current directory */ void test_HomographyEnvmap(); /** * Computes the cubemap, assuming OpenGL ordering * (+X, -X, +Y, -Y, +Z, -Z, each side row-major with 0th row at bottom) * * */ Bitmap* compute_prefiltered_envmap(const Emitter* envmap, const BSDF* 
bsdf, Sampler* sampler, int resolution, int samples); /** * Projects a 3D cartesian vector into the first 9 SH coefficients */ template <typename V3> inline arr9 cartesian_to_sh9(const V3 &x) { arr9 sh; sh[0] = 0.2820947917738781434740397257804; sh[1] = 0.4886025119029199215863846228384 * x[1]; sh[2] = 0.4886025119029199215863846228384 * x[2]; sh[3] = 0.4886025119029199215863846228384 * x[0]; sh[4] = 1.0925484305920790705433857058027 * x[1] * x[0]; sh[5] = 1.0925484305920790705433857058027 * x[1] * x[2]; sh[6] = 1.0925484305920790705433857058027 * x[2] * x[0]; sh[7] = 0.3153915652525200060308936902957 * (3 * x[2] * x[2] - 1); sh[8] = 0.5462742152960395352716928529014 * (x[0] * x[1] - x[1] * x[1]); return sh; } /** * Computes the first 9 spherical harmonics for an environment map */ inline arr9s compute_sh9(const HomographyEnvmap* envmap, ref<Sampler> sampler_parent, int nsamples) { arr9s ret = arr9s::Constant(Spectrum(0.0)); #pragma omp parallel for schedule(dynamic, 1) for (int k = 0; k < 128; ++k) { arr9s sum = arr9s::Constant(Spectrum(0.0)); // each thread has its own sampler ref<Sampler> sampler = sampler_parent->clone(); sampler->generate(Point2i(k, 0)); for (int i = 0; i < nsamples; ++i) { const Vector3 v = Warp::squareToUniformSphere(sampler->next2D()); const arr9 sh9 = cartesian_to_sh9(v); const Spectrum L = lookup_envmap(envmap, v); for (int j = 0; j < 9; ++j) ret[j] += sh9[j] * L; sampler->advance(); } #pragma omp critical { sum += ret; } } // sampling by solid angle: p(direction) = 1 / 4pi float scale = 4.0 * M_PI / nsamples; for (int j = 0; j < 9; ++j) ret[j] *= scale; return ret; } /** * Computes the 4x4 matrix used for diffuse lighting * Uses method from [ Ravi Ramamoorthi and Pat Hanrahan, "An Efficient * Representation for Irradiance Environment Maps", SIGGRAPH 2001. 
* http://graphics.stanford.edu/papers/envmap/envmap.pdf ] */ inline mat4s compute_sh9_diffuse_matrix(const HomographyEnvmap* envmap, ref<Sampler> sampler, int nsamples) { mat4s ret; arr9s sh9 = compute_sh9(envmap, sampler, nsamples); const float c1 = 0.429043, c2 = 0.511664, c3 = 0.743125, c4 = 0.886227, c5 = 0.247708; ret << c1 * sh9[8], c1 * sh9[4], c1 * sh9[7], c2 * sh9[3], c1 * sh9[4], -c1 * sh9[8], c1 * sh9[5], c2 * sh9[1], c1 * sh9[7], c1 * sh9[5], c3 * sh9[6], c2 * sh9[2], c2 * sh9[3], c2 * sh9[1], c2 * sh9[2], c4 * sh9[0] - c5 * sh9[6]; return ret; } inline std::string mat4s_to_glsl_string(const mat4s& m) { std::stringstream ss; ss.precision(8); const char names[3] = {'r', 'g', 'b'}; for (int c = 0; c < 3; ++c) { ss << "mat4 M" << names[c] << "=mat4("; for (int i = 0; i < 4; ++i) { ss << ((i > 0) ? ",vec4(" : "vec4("); for (int j = 0; j < 4; ++j) { if (j > 0) ss << ","; float rgb[3]; // NOTE THE TRANSPOSE -- each vec4 is a column m(j, i).toLinearRGB(rgb[0], rgb[1], rgb[2]); ss << rgb[c]; } ss << ")"; } ss << ");" << endl; } return ss.str(); } class Prefilter : public Utility { public: int run(int argc, char **argv); MTS_DECLARE_UTILITY() }; MTS_NAMESPACE_END #endif
effect.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % EEEEE FFFFF FFFFF EEEEE CCCC TTTTT % % E F F E C T % % EEE FFF FFF EEE C T % % E F F E C T % % EEEEE F F EEEEE CCCC T % % % % % % MagickCore Image Effects Methods % % % % Software Design % % Cristy % % October 1996 % % % % % % Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/accelerate-private.h" #include "MagickCore/blob.h" #include "MagickCore/cache-view.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/constitute.h" #include "MagickCore/decorate.h" #include "MagickCore/distort.h" #include "MagickCore/draw.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/effect.h" #include "MagickCore/fx.h" #include "MagickCore/gem.h" #include "MagickCore/gem-private.h" #include "MagickCore/geometry.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/matrix.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/montage.h" #include "MagickCore/morphology.h" #include "MagickCore/morphology-private.h" #include "MagickCore/paint.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/property.h" #include "MagickCore/quantize.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/random_.h" #include "MagickCore/random-private.h" #include "MagickCore/resample.h" #include "MagickCore/resample-private.h" #include "MagickCore/resize.h" #include "MagickCore/resource_.h" #include "MagickCore/segment.h" #include "MagickCore/shear.h" #include "MagickCore/signature-private.h" #include "MagickCore/statistic.h" #include "MagickCore/string_.h" #include "MagickCore/thread-private.h" #include "MagickCore/transform.h" #include "MagickCore/threshold.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A d a p t i v e B l u r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AdaptiveBlurImage() 
adaptively blurs the image by blurring less
% intensely near image edges and more intensely far from edges.  We blur the
% image with a Gaussian operator of the given radius and standard deviation
% (sigma).  For reasonable results, radius should be larger than sigma.  Use a
% radius of 0 and AdaptiveBlurImage() selects a suitable radius for you.
%
% The format of the AdaptiveBlurImage method is:
%
%     Image *AdaptiveBlurImage(const Image *image,const double radius,
%       const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
%   o image: the image.
%
%   o radius: the radius of the Gaussian, in pixels, not counting the center
%     pixel.
%
%   o sigma: the standard deviation of the Laplacian, in pixels.
%
%   o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AdaptiveBlurImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
#define AdaptiveBlurImageTag  "Convolve/Image"
#define MagickSigma  (fabs(sigma) < MagickEpsilon ? MagickEpsilon : sigma)

  CacheView
    *blur_view,
    *edge_view,
    *image_view;

  double
    normalize,
    **kernel;

  Image
    *blur_image,
    *edge_image,
    *gaussian_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    j,
    k,
    u,
    v,
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  blur_image=CloneImage(image,0,0,MagickTrue,exception);
  if (blur_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    A (near) zero sigma is a no-op: return the unmodified clone.
  */
  if (fabs(sigma) < MagickEpsilon)
    return(blur_image);
  if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse)
    {
      blur_image=DestroyImage(blur_image);
      return((Image *) NULL);
    }
  /*
    Edge detect the image brightness channel, level, blur, and level again.
    The leveled edge image later selects, per pixel, how wide a blur kernel
    to apply: strong edges pick a small kernel, flat areas a large one.
  */
  edge_image=EdgeImage(image,radius,exception);
  if (edge_image == (Image *) NULL)
    {
      blur_image=DestroyImage(blur_image);
      return((Image *) NULL);
    }
  (void) AutoLevelImage(edge_image,exception);
  gaussian_image=BlurImage(edge_image,radius,sigma,exception);
  if (gaussian_image != (Image *) NULL)
    {
      edge_image=DestroyImage(edge_image);
      edge_image=gaussian_image;
    }
  (void) AutoLevelImage(edge_image,exception);
  /*
    Create a set of kernels from maximum (radius,sigma) to minimum.  Only
    even-indexed slots are populated; kernel[i] is a (width-i) x (width-i)
    normalized Gaussian.
  */
  width=GetOptimalKernelWidth2D(radius,sigma);
  kernel=(double **) MagickAssumeAligned(AcquireAlignedMemory((size_t) width,
    sizeof(*kernel)));
  if (kernel == (double **) NULL)
    {
      edge_image=DestroyImage(edge_image);
      blur_image=DestroyImage(blur_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  (void) memset(kernel,0,(size_t) width*sizeof(*kernel));
  for (i=0; i < (ssize_t) width; i+=2)
  {
    kernel[i]=(double *) MagickAssumeAligned(AcquireAlignedMemory(
      (size_t) (width-i),(width-i)*sizeof(**kernel)));
    if (kernel[i] == (double *) NULL)
      break;
    normalize=0.0;
    j=(ssize_t) (width-i-1)/2;
    k=0;
    for (v=(-j); v <= j; v++)
    {
      for (u=(-j); u <= j; u++)
      {
        kernel[i][k]=(double) (exp(-((double) u*u+v*v)/(2.0*MagickSigma*
          MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
        normalize+=kernel[i][k];
        k++;
      }
    }
    /*
      Fold any normalization shortfall into the center tap so the kernel
      sums to exactly 1.0.
    */
    kernel[i][(k-1)/2]+=(double) (1.0-normalize);
    if (sigma < MagickEpsilon)
      kernel[i][(k-1)/2]=1.0;
  }
  if (i < (ssize_t) width)
    {
      /*
        A row allocation failed: release all rows acquired so far.
      */
      for (i-=2; i >= 0; i-=2)
        kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]);
      kernel=(double **) RelinquishAlignedMemory(kernel);
      edge_image=DestroyImage(edge_image);
      blur_image=DestroyImage(blur_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Adaptively blur image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  edge_view=AcquireVirtualCacheView(edge_image,exception);
  blur_view=AcquireAuthenticCacheView(blur_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,blur_image,blur_image->rows,1)
#endif
  for (y=0; y < (ssize_t) blur_image->rows; y++)
  {
    register const Quantum
      *magick_restrict r;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    r=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
      exception);
    if ((r == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) blur_image->columns; x++)
    {
      register const Quantum
        *magick_restrict p;

      register ssize_t
        i;

      ssize_t
        center,
        j;

      /*
        Select the kernel slot from the edge intensity: bright (edge) pixels
        yield large j (small kernel), dark pixels small j (large kernel);
        j is clamped to [0,width] and forced even to match populated slots.
      */
      j=(ssize_t) ceil((double) width*(1.0-QuantumScale*
        GetPixelIntensity(edge_image,r))-0.5);
      if (j < 0)
        j=0;
      else
        if (j > (ssize_t) width)
          j=(ssize_t) width;
      if ((j & 0x01) != 0)
        j--;
      p=GetCacheViewVirtualPixels(image_view,x-((ssize_t) (width-j)/2L),y-
        (ssize_t) ((width-j)/2L),width-j,width-j,exception);
      if (p == (const Quantum *) NULL)
        break;
      center=(ssize_t) GetPixelChannels(image)*(width-j)*((width-j)/2L)+
        GetPixelChannels(image)*((width-j)/2);
      for (i=0; i < (ssize_t) GetPixelChannels(blur_image); i++)
      {
        double
          alpha,
          gamma,
          pixel;

        PixelChannel
          channel;

        PixelTrait
          blur_traits,
          traits;

        register const double
          *magick_restrict k;

        register const Quantum
          *magick_restrict pixels;

        register ssize_t
          u;

        ssize_t
          v;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        blur_traits=GetPixelChannelTraits(blur_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (blur_traits == UndefinedPixelTrait))
          continue;
        if ((blur_traits & CopyPixelTrait) != 0)
          {
            /*
              Channel is copy-through: take the center pixel unchanged.
            */
            SetPixelChannel(blur_image,channel,p[center+i],q);
            continue;
          }
        k=kernel[j];
        pixels=p;
        pixel=0.0;
        gamma=0.0;
        if ((blur_traits & BlendPixelTrait) == 0)
          {
            /*
              No alpha blending.
            */
            for (v=0; v < (ssize_t) (width-j); v++)
            {
              for (u=0; u < (ssize_t) (width-j); u++)
              {
                pixel+=(*k)*pixels[i];
                gamma+=(*k);
                k++;
                pixels+=GetPixelChannels(image);
              }
            }
            gamma=PerceptibleReciprocal(gamma);
            SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
            continue;
          }
        /*
          Alpha blending: weight each tap by the source pixel's alpha.
        */
        for (v=0; v < (ssize_t) (width-j); v++)
        {
          for (u=0; u < (ssize_t) (width-j); u++)
          {
            alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels));
            pixel+=(*k)*alpha*pixels[i];
            gamma+=(*k)*alpha;
            k++;
            pixels+=GetPixelChannels(image);
          }
        }
        gamma=PerceptibleReciprocal(gamma);
        SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      q+=GetPixelChannels(blur_image);
      r+=GetPixelChannels(edge_image);
    }
    if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,AdaptiveBlurImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  blur_image->type=image->type;
  blur_view=DestroyCacheView(blur_view);
  edge_view=DestroyCacheView(edge_view);
  image_view=DestroyCacheView(image_view);
  edge_image=DestroyImage(edge_image);
  for (i=0; i < (ssize_t) width; i+=2)
    kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]);
  kernel=(double **) RelinquishAlignedMemory(kernel);
  if (status == MagickFalse)
    blur_image=DestroyImage(blur_image);
  return(blur_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     A d a p t i v e   S h a r p e n   I m a g e                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AdaptiveSharpenImage() adaptively sharpens the image by sharpening more
% intensely near image edges and less intensely far from edges.
We sharpen the
% image with a Gaussian operator of the given radius and standard deviation
% (sigma).  For reasonable results, radius should be larger than sigma.  Use a
% radius of 0 and AdaptiveSharpenImage() selects a suitable radius for you.
%
% The format of the AdaptiveSharpenImage method is:
%
%     Image *AdaptiveSharpenImage(const Image *image,const double radius,
%       const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
%   o image: the image.
%
%   o radius: the radius of the Gaussian, in pixels, not counting the center
%     pixel.
%
%   o sigma: the standard deviation of the Laplacian, in pixels.
%
%   o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AdaptiveSharpenImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
#define AdaptiveSharpenImageTag  "Convolve/Image"
#define MagickSigma  (fabs(sigma) < MagickEpsilon ? MagickEpsilon : sigma)

  CacheView
    *sharp_view,
    *edge_view,
    *image_view;

  double
    normalize,
    **kernel;

  Image
    *sharp_image,
    *edge_image,
    *gaussian_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    j,
    k,
    u,
    v,
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  sharp_image=CloneImage(image,0,0,MagickTrue,exception);
  if (sharp_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    A (near) zero sigma is a no-op: return the unmodified clone.
  */
  if (fabs(sigma) < MagickEpsilon)
    return(sharp_image);
  if (SetImageStorageClass(sharp_image,DirectClass,exception) == MagickFalse)
    {
      sharp_image=DestroyImage(sharp_image);
      return((Image *) NULL);
    }
  /*
    Edge detect the image brightness channel, level, sharp, and level again.
    The leveled edge image later selects, per pixel, how wide a sharpening
    kernel to apply.
  */
  edge_image=EdgeImage(image,radius,exception);
  if (edge_image == (Image *) NULL)
    {
      sharp_image=DestroyImage(sharp_image);
      return((Image *) NULL);
    }
  (void) AutoLevelImage(edge_image,exception);
  gaussian_image=BlurImage(edge_image,radius,sigma,exception);
  if (gaussian_image != (Image *) NULL)
    {
      edge_image=DestroyImage(edge_image);
      edge_image=gaussian_image;
    }
  (void) AutoLevelImage(edge_image,exception);
  /*
    Create a set of kernels from maximum (radius,sigma) to minimum.  Only
    even-indexed slots are populated; kernel[i] is a (width-i) x (width-i)
    negative-Gaussian (Laplacian-like) sharpening kernel.
  */
  width=GetOptimalKernelWidth2D(radius,sigma);
  kernel=(double **) MagickAssumeAligned(AcquireAlignedMemory((size_t)
    width,sizeof(*kernel)));
  if (kernel == (double **) NULL)
    {
      edge_image=DestroyImage(edge_image);
      sharp_image=DestroyImage(sharp_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  (void) memset(kernel,0,(size_t) width*sizeof(*kernel));
  for (i=0; i < (ssize_t) width; i+=2)
  {
    kernel[i]=(double *) MagickAssumeAligned(AcquireAlignedMemory((size_t)
      (width-i),(width-i)*sizeof(**kernel)));
    if (kernel[i] == (double *) NULL)
      break;
    normalize=0.0;
    j=(ssize_t) (width-i-1)/2;
    k=0;
    for (v=(-j); v <= j; v++)
    {
      for (u=(-j); u <= j; u++)
      {
        kernel[i][k]=(double) (-exp(-((double) u*u+v*v)/(2.0*MagickSigma*
          MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
        normalize+=kernel[i][k];
        k++;
      }
    }
    /*
      Center tap is set so the kernel's taps sum to -normalize (i.e. the
      negative surround is balanced against a dominant positive center).
    */
    kernel[i][(k-1)/2]=(double) ((-2.0)*normalize);
    if (sigma < MagickEpsilon)
      kernel[i][(k-1)/2]=1.0;
  }
  if (i < (ssize_t) width)
    {
      /*
        A row allocation failed: release all rows acquired so far.
      */
      for (i-=2; i >= 0; i-=2)
        kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]);
      kernel=(double **) RelinquishAlignedMemory(kernel);
      edge_image=DestroyImage(edge_image);
      sharp_image=DestroyImage(sharp_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Adaptively sharpen image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  edge_view=AcquireVirtualCacheView(edge_image,exception);
  sharp_view=AcquireAuthenticCacheView(sharp_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,sharp_image,sharp_image->rows,1)
#endif
  for (y=0; y < (ssize_t) sharp_image->rows; y++)
  {
    register const Quantum
      *magick_restrict r;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    r=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(sharp_view,0,y,sharp_image->columns,1,
      exception);
    if ((r == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) sharp_image->columns; x++)
    {
      register const Quantum
        *magick_restrict p;

      register ssize_t
        i;

      ssize_t
        center,
        j;

      /*
        Select the kernel slot from the edge intensity; j is clamped to
        [0,width] and forced even to match the populated slots.
      */
      j=(ssize_t) ceil((double) width*(1.0-QuantumScale*
        GetPixelIntensity(edge_image,r))-0.5);
      if (j < 0)
        j=0;
      else
        if (j > (ssize_t) width)
          j=(ssize_t) width;
      if ((j & 0x01) != 0)
        j--;
      p=GetCacheViewVirtualPixels(image_view,x-((ssize_t) (width-j)/2L),y-
        (ssize_t) ((width-j)/2L),width-j,width-j,exception);
      if (p == (const Quantum *) NULL)
        break;
      center=(ssize_t) GetPixelChannels(image)*(width-j)*((width-j)/2L)+
        GetPixelChannels(image)*((width-j)/2);
      for (i=0; i < (ssize_t) GetPixelChannels(sharp_image); i++)
      {
        double
          alpha,
          gamma,
          pixel;

        PixelChannel
          channel;

        PixelTrait
          sharp_traits,
          traits;

        register const double
          *magick_restrict k;

        register const Quantum
          *magick_restrict pixels;

        register ssize_t
          u;

        ssize_t
          v;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        sharp_traits=GetPixelChannelTraits(sharp_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (sharp_traits == UndefinedPixelTrait))
          continue;
        if ((sharp_traits & CopyPixelTrait) != 0)
          {
            /*
              Channel is copy-through: take the center pixel unchanged.
            */
            SetPixelChannel(sharp_image,channel,p[center+i],q);
            continue;
          }
        k=kernel[j];
        pixels=p;
        pixel=0.0;
        gamma=0.0;
        if ((sharp_traits & BlendPixelTrait) == 0)
          {
            /*
              No alpha blending.
            */
            for (v=0; v < (ssize_t) (width-j); v++)
            {
              for (u=0; u < (ssize_t) (width-j); u++)
              {
                pixel+=(*k)*pixels[i];
                gamma+=(*k);
                k++;
                pixels+=GetPixelChannels(image);
              }
            }
            gamma=PerceptibleReciprocal(gamma);
            SetPixelChannel(sharp_image,channel,ClampToQuantum(gamma*pixel),q);
            continue;
          }
        /*
          Alpha blending: weight each tap by the source pixel's alpha.
        */
        for (v=0; v < (ssize_t) (width-j); v++)
        {
          for (u=0; u < (ssize_t) (width-j); u++)
          {
            alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels));
            pixel+=(*k)*alpha*pixels[i];
            gamma+=(*k)*alpha;
            k++;
            pixels+=GetPixelChannels(image);
          }
        }
        gamma=PerceptibleReciprocal(gamma);
        SetPixelChannel(sharp_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      q+=GetPixelChannels(sharp_image);
      r+=GetPixelChannels(edge_image);
    }
    if (SyncCacheViewAuthenticPixels(sharp_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,AdaptiveSharpenImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  sharp_image->type=image->type;
  sharp_view=DestroyCacheView(sharp_view);
  edge_view=DestroyCacheView(edge_view);
  image_view=DestroyCacheView(image_view);
  edge_image=DestroyImage(edge_image);
  for (i=0; i < (ssize_t) width; i+=2)
    kernel[i]=(double *) RelinquishAlignedMemory(kernel[i]);
  kernel=(double **) RelinquishAlignedMemory(kernel);
  if (status == MagickFalse)
    sharp_image=DestroyImage(sharp_image);
  return(sharp_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     B l u r   I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BlurImage() blurs an image.  We convolve the image with a Gaussian operator
% of the given radius and standard deviation (sigma).
For reasonable results, % the radius should be larger than sigma. Use a radius of 0 and BlurImage() % selects a suitable radius for you. % % The format of the BlurImage method is: % % Image *BlurImage(const Image *image,const double radius, % const double sigma,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the Gaussian, in pixels, not counting the center % pixel. % % o sigma: the standard deviation of the Gaussian, in pixels. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *BlurImage(const Image *image,const double radius, const double sigma,ExceptionInfo *exception) { char geometry[MagickPathExtent]; KernelInfo *kernel_info; Image *blur_image; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); #if defined(MAGICKCORE_OPENCL_SUPPORT) blur_image=AccelerateBlurImage(image,radius,sigma,exception); if (blur_image != (Image *) NULL) return(blur_image); #endif (void) FormatLocaleString(geometry,MagickPathExtent, "blur:%.20gx%.20g;blur:%.20gx%.20g+90",radius,sigma,radius,sigma); kernel_info=AcquireKernelInfo(geometry,exception); if (kernel_info == (KernelInfo *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); blur_image=ConvolveImage(image,kernel_info,exception); kernel_info=DestroyKernelInfo(kernel_info); return(blur_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o n v o l v e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ConvolveImage() applies a custom convolution kernel to the image. 
%
% The format of the ConvolveImage method is:
%
%     Image *ConvolveImage(const Image *image,const KernelInfo *kernel,
%       ExceptionInfo *exception)
%
% A description of each parameter follows:
%
%   o image: the image.
%
%   o kernel: the filtering kernel.
%
%   o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ConvolveImage(const Image *image,
  const KernelInfo *kernel_info,ExceptionInfo *exception)
{
  Image
    *result;

#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /*
    Try the OpenCL-accelerated convolution first.
  */
  result=AccelerateConvolveImage(image,kernel_info,exception);
  if (result != (Image *) NULL)
    return(result);
#endif
  /*
    Fall back to the generic morphology engine with a single iteration.
  */
  result=MorphologyImage(image,ConvolveMorphology,1,kernel_info,exception);
  return(result);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     D e s p e c k l e   I m a g e                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DespeckleImage() reduces the speckle noise in an image while preserving the
% edges of the original image.  A speckle removing filter uses a complementary
% hulling technique (raising pixels that are darker than their surrounding
% neighbors, then complementarily lowering pixels that are brighter than their
% surrounding neighbors) to reduce the speckle index of that image (reference
% Crimmins speckle removal).
%
% The format of the DespeckleImage method is:
%
%     Image *DespeckleImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
%   o image: the image.
%
%   o exception: return any errors or warnings in this structure.
%
*/

/*
  Hull() performs one pass of the Crimmins complementary hulling step in the
  direction (x_offset,y_offset).  Both f and g are (columns+2) x (rows+2)
  buffers with a one-pixel border.  With polarity > 0 pixels are raised by one
  step toward a brighter neighbor; otherwise they are lowered toward a darker
  neighbor.  The first loop writes f -> g, the second writes g -> f, so the
  net result is accumulated back in f.
*/
static void Hull(const Image *image,const ssize_t x_offset,
  const ssize_t y_offset,const size_t columns,const size_t rows,
  const int polarity,Quantum *magick_restrict f,Quantum *magick_restrict g)
{
  register Quantum
    *p,
    *q,
    *r,
    *s;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(f != (Quantum *) NULL);
  assert(g != (Quantum *) NULL);
  /*
    p/q skip the one-pixel top border; r is p shifted by the hull offset.
  */
  p=f+(columns+2);
  q=g+(columns+2);
  r=p+(y_offset*((ssize_t) columns+2)+x_offset);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) \
    magick_number_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    MagickRealType
      v;

    register ssize_t
      i,
      x;

    /*
      i indexes past the left border of row y in the padded buffer.
    */
    i=(2*y+1)+y*columns;
    if (polarity > 0)
      for (x=0; x < (ssize_t) columns; x++)
      {
        v=(MagickRealType) p[i];
        if ((MagickRealType) r[i] >= (v+ScaleCharToQuantum(2)))
          v+=ScaleCharToQuantum(1);
        q[i]=(Quantum) v;
        i++;
      }
    else
      for (x=0; x < (ssize_t) columns; x++)
      {
        v=(MagickRealType) p[i];
        if ((MagickRealType) r[i] <= (v-ScaleCharToQuantum(2)))
          v-=ScaleCharToQuantum(1);
        q[i]=(Quantum) v;
        i++;
      }
  }
  /*
    Second pass: compare against both the forward (r) and backward (s)
    offset neighbors of the intermediate buffer, writing back into f.
  */
  p=f+(columns+2);
  q=g+(columns+2);
  r=q+(y_offset*((ssize_t) columns+2)+x_offset);
  s=q-(y_offset*((ssize_t) columns+2)+x_offset);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) \
    magick_number_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    register ssize_t
      i,
      x;

    MagickRealType
      v;

    i=(2*y+1)+y*columns;
    if (polarity > 0)
      for (x=0; x < (ssize_t) columns; x++)
      {
        v=(MagickRealType) q[i];
        if (((MagickRealType) s[i] >= (v+ScaleCharToQuantum(2))) &&
            ((MagickRealType) r[i] > v))
          v+=ScaleCharToQuantum(1);
        p[i]=(Quantum) v;
        i++;
      }
    else
      for (x=0; x < (ssize_t) columns; x++)
      {
        v=(MagickRealType) q[i];
        if (((MagickRealType) s[i] <= (v-ScaleCharToQuantum(2))) &&
            ((MagickRealType) r[i] < v))
          v-=ScaleCharToQuantum(1);
        p[i]=(Quantum) v;
        i++;
      }
  }
}

MagickExport
Image *DespeckleImage(const Image *image,ExceptionInfo *exception)
{
#define DespeckleImageTag  "Despeckle/Image"

  CacheView
    *despeckle_view,
    *image_view;

  Image
    *despeckle_image;

  MagickBooleanType
    status;

  MemoryInfo
    *buffer_info,
    *pixel_info;

  Quantum
    *magick_restrict buffer,
    *magick_restrict pixels;

  register ssize_t
    i;

  size_t
    length;

  /*
    The four hulling directions: horizontal, vertical, and both diagonals.
  */
  static const ssize_t
    X[4] = {0, 1, 1,-1},
    Y[4] = {1, 0, 1, 1};

  /*
    Allocate despeckled image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  despeckle_image=AccelerateDespeckleImage(image,exception);
  if (despeckle_image != (Image *) NULL)
    return(despeckle_image);
#endif
  despeckle_image=CloneImage(image,0,0,MagickTrue,exception);
  if (despeckle_image == (Image *) NULL)
    return((Image *) NULL);
  status=SetImageStorageClass(despeckle_image,DirectClass,exception);
  if (status == MagickFalse)
    {
      despeckle_image=DestroyImage(despeckle_image);
      return((Image *) NULL);
    }
  /*
    Allocate image buffer.  Both working buffers carry a one-pixel border on
    every side, hence the +2 in each dimension.
  */
  length=(size_t) ((image->columns+2)*(image->rows+2));
  pixel_info=AcquireVirtualMemory(length,sizeof(*pixels));
  buffer_info=AcquireVirtualMemory(length,sizeof(*buffer));
  if ((pixel_info == (MemoryInfo *) NULL) ||
      (buffer_info == (MemoryInfo *) NULL))
    {
      if (buffer_info != (MemoryInfo *) NULL)
        buffer_info=RelinquishVirtualMemory(buffer_info);
      if (pixel_info != (MemoryInfo *) NULL)
        pixel_info=RelinquishVirtualMemory(pixel_info);
      despeckle_image=DestroyImage(despeckle_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  pixels=(Quantum *) GetVirtualMemoryBlob(pixel_info);
  buffer=(Quantum *) GetVirtualMemoryBlob(buffer_info);
  /*
    Reduce speckle in the image.  Each channel is processed independently:
    copy the channel into the padded buffer, hull it in all four directions
    with both polarities, then write the result back.
  */
  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);
  despeckle_view=AcquireAuthenticCacheView(despeckle_image,exception);
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    PixelChannel
      channel;

    PixelTrait
      despeckle_traits,
      traits;

    register ssize_t
      k,
      x;

    ssize_t
      j,
      y;

    if (status == MagickFalse)
      continue;
    channel=GetPixelChannelChannel(image,i);
    traits=GetPixelChannelTraits(image,channel);
    despeckle_traits=GetPixelChannelTraits(despeckle_image,channel);
    if ((traits == UndefinedPixelTrait) ||
        (despeckle_traits == UndefinedPixelTrait))
      continue;
    if ((despeckle_traits & CopyPixelTrait) != 0)
      continue;
    /*
      Extract channel i into the zero-initialized padded buffer; j skips the
      border cells as it advances.
    */
    (void) memset(pixels,0,length*sizeof(*pixels));
    j=(ssize_t) image->columns+2;
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      register const Quantum
        *magick_restrict p;

      p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      j++;
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        pixels[j++]=p[i];
        p+=GetPixelChannels(image);
      }
      j++;
    }
    (void) memset(buffer,0,length*sizeof(*buffer));
    for (k=0; k < 4; k++)
    {
      Hull(image,X[k],Y[k],image->columns,image->rows,1,pixels,buffer);
      Hull(image,-X[k],-Y[k],image->columns,image->rows,1,pixels,buffer);
      Hull(image,-X[k],-Y[k],image->columns,image->rows,-1,pixels,buffer);
      Hull(image,X[k],Y[k],image->columns,image->rows,-1,pixels,buffer);
    }
    /*
      Write the hulled channel back into the despeckled image.
    */
    j=(ssize_t) image->columns+2;
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      MagickBooleanType
        sync;

      register Quantum
        *magick_restrict q;

      q=GetCacheViewAuthenticPixels(despeckle_view,0,y,despeckle_image->columns,
        1,exception);
      if (q == (Quantum *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      j++;
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        SetPixelChannel(despeckle_image,channel,pixels[j++],q);
        q+=GetPixelChannels(despeckle_image);
      }
      sync=SyncCacheViewAuthenticPixels(despeckle_view,exception);
      if (sync == MagickFalse)
        status=MagickFalse;
      j++;
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,DespeckleImageTag,(MagickOffsetType) i,
          GetPixelChannels(image));
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  despeckle_view=DestroyCacheView(despeckle_view);
  image_view=DestroyCacheView(image_view);
  buffer_info=RelinquishVirtualMemory(buffer_info);
  pixel_info=RelinquishVirtualMemory(pixel_info);
  despeckle_image->type=image->type;
  if (status == MagickFalse)
    despeckle_image=DestroyImage(despeckle_image);
  return(despeckle_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     E d g e   I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EdgeImage() finds edges in an image.  Radius defines the radius of the
% convolution filter.  Use a radius of 0 and EdgeImage() selects a suitable
% radius for you.
%
% The format of the EdgeImage method is:
%
%     Image *EdgeImage(const Image *image,const double radius,
%       ExceptionInfo *exception)
%
% A description of each parameter follows:
%
%   o image: the image.
%
%   o radius: the radius of the pixel neighborhood.
%
%   o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *EdgeImage(const Image *image,const double radius,
  ExceptionInfo *exception)
{
  Image
    *edge_image;

  KernelInfo
    *kernel_info;

  size_t
    width;

  ssize_t
    n,
    taps;

  /*
    Validate the input image and exception structure.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Build a width x width edge-detection kernel by hand: every tap is -1.0
    except the center, which is (width*width - 1) so the taps sum to zero.
  */
  width=GetOptimalKernelWidth1D(radius,0.5);
  kernel_info=AcquireKernelInfo((const char *) NULL,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  (void) memset(kernel_info,0,sizeof(*kernel_info));
  kernel_info->width=width;
  kernel_info->height=width;
  kernel_info->x=(ssize_t) (kernel_info->width-1)/2;
  kernel_info->y=(ssize_t) (kernel_info->height-1)/2;
  kernel_info->signature=MagickCoreSignature;
  kernel_info->values=(MagickRealType *) MagickAssumeAligned(
    AcquireAlignedMemory(kernel_info->width,kernel_info->height*
    sizeof(*kernel_info->values)));
  if (kernel_info->values == (MagickRealType *) NULL)
    {
      kernel_info=DestroyKernelInfo(kernel_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  taps=(ssize_t) (kernel_info->width*kernel_info->height);
  for (n=0; n < taps; n++)
    kernel_info->values[n]=(-1.0);
  kernel_info->values[taps/2]=(double) kernel_info->width*
    kernel_info->height-1.0;
  edge_image=ConvolveImage(image,kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  return(edge_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     E m b o s s   I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EmbossImage() returns a grayscale image with a three-dimensional effect.
% We convolve the image with a Gaussian operator of the given radius and
% standard deviation (sigma).
For reasonable results, radius should be
% larger than sigma.  Use a radius of 0 and Emboss() selects a suitable
% radius for you.
%
% The format of the EmbossImage method is:
%
%     Image *EmbossImage(const Image *image,const double radius,
%       const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
%   o image: the image.
%
%   o radius: the radius of the pixel neighborhood.
%
%   o sigma: the standard deviation of the Gaussian, in pixels.
%
%   o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *EmbossImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  double
    gamma,
    normalize;

  Image
    *emboss_image;

  KernelInfo
    *kernel_info;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    j,
    k,
    u,
    v;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  width=GetOptimalKernelWidth1D(radius,sigma);
  kernel_info=AcquireKernelInfo((const char *) NULL,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  kernel_info->width=width;
  kernel_info->height=width;
  kernel_info->x=(ssize_t) (width-1)/2;
  kernel_info->y=(ssize_t) (width-1)/2;
  kernel_info->values=(MagickRealType *) MagickAssumeAligned(
    AcquireAlignedMemory(kernel_info->width,kernel_info->width*
    sizeof(*kernel_info->values)));
  if (kernel_info->values == (MagickRealType *) NULL)
    {
      kernel_info=DestroyKernelInfo(kernel_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Build the emboss kernel: Gaussian-weighted taps, positive in one corner
    direction and negative in the other, zeroed off the k-th anti-diagonal
    so only a single slanted line of taps remains.
  */
  j=(ssize_t) (kernel_info->width-1)/2;
  k=j;
  i=0;
  for (v=(-j); v <= j; v++)
  {
    for (u=(-j); u <= j; u++)
    {
      kernel_info->values[i]=(MagickRealType) (((u < 0) || (v < 0) ?
        -8.0 : 8.0)*exp(-((double) u*u+v*v)/(2.0*MagickSigma*MagickSigma))/
        (2.0*MagickPI*MagickSigma*MagickSigma));
      if (u != k)
        kernel_info->values[i]=0.0;
      i++;
    }
    k--;
  }
  /*
    Normalize so the kernel taps sum to 1.0 (PerceptibleReciprocal guards
    against a zero sum).
  */
  normalize=0.0;
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    normalize+=kernel_info->values[i];
  gamma=PerceptibleReciprocal(normalize);
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    kernel_info->values[i]*=gamma;
  emboss_image=ConvolveImage(image,kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  if (emboss_image != (Image *) NULL)
    (void) EqualizeImage(emboss_image,exception);
  return(emboss_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     G a u s s i a n B l u r I m a g e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GaussianBlurImage() blurs an image.  We convolve the image with a
% Gaussian operator of the given radius and standard deviation (sigma).
% For reasonable results, the radius should be larger than sigma.  Use a
% radius of 0 and GaussianBlurImage() selects a suitable radius for you
%
% The format of the GaussianBlurImage method is:
%
%     Image *GaussianBlurImage(const Image *image,const double radius,
%       const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
%   o image: the image.
%
%   o radius: the radius of the Gaussian, in pixels, not counting the center
%     pixel.
%
%   o sigma: the standard deviation of the Gaussian, in pixels.
%
%   o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *GaussianBlurImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  char
    kernel_geometry[MagickPathExtent];

  Image
    *blur_image;

  KernelInfo
    *gaussian_kernel;

  /*
    Validate the input image and exception structure.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Convolve with a full 2-D Gaussian kernel described by its geometry
    string.
  */
  (void) FormatLocaleString(kernel_geometry,MagickPathExtent,
    "gaussian:%.20gx%.20g",radius,sigma);
  gaussian_kernel=AcquireKernelInfo(kernel_geometry,exception);
  if (gaussian_kernel == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  blur_image=ConvolveImage(image,gaussian_kernel,exception);
  gaussian_kernel=DestroyKernelInfo(gaussian_kernel);
  return(blur_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     K u w a h a r a I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% KuwaharaImage() is an edge preserving noise reduction filter.
%
% The format of the KuwaharaImage method is:
%
%     Image *KuwaharaImage(const Image *image,const double radius,
%       const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
%   o image: the image.
%
%   o radius: the square window radius.
%
%   o sigma: the standard deviation of the Gaussian, in pixels.
%
%   o exception: return any errors or warnings in this structure.
%
*/
static inline MagickRealType GetMeanLuma(const Image *magick_restrict image,
  const double *magick_restrict pixel)
{
  /* Luma from R,G,B channel values using Rec. 709 coefficients. */
  return(0.212656f*pixel[image->channel_map[RedPixelChannel].offset]+
    0.715158f*pixel[image->channel_map[GreenPixelChannel].offset]+
    0.072186f*pixel[image->channel_map[BluePixelChannel].offset]);  /* Rec709 */
}

MagickExport Image *KuwaharaImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
#define KuwaharaImageTag  "Kuwahara/Image"

  CacheView
    *image_view,
    *kuwahara_view;

  Image
    *gaussian_image,
    *kuwahara_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  size_t
    width;

  ssize_t
    y;

  /*
    Initialize Kuwahara image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* Each of the four sampling quadrants is a width x width square. */
  width=(size_t) radius+1;
  /* Pre-smooth; variances are then measured on the blurred image. */
  gaussian_image=BlurImage(image,radius,sigma,exception);
  if (gaussian_image == (Image *) NULL)
    return((Image *) NULL);
  kuwahara_image=CloneImage(image,0,0,MagickTrue,exception);
  if (kuwahara_image == (Image *) NULL)
    {
      gaussian_image=DestroyImage(gaussian_image);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(kuwahara_image,DirectClass,exception) == MagickFalse)
    {
      gaussian_image=DestroyImage(gaussian_image);
      kuwahara_image=DestroyImage(kuwahara_image);
      return((Image *) NULL);
    }
  /*
    Edge preserving noise reduction filter.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(gaussian_image,exception);
  kuwahara_view=AcquireAuthenticCacheView(kuwahara_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,kuwahara_image,gaussian_image->rows,1)
#endif
  for (y=0; y < (ssize_t) gaussian_image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(kuwahara_view,0,y,kuwahara_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) gaussian_image->columns; x++)
    {
      const Quantum
        *magick_restrict p;

      double
        min_variance;

      RectangleInfo
        quadrant,
        target;

      register size_t
        i;

      min_variance=MagickMaximumValue;
      SetGeometry(gaussian_image,&target);
      quadrant.width=width;
      quadrant.height=width;
      /*
        Examine the four overlapping quadrants around (x,y); keep the one
        whose luma variance is smallest.
      */
      for (i=0; i < 4; i++)
      {
        const Quantum
          *magick_restrict k;

        double
          mean[MaxPixelChannels],
          variance;

        register ssize_t
          n;

        ssize_t
          j;

        quadrant.x=x;
        quadrant.y=y;
        switch (i)
        {
          case 0:
          {
            /* upper-left quadrant */
            quadrant.x=x-(ssize_t) (width-1);
            quadrant.y=y-(ssize_t) (width-1);
            break;
          }
          case 1:
          {
            /* upper-right quadrant */
            quadrant.y=y-(ssize_t) (width-1);
            break;
          }
          case 2:
          {
            /* lower-left quadrant */
            quadrant.x=x-(ssize_t) (width-1);
            break;
          }
          case 3:
          default:
            break;
        }
        p=GetCacheViewVirtualPixels(image_view,quadrant.x,quadrant.y,
          quadrant.width,quadrant.height,exception);
        if (p == (const Quantum *) NULL)
          break;
        /* Per-channel mean over the quadrant. */
        for (j=0; j < (ssize_t) GetPixelChannels(gaussian_image); j++)
          mean[j]=0.0;
        k=p;
        for (n=0; n < (ssize_t) (width*width); n++)
        {
          for (j=0; j < (ssize_t) GetPixelChannels(gaussian_image); j++)
            mean[j]+=(double) k[j];
          k+=GetPixelChannels(gaussian_image);
        }
        for (j=0; j < (ssize_t) GetPixelChannels(gaussian_image); j++)
          mean[j]/=(double) (width*width);
        /* Luma variance of the quadrant about its mean luma. */
        k=p;
        variance=0.0;
        for (n=0; n < (ssize_t) (width*width); n++)
        {
          double
            luma;

          luma=GetPixelLuma(gaussian_image,k);
          variance+=(luma-GetMeanLuma(gaussian_image,mean))*
            (luma-GetMeanLuma(gaussian_image,mean));
          k+=GetPixelChannels(gaussian_image);
        }
        if (variance < min_variance)
          {
            min_variance=variance;
            target=quadrant;
          }
      }
      /* i < 4 means a pixel fetch failed above; abort this row. */
      if (i < 4)
        {
          status=MagickFalse;
          break;
        }
      /* Write the pixel interpolated at the winning quadrant's center. */
      status=InterpolatePixelChannels(gaussian_image,image_view,kuwahara_image,
        UndefinedInterpolatePixel,(double) target.x+target.width/2.0,(double)
        target.y+target.height/2.0,q,exception);
      if (status == MagickFalse)
        break;
      q+=GetPixelChannels(kuwahara_image);
    }
    if (SyncCacheViewAuthenticPixels(kuwahara_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,KuwaharaImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  kuwahara_view=DestroyCacheView(kuwahara_view);
  image_view=DestroyCacheView(image_view);
  gaussian_image=DestroyImage(gaussian_image);
  if (status == MagickFalse)
    kuwahara_image=DestroyImage(kuwahara_image);
  return(kuwahara_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     L o c a l C o n t r a s t I m a g e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  LocalContrastImage() attempts to increase the appearance of large-scale
%  light-dark transitions.  Local contrast enhancement works similarly to
%  sharpening with an unsharp mask, however the mask is instead created using
%  an image with a greater blur distance.
%
%  The format of the LocalContrastImage method is:
%
%      Image *LocalContrastImage(const Image *image, const double radius,
%        const double strength,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the Gaussian blur, in percentage with 100%
%      resulting in a blur radius of 20% of largest dimension.
%
%    o strength: the strength of the blur mask in percentage.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *LocalContrastImage(const Image *image,const double radius,
  const double strength,ExceptionInfo *exception)
{
#define LocalContrastImageTag  "LocalContrast/Image"

  CacheView
    *image_view,
    *contrast_view;

  float
    *interImage,
    *scanLinePixels,
    totalWeight;

  Image
    *contrast_image;

  MagickBooleanType
    status;

  MemoryInfo
    *scanLinePixels_info,
    *interImage_info;

  ssize_t
    scanLineSize,
    width;

  /*
    Initialize contrast image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* Prefer the OpenCL-accelerated path when it is available and succeeds. */
  contrast_image=AccelerateLocalContrastImage(image,radius,strength,exception);
  if (contrast_image != (Image *) NULL)
    return(contrast_image);
#endif
  contrast_image=CloneImage(image,0,0,MagickTrue,exception);
  if (contrast_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(contrast_image,DirectClass,exception) == MagickFalse)
    {
      contrast_image=DestroyImage(contrast_image);
      return((Image *) NULL);
    }
  image_view=AcquireVirtualCacheView(image,exception);
  contrast_view=AcquireAuthenticCacheView(contrast_image,exception);
  /*
    The blur radius is a percentage of the largest dimension; each scanline
    buffer is padded by `width' on both ends.  One scanline buffer is
    allocated per OpenMP thread.
  */
  scanLineSize=(ssize_t) MagickMax(image->columns,image->rows);
  width=(ssize_t) scanLineSize*0.002f*fabs(radius);
  scanLineSize+=(2*width);
  scanLinePixels_info=AcquireVirtualMemory((size_t) GetOpenMPMaximumThreads()*
    scanLineSize,sizeof(*scanLinePixels));
  if (scanLinePixels_info == (MemoryInfo *) NULL)
    {
      contrast_view=DestroyCacheView(contrast_view);
      image_view=DestroyCacheView(image_view);
      contrast_image=DestroyImage(contrast_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  scanLinePixels=(float *) GetVirtualMemoryBlob(scanLinePixels_info);
  /*
    Create intermediate buffer.
  */
  interImage_info=AcquireVirtualMemory(image->rows*(image->columns+(2*width)),
    sizeof(*interImage));
  if (interImage_info == (MemoryInfo *) NULL)
    {
      scanLinePixels_info=RelinquishVirtualMemory(scanLinePixels_info);
      contrast_view=DestroyCacheView(contrast_view);
      image_view=DestroyCacheView(image_view);
      contrast_image=DestroyImage(contrast_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  interImage=(float *) GetVirtualMemoryBlob(interImage_info);
  totalWeight=(float) ((width+1)*(width+1));
  /*
    Vertical pass.
  */
  status=MagickTrue;
  {
    ssize_t
      x;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp parallel for schedule(static) \
      magick_number_threads(image,image,image->columns,1)
#endif
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      const int
        id = GetOpenMPThreadId();

      const Quantum
        *magick_restrict p;

      float
        *out,
        *pix,
        *pixels;

      register ssize_t
        y;

      ssize_t
        i;

      if (status == MagickFalse)
        continue;
      /* Select this thread's private scanline buffer. */
      pixels=scanLinePixels;
      pixels+=id*scanLineSize;
      pix=pixels;
      p=GetCacheViewVirtualPixels(image_view,x,-width,1,image->rows+(2*width),
        exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      /* Copy the padded column's luma into the scanline buffer. */
      for (y=0; y < (ssize_t) image->rows+(2*width); y++)
      {
        *pix++=(float)GetPixelLuma(image,p);
        p+=image->number_channels;
      }
      out=interImage+x+width;
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        float
          sum,
          weight;

        /* Triangular-weighted sum over the 2*width window. */
        weight=1.0f;
        sum=0;
        pix=pixels+y;
        for (i=0; i < width; i++)
        {
          sum+=weight*(*pix++);
          weight+=1.0f;
        }
        for (i=width+1; i < (2*width); i++)
        {
          sum+=weight*(*pix++);
          weight-=1.0f;
        }
        /* write to output */
        *out=sum/totalWeight;
        /* mirror into padding */
        if (x <= width && x != 0)
          *(out-(x*2))=*out;
        if ((x > (ssize_t) image->columns-width-2) &&
            (x != (ssize_t) image->columns-1))
          *(out+((image->columns-x-1)*2))=*out;
        out+=image->columns+(width*2);
      }
    }
  }
  /*
    Horizontal pass.
  */
  {
    ssize_t
      y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp parallel for schedule(static) \
      magick_number_threads(image,image,image->rows,1)
#endif
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      const int
        id = GetOpenMPThreadId();

      const Quantum
        *magick_restrict p;

      float
        *pix,
        *pixels;

      register Quantum
        *magick_restrict q;

      register ssize_t
        x;

      ssize_t
        i;

      if (status == MagickFalse)
        continue;
      pixels=scanLinePixels;
      pixels+=id*scanLineSize;
      p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
      q=GetCacheViewAuthenticPixels(contrast_view,0,y,image->columns,1,
        exception);
      if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
        {
          status=MagickFalse;
          continue;
        }
      /* Pull the vertically-blurred row (with padding) into the buffer. */
      memcpy(pixels,interImage+(y*(image->columns+(2*width))),(image->columns+
        (2*width))*sizeof(float));
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        float
          mult,
          srcVal,
          sum,
          weight;

        PixelTrait
          traits;

        /* Same triangular-weighted window as in the vertical pass. */
        weight=1.0f;
        sum=0;
        pix=pixels+x;
        for (i=0; i < width; i++)
        {
          sum+=weight*(*pix++);
          weight+=1.0f;
        }
        for (i=width+1; i < (2*width); i++)
        {
          sum+=weight*(*pix++);
          weight-=1.0f;
        }
        /* Apply and write */
        srcVal=(float) GetPixelLuma(image,p);
        mult=(srcVal-(sum/totalWeight))*(strength/100.0f);
        mult=(srcVal+mult)/srcVal;
        traits=GetPixelChannelTraits(image,RedPixelChannel);
        if ((traits & UpdatePixelTrait) != 0)
          SetPixelRed(contrast_image,ClampToQuantum(GetPixelRed(image,p)*mult),
            q);
        traits=GetPixelChannelTraits(image,GreenPixelChannel);
        if ((traits & UpdatePixelTrait) != 0)
          SetPixelGreen(contrast_image,ClampToQuantum(GetPixelGreen(image,p)*
            mult),q);
        traits=GetPixelChannelTraits(image,BluePixelChannel);
        if ((traits & UpdatePixelTrait) != 0)
          SetPixelBlue(contrast_image,ClampToQuantum(GetPixelBlue(image,p)*
            mult),q);
        p+=image->number_channels;
        q+=contrast_image->number_channels;
      }
      if (SyncCacheViewAuthenticPixels(contrast_view,exception) == MagickFalse)
        status=MagickFalse;
    }
  }
  scanLinePixels_info=RelinquishVirtualMemory(scanLinePixels_info);
  interImage_info=RelinquishVirtualMemory(interImage_info);
  contrast_view=DestroyCacheView(contrast_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    contrast_image=DestroyImage(contrast_image);
  return(contrast_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     M o t i o n B l u r I m a g e                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MotionBlurImage() simulates motion blur.  We convolve the image with a
%  Gaussian operator of the given radius and standard deviation (sigma).
%  For reasonable results, radius should be larger than sigma.  Use a
%  radius of 0 and MotionBlurImage() selects a suitable radius for you.
%  Angle gives the angle of the blurring motion.
%
%  Andrew Protano contributed this effect.
%
%  The format of the MotionBlurImage method is:
%
%    Image *MotionBlurImage(const Image *image,const double radius,
%      const double sigma,const double angle,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the Gaussian, in pixels, not counting
%      the center pixel.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o angle: Apply the effect along this angle.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickRealType *GetMotionBlurKernel(const size_t width,
  const double sigma)
{
  MagickRealType
    *kernel,
    normalize;

  register ssize_t
    i;

  /*
    Generate a 1-D convolution kernel.
*/
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  kernel=(MagickRealType *) MagickAssumeAligned(AcquireAlignedMemory((size_t)
    width,sizeof(*kernel)));
  if (kernel == (MagickRealType *) NULL)
    return(kernel);
  /* One-sided Gaussian taps, normalized so they sum to 1. */
  normalize=0.0;
  for (i=0; i < (ssize_t) width; i++)
  {
    kernel[i]=(MagickRealType) (exp((-((double) i*i)/(double) (2.0*MagickSigma*
      MagickSigma)))/(MagickSQ2PI*MagickSigma));
    normalize+=kernel[i];
  }
  for (i=0; i < (ssize_t) width; i++)
    kernel[i]/=normalize;
  return(kernel);
}

MagickExport Image *MotionBlurImage(const Image *image,const double radius,
  const double sigma,const double angle,ExceptionInfo *exception)
{
#define BlurImageTag  "Blur/Image"

  CacheView
    *blur_view,
    *image_view,
    *motion_view;

  Image
    *blur_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickRealType
    *kernel;

  OffsetInfo
    *offset;

  PointInfo
    point;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  width=GetOptimalKernelWidth1D(radius,sigma);
  kernel=GetMotionBlurKernel(width,sigma);
  if (kernel == (MagickRealType *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  offset=(OffsetInfo *) AcquireQuantumMemory(width,sizeof(*offset));
  if (offset == (OffsetInfo *) NULL)
    {
      kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Precompute the integer pixel offsets that walk along the motion angle;
    tap i of the 1-D kernel is applied at offset[i].
  */
  point.x=(double) width*sin(DegreesToRadians(angle));
  point.y=(double) width*cos(DegreesToRadians(angle));
  for (i=0; i < (ssize_t) width; i++)
  {
    offset[i].x=(ssize_t) ceil((double) (i*point.y)/hypot(point.x,point.y)-0.5);
    offset[i].y=(ssize_t) ceil((double) (i*point.x)/hypot(point.x,point.y)-0.5);
  }
  /*
    Motion blur image.
  */
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  blur_image=AccelerateMotionBlurImage(image,kernel,width,offset,exception);
  if (blur_image != (Image *) NULL)
    {
      kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
      offset=(OffsetInfo *) RelinquishMagickMemory(offset);
      return(blur_image);
    }
#endif
  blur_image=CloneImage(image,0,0,MagickTrue,exception);
  if (blur_image == (Image *) NULL)
    {
      kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
      offset=(OffsetInfo *) RelinquishMagickMemory(offset);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse)
    {
      kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
      offset=(OffsetInfo *) RelinquishMagickMemory(offset);
      blur_image=DestroyImage(blur_image);
      return((Image *) NULL);
    }
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  motion_view=AcquireVirtualCacheView(image,exception);
  blur_view=AcquireAuthenticCacheView(blur_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,blur_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          alpha,
          gamma,
          pixel;

        PixelChannel
          channel;

        PixelTrait
          blur_traits,
          traits;

        register const Quantum
          *magick_restrict r;

        register MagickRealType
          *magick_restrict k;

        register ssize_t
          j;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        blur_traits=GetPixelChannelTraits(blur_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (blur_traits == UndefinedPixelTrait))
          continue;
        if ((blur_traits & CopyPixelTrait) != 0)
          {
            SetPixelChannel(blur_image,channel,p[i],q);
            continue;
          }
        k=kernel;
        pixel=0.0;
        if ((blur_traits & BlendPixelTrait) == 0)
          {
            /*
              Unblended channel: straight weighted sum of the samples along
              the motion vector.
            */
            for (j=0; j < (ssize_t) width; j++)
            {
              r=GetCacheViewVirtualPixels(motion_view,x+offset[j].x,y+
                offset[j].y,1,1,exception);
              if (r == (const Quantum *) NULL)
                {
                  status=MagickFalse;
                  continue;
                }
              pixel+=(*k)*r[i];
              k++;
            }
            SetPixelChannel(blur_image,channel,ClampToQuantum(pixel),q);
            continue;
          }
        /*
          Blended channel: weight each sample by its alpha, then divide by
          the accumulated alpha-weight (gamma).
        */
        alpha=0.0;
        gamma=0.0;
        for (j=0; j < (ssize_t) width; j++)
        {
          r=GetCacheViewVirtualPixels(motion_view,x+offset[j].x,y+offset[j].y,1,
            1,exception);
          if (r == (const Quantum *) NULL)
            {
              status=MagickFalse;
              continue;
            }
          alpha=(double) (QuantumScale*GetPixelAlpha(image,r));
          pixel+=(*k)*alpha*r[i];
          gamma+=(*k)*alpha;
          k++;
        }
        gamma=PerceptibleReciprocal(gamma);
        SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(blur_image);
    }
    if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,BlurImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  blur_view=DestroyCacheView(blur_view);
  motion_view=DestroyCacheView(motion_view);
  image_view=DestroyCacheView(image_view);
  kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
  offset=(OffsetInfo *) RelinquishMagickMemory(offset);
  if (status == MagickFalse)
    blur_image=DestroyImage(blur_image);
  return(blur_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     P r e v i e w I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
PreviewImage() tiles 9 thumbnails of the specified image with an image
%  processing operation applied with varying parameters.  This may be helpful
%  pin-pointing an appropriate parameter for a particular image processing
%  operation.
%
%  The format of the PreviewImages method is:
%
%      Image *PreviewImages(const Image *image,const PreviewType preview,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o preview: the image processing operation.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *PreviewImage(const Image *image,const PreviewType preview,
  ExceptionInfo *exception)
{
#define NumberTiles  9
#define PreviewImageTag  "Preview/Image"
#define DefaultPreviewGeometry  "204x204+10+10"

  char
    factor[MagickPathExtent],
    label[MagickPathExtent];

  double
    degrees,
    gamma,
    percentage,
    radius,
    sigma,
    threshold;

  Image
    *images,
    *montage_image,
    *preview_image,
    *thumbnail;

  ImageInfo
    *preview_info;

  MagickBooleanType
    proceed;

  MontageInfo
    *montage_info;

  QuantizeInfo
    quantize_info;

  RectangleInfo
    geometry;

  register ssize_t
    i,
    x;

  size_t
    colors;

  ssize_t
    y;

  /*
    Open output image file.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  colors=2;
  degrees=0.0;
  gamma=(-0.2f);
  preview_info=AcquireImageInfo();
  SetGeometry(image,&geometry);
  (void) ParseMetaGeometry(DefaultPreviewGeometry,&geometry.x,&geometry.y,
    &geometry.width,&geometry.height);
  images=NewImageList();
  percentage=12.5;
  GetQuantizeInfo(&quantize_info);
  radius=0.0;
  sigma=1.0;
  threshold=0.0;
  x=0;
  y=0;
  /*
    Build NumberTiles thumbnails, each with the requested operation applied
    at a progressively stronger parameter; the center tile is left untouched
    as a reference.
  */
  for (i=0; i < NumberTiles; i++)
  {
    thumbnail=ThumbnailImage(image,geometry.width,geometry.height,exception);
    if (thumbnail == (Image *) NULL)
      break;
    (void) SetImageProgressMonitor(thumbnail,(MagickProgressMonitor) NULL,
      (void *) NULL);
    (void) SetImageProperty(thumbnail,"label",DefaultTileLabel,exception);
    if (i == (NumberTiles/2))
      {
        /* Center tile: the unmodified reference thumbnail. */
        (void) QueryColorCompliance("#dfdfdf",AllCompliance,
          &thumbnail->matte_color,exception);
        AppendImageToList(&images,thumbnail);
        continue;
      }
    switch (preview)
    {
      case RotatePreview:
      {
        degrees+=45.0;
        preview_image=RotateImage(thumbnail,degrees,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"rotate %g",degrees);
        break;
      }
      case ShearPreview:
      {
        degrees+=5.0;
        preview_image=ShearImage(thumbnail,degrees,degrees,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"shear %gx%g",degrees,
          2.0*degrees);
        break;
      }
      case RollPreview:
      {
        x=(ssize_t) ((i+1)*thumbnail->columns)/NumberTiles;
        y=(ssize_t) ((i+1)*thumbnail->rows)/NumberTiles;
        preview_image=RollImage(thumbnail,x,y,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"roll %+.20gx%+.20g",
          (double) x,(double) y);
        break;
      }
      case HuePreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        (void) FormatLocaleString(factor,MagickPathExtent,"100,100,%g",2.0*
          percentage);
        (void) ModulateImage(preview_image,factor,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"modulate %s",factor);
        break;
      }
      case SaturationPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        (void) FormatLocaleString(factor,MagickPathExtent,"100,%g",2.0*
          percentage);
        (void) ModulateImage(preview_image,factor,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"modulate %s",factor);
        break;
      }
      case BrightnessPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        (void) FormatLocaleString(factor,MagickPathExtent,"%g",2.0*percentage);
        (void) ModulateImage(preview_image,factor,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"modulate %s",factor);
        break;
      }
      case GammaPreview:
      default:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        gamma+=0.4f;
        (void) GammaImage(preview_image,gamma,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"gamma %g",gamma);
        break;
      }
      case SpiffPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image != (Image *) NULL)
          for (x=0; x < i; x++)
            (void) ContrastImage(preview_image,MagickTrue,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"contrast (%.20g)",
          (double) i+1);
        break;
      }
      case DullPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        for (x=0; x < i; x++)
          (void) ContrastImage(preview_image,MagickFalse,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"+contrast (%.20g)",
          (double) i+1);
        break;
      }
      case GrayscalePreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        colors<<=1;
        quantize_info.number_colors=colors;
        quantize_info.colorspace=GRAYColorspace;
        (void) QuantizeImage(&quantize_info,preview_image,exception);
        (void) FormatLocaleString(label,MagickPathExtent,
          "-colorspace gray -colors %.20g",(double) colors);
        break;
      }
      case QuantizePreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        colors<<=1;
        quantize_info.number_colors=colors;
        (void) QuantizeImage(&quantize_info,preview_image,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"colors %.20g",
          (double) colors);
        break;
      }
      case DespecklePreview:
      {
        /* Despeckle i times; intermediate results replace the thumbnail. */
        for (x=0; x < (i-1); x++)
        {
          preview_image=DespeckleImage(thumbnail,exception);
          if (preview_image == (Image *) NULL)
            break;
          thumbnail=DestroyImage(thumbnail);
          thumbnail=preview_image;
        }
        preview_image=DespeckleImage(thumbnail,exception);
        if (preview_image == (Image *) NULL)
          break;
        (void) FormatLocaleString(label,MagickPathExtent,"despeckle (%.20g)",
          (double) i+1);
        break;
      }
      case ReduceNoisePreview:
      {
        preview_image=StatisticImage(thumbnail,NonpeakStatistic,(size_t)
          radius,(size_t) radius,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"noise %g",radius);
        break;
      }
      case AddNoisePreview:
      {
        switch ((int) i)
        {
          case 0:
          {
            (void) CopyMagickString(factor,"uniform",MagickPathExtent);
            break;
          }
          case 1:
          {
            (void) CopyMagickString(factor,"gaussian",MagickPathExtent);
            break;
          }
          case 2:
          {
            (void) CopyMagickString(factor,"multiplicative",MagickPathExtent);
            break;
          }
          case 3:
          {
            (void) CopyMagickString(factor,"impulse",MagickPathExtent);
            break;
          }
          case 5:
          {
            (void) CopyMagickString(factor,"laplacian",MagickPathExtent);
            break;
          }
          case 6:
          {
            (void) CopyMagickString(factor,"Poisson",MagickPathExtent);
            break;
          }
          default:
          {
            (void) CopyMagickString(thumbnail->magick,"NULL",MagickPathExtent);
            break;
          }
        }
        preview_image=StatisticImage(thumbnail,NonpeakStatistic,(size_t) i,
          (size_t) i,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"+noise %s",factor);
        break;
      }
      case SharpenPreview:
      {
        preview_image=SharpenImage(thumbnail,radius,sigma,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"sharpen %gx%g",
          radius,sigma);
        break;
      }
      case BlurPreview:
      {
        preview_image=BlurImage(thumbnail,radius,sigma,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"blur %gx%g",radius,
          sigma);
        break;
      }
      case ThresholdPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        (void) BilevelImage(thumbnail,(double) (percentage*((double)
          QuantumRange+1.0))/100.0,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"threshold %g",
          (double) (percentage*((double) QuantumRange+1.0))/100.0);
        break;
      }
      case EdgeDetectPreview:
      {
        preview_image=EdgeImage(thumbnail,radius,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"edge %g",radius);
        break;
      }
      case SpreadPreview:
      {
        preview_image=SpreadImage(thumbnail,image->interpolate,radius,
          exception);
        (void) FormatLocaleString(label,MagickPathExtent,"spread %g",
          radius+0.5);
        break;
      }
      case SolarizePreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        (void) SolarizeImage(preview_image,(double) QuantumRange*percentage/
          100.0,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"solarize %g",
          (QuantumRange*percentage)/100.0);
        break;
      }
      case ShadePreview:
      {
        degrees+=10.0;
        preview_image=ShadeImage(thumbnail,MagickTrue,degrees,degrees,
          exception);
        (void) FormatLocaleString(label,MagickPathExtent,"shade %gx%g",degrees,
          degrees);
        break;
      }
      case RaisePreview:
      {
        RectangleInfo
          raise;

        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        raise.width=(size_t) (2*i+2);
        raise.height=(size_t) (2*i+2);
        raise.x=(i-1)/2;
        raise.y=(i-1)/2;
        (void) RaiseImage(preview_image,&raise,MagickTrue,exception);
        (void) FormatLocaleString(label,MagickPathExtent,
          "raise %.20gx%.20g%+.20g%+.20g",(double) raise.width,(double)
          raise.height,(double) raise.x,(double) raise.y);
        break;
      }
      case SegmentPreview:
      {
        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        threshold+=0.4f;
        (void) SegmentImage(preview_image,sRGBColorspace,MagickFalse,threshold,
          threshold,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"segment %gx%g",
          threshold,threshold);
        break;
      }
      case SwirlPreview:
      {
        preview_image=SwirlImage(thumbnail,degrees,image->interpolate,
          exception);
        (void) FormatLocaleString(label,MagickPathExtent,"swirl %g",degrees);
        degrees+=45.0;
        break;
      }
      case ImplodePreview:
      {
        degrees+=0.1f;
        preview_image=ImplodeImage(thumbnail,degrees,image->interpolate,
          exception);
        (void) FormatLocaleString(label,MagickPathExtent,"implode %g",degrees);
        break;
      }
      case WavePreview:
      {
        degrees+=5.0f;
        preview_image=WaveImage(thumbnail,0.5*degrees,2.0*degrees,
          image->interpolate,exception);
        (void) FormatLocaleString(label,MagickPathExtent,"wave %gx%g",0.5*
          degrees,2.0*degrees);
        break;
      }
      case OilPaintPreview:
      {
        preview_image=OilPaintImage(thumbnail,(double) radius,(double) sigma,
          exception);
        (void) FormatLocaleString(label,MagickPathExtent,"charcoal %gx%g",
          radius,sigma);
        break;
      }
      case CharcoalDrawingPreview:
      {
        preview_image=CharcoalImage(thumbnail,(double) radius,(double) sigma,
          exception);
        (void) FormatLocaleString(label,MagickPathExtent,"charcoal %gx%g",
          radius,sigma);
        break;
      }
      case JPEGPreview:
      {
        char
          filename[MagickPathExtent];

        int
          file;

        MagickBooleanType
          status;

        preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception);
        if (preview_image == (Image *) NULL)
          break;
        preview_info->quality=(size_t) percentage;
        (void) FormatLocaleString(factor,MagickPathExtent,"%.20g",(double)
          preview_info->quality);
        /*
          Round-trip the tile through a temporary JPEG file to show the
          effect of the chosen quality setting.
        */
        file=AcquireUniqueFileResource(filename);
        if (file != -1)
          file=close(file)-1;
        (void) FormatLocaleString(preview_image->filename,MagickPathExtent,
          "jpeg:%s",filename);
        status=WriteImage(preview_info,preview_image,exception);
        if (status != MagickFalse)
          {
            Image
              *quality_image;

            (void) CopyMagickString(preview_info->filename,
              preview_image->filename,MagickPathExtent);
            quality_image=ReadImage(preview_info,exception);
            if (quality_image != (Image *) NULL)
              {
                preview_image=DestroyImage(preview_image);
                preview_image=quality_image;
              }
          }
        (void) RelinquishUniqueFileResource(preview_image->filename);
        if ((GetBlobSize(preview_image)/1024) >= 1024)
          (void) FormatLocaleString(label,MagickPathExtent,"quality %s\n%gmb ",
            factor,(double) ((MagickOffsetType) GetBlobSize(preview_image))/
            1024.0/1024.0);
        else
          if (GetBlobSize(preview_image) >= 1024)
            (void) FormatLocaleString(label,MagickPathExtent,
              "quality %s\n%gkb ",factor,(double) ((MagickOffsetType)
              GetBlobSize(preview_image))/1024.0);
          else
            (void) FormatLocaleString(label,MagickPathExtent,
              "quality %s\n%.20gb ",factor,(double) ((MagickOffsetType)
              GetBlobSize(thumbnail)));
        break;
      }
    }
    thumbnail=DestroyImage(thumbnail);
    /* Step the shared parameters for the next tile. */
    percentage+=12.5;
    radius+=0.5;
    sigma+=0.25;
    if (preview_image == (Image *) NULL)
      break;
    (void) DeleteImageProperty(preview_image,"label");
    (void) SetImageProperty(preview_image,"label",label,exception);
    AppendImageToList(&images,preview_image);
    proceed=SetImageProgress(image,PreviewImageTag,(MagickOffsetType) i,
      NumberTiles);
    if (proceed == MagickFalse)
      break;
  }
  if (images == (Image *) NULL)
    {
      preview_info=DestroyImageInfo(preview_info);
      return((Image *) NULL);
    }
  /*
    Create the montage.
  */
  montage_info=CloneMontageInfo(preview_info,(MontageInfo *) NULL);
  (void) CopyMagickString(montage_info->filename,image->filename,
    MagickPathExtent);
  montage_info->shadow=MagickTrue;
  (void) CloneString(&montage_info->tile,"3x3");
  (void) CloneString(&montage_info->geometry,DefaultPreviewGeometry);
  (void) CloneString(&montage_info->frame,DefaultTileFrame);
  montage_image=MontageImages(images,montage_info,exception);
  montage_info=DestroyMontageInfo(montage_info);
  images=DestroyImageList(images);
  if (montage_image == (Image *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  if (montage_image->montage != (char *) NULL)
    {
      /*
        Free image directory.
      */
      montage_image->montage=(char *) RelinquishMagickMemory(
        montage_image->montage);
      if (image->directory != (char *) NULL)
        montage_image->directory=(char *) RelinquishMagickMemory(
          montage_image->directory);
    }
  preview_info=DestroyImageInfo(preview_info);
  return(montage_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     R o t a t i o n a l B l u r I m a g e                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RotationalBlurImage() applies a radial blur to the image.
%
%  Andrew Protano contributed this effect.
%
%  The format of the RotationalBlurImage method is:
%
%    Image *RotationalBlurImage(const Image *image,const double angle,
%      ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o angle: the angle of the radial blur.
%
%    o blur: the blur.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *RotationalBlurImage(const Image *image,const double angle,
  ExceptionInfo *exception)
{
  CacheView
    *blur_view,
    *image_view,
    *radial_view;

  double
    blur_radius,
    *cos_theta,
    offset,
    *sin_theta,
    theta;

  Image
    *blur_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PointInfo
    blur_center;

  register ssize_t
    i;

  size_t
    n;

  ssize_t
    y;

  /*
    Allocate blur image.
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); #if defined(MAGICKCORE_OPENCL_SUPPORT) blur_image=AccelerateRotationalBlurImage(image,angle,exception); if (blur_image != (Image *) NULL) return(blur_image); #endif blur_image=CloneImage(image,0,0,MagickTrue,exception); if (blur_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse) { blur_image=DestroyImage(blur_image); return((Image *) NULL); } blur_center.x=(double) (image->columns-1)/2.0; blur_center.y=(double) (image->rows-1)/2.0; blur_radius=hypot(blur_center.x,blur_center.y); n=(size_t) fabs(4.0*DegreesToRadians(angle)*sqrt((double) blur_radius)+2UL); theta=DegreesToRadians(angle)/(double) (n-1); cos_theta=(double *) AcquireQuantumMemory((size_t) n, sizeof(*cos_theta)); sin_theta=(double *) AcquireQuantumMemory((size_t) n, sizeof(*sin_theta)); if ((cos_theta == (double *) NULL) || (sin_theta == (double *) NULL)) { if (cos_theta != (double *) NULL) cos_theta=(double *) RelinquishMagickMemory(cos_theta); if (sin_theta != (double *) NULL) sin_theta=(double *) RelinquishMagickMemory(sin_theta); blur_image=DestroyImage(blur_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } offset=theta*(double) (n-1)/2.0; for (i=0; i < (ssize_t) n; i++) { cos_theta[i]=cos((double) (theta*i-offset)); sin_theta[i]=sin((double) (theta*i-offset)); } /* Radial blur image. 
*/ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); radial_view=AcquireVirtualCacheView(image,exception); blur_view=AcquireAuthenticCacheView(blur_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,blur_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double radius; PointInfo center; register ssize_t i; size_t step; center.x=(double) x-blur_center.x; center.y=(double) y-blur_center.y; radius=hypot((double) center.x,center.y); if (radius == 0) step=1; else { step=(size_t) (blur_radius/radius); if (step == 0) step=1; else if (step >= n) step=n-1; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double gamma, pixel; PixelChannel channel; PixelTrait blur_traits, traits; register const Quantum *magick_restrict r; register ssize_t j; channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); blur_traits=GetPixelChannelTraits(blur_image,channel); if ((traits == UndefinedPixelTrait) || (blur_traits == UndefinedPixelTrait)) continue; if ((blur_traits & CopyPixelTrait) != 0) { SetPixelChannel(blur_image,channel,p[i],q); continue; } gamma=0.0; pixel=0.0; if ((GetPixelChannelTraits(image,AlphaPixelChannel) == UndefinedPixelTrait) || (channel == AlphaPixelChannel)) { for (j=0; j < (ssize_t) n; j+=(ssize_t) step) { r=GetCacheViewVirtualPixels(radial_view, (ssize_t) (blur_center.x+ center.x*cos_theta[j]-center.y*sin_theta[j]+0.5),(ssize_t) 
(blur_center.y+center.x*sin_theta[j]+center.y*cos_theta[j]+0.5), 1,1,exception); if (r == (const Quantum *) NULL) { status=MagickFalse; continue; } pixel+=r[i]; gamma++; } gamma=PerceptibleReciprocal(gamma); SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q); continue; } for (j=0; j < (ssize_t) n; j+=(ssize_t) step) { double alpha; r=GetCacheViewVirtualPixels(radial_view, (ssize_t) (blur_center.x+ center.x*cos_theta[j]-center.y*sin_theta[j]+0.5),(ssize_t) (blur_center.y+center.x*sin_theta[j]+center.y*cos_theta[j]+0.5), 1,1,exception); if (r == (const Quantum *) NULL) { status=MagickFalse; continue; } alpha=(double) QuantumScale*GetPixelAlpha(image,r); pixel+=alpha*r[i]; gamma+=alpha; } gamma=PerceptibleReciprocal(gamma); SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q); } p+=GetPixelChannels(image); q+=GetPixelChannels(blur_image); } if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,BlurImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } blur_view=DestroyCacheView(blur_view); radial_view=DestroyCacheView(radial_view); image_view=DestroyCacheView(image_view); cos_theta=(double *) RelinquishMagickMemory(cos_theta); sin_theta=(double *) RelinquishMagickMemory(sin_theta); if (status == MagickFalse) blur_image=DestroyImage(blur_image); return(blur_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e l e c t i v e B l u r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SelectiveBlurImage() selectively blur pixels within a contrast threshold. % It is similar to the unsharpen mask that sharpens everything with contrast % above a certain threshold. 
%
%  The format of the SelectiveBlurImage method is:
%
%      Image *SelectiveBlurImage(const Image *image,const double radius,
%        const double sigma,const double threshold,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the Gaussian, in pixels, not counting the center
%      pixel.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o threshold: only pixels within this contrast threshold are included
%      in the blur operation.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SelectiveBlurImage(const Image *image,const double radius,
  const double sigma,const double threshold,ExceptionInfo *exception)
{
#define SelectiveBlurImageTag  "SelectiveBlur/Image"

  CacheView
    *blur_view,
    *image_view,
    *luminance_view;

  Image
    *blur_image,
    *luminance_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickRealType
    *kernel;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    center,
    j,
    u,
    v,
    y;

  /*
    Initialize blur image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* Build a width x width Gaussian kernel from radius/sigma. */
  width=GetOptimalKernelWidth1D(radius,sigma);
  kernel=(MagickRealType *) MagickAssumeAligned(AcquireAlignedMemory((size_t)
    width,width*sizeof(*kernel)));
  if (kernel == (MagickRealType *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  j=(ssize_t) (width-1)/2;
  i=0;
  for (v=(-j); v <= j; v++)
  {
    for (u=(-j); u <= j; u++)
      kernel[i++]=(MagickRealType) (exp(-((double) u*u+v*v)/(2.0*MagickSigma*
        MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
  }
  if (image->debug != MagickFalse)
    {
      /* Dump the kernel to the transform log, one row per line. */
      char
        format[MagickPathExtent],
        *message;

      register const MagickRealType
        *k;

      ssize_t
        u,
        v;

      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  SelectiveBlurImage with %.20gx%.20g kernel:",(double) width,(double)
        width);
      message=AcquireString("");
      k=kernel;
      for (v=0; v < (ssize_t) width; v++)
      {
        *message='\0';
        (void) FormatLocaleString(format,MagickPathExtent,"%.20g: ",(double) v);
        (void) ConcatenateString(&message,format);
        for (u=0; u < (ssize_t) width; u++)
        {
          (void) FormatLocaleString(format,MagickPathExtent,"%+f ",(double)
            *k++);
          (void) ConcatenateString(&message,format);
        }
        (void) LogMagickEvent(TransformEvent,GetMagickModule(),"%s",message);
      }
      message=DestroyString(message);
    }
  blur_image=CloneImage(image,0,0,MagickTrue,exception);
  if (blur_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse)
    {
      blur_image=DestroyImage(blur_image);
      kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
      return((Image *) NULL);
    }
  /* A grayscale copy supplies the intensity used for the contrast test. */
  luminance_image=CloneImage(image,0,0,MagickTrue,exception);
  if (luminance_image == (Image *) NULL)
    {
      blur_image=DestroyImage(blur_image);
      kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
      return((Image *) NULL);
    }
  status=TransformImageColorspace(luminance_image,GRAYColorspace,exception);
  if (status == MagickFalse)
    {
      luminance_image=DestroyImage(luminance_image);
      blur_image=DestroyImage(blur_image);
      kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
      return((Image *) NULL);
    }
  /*
    Threshold blur image.
  */
  status=MagickTrue;
  progress=0;
  /*
    center is the offset (in Quantum units) of the window's central pixel
    inside the padded (columns+width) x width region fetched per row.
  */
  center=(ssize_t) (GetPixelChannels(image)*(image->columns+width)*
    ((width-1)/2L)+GetPixelChannels(image)*((width-1)/2L));
  image_view=AcquireVirtualCacheView(image,exception);
  luminance_view=AcquireVirtualCacheView(luminance_image,exception);
  blur_view=AcquireAuthenticCacheView(blur_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,blur_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      contrast;

    MagickBooleanType
      sync;

    register const Quantum
      *magick_restrict l,
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,-((ssize_t) (width-1)/2L),y-(ssize_t)
      ((width-1)/2L),image->columns+width,width,exception);
    l=GetCacheViewVirtualPixels(luminance_view,-((ssize_t) (width-1)/2L),y-
      (ssize_t) ((width-1)/2L),luminance_image->columns+width,width,exception);
    q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (l == (const Quantum *) NULL) ||
        (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        intensity;

      register ssize_t
        i;

      intensity=GetPixelIntensity(image,p+center);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          alpha,
          gamma,
          pixel;

        PixelChannel
          channel;

        PixelTrait
          blur_traits,
          traits;

        register const MagickRealType
          *magick_restrict k;

        register const Quantum
          *magick_restrict luminance_pixels,
          *magick_restrict pixels;

        register ssize_t
          u;

        ssize_t
          v;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        blur_traits=GetPixelChannelTraits(blur_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (blur_traits == UndefinedPixelTrait))
          continue;
        if ((blur_traits & CopyPixelTrait) != 0)
          {
            SetPixelChannel(blur_image,channel,p[center+i],q);
            continue;
          }
        k=kernel;
        pixel=0.0;
        pixels=p;
        luminance_pixels=l;
        gamma=0.0;
        /* Non-blended path: contrast is judged against the grayscale copy. */
        if ((blur_traits & BlendPixelTrait) == 0)
          {
            for (v=0; v < (ssize_t) width; v++)
            {
              for (u=0; u < (ssize_t) width; u++)
              {
                contrast=GetPixelIntensity(luminance_image,luminance_pixels)-
                  intensity;
                /* Only low-contrast neighbors contribute to the blur. */
                if (fabs(contrast) < threshold)
                  {
                    pixel+=(*k)*pixels[i];
                    gamma+=(*k);
                  }
                k++;
                pixels+=GetPixelChannels(image);
                luminance_pixels+=GetPixelChannels(luminance_image);
              }
              pixels+=GetPixelChannels(image)*image->columns;
              luminance_pixels+=GetPixelChannels(luminance_image)*
                luminance_image->columns;
            }
            /* No neighbor qualified: keep the original pixel. */
            if (fabs((double) gamma) < MagickEpsilon)
              {
                SetPixelChannel(blur_image,channel,p[center+i],q);
                continue;
              }
            gamma=PerceptibleReciprocal(gamma);
            SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
            continue;
          }
        /* Blended path: kernel weights are additionally scaled by alpha. */
        for (v=0; v < (ssize_t) width; v++)
        {
          for (u=0; u < (ssize_t) width; u++)
          {
            contrast=GetPixelIntensity(image,pixels)-intensity;
            if (fabs(contrast) < threshold)
              {
                alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels));
                pixel+=(*k)*alpha*pixels[i];
                gamma+=(*k)*alpha;
              }
            k++;
            pixels+=GetPixelChannels(image);
            luminance_pixels+=GetPixelChannels(luminance_image);
          }
          pixels+=GetPixelChannels(image)*image->columns;
          luminance_pixels+=GetPixelChannels(luminance_image)*
            luminance_image->columns;
        }
        if (fabs((double) gamma) < MagickEpsilon)
          {
            SetPixelChannel(blur_image,channel,p[center+i],q);
            continue;
          }
        gamma=PerceptibleReciprocal(gamma);
        SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      p+=GetPixelChannels(image);
      l+=GetPixelChannels(luminance_image);
      q+=GetPixelChannels(blur_image);
    }
    sync=SyncCacheViewAuthenticPixels(blur_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SelectiveBlurImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  blur_image->type=image->type;
  blur_view=DestroyCacheView(blur_view);
  luminance_view=DestroyCacheView(luminance_view);
  image_view=DestroyCacheView(image_view);
  luminance_image=DestroyImage(luminance_image);
  kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
  if (status == MagickFalse)
    blur_image=DestroyImage(blur_image);
  return(blur_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S h a d e I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ShadeImage() shines a distant light on an image to create a
%  three-dimensional effect. You control the positioning of the light with
%  azimuth and elevation; azimuth is measured in degrees off the x axis
%  and elevation is measured in pixels above the Z axis.
%
%  The format of the ShadeImage method is:
%
%      Image *ShadeImage(const Image *image,const MagickBooleanType gray,
%        const double azimuth,const double elevation,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o gray: A value other than zero shades the intensity of each pixel.
%
%    o azimuth, elevation: Define the light source direction.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ShadeImage(const Image *image,const MagickBooleanType gray,
  const double azimuth,const double elevation,ExceptionInfo *exception)
{
#define GetShadeIntensity(image,pixel) \
  ClampPixel(GetPixelIntensity((image),(pixel)))
#define ShadeImageTag  "Shade/Image"

  CacheView
    *image_view,
    *shade_view;

  Image
    *linear_image,
    *shade_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PrimaryInfo
    light;

  ssize_t
    y;

  /*
    Initialize shaded image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  linear_image=CloneImage(image,0,0,MagickTrue,exception);
  shade_image=CloneImage(image,0,0,MagickTrue,exception);
  if ((linear_image == (Image *) NULL) || (shade_image == (Image *) NULL))
    {
      if (linear_image != (Image *) NULL)
        linear_image=DestroyImage(linear_image);
      if (shade_image != (Image *) NULL)
        shade_image=DestroyImage(shade_image);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(shade_image,DirectClass,exception) == MagickFalse)
    {
      linear_image=DestroyImage(linear_image);
      shade_image=DestroyImage(shade_image);
      return((Image *) NULL);
    }
  /*
    Compute the light vector from the spherical (azimuth,elevation) angles.
  */
  light.x=(double) QuantumRange*cos(DegreesToRadians(azimuth))*
    cos(DegreesToRadians(elevation));
  light.y=(double) QuantumRange*sin(DegreesToRadians(azimuth))*
    cos(DegreesToRadians(elevation));
  light.z=(double) QuantumRange*sin(DegreesToRadians(elevation));
  /*
    Shade image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(linear_image,exception);
  shade_view=AcquireAuthenticCacheView(shade_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(linear_image,shade_image,linear_image->rows,1)
#endif
  for (y=0; y < (ssize_t) linear_image->rows; y++)
  {
    double
      distance,
      normal_distance,
      shade;

    PrimaryInfo
      normal;

    register const Quantum
      *magick_restrict center,
      *magick_restrict p,
      *magick_restrict post,
      *magick_restrict pre;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /* Fetch a 3-row window (one pixel of padding on every side). */
    p=GetCacheViewVirtualPixels(image_view,-1,y-1,linear_image->columns+2,3,
      exception);
    q=QueueCacheViewAuthenticPixels(shade_view,0,y,shade_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /*
      Shade this row of pixels.
    */
    normal.z=2.0*(double) QuantumRange;  /* constant Z of surface normal */
    for (x=0; x < (ssize_t) linear_image->columns; x++)
    {
      register ssize_t
        i;

      /*
        Determine the surface normal and compute shading.  pre/center/post
        are the three consecutive rows of the padded window above, at and
        below the current pixel.
      */
      pre=p+GetPixelChannels(linear_image);
      center=pre+(linear_image->columns+2)*GetPixelChannels(linear_image);
      post=center+(linear_image->columns+2)*GetPixelChannels(linear_image);
      normal.x=(double) (
        GetShadeIntensity(linear_image,pre-GetPixelChannels(linear_image))+
        GetShadeIntensity(linear_image,center-GetPixelChannels(linear_image))+
        GetShadeIntensity(linear_image,post-GetPixelChannels(linear_image))-
        GetShadeIntensity(linear_image,pre+GetPixelChannels(linear_image))-
        GetShadeIntensity(linear_image,center+GetPixelChannels(linear_image))-
        GetShadeIntensity(linear_image,post+GetPixelChannels(linear_image)));
      normal.y=(double) (
        GetShadeIntensity(linear_image,post-GetPixelChannels(linear_image))+
        GetShadeIntensity(linear_image,post)+
        GetShadeIntensity(linear_image,post+GetPixelChannels(linear_image))-
        GetShadeIntensity(linear_image,pre-GetPixelChannels(linear_image))-
        GetShadeIntensity(linear_image,pre)-
        GetShadeIntensity(linear_image,pre+GetPixelChannels(linear_image)));
      /* Flat surface: shade is the light's Z component. */
      if ((fabs(normal.x) <= MagickEpsilon) &&
          (fabs(normal.y) <= MagickEpsilon))
        shade=light.z;
      else
        {
          shade=0.0;
          distance=normal.x*light.x+normal.y*light.y+normal.z*light.z;
          if (distance > MagickEpsilon)
            {
              normal_distance=normal.x*normal.x+normal.y*normal.y+
                normal.z*normal.z;
              if (normal_distance > (MagickEpsilon*MagickEpsilon))
                shade=distance/sqrt((double) normal_distance);
            }
        }
      for (i=0; i < (ssize_t) GetPixelChannels(linear_image); i++)
      {
        PixelChannel
          channel;

        PixelTrait
          shade_traits,
          traits;

        channel=GetPixelChannelChannel(linear_image,i);
        traits=GetPixelChannelTraits(linear_image,channel);
        shade_traits=GetPixelChannelTraits(shade_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (shade_traits == UndefinedPixelTrait))
          continue;
        if ((shade_traits & CopyPixelTrait) != 0)
          {
            SetPixelChannel(shade_image,channel,center[i],q);
            continue;
          }
        if ((traits & UpdatePixelTrait) == 0)
          {
            SetPixelChannel(shade_image,channel,center[i],q);
            continue;
          }
        if (gray != MagickFalse)
          {
            SetPixelChannel(shade_image,channel,ClampToQuantum(shade),q);
            continue;
          }
        SetPixelChannel(shade_image,channel,ClampToQuantum(QuantumScale*shade*
          center[i]),q);
      }
      p+=GetPixelChannels(linear_image);
      q+=GetPixelChannels(shade_image);
    }
    if (SyncCacheViewAuthenticPixels(shade_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ShadeImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  shade_view=DestroyCacheView(shade_view);
  image_view=DestroyCacheView(image_view);
  linear_image=DestroyImage(linear_image);
  if (status == MagickFalse)
    shade_image=DestroyImage(shade_image);
  return(shade_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S h a r p e n I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SharpenImage() sharpens the image.  We convolve the image with a Gaussian
%  operator of the given radius and standard deviation (sigma).  For
%  reasonable results, radius should be larger than sigma.  Use a radius of 0
%  and SharpenImage() selects a suitable radius for you.
%
%  Using a separable kernel would be faster, but the negative weights cancel
%  out on the corners of the kernel producing often undesirable ringing in the
%  filtered result; this can be avoided by using a 2D gaussian shaped image
%  sharpening kernel instead.
%
%  The format of the SharpenImage method is:
%
%      Image *SharpenImage(const Image *image,const double radius,
%        const double sigma,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the Gaussian, in pixels, not counting the center
%      pixel.
%
%    o sigma: the standard deviation of the Laplacian, in pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SharpenImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  double
    gamma,
    normalize;

  Image
    *sharp_image;

  KernelInfo
    *kernel_info;

  register ssize_t
    i;

  size_t
    width;

  ssize_t
    j,
    u,
    v;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  width=GetOptimalKernelWidth2D(radius,sigma);
  kernel_info=AcquireKernelInfo((const char *) NULL,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  /* Wipe the acquired structure, then fill in the fields by hand. */
  (void) memset(kernel_info,0,sizeof(*kernel_info));
  kernel_info->width=width;
  kernel_info->height=width;
  kernel_info->x=(ssize_t) (width-1)/2;
  kernel_info->y=(ssize_t) (width-1)/2;
  kernel_info->signature=MagickCoreSignature;
  kernel_info->values=(MagickRealType *) MagickAssumeAligned(
    AcquireAlignedMemory(kernel_info->width,kernel_info->height*
    sizeof(*kernel_info->values)));
  if (kernel_info->values == (MagickRealType *) NULL)
    {
      kernel_info=DestroyKernelInfo(kernel_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Fill the kernel with a negated Gaussian; the center element is then
    overwritten with a positive spike (-2*normalize) so the kernel sharpens.
    After the loops i == width*width, so values[i/2] is the center for the
    (odd) kernel widths produced by GetOptimalKernelWidth2D.
  */
  normalize=0.0;
  j=(ssize_t) (kernel_info->width-1)/2;
  i=0;
  for (v=(-j); v <= j; v++)
  {
    for (u=(-j); u <= j; u++)
    {
      kernel_info->values[i]=(MagickRealType) (-exp(-((double) u*u+v*v)/(2.0*
        MagickSigma*MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
      normalize+=kernel_info->values[i];
      i++;
    }
  }
  kernel_info->values[i/2]=(double) ((-2.0)*normalize);
  /* Renormalize so the kernel weights sum to one. */
  normalize=0.0;
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    normalize+=kernel_info->values[i];
  gamma=PerceptibleReciprocal(normalize);
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    kernel_info->values[i]*=gamma;
  sharp_image=ConvolveImage(image,kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  return(sharp_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S p r e a d I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SpreadImage() is a special effects method that randomly displaces each
%  pixel in a square area defined by the radius parameter.
%
%  The format of the SpreadImage method is:
%
%      Image *SpreadImage(const Image *image,
%        const PixelInterpolateMethod method,const double radius,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o method: intepolation method.
%
%    o radius: choose a random pixel in a neighborhood of this extent.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SpreadImage(const Image *image,
  const PixelInterpolateMethod method,const double radius,
  ExceptionInfo *exception)
{
#define SpreadImageTag  "Spread/Image"

  CacheView
    *image_view,
    *spread_view;

  Image
    *spread_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RandomInfo
    **magick_restrict random_info;

  size_t
    width;

  ssize_t
    y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif

  /*
    Initialize spread image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  spread_image=CloneImage(image,0,0,MagickTrue,exception);
  if (spread_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(spread_image,DirectClass,exception) == MagickFalse)
    {
      spread_image=DestroyImage(spread_image);
      return((Image *) NULL);
    }
  /*
    Spread image.
  */
  status=MagickTrue;
  progress=0;
  width=GetOptimalKernelWidth1D(radius,0.5);
  /* One RandomInfo per thread so the generators are not shared. */
  random_info=AcquireRandomInfoThreadSet();
  image_view=AcquireVirtualCacheView(image,exception);
  spread_view=AcquireAuthenticCacheView(spread_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  /* NOTE(review): the last magick_number_threads() argument gates
     parallelism on key == ~0UL -- presumably to keep the pseudo-random
     sequence reproducible when a fixed seed is in use; confirm. */
  key=GetRandomSecretKey(random_info[0]);
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,spread_image,image->rows,key == ~0UL)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(spread_view,0,y,spread_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      PointInfo
        point;

      /* Jitter the sample point by up to +/- width/2 in each axis. */
      point.x=GetPseudoRandomValue(random_info[id]);
      point.y=GetPseudoRandomValue(random_info[id]);
      status=InterpolatePixelChannels(image,image_view,spread_image,method,
        (double) x+width*(point.x-0.5),(double) y+width*(point.y-0.5),q,
        exception);
      if (status == MagickFalse)
        break;
      q+=GetPixelChannels(spread_image);
    }
    if (SyncCacheViewAuthenticPixels(spread_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SpreadImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  spread_view=DestroyCacheView(spread_view);
  image_view=DestroyCacheView(image_view);
  random_info=DestroyRandomInfoThreadSet(random_info);
  if (status == MagickFalse)
    spread_image=DestroyImage(spread_image);
  return(spread_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     U n s h a r p M a s k I m a g e                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
UnsharpMaskImage() sharpens one or more image channels.  We convolve the
%  image with a Gaussian operator of the given radius and standard deviation
%  (sigma).  For reasonable results, radius should be larger than sigma.  Use
%  a radius of 0 and UnsharpMaskImage() selects a suitable radius for you.
%
%  The format of the UnsharpMaskImage method is:
%
%      Image *UnsharpMaskImage(const Image *image,const double radius,
%        const double sigma,const double amount,const double threshold,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the Gaussian, in pixels, not counting the center
%      pixel.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o gain: the percentage of the difference between the original and the
%      blur image that is added back into the original.
%
%    o threshold: the threshold in pixels needed to apply the diffence gain.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *UnsharpMaskImage(const Image *image,const double radius,
  const double sigma,const double gain,const double threshold,
  ExceptionInfo *exception)
{
#define SharpenImageTag  "Sharpen/Image"

  CacheView
    *image_view,
    *unsharp_view;

  Image
    *unsharp_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  double
    quantum_threshold;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* NOTE(review): unlike the sibling functions above, exception->signature
     is not asserted here -- confirm this omission is intentional. */
  assert(exception != (ExceptionInfo *) NULL);
/*
  This kernel appears to be broken.
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  unsharp_image=AccelerateUnsharpMaskImage(image,radius,sigma,gain,threshold,
    exception);
  if (unsharp_image != (Image *) NULL)
    return(unsharp_image);
#endif
*/
  unsharp_image=BlurImage(image,radius,sigma,exception);
  if (unsharp_image == (Image *) NULL)
    return((Image *) NULL);
  /* Scale the user threshold (a fraction) into Quantum units. */
  quantum_threshold=(double) QuantumRange*threshold;
  /*
    Unsharp-mask image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  unsharp_view=AcquireAuthenticCacheView(unsharp_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,unsharp_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(unsharp_view,0,y,unsharp_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          pixel;

        PixelChannel
          channel;

        PixelTrait
          traits,
          unsharp_traits;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        unsharp_traits=GetPixelChannelTraits(unsharp_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (unsharp_traits == UndefinedPixelTrait))
          continue;
        if ((unsharp_traits & CopyPixelTrait) != 0)
          {
            SetPixelChannel(unsharp_image,channel,p[i],q);
            continue;
          }
        /*
          pixel is the (original - blurred) difference.  The doubled
          difference is compared against the threshold; below it the
          original value is kept, otherwise the gain-scaled difference
          is added back.
        */
        pixel=p[i]-(double) GetPixelChannel(unsharp_image,channel,q);
        if (fabs(2.0*pixel) < quantum_threshold)
          pixel=(double) p[i];
        else
          pixel=(double) p[i]+gain*pixel;
        SetPixelChannel(unsharp_image,channel,ClampToQuantum(pixel),q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(unsharp_image);
    }
    if (SyncCacheViewAuthenticPixels(unsharp_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SharpenImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  unsharp_image->type=image->type;
  unsharp_view=DestroyCacheView(unsharp_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    unsharp_image=DestroyImage(unsharp_image);
  return(unsharp_image);
}
/* ==== concatenated file boundary: zboxloop.c (HYPRE boxloop timing driver) ==== */
/******************************************************************************
 * Copyright (c) 1998 Lawrence Livermore National Security, LLC and other
 * HYPRE Project Developers. See the top-level COPYRIGHT file for details.
 *
 * SPDX-License-Identifier: (Apache-2.0 OR MIT)
 ******************************************************************************/

#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include "_hypre_utilities.h"
#include "HYPRE_struct_ls.h"
#include "HYPRE_krylov.h"
#include "_hypre_struct_mv.h"
#include "_hypre_struct_mv.hpp"

/*--------------------------------------------------------------------------
 * Test driver to time new boxloops and compare to the old ones
 *
 * Benchmarks the "zypre_" (new, host) BoxLoop macros against the "hypre_"
 * (old, possibly device-offloaded) BoxLoop macros, for 0- to 4-box loops
 * and for 1- and 2-box reduction loops.  A correctness check on BoxLoop1
 * is run first; timings are then printed per macro family.
 *
 * Command line: -n <nx> <ny> <nz>  problem size per block
 *               -P <Px> <Py> <Pz> processor topology
 *               -d <dim>          problem dimension
 *               -reps <n>         repetitions (default scales with size)
 *--------------------------------------------------------------------------*/

hypre_int
main( hypre_int argc,
      char *argv[] )
{
   HYPRE_Int         arg_index;
   HYPRE_Int         print_usage;
   HYPRE_Int         nx, ny, nz;
   HYPRE_Int         P, Q, R;
   HYPRE_Int         time_index;
   HYPRE_Int         num_procs, myid;
   HYPRE_Int         dim;
   HYPRE_Int         rep, reps, fail, sum;
   HYPRE_Int         size;
   hypre_Box        *x1_data_box, *x2_data_box, *x3_data_box, *x4_data_box;
   /* xi2/xi3/xi4 are declared by the BoxLoop macros themselves; only xi1
      is needed at function scope (for the BoxLoop0 timing below). */
   //HYPRE_Int         xi1, xi2, xi3, xi4;
   HYPRE_Int         xi1;
   HYPRE_Real       *xp1, *xp2, *xp3, *xp4;     /* host arrays   */
   HYPRE_Real       *d_xp1, *d_xp2, *d_xp3, *d_xp4; /* device arrays */
   hypre_Index       loop_size, start, unit_stride, index;

   /*-----------------------------------------------------------
    * Initialize some stuff
    *-----------------------------------------------------------*/

   /* Initialize MPI */
   hypre_MPI_Init(&argc, &argv);
   hypre_MPI_Comm_size(hypre_MPI_COMM_WORLD, &num_procs );
   hypre_MPI_Comm_rank(hypre_MPI_COMM_WORLD, &myid );

   HYPRE_Init();

#if defined(HYPRE_USING_KOKKOS)
   Kokkos::initialize (argc, argv);
#endif

   /*-----------------------------------------------------------
    * Set defaults
    *-----------------------------------------------------------*/

   dim = 3;
   nx = 10;
   ny = 10;
   nz = 10;
   P = num_procs;   /* default topology: all ranks along x */
   Q = 1;
   R = 1;
   reps = -1;       /* negative means "auto-pick from problem size" below */

   /*-----------------------------------------------------------
    * Parse command line
    *-----------------------------------------------------------*/

   print_usage = 0;
   arg_index = 1;
   while (arg_index < argc)
   {
      if ( strcmp(argv[arg_index], "-n") == 0 )
      {
         arg_index++;
         nx = atoi(argv[arg_index++]);
         ny = atoi(argv[arg_index++]);
         nz = atoi(argv[arg_index++]);
      }
      else if ( strcmp(argv[arg_index], "-P") == 0 )
      {
         arg_index++;
         P  = atoi(argv[arg_index++]);
         Q  = atoi(argv[arg_index++]);
         R  = atoi(argv[arg_index++]);
      }
      else if ( strcmp(argv[arg_index], "-d") == 0 )
      {
         arg_index++;
         dim = atoi(argv[arg_index++]);
      }
      else if ( strcmp(argv[arg_index], "-reps") == 0 )
      {
         arg_index++;
         reps = atoi(argv[arg_index++]);
      }
      else if ( strcmp(argv[arg_index], "-help") == 0 )
      {
         print_usage = 1;
         break;
      }
      else
      {
         /* unrecognized argument: skip it silently */
         arg_index++;
      }
   }

   /*-----------------------------------------------------------
    * Print usage info
    *-----------------------------------------------------------*/

   if ( (print_usage) && (myid == 0) )
   {
      hypre_printf("\n");
      hypre_printf("Usage: %s [<options>]\n", argv[0]);
      hypre_printf("\n");
      hypre_printf(" -n <nx> <ny> <nz> : problem size per block\n");
      hypre_printf(" -P <Px> <Py> <Pz> : processor topology\n");
      hypre_printf(" -d <dim> : problem dimension (2 or 3)\n");
      hypre_printf("\n");
   }

   if ( print_usage )
   {
      exit(1);
   }

   /*-----------------------------------------------------------
    * Check a few things
    *-----------------------------------------------------------*/

   if ((P * Q * R) > num_procs)
   {
      if (myid == 0)
      {
         hypre_printf("Error: PxQxR is more than the number of processors\n");
      }
      exit(1);
   }
   else if ((P * Q * R) < num_procs)
   {
      if (myid == 0)
      {
         hypre_printf("Warning: PxQxR is less than the number of processors\n");
      }
   }

   /*-----------------------------------------------------------
    * Initialize some stuff
    *-----------------------------------------------------------*/

   /* Interior starts at (1,1,1); boxes carry one layer of ghost cells. */
   hypre_SetIndex3(start, 1, 1, 1);
   hypre_SetIndex3(loop_size, nx, ny, nz);
   hypre_SetIndex3(unit_stride, 1, 1, 1);

   x1_data_box = hypre_BoxCreate(dim);
   x2_data_box = hypre_BoxCreate(dim);
   x3_data_box = hypre_BoxCreate(dim);
   x4_data_box = hypre_BoxCreate(dim);
   hypre_SetIndex3(hypre_BoxIMin(x1_data_box), 0, 0, 0);
   hypre_SetIndex3(hypre_BoxIMax(x1_data_box), nx + 1, ny + 1, nz + 1);
   hypre_CopyBox(x1_data_box, x2_data_box);
   hypre_CopyBox(x1_data_box, x3_data_box);
   hypre_CopyBox(x1_data_box, x4_data_box);

   /* Allocation covers the box including ghosts; hypre_CTAlloc zeroes it. */
   size = (nx + 2) * (ny + 2) * (nz + 2);
   xp1 = hypre_CTAlloc(HYPRE_Real, size, HYPRE_MEMORY_HOST);
   xp2 = hypre_CTAlloc(HYPRE_Real, size, HYPRE_MEMORY_HOST);
   xp3 = hypre_CTAlloc(HYPRE_Real, size, HYPRE_MEMORY_HOST);
   xp4 = hypre_CTAlloc(HYPRE_Real, size, HYPRE_MEMORY_HOST);
   d_xp1 = hypre_CTAlloc(HYPRE_Real, size, HYPRE_MEMORY_DEVICE);
   d_xp2 = hypre_CTAlloc(HYPRE_Real, size, HYPRE_MEMORY_DEVICE);
   d_xp3 = hypre_CTAlloc(HYPRE_Real, size, HYPRE_MEMORY_DEVICE);
   d_xp4 = hypre_CTAlloc(HYPRE_Real, size, HYPRE_MEMORY_DEVICE);

   if (reps < 0)
   {
      /* auto-scale repetitions so total work stays roughly constant */
      reps = 1000000000 / (nx * ny * nz + 1000);
   }

   /*-----------------------------------------------------------
    * Print driver parameters
    *-----------------------------------------------------------*/

   if (myid == 0)
   {
      hypre_printf("Running with these driver parameters:\n");
      hypre_printf(" (nx, ny, nz) = (%d, %d, %d)\n", nx, ny, nz);
      hypre_printf(" (Px, Py, Pz) = (%d, %d, %d)\n", P, Q, R);
      hypre_printf(" dim = %d\n", dim);
      hypre_printf(" reps = %d\n", reps);
   }

   /*-----------------------------------------------------------
    * Check new boxloops
    *-----------------------------------------------------------*/

   /* xp1 is already initialized to 0 */
   zypre_BoxLoop1Begin(dim, loop_size,
                       x1_data_box, start, unit_stride, xi1);
   zypre_BoxLoop1For(xi1)
   {
      xp1[xi1] ++;
   }
   zypre_BoxLoop1End(xi1);

   /* Use old boxloop to check that values are set to 1 */
   /* NOTE(review): ndim is hard-coded to 3 here (host loops above use 'dim');
      presumably fine because loop_size z-extent is nz — confirm for -d 2. */
   fail = 0;
   sum = 0;
   hypre_SerialBoxLoop1Begin(3, loop_size,
                             x1_data_box, start, unit_stride, xi1);
   {
      sum += xp1[xi1];
      if (xp1[xi1] != 1)
      {
         zypre_BoxLoopGetIndex(index);
         hypre_printf("*(%d,%d,%d) = %d\n",
                      index[0], index[1], index[2], (HYPRE_Int) xp1[xi1]);
         fail = 1;
      }
   }
   hypre_SerialBoxLoop1End(xi1);

   if (sum != (nx * ny * nz))
   {
      hypre_printf("*sum = %d\n", sum);
      fail = 1;
   }

   if (fail)
   {
      exit(1);
   }

   /*-----------------------------------------------------------
    * Synchronize so that timings make sense
    *-----------------------------------------------------------*/

   hypre_MPI_Barrier(hypre_MPI_COMM_WORLD);

   /*-----------------------------------------------------------
    * Time old boxloops [Device]
    *-----------------------------------------------------------*/

   /* Time BoxLoop0 */
   time_index = hypre_InitializeTiming("BoxLoop0");
   hypre_BeginTiming(time_index);
   for (rep = 0; rep < reps; rep++)
   {
      xi1 = 0;
/* DEVICE_VAR tells the device BoxLoop which pointers live in device memory */
#define DEVICE_VAR is_device_ptr(d_xp1)
      hypre_BoxLoop0Begin(3, loop_size);
      {
         d_xp1[xi1] += d_xp1[xi1];
         //xi1++;
      }
      hypre_BoxLoop0End();
#undef DEVICE_VAR
   }
   hypre_EndTiming(time_index);

   /* Time BoxLoop1 */
   time_index = hypre_InitializeTiming("BoxLoop1");
   hypre_BeginTiming(time_index);
   for (rep = 0; rep < reps; rep++)
   {
#define DEVICE_VAR is_device_ptr(d_xp1)
      hypre_BoxLoop1Begin(3, loop_size,
                          x1_data_box, start, unit_stride, xi1);
      {
         d_xp1[xi1] += d_xp1[xi1];
      }
      hypre_BoxLoop1End(xi1);
#undef DEVICE_VAR
   }
   hypre_EndTiming(time_index);

   /* Time BoxLoop2 */
   time_index = hypre_InitializeTiming("BoxLoop2");
   hypre_BeginTiming(time_index);
   for (rep = 0; rep < reps; rep++)
   {
#define DEVICE_VAR is_device_ptr(d_xp1,d_xp2)
      hypre_BoxLoop2Begin(3, loop_size,
                          x1_data_box, start, unit_stride, xi1,
                          x2_data_box, start, unit_stride, xi2);
      {
         d_xp1[xi1] += d_xp1[xi1] + d_xp2[xi2];
      }
      hypre_BoxLoop2End(xi1, xi2);
#undef DEVICE_VAR
   }
   hypre_EndTiming(time_index);

   /* Time BoxLoop3 */
   time_index = hypre_InitializeTiming("BoxLoop3");
   hypre_BeginTiming(time_index);
   for (rep = 0; rep < reps; rep++)
   {
#define DEVICE_VAR is_device_ptr(d_xp1,d_xp2,d_xp3)
      hypre_BoxLoop3Begin(3, loop_size,
                          x1_data_box, start, unit_stride, xi1,
                          x2_data_box, start, unit_stride, xi2,
                          x3_data_box, start, unit_stride, xi3);
      {
         d_xp1[xi1] += d_xp1[xi1] + d_xp2[xi2] + d_xp3[xi3];
      }
      hypre_BoxLoop3End(xi1, xi2, xi3);
#undef DEVICE_VAR
   }
   hypre_EndTiming(time_index);

   /* Time BoxLoop4 */
   time_index = hypre_InitializeTiming("BoxLoop4");
   hypre_BeginTiming(time_index);
   for (rep = 0; rep < reps; rep++)
   {
#define DEVICE_VAR is_device_ptr(d_xp1,d_xp2,d_xp3,d_xp4)
      hypre_BoxLoop4Begin(3, loop_size,
                          x1_data_box, start, unit_stride, xi1,
                          x2_data_box, start, unit_stride, xi2,
                          x3_data_box, start, unit_stride, xi3,
                          x4_data_box, start, unit_stride, xi4);
      {
         d_xp1[xi1] += d_xp1[xi1] + d_xp2[xi2] + d_xp3[xi3] + d_xp4[xi4];
      }
      hypre_BoxLoop4End(xi1, xi2, xi3, xi4);
#undef DEVICE_VAR
   }
   hypre_EndTiming(time_index);

   hypre_PrintTiming("Old BoxLoop times [DEVICE]", hypre_MPI_COMM_WORLD);
   hypre_FinalizeAllTimings();
   hypre_ClearTiming();

   /*-----------------------------------------------------------
    * Time new boxloops [Host]
    *-----------------------------------------------------------*/

   /* Time BoxLoop0 */
   time_index = hypre_InitializeTiming("BoxLoop0");
   hypre_BeginTiming(time_index);
   for (rep = 0; rep < reps; rep++)
   {
      xi1 = 0;
      zypre_BoxLoop0Begin(dim, loop_size);
/* xi1 is firstprivate: each thread advances its own copy from 0 */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(ZYPRE_BOX_PRIVATE) firstprivate(xi1) HYPRE_SMP_SCHEDULE
#endif
      zypre_BoxLoop0For()
      {
         xp1[xi1] += xp1[xi1];
         xi1++;
      }
      zypre_BoxLoop0End();
   }
   hypre_EndTiming(time_index);

   /* Time BoxLoop1 */
   time_index = hypre_InitializeTiming("BoxLoop1");
   hypre_BeginTiming(time_index);
   for (rep = 0; rep < reps; rep++)
   {
      zypre_BoxLoop1Begin(dim, loop_size,
                          x1_data_box, start, unit_stride, xi1);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(ZYPRE_BOX_PRIVATE) HYPRE_SMP_SCHEDULE
#endif
      zypre_BoxLoop1For(xi1)
      {
         xp1[xi1] += xp1[xi1];
      }
      zypre_BoxLoop1End(xi1);
   }
   hypre_EndTiming(time_index);

   /* Time BoxLoop2 */
   time_index = hypre_InitializeTiming("BoxLoop2");
   hypre_BeginTiming(time_index);
   for (rep = 0; rep < reps; rep++)
   {
      zypre_BoxLoop2Begin(dim, loop_size,
                          x1_data_box, start, unit_stride, xi1,
                          x2_data_box, start, unit_stride, xi2);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(ZYPRE_BOX_PRIVATE) HYPRE_SMP_SCHEDULE
#endif
      zypre_BoxLoop2For(xi1, xi2)
      {
         xp1[xi1] += xp1[xi1] + xp2[xi2];
      }
      zypre_BoxLoop2End(xi1, xi2);
   }
   hypre_EndTiming(time_index);

   /* Time BoxLoop3 */
   time_index = hypre_InitializeTiming("BoxLoop3");
   hypre_BeginTiming(time_index);
   for (rep = 0; rep < reps; rep++)
   {
      zypre_BoxLoop3Begin(dim, loop_size,
                          x1_data_box, start, unit_stride, xi1,
                          x2_data_box, start, unit_stride, xi2,
                          x3_data_box, start, unit_stride, xi3);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(ZYPRE_BOX_PRIVATE) HYPRE_SMP_SCHEDULE
#endif
      zypre_BoxLoop3For(xi1, xi2, xi3)
      {
         xp1[xi1] += xp1[xi1] + xp2[xi2] + xp3[xi3];
      }
      zypre_BoxLoop3End(xi1, xi2, xi3);
   }
   hypre_EndTiming(time_index);

   /* Time BoxLoop4 */
   time_index = hypre_InitializeTiming("BoxLoop4");
   hypre_BeginTiming(time_index);
   for (rep = 0; rep < reps; rep++)
   {
      zypre_BoxLoop4Begin(dim, loop_size,
                          x1_data_box, start, unit_stride, xi1,
                          x2_data_box, start, unit_stride, xi2,
                          x3_data_box, start, unit_stride, xi3,
                          x4_data_box, start, unit_stride, xi4);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(ZYPRE_BOX_PRIVATE) HYPRE_SMP_SCHEDULE
#endif
      zypre_BoxLoop4For(xi1, xi2, xi3, xi4)
      {
         xp1[xi1] += xp1[xi1] + xp2[xi2] + xp3[xi3] + xp4[xi4];
      }
      zypre_BoxLoop4End(xi1, xi2, xi3, xi4);
   }
   hypre_EndTiming(time_index);

   hypre_PrintTiming("New BoxLoop times [HOST]", hypre_MPI_COMM_WORLD);
   hypre_FinalizeAllTimings();
   hypre_ClearTiming();

   /*-----------------------------------------------------------
    * Reduction Loops
    *-----------------------------------------------------------*/

   /* Seed xp1/xp2 with nonzero values so 1/x and products are meaningful,
      then mirror them to the device. */
   {
      HYPRE_Int i;
      for (i = 0; i < size; i++)
      {
         xp1[i] = cos(i + 1.0);
         xp2[i] = sin(i + 2.0);
      }
      hypre_TMemcpy(d_xp1, xp1, HYPRE_Real, size, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST);
      hypre_TMemcpy(d_xp2, xp2, HYPRE_Real, size, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST);
   }

   /* The reducer's type depends on the backend: a plain scalar for
      Kokkos/host, a ReduceSum object for RAJA/CUDA/HIP. */
#if defined(HYPRE_USING_KOKKOS)
   HYPRE_Real reducer = 0.0;
#elif defined(HYPRE_USING_RAJA)
   ReduceSum<hypre_raja_reduce_policy, HYPRE_Real> reducer(0.0);
#elif defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   ReduceSum<HYPRE_Real> reducer(0.0);
#else
   HYPRE_Real reducer = 0.0;
#endif

   HYPRE_Real box_sum1 = 0.0, box_sum2 = 0.0;

/* Redefine the reduction clause the device BoxLoops splice into their
   OpenMP pragma so it targets our local 'reducer'. */
#undef HYPRE_BOX_REDUCTION
#if defined(HYPRE_USING_DEVICE_OPENMP)
#define HYPRE_BOX_REDUCTION map(tofrom:reducer) reduction(+:reducer)
#else
#define HYPRE_BOX_REDUCTION reduction(+:reducer)
#endif

   /*-----------------------------------------------------------
    * Time old boxloops [Device]
    *-----------------------------------------------------------*/

   /* Time BoxLoop1Reduction */
   time_index = hypre_InitializeTiming("BoxLoopReduction1");
   hypre_BeginTiming(time_index);
   for (rep = 0; rep < reps; rep++)
   {
      reducer = 0.0;
#define DEVICE_VAR is_device_ptr(d_xp1)
      hypre_BoxLoop1ReductionBegin(3, loop_size,
                                   x1_data_box, start, unit_stride, xi1,
                                   reducer);
      {
         reducer += 1.0 / d_xp1[xi1];
      }
      hypre_BoxLoop1ReductionEnd(xi1, reducer);
#undef DEVICE_VAR
      box_sum1 += (HYPRE_Real) reducer;
   }
   hypre_EndTiming(time_index);

   /* Time BoxLoop2Reduction */
   time_index = hypre_InitializeTiming("BoxLoopReduction2");
   hypre_BeginTiming(time_index);
   for (rep = 0; rep < reps; rep++)
   {
      reducer = 0.0;
#define DEVICE_VAR is_device_ptr(d_xp1,d_xp2)
      hypre_BoxLoop2ReductionBegin(3, loop_size,
                                   x1_data_box, start, unit_stride, xi1,
                                   x2_data_box, start, unit_stride, xi2,
                                   reducer);
      {
         reducer += 1.0 / d_xp1[xi1] + d_xp2[xi2] * 3.1415926;
      }
      hypre_BoxLoop2ReductionEnd(xi1, xi2, reducer);
#undef DEVICE_VAR
      box_sum2 += (HYPRE_Real) reducer;
   }
   hypre_EndTiming(time_index);

   hypre_PrintTiming("New BoxLoopReduction times [DEVICE]", hypre_MPI_COMM_WORLD);
   hypre_FinalizeAllTimings();
   hypre_ClearTiming();

   /*-----------------------------------------------------------
    * Time new boxloops [Host]
    *-----------------------------------------------------------*/

   HYPRE_Real zbox_sum1 = 0.0, zbox_sum2 = 0.0;

   /* Time BoxLoop1 */
   time_index = hypre_InitializeTiming("BoxLoopReduction1");
   hypre_BeginTiming(time_index);
   for (rep = 0; rep < reps; rep++)
   {
      zypre_BoxLoop1Begin(dim, loop_size,
                          x1_data_box, start, unit_stride, xi1);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(ZYPRE_BOX_PRIVATE) HYPRE_SMP_SCHEDULE reduction(+:zbox_sum1)
#endif
      zypre_BoxLoop1For(xi1)
      {
         zbox_sum1 += 1.0 / xp1[xi1];
      }
      zypre_BoxLoop1End(xi1);
   }
   hypre_EndTiming(time_index);

   /* Time BoxLoop2 */
   time_index = hypre_InitializeTiming("BoxLoopReduction2");
   hypre_BeginTiming(time_index);
   for (rep = 0; rep < reps; rep++)
   {
      zypre_BoxLoop2Begin(dim, loop_size,
                          x1_data_box, start, unit_stride, xi1,
                          x2_data_box, start, unit_stride, xi2);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(ZYPRE_BOX_PRIVATE) HYPRE_SMP_SCHEDULE reduction(+:zbox_sum2)
#endif
      zypre_BoxLoop2For(xi1, xi2)
      {
         zbox_sum2 += 1.0 / xp1[xi1] + xp2[xi2] * 3.1415926;
      }
      zypre_BoxLoop2End(xi1, xi2);
   }
   hypre_EndTiming(time_index);

   hypre_PrintTiming("New BoxLoopReduction times [HOST]", hypre_MPI_COMM_WORLD);
   hypre_FinalizeAllTimings();
   hypre_ClearTiming();

   /* Relative difference between host and device reductions; note the host
      sums accumulate over 'reps' iterations, matching box_sum1/2 above. */
   hypre_printf("BoxLoopReduction1, error %e\n",
                hypre_abs((zbox_sum1 - box_sum1) / zbox_sum1));
   hypre_printf("BoxLoopReduction2, error %e\n",
                hypre_abs((zbox_sum2 - box_sum2) / zbox_sum2));

   /*-----------------------------------------------------------
    * Finalize things
    *-----------------------------------------------------------*/

   hypre_BoxDestroy(x1_data_box);
   hypre_BoxDestroy(x2_data_box);
   hypre_BoxDestroy(x3_data_box);
   hypre_BoxDestroy(x4_data_box);

   hypre_TFree(xp1, HYPRE_MEMORY_HOST);
   hypre_TFree(xp2, HYPRE_MEMORY_HOST);
   hypre_TFree(xp3, HYPRE_MEMORY_HOST);
   hypre_TFree(xp4, HYPRE_MEMORY_HOST);
   hypre_TFree(d_xp1, HYPRE_MEMORY_DEVICE);
   hypre_TFree(d_xp2, HYPRE_MEMORY_DEVICE);
   hypre_TFree(d_xp3, HYPRE_MEMORY_DEVICE);
   hypre_TFree(d_xp4, HYPRE_MEMORY_DEVICE);

#if defined(HYPRE_USING_KOKKOS)
   Kokkos::finalize ();
#endif

   HYPRE_Finalize();

   /* Finalize MPI */
   hypre_MPI_Finalize();

   return (0);
}
bml_import_ellsort_typed.c
#include "../../macros.h" #include "../../typed.h" #include "../bml_allocate.h" #include "../bml_logger.h" #include "../bml_types.h" #include "bml_allocate_ellsort.h" #include "bml_import_ellsort.h" #include "bml_types_ellsort.h" #include <complex.h> #include <math.h> #include <stdlib.h> #include <string.h> #ifdef _OPENMP #include <omp.h> #endif /** Convert a dense matrix into a bml matrix. * * \ingroup convert_group * * \param N The number of rows/columns * \param matrix_precision The real precision * \param A The dense matrix * \return The bml matrix */ bml_matrix_ellsort_t *TYPED_FUNC( bml_import_from_dense_ellsort) ( bml_dense_order_t order, int N, void *A, double threshold, int M, bml_distribution_mode_t distrib_mode) { bml_matrix_ellsort_t *A_bml = TYPED_FUNC(bml_zero_matrix_ellsort) (N, M, distrib_mode); int *A_index = A_bml->index; int *A_nnz = A_bml->nnz; REAL_T *dense_A = (REAL_T *) A; REAL_T *A_value = A_bml->value; #pragma omp parallel for shared(A_value, A_index, A_nnz, dense_A) for (int i = 0; i < N; i++) { A_nnz[i] = 0; for (int j = 0; j < N; j++) { REAL_T A_ij; switch (order) { case dense_row_major: A_ij = dense_A[ROWMAJOR(i, j, N, N)]; break; case dense_column_major: A_ij = dense_A[COLMAJOR(i, j, N, N)]; break; default: LOG_ERROR("unknown order\n"); break; } if (is_above_threshold(A_ij, threshold)) { A_value[ROWMAJOR(i, A_nnz[i], N, M)] = A_ij; A_index[ROWMAJOR(i, A_nnz[i], N, M)] = j; A_nnz[i]++; } } } return A_bml; }
GB_binop__eq_uint32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__eq_uint32) // A.*B function (eWiseMult): GB (_AemultB) // A.*B function (eWiseMult): GB (_AemultB_02__eq_uint32) // A.*B function (eWiseMult): GB (_AemultB_03__eq_uint32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__eq_uint32) // A*D function (colscale): GB (_AxD__eq_uint32) // D*A function (rowscale): GB (_DxB__eq_uint32) // C+=B function (dense accum): GB (_Cdense_accumB__eq_uint32) // C+=b function (dense accum): GB (_Cdense_accumb__eq_uint32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__eq_uint32) // C=scalar+B GB (_bind1st__eq_uint32) // C=scalar+B' GB (_bind1st_tran__eq_uint32) // C=A+scalar GB (_bind2nd__eq_uint32) // C=A'+scalar GB (_bind2nd_tran__eq_uint32) // C type: bool // A type: uint32_t // B,b type: uint32_t // BinaryOp: cij = (aij == bij) #define GB_ATYPE \ uint32_t #define GB_BTYPE \ uint32_t #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] 
#define GB_GETA(aij,Ax,pA) \ uint32_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ uint32_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (x == y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_EQ || GxB_NO_UINT32 || GxB_NO_EQ_UINT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
// C += A+B, all 3 dense: not generated for EQ — the op must be MIN, MAX,
// PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV, hence the enclosing "#if 0".
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__eq_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    // GB_DISABLE is set when this op/type was compiled out via GB_control.h
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__eq_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // body disabled for EQ: C and B do not have the same type, so the
    // accumulation template does not apply (stub returns success)
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__eq_uint32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // body disabled for EQ (see _Cdense_accumB above)
    #if 0
    {
        // get the scalar b for C += b, of type uint32_t
        uint32_t bwork = (*((uint32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__eq_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__eq_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__eq_uint32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspaces declared here are freed by GB_FREE_WORK below
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_01__eq_uint32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__eq_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // GB_BINOP_FLIP is 0 for EQ (commutative), so only the second branch
    // below is compiled for this kernel.
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_03__eq_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__eq_uint32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__eq_uint32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    uint32_t   x = (*((uint32_t *) x_input)) ;
    uint32_t *Bx = (uint32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // Bb is the bitmap of B; skip entries not present
        if (!GBB (Bb, p)) continue ;
        uint32_t bij = Bx [p] ;
        Cx [p] = (x == bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__eq_uint32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    bool *Cx = (bool *) Cx_output ;
    uint32_t *Ax = (uint32_t *) Ax_input ;
    uint32_t   y = (*((uint32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // Ab is the bitmap of A; skip entries not present
        if (!GBB (Ab, p)) continue ;
        uint32_t aij = Ax [p] ;
        Cx [p] = (aij == y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    uint32_t aij = Ax [pA] ;        \
    Cx [pC] = (x == aij) ;          \
}

GrB_Info GB (_bind1st_tran__eq_uint32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t x = (*((const uint32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows this function
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint32_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    uint32_t aij = Ax [pA] ;        \
    Cx [pC] = (aij == y) ;          \
}

GrB_Info GB (_bind2nd_tran__eq_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t y = (*((const uint32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_binop__le_int16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__le_int16) // A.*B function (eWiseMult): GB (_AemultB_08__le_int16) // A.*B function (eWiseMult): GB (_AemultB_02__le_int16) // A.*B function (eWiseMult): GB (_AemultB_04__le_int16) // A.*B function (eWiseMult): GB (_AemultB_bitmap__le_int16) // A*D function (colscale): GB (_AxD__le_int16) // D*A function (rowscale): GB (_DxB__le_int16) // C+=B function (dense accum): GB (_Cdense_accumB__le_int16) // C+=b function (dense accum): GB (_Cdense_accumb__le_int16) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__le_int16) // C=scalar+B GB (_bind1st__le_int16) // C=scalar+B' GB (_bind1st_tran__le_int16) // C=A+scalar GB (_bind2nd__le_int16) // C=A'+scalar GB (_bind2nd_tran__le_int16) // C type: bool // A type: int16_t // A pattern? 0 // B type: int16_t // B pattern? 
0

// NOTE(review): auto-generated SuiteSparse:GraphBLAS kernel file for the
// boolean-valued comparator z = (x <= y) on int16 inputs.  Do not hand-edit;
// regenerate instead.  The bare "0" above completes a #define whose first
// line lies before this chunk (backslash continuation), so nothing may be
// inserted before it.

// BinaryOp: cij = (aij <= bij)

#define GB_ATYPE \
    int16_t

#define GB_BTYPE \
    int16_t

#define GB_CTYPE \
    bool

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    0

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    0

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    int16_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    int16_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    bool t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x <= y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LE || GxB_NO_INT16 || GxB_NO_LE_INT16)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__le_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__le_int16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // comparator ops cannot be used as accumulators here; the template is
    // compiled out (#if 0) and the kernel is a no-op that reports success
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__le_int16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        // get the scalar b for C += b, of type int16_t
        int16_t bwork = (*((int16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__le_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__le_int16)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__le_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // for eWiseUnion the alpha/beta scalars substitute for entries present in
    // only one of A or B; they are only read when is_eWiseUnion is true
    int16_t alpha_scalar ;
    int16_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((int16_t *) alpha_scalar_in)) ;
        beta_scalar = (*((int16_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__le_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__le_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__le_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__le_int16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__le_int16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    int16_t x = (*((int16_t *) x_input)) ;
    int16_t *Bx = (int16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // GBB skips entries absent from the bitmap; GBX reads Bx [p]
        // (presumably honoring iso storage -- confirm against GB.h)
        if (!GBB (Bb, p)) continue ;
        int16_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x <= bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__le_int16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    bool *Cx = (bool *) Cx_output ;
    int16_t *Ax = (int16_t *) Ax_input ;
    int16_t y = (*((int16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        int16_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij <= y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int16_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = (x <= aij) ;                      \
}

GrB_Info GB (_bind1st_tran__le_int16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t x = (*((const int16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code following this function
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int16_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int16_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = (aij <= y) ;                      \
}

GrB_Info GB (_bind2nd_tran__le_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t y = (*((const int16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
PoW.c
// Copyright (c) 2016-2018 Ulord Foundation Ltd.

// Memory-hard proof-of-work hash: fills a large working memory from the
// message, scrambles it with seeded LCG streams, then folds it down to a
// 32-byte digest.  Relies on project helpers reduce_bit (bit-fold a buffer
// into a smaller width), rrs (rotate-right-shift of a byte buffer), and the
// funcInfor[] table of one-way hash functions.

#include "PoW.h"
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <stdlib.h>
#include <assert.h>
#ifndef MAC_OSX
#include <omp.h>
#endif
#include "my_time.h"
#include "common.h"
#include "my_rand48_r.h"
#include "oneWayFunction.h"

// #define SSE_VERSION

/*
 * Step 1: Initialize working memory.
 *
 * input/inputLen: message to absorb.  Maddr: WORK_MEMORY_SIZE-byte work
 * buffer, filled 32 bytes per iteration.  K: reseed period -- every K-th
 * row (i % K == 0) the state `a` is re-hashed and the four 48-bit LCG
 * streams are reseeded from it; other rows are filled from the streams.
 * NOTE(review): code mixes the literal 32 with OUTPUT_LEN; it appears to
 * assume OUTPUT_LEN == 32 -- confirm in PoW.h.
 */
void initWorkMemory(uint8_t *input, uint32_t inputLen, uint8_t *Maddr, const uint32_t K)
{
    uint32_t i, j;
    uint8_t a[OUTPUT_LEN], b[OUTPUT_LEN];
    // a = H0(input): initial chaining state
    funcInfor[0].func(input, inputLen, a);
    uint64_t randSeed[4] = {0, 0, 0, 0};
#ifndef SSE_VERSION
    struct my_rand48_data randBuffer[4];
#else
    struct vrand48_data randBuffer[2];
#endif
    // one 32-byte row per iteration
    const uint32_t iterNum = WORK_MEMORY_SIZE >> 5;
    for (i = 0; i < iterNum; ++i) {
        if (i % K) {
            // PRNG-filled row: 4 streams x 8 bytes, rotated by a digest of i
#ifndef SSE_VERSION
            uint64_t num = 0;
            for (j = 0; j < 4; ++j) {
                my_rand64_r(&randBuffer[j], &num);
                memcpy(b + (j << 3), (uint8_t *)&num, 8*sizeof(uint8_t));
            }
#else
            vrand64(b, randBuffer);
#endif
            uint8_t shift_num;
            uint8_t result[OUTPUT_LEN];
            reduce_bit((uint8_t *)&i, 4, (uint8_t *)&shift_num, 8);
            rrs(b, OUTPUT_LEN, result, shift_num);
            memcpy(Maddr + (i << 5), result, OUTPUT_LEN*sizeof(uint8_t));
            // fold the row back into the running state
            for (j = 0; j < 32; ++j) {
                a[j] ^= result[j];
            }
        } else {
            // reseed row: pick a hash function via a 4-bit fold of `a`,
            // re-hash the rotated state, and reseed the four LCG streams
            uint8_t t = 0, shift_num = 0;
            reduce_bit(a, 32, (uint8_t *)&t, 8);
            t = (t & 0x0f) ^ (t >> 4);
            reduce_bit((uint8_t *)&i, 4, (uint8_t *)&shift_num, 8);
            uint8_t a_rrs[INPUT_LEN];
            rrs(a, OUTPUT_LEN, a_rrs, shift_num);
            funcInfor[t].func(a_rrs, 32, a);
            // 48-bit seeds, one per 8-byte quarter of the digest
            reduce_bit(a, 8, (uint8_t *)&randSeed[0], 48);
            reduce_bit(a + 8, 8, (uint8_t *)&randSeed[1], 48);
            reduce_bit(a + 16, 8, (uint8_t *)&randSeed[2], 48);
            reduce_bit(a + 24, 8, (uint8_t *)&randSeed[3], 48);
#ifndef SSE_VERSION
            my_seed48_r(randSeed[0], &randBuffer[0]);
            my_seed48_r(randSeed[1], &randBuffer[1]);
            my_seed48_r(randSeed[2], &randBuffer[2]);
            my_seed48_r(randSeed[3], &randBuffer[3]);
#else
            vseed48(randSeed , &randBuffer[0]);
            vseed48(randSeed + 2, &randBuffer[1]);
#endif
            memcpy(Maddr + (i << 5), a, 32*sizeof(uint8_t));
        }
    }
}

/*
 * Step 2: Modify the working memory contents.
 *
 * Performs C rounds; each round does L*64 random pair-swaps (XORed with a
 * state byte) at data-dependent addresses, then re-hashes the swap trace
 * into `a` and XORs it into `result` (OUTPUT_LEN bytes, also the return
 * digest).  The data-dependent addressing is what makes the function
 * memory-hard: each access depends on all previous ones via `r`.
 */
void modifyWorkMemory(uint8_t *Maddr, const uint32_t L, const uint32_t C, uint8_t *result)
{
    uint32_t i, j;
    uint8_t a[OUTPUT_LEN], b[64];
    // seed the round state from the tail of the working memory
    funcInfor[0].func(Maddr + WORK_MEMORY_SIZE - 32, 32, a);
    memcpy(result, a, OUTPUT_LEN*sizeof(uint8_t));
    uint64_t r = 0;
    reduce_bit(a, 32, (uint8_t *)&r, 64);
    const uint32_t iterNum = L << 6;
    for (i = 0; i < C; ++i) {
        uint64_t randSeed = 0;
        reduce_bit(a, 32, (uint8_t *)&randSeed, 48);
        struct my_rand48_data randBuffer;
        my_seed48_r(randSeed, &randBuffer);
        uint8_t t1, t2, s;
        uint64_t randNum = 0, base = 0;
        for (j = 0; j < iterNum; ++j) {
            my_rand48_r(&randBuffer, &randNum);
            base = randNum + r;
            uint64_t offset = 0;
            reduce_bit((uint8_t *)&r, 8, (uint8_t *)&offset, 8);
            // offset in [1, 65281], always odd distance from base
            offset = (offset << 8) + 1;
            uint64_t addr1 = (base + WORK_MEMORY_SIZE - offset) % WORK_MEMORY_SIZE;
            uint64_t addr2 = (base + offset) % WORK_MEMORY_SIZE;
            t1 = Maddr[addr1];
            t2 = Maddr[addr2];
            s = a[j & 0x1f];
            // swap the two bytes, masked with a state byte
            Maddr[addr1] = t2 ^ s;
            Maddr[addr2] = t1 ^ s;
            b[j & 0x3f] = t1 ^ t2;
            r = r + s + t1 + t2;
        }
        // fold the swap trace back into the state and the output digest
        uint8_t t = 0;
        reduce_bit((uint8_t *)&r, 8, (uint8_t *)&t, 8);
        t = (t & 0x0f) ^ (t >> 4);
        reduce_bit(b, 64, a, 256);
        uint8_t shift_num = 0;
        uint64_t ir = r + i;
        reduce_bit((uint8_t *)&ir, 8, (uint8_t *)&shift_num, 8);
        uint8_t a_rrs[INPUT_LEN];
        rrs(a, OUTPUT_LEN, a_rrs, shift_num);
        funcInfor[t].func(a_rrs, 32, a);
        for (j = 0; j < OUTPUT_LEN; ++j) {
            result[j] ^= a[j];
        }
    }
}

/*
 * Step 3: Calculate the final result.
 *
 * Sweeps the whole working memory, XOR-folding a variable number of 32-byte
 * rows (1..2^D, chosen by a D-bit fold of the current digest) into `result`
 * per round, then rotating and re-hashing.  Terminates with a final hash by
 * funcInfor[0] once the second-to-last row is reached.
 * c: 32-byte digest from step 2.  D: width of the per-round row count.
 */
void calculateFinalResult(uint8_t *Maddr, uint8_t *c, const uint32_t D, uint8_t *result)
{
    uint32_t i = 0, j = 0, k = 0;
    memcpy(result, c, OUTPUT_LEN*sizeof(uint8_t));
    // stop one row early; the last row already seeded step 2
    const uint32_t num = (WORK_MEMORY_SIZE >> 5) - 1;
    uint32_t it = 0;
    uint8_t result_rrs[OUTPUT_LEN];
    while(1) {
        uint8_t t = 0, shift_num = 0;
        uint32_t d = 0;
        reduce_bit(result, 32, (uint8_t *)&t, 8);
        t = (t & 0x0f) ^ (t >> 4);
        reduce_bit(result, 32, (uint8_t *)&d, D);
        ++d;
        for (j = 0; j < d; ++j) {
            uint32_t index = i << 5;
            for (k = 0; k < 32; ++k) {
                result[k] ^= Maddr[index + k];
            }
            ++i;
            if (i == num) {
                // final mixing: rotate then hash with the fixed function 0
                it = i + t;
                reduce_bit((uint8_t *)&it, 4, (uint8_t *)&shift_num, 8);
                rrs(result, OUTPUT_LEN, result_rrs, shift_num);
                funcInfor[0].func(result_rrs, 32, result);
                return;
            }
        }
        it = t + i;
        reduce_bit((uint8_t *)&it, 4, (uint8_t *)&shift_num, 8);
        rrs(result, OUTPUT_LEN, result_rrs, shift_num);
        funcInfor[t].func(result_rrs, 32, result);
    }
}

/*
 * Correctness & Performance test for Proof of work
 *
 * Hashes `mess` once and prints the digest.  Allocates 64 working-memory
 * regions because the (currently commented-out) performance test indexes
 * Maddr by omp_get_thread_num() for up to 64 threads.
 */
void testPowFunction(uint8_t *mess, uint32_t messLen, const int64_t iterNum)
{
    int64_t j;
    uint32_t inputLen = messLen;
    uint8_t input[INPUT_LEN], output[OUTPUT_LEN];
    memset(input, 0, INPUT_LEN*sizeof(uint8_t));
    // NOTE(review): no check that messLen <= INPUT_LEN before this copy
    memcpy(input, mess, messLen*sizeof(char));
    // Init all one-way function
    initOneWayFunction();
    uint8_t *Maddr = (uint8_t *)malloc(64 * WORK_MEMORY_SIZE*sizeof(uint8_t));
    assert(NULL != Maddr);
    memset(Maddr, 0, 64 * WORK_MEMORY_SIZE*sizeof(uint8_t));
    printf("****************************** Correctness test (PoW function) ******************************\n");
    printf("Test message: %s\n", mess);
    powFunction(input, inputLen, Maddr, output);
    view_data_u8("PoW", output, OUTPUT_LEN);
    printf("*********************************************************************************************\n");
    /*
    printf("*************************************************** Performance test (PoW function) ***************************************************\n");
    uint8_t *result = (uint8_t *)malloc(iterNum * OUTPUT_LEN * sizeof(uint8_t));
    assert(NULL != result);
    memset(result, 0, iterNum * OUTPUT_LEN * sizeof(uint8_t));
    uint32_t threadNumArr[] = {1, 4, 8, 12, 16, 20, 24, 32, 48, 64};
    uint32_t threadNumTypes = sizeof(threadNumArr) / sizeof(uint32_t);
    printf(" %-18s", "Algorithm");
    for (uint32_t ix = 0; ix < threadNumTypes; ++ix)
        printf("%12d", threadNumArr[ix]);
    printf("\n");
    printf("00 %-18s\t", "PoW");
    for (uint32_t ix = 0; ix < threadNumTypes; ++ix) {
        omp_set_num_threads(threadNumArr[ix]);
        double startTime = get_wall_time();
        if (threadNumArr[ix] == 1) {
            for (j = 0; j < iterNum; ++j) {
                powFunction(input, inputLen, Maddr, result + j * OUTPUT_LEN);
            }
        } else {
            #pragma omp parallel for firstprivate(input), private(j) shared(result)
            for (j = 0; j < iterNum; ++j) {
                powFunction(input, inputLen, Maddr + omp_get_thread_num() * WORK_MEMORY_SIZE, result + j * OUTPUT_LEN);
            }
        }
        double endTime = get_wall_time();
        double costTime = endTime - startTime;
        printf("%5.0f bps ", iterNum / costTime);
        fflush(stdout);
        // Check result
        for (j = 0; j < iterNum; j += 1) {
            if (memcmp(output, result + j * OUTPUT_LEN, OUTPUT_LEN)) {
                printf("Thread num: %d, j: %ld\n", threadNumArr[ix], j);
                view_data_u8("output", output, OUTPUT_LEN);
                view_data_u8("result", result + j * OUTPUT_LEN, OUTPUT_LEN);
                abort();
            }
        }
    }
    printf("\n");
    printf("***************************************************************************************************************************************\n");
    if (NULL != result) {
        free(result);
        result = NULL;
    }
    */
    if (NULL != Maddr) {
        free(Maddr);
        Maddr = NULL;
    }
}

#define OUTPUT_BUFFER_SIZE (32 * 1024UL * 1024UL)
#define MAX_TEST_INPUT_LEN 140
#define MAX_OUT_FILE_NAME_LEN 25

const char testInputCase[][MAX_TEST_INPUT_LEN] = {
    "",
    "HelloWorld",
    "0123456789"
};

/*
 * NIST-style statistical test driver: for each canned input, chains the PoW
 * hash iterNum times (each output feeds the next input) and writes the raw
 * output stream to "<outFileName>-<ix>.txt".
 */
void powNistTest(const char *outFileName)
{
    const uint64_t iterNum = 1024UL * 1024UL;
    // const uint64_t iterNum = 1024UL;
    uint8_t *outputBuffer = (uint8_t *)malloc(OUTPUT_BUFFER_SIZE * sizeof(uint8_t));
    assert(NULL != outputBuffer);
    memset(outputBuffer, 0, OUTPUT_BUFFER_SIZE * sizeof(uint8_t));
    uint8_t *Maddr = (uint8_t *)malloc(WORK_MEMORY_SIZE*sizeof(uint8_t));
    assert(NULL != Maddr);
    memset(Maddr, 0, WORK_MEMORY_SIZE*sizeof(uint8_t));
    initOneWayFunction();
    uint32_t testInputCaseNum = sizeof(testInputCase) / sizeof(const char [MAX_TEST_INPUT_LEN]);
    for (uint32_t testCaseIx = 0; testCaseIx < testInputCaseNum; ++testCaseIx) {
        char curOutFileName[MAX_OUT_FILE_NAME_LEN] = "";
        sprintf(curOutFileName, "%s-%u.txt", outFileName, testCaseIx);
        FILE *fp = NULL;
        if (NULL != (fp = fopen(curOutFileName, "wb"))) {
            const uint32_t testInputCaseLen = strlen((char *)testInputCase[testCaseIx]);
            uint8_t input[MAX_TEST_INPUT_LEN];
            memset(input, 0, MAX_TEST_INPUT_LEN*sizeof(uint8_t));
            memcpy(input, testInputCase[testCaseIx], testInputCaseLen*sizeof(uint8_t));
            double startTime = get_wall_time();
            powFunction(input, testInputCaseLen, Maddr, outputBuffer);
            for (uint64_t i = 1, j = 0; i < iterNum; ++i) {
                // NOTE(review): copies OUTPUT_LEN * 4 bytes (sizeof(uint32_t))
                // although the chained input is OUTPUT_LEN bytes -- almost
                // certainly meant sizeof(uint8_t); confirm intended behavior
                // before changing, since it alters every chained digest.
                memcpy(input, outputBuffer + j, OUTPUT_LEN * sizeof(uint32_t));
                j += OUTPUT_LEN;
                powFunction(input, OUTPUT_LEN, Maddr, outputBuffer + j);
                /*
                if (j == OUTPUT_BUFFER_SIZE) {
                    fwrite(outputBuffer, sizeof(uint8_t), OUTPUT_BUFFER_SIZE / sizeof(uint8_t), fp);
                    j = 0;
                }
                */
            }
            double endTime = get_wall_time();
            double costTime = endTime - startTime;
            // NOTE(review): %llu with uint64_t is only portable via PRIu64
            fprintf(stdout, "TestCaseIx: %d, Input: %s, IterNum: %llu, Time: %4.2f, Performance: %5.2f bps\n", testCaseIx, \
                testInputCase[testCaseIx], iterNum, costTime, ((double)(iterNum * OUTPUT_LEN)) / costTime);
            fflush(stdout);
            fwrite(outputBuffer, sizeof(uint8_t), OUTPUT_BUFFER_SIZE / sizeof(uint8_t), fp);
            fclose(fp);
        } else {
            fprintf(stderr, "Error: Open %s failed!\n", curOutFileName);
            abort();
        }
    }
    if (NULL != outputBuffer) {
        free(outputBuffer);
        outputBuffer = NULL;
    }
    if (NULL != Maddr) {
        free(Maddr);
        Maddr = NULL;
    }
}

/*
 * Convenience wrapper: hash one INPUT_LEN-byte message into output.
 * Allocates (and frees) a fresh working memory per call.
 */
void helloHash(const uint8_t *mess, uint32_t messLen, uint8_t output[OUTPUT_LEN])
{
    if(messLen != INPUT_LEN) {
        //won't get in
        printf("helloHash:Invalid message length %d\n", messLen);
        return;
    }
    int64_t j;
    uint32_t inputLen =messLen;
    uint8_t input[INPUT_LEN];
    memset(input, 0, INPUT_LEN*sizeof(uint8_t));
    memcpy(input, mess, inputLen*sizeof(char)); //operation: input
    uint8_t *Maddr = (uint8_t *)malloc(WORK_MEMORY_SIZE*sizeof(uint8_t)); //1024*1024*1
    assert(NULL != Maddr);
    memset(Maddr, 0, WORK_MEMORY_SIZE*sizeof(uint8_t));
    //printf("Test message: %s\n", mess);
    powFunction(input, inputLen,Maddr, output);
    //view_data_u8("PoW", output, OUTPUT_LEN); //output
    if (NULL != Maddr) {
        free(Maddr);
        Maddr = NULL;
    }
}

/*
 * 64-bit PRNG step: advances the 48-bit LCG twice and combines the two
 * states (second state shifted left 16 and XORed in) into one 64-bit value.
 */
int my_rand64_r (struct my_rand48_data *buffer, uint64_t *result)
{
    uint64_t X = buffer->__x;
    X = (X * buffer->__a + buffer->__c) & 0xffffffffffffULL;
    buffer->__x = X;
    buffer->__x = (X * buffer->__a + buffer->__c) & 0xffffffffffffULL;
    X ^= buffer->__x << 16;
    *result = X;
    return 0;
}

/*
 * Seed the LCG state; 0x5DEECE66D / 0xB are the standard POSIX drand48
 * multiplier and addend.
 */
int my_seed48_r (uint64_t seedval, struct my_rand48_data *buffer)
{
    buffer->__x = seedval & 0xffffffffffffULL;
    buffer->__a = 0x5deece66dULL;
    buffer->__c = 0xb;
    return 0;
}

/*
 * Full PoW pipeline over one message: init (reseed period 128), scramble
 * (L=4, C=WORK_MEMORY_SIZE/2048 rounds), then final fold (D=8 bits).
 * Maddr must be at least WORK_MEMORY_SIZE bytes; output is OUTPUT_LEN bytes.
 */
void powFunction(uint8_t *input, uint32_t inputLen, uint8_t *Maddr, uint8_t *output)
{
    uint8_t c[OUTPUT_LEN];
    // Step 1: Initialize working memory.
    initWorkMemory(input, inputLen, Maddr, 128);
    // view_data_u8("Maddr", Maddr, OUTPUT_LEN);
    // Step 2: Modify the working memory contents.
    modifyWorkMemory(Maddr, 4, WORK_MEMORY_SIZE >> 11, c);
    // view_data_u8("c", c, OUTPUT_LEN);
    // Step 3: Calculate the final result.
    calculateFinalResult(Maddr, c, 8, output);
    // view_data_u8("output", output, OUTPUT_LEN);
}

/*
 * Single 48-bit LCG step; result is the new 48-bit state.
 */
int my_rand48_r (struct my_rand48_data *buffer, uint64_t *result)
{
    *result = (buffer->__x * buffer->__a + buffer->__c) & 0xffffffffffffULL;
    buffer->__x = *result;
    return 0;
}
GB_unaryop__minv_bool_fp64.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__minv_bool_fp64 // op(A') function: GB_tran__minv_bool_fp64 // C type: bool // A type: double // cast: ; // unaryop: cij = true #define GB_ATYPE \ double #define GB_CTYPE \ bool // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ ; #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = true ; // casting #define GB_CASTING(z, x) \ ; ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINV || GxB_NO_BOOL || GxB_NO_FP64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__minv_bool_fp64 ( bool *restrict Cx, const double *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__minv_bool_fp64 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
convolution_pack4_bf16s.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Repack float32 convolution weights into the pack-4 bf16 layout consumed by
// convolution_pack4_bf16s_neon below.  num_input/num_output are assumed to be
// multiples of 4 (the loops step by 4 and drop any remainder).
static void convolution_transform_kernel_pack4_bf16s_neon(const Mat& weight_data, Mat& weight_data_bf16, int num_input, int num_output, int kernel_w, int kernel_h)
{
    const int maxk = kernel_w * kernel_h;

    // src = kw-kh-inch-outch
    // dst = 4b-4a-kw-kh-inch/4a-outch/4b
    Mat weight_data_r2 = weight_data.reshape(maxk, num_input, num_output);

    // elemsize 2 bytes (bf16) x elempack 16 (4 in-channels x 4 out-channels)
    weight_data_bf16.create(maxk, num_input / 4, num_output / 4, (size_t)2 * 16, 16);

    for (int q = 0; q + 3 < num_output; q += 4)
    {
        const Mat k0 = weight_data_r2.channel(q);
        const Mat k1 = weight_data_r2.channel(q + 1);
        const Mat k2 = weight_data_r2.channel(q + 2);
        const Mat k3 = weight_data_r2.channel(q + 3);

        Mat g0 = weight_data_bf16.channel(q / 4);

        for (int p = 0; p + 3 < num_input; p += 4)
        {
            // kXY = row for out-channel q+X, in-channel p+Y
            const float* k00 = k0.row(p);
            const float* k01 = k0.row(p + 1);
            const float* k02 = k0.row(p + 2);
            const float* k03 = k0.row(p + 3);

            const float* k10 = k1.row(p);
            const float* k11 = k1.row(p + 1);
            const float* k12 = k1.row(p + 2);
            const float* k13 = k1.row(p + 3);

            const float* k20 = k2.row(p);
            const float* k21 = k2.row(p + 1);
            const float* k22 = k2.row(p + 2);
            const float* k23 = k2.row(p + 3);

            const float* k30 = k3.row(p);
            const float* k31 = k3.row(p + 1);
            const float* k32 = k3.row(p + 2);
            const float* k33 = k3.row(p + 3);

            unsigned short* g00 = g0.row<unsigned short>(p / 4);

            for (int k = 0; k < maxk; k++)
            {
                // emit a 16-weight tile per kernel tap: out-channel varies
                // fastest, then in-channel -- matching the 4 vld1_u16 loads
                // of _w0.._w3 in the compute kernel
                g00[0] = float32_to_bfloat16(k00[k]);
                g00[1] = float32_to_bfloat16(k10[k]);
                g00[2] = float32_to_bfloat16(k20[k]);
                g00[3] = float32_to_bfloat16(k30[k]);

                g00[4] = float32_to_bfloat16(k01[k]);
                g00[5] = float32_to_bfloat16(k11[k]);
                g00[6] = float32_to_bfloat16(k21[k]);
                g00[7] = float32_to_bfloat16(k31[k]);

                g00[8] = float32_to_bfloat16(k02[k]);
                g00[9] = float32_to_bfloat16(k12[k]);
                g00[10] = float32_to_bfloat16(k22[k]);
                g00[11] = float32_to_bfloat16(k32[k]);

                g00[12] = float32_to_bfloat16(k03[k]);
                g00[13] = float32_to_bfloat16(k13[k]);
                g00[14] = float32_to_bfloat16(k23[k]);
                g00[15] = float32_to_bfloat16(k33[k]);

                g00 += 16;
            }
        }
    }
}

// Direct convolution, pack-4 bf16 storage: bottom_blob and top_blob hold
// 4-channel-packed bf16 (unsigned short) data; accumulation is done in
// float32 and converted back to bf16 on store.  top_blob must be
// pre-allocated to the output size; bias_data may be empty.
static void convolution_pack4_bf16s_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_bf16, const Mat& bias_data, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt)
{
    int w = bottom_blob.w;
    int channels = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const int maxk = kernel_w * kernel_h;

    // kernel offsets: flat input-row offset of each kernel tap, with the
    // dilation baked in so the inner loop is a simple indexed gather
    std::vector<int> _space_ofs(maxk);
    int* space_ofs = &_space_ofs[0];
    {
        int p1 = 0;
        int p2 = 0;
        int gap = w * dilation_h - kernel_w * dilation_w;
        for (int i = 0; i < kernel_h; i++)
        {
            for (int j = 0; j < kernel_w; j++)
            {
                space_ofs[p1] = p2;
                p1++;
                p2 += dilation_w;
            }
            p2 += gap;
        }
    }

    const float* bias_data_ptr = bias_data;

    // num_output
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        unsigned short* outptr = top_blob.channel(p);

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                float32x4_t _sum = vdupq_n_f32(0.f);

                if (bias_data_ptr)
                {
                    _sum = vld1q_f32(bias_data_ptr + p * 4);
                }

                const unsigned short* kptr = weight_data_bf16.channel(p);

                // channels
                for (int q = 0; q < channels; q++)
                {
                    const Mat m = bottom_blob.channel(q);
                    const unsigned short* sptr = m.row<const unsigned short>(i * stride_h) + j * stride_w * 4;

                    for (int k = 0; k < maxk; k++)
                    {
                        // one packed-4 input pixel and the matching 4x4
                        // weight tile (see transform above)
                        float32x4_t _val = vcvt_f32_bf16(vld1_u16(sptr + space_ofs[k] * 4));

                        float32x4_t _w0 = vcvt_f32_bf16(vld1_u16(kptr));
                        float32x4_t _w1 = vcvt_f32_bf16(vld1_u16(kptr + 4));
                        float32x4_t _w2 = vcvt_f32_bf16(vld1_u16(kptr + 8));
                        float32x4_t _w3 = vcvt_f32_bf16(vld1_u16(kptr + 12));

#if __aarch64__
                        _sum = vmlaq_laneq_f32(_sum, _w0, _val, 0);
                        _sum = vmlaq_laneq_f32(_sum, _w1, _val, 1);
                        _sum = vmlaq_laneq_f32(_sum, _w2, _val, 2);
                        _sum = vmlaq_laneq_f32(_sum, _w3, _val, 3);
#else
                        // armv7 has no laneq multiply-accumulate; split the
                        // vector into low/high halves instead
                        _sum = vmlaq_lane_f32(_sum, _w0, vget_low_f32(_val), 0);
                        _sum = vmlaq_lane_f32(_sum, _w1, vget_low_f32(_val), 1);
                        _sum = vmlaq_lane_f32(_sum, _w2, vget_high_f32(_val), 0);
                        _sum = vmlaq_lane_f32(_sum, _w3, vget_high_f32(_val), 1);
#endif

                        kptr += 16;
                    }
                }

                _sum = activation_ps(_sum, activation_type, activation_params);

                vst1_u16(outptr + j * 4, vcvt_bf16_f32(_sum));
            }

            outptr += outw * 4;
        }
    }
}
DRB050-functionparameter-orig-yes.c
/* Copyright (C) 1991-2018 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>. */

/* This header is separate from features.h so that the compiler can
   include it implicitly at the start of every compilation.  It must
   not itself include <features.h> or any other header that includes
   <features.h> because the implicit include comes before any feature
   test macros that may be defined in a source file before it first
   explicitly includes a system header.  GCC knows the name of this
   header in order to preinclude it. */

/* glibc's intent is to support the IEC 559 math functionality, real
   and complex.  If the GCC (4.9 and later) predefined macros
   specifying compiler intent are available, use them to determine
   whether the overall intent is to support these features; otherwise,
   presume an older compiler has intent to support these features and
   define these macros by default. */

/* wchar_t uses Unicode 10.0.0.  Version 10.0 of the Unicode Standard is
   synchronized with ISO/IEC 10646:2017, fifth edition, plus the following
   additions from Amendment 1 to the fifth edition:
   - 56 emoji characters
   - 285 hentaigana
   - 3 additional Zanabazar Square characters */

/* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
   Produced at the Lawrence Livermore National Laboratory

   Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
   Markus Schordan, and Ian Karlin
   (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
   schordan1@llnl.gov, karlin1@llnl.gov)

   LLNL-CODE-732144
   All rights reserved.

   This file is part of DataRaceBench. For details, see
   https://github.com/LLNL/dataracebench. Please also see the LICENSE file
   for our additional BSD notice.

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are
   met:

   * Redistributions of source code must retain the above copyright
     notice, this list of conditions and the disclaimer below.

   * Redistributions in binary form must reproduce the above copyright
     notice, this list of conditions and the disclaimer (as noted below)
     in the documentation and/or other materials provided with the
     distribution.

   * Neither the name of the LLNS/LLNL nor the names of its contributors
     may be used to endorse or promote products derived from this
     software without specific prior written permission.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE
   LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR
   CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
   EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
   PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
   PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
   OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/ #include <stdio.h> #include <stdlib.h> /* Arrays passed as function parameters */ void foo1(double o1[], double c[], int len) { int i; #pragma cetus private(volnew_o8) #pragma loop name foo1#0 for (i=0; i<len; ++ i) { double volnew_o8 = 0.5*c[i]; o1[i]=volnew_o8; } return ; } int main() { double o1[101]; double c[101]; int i; int len = 100; int _ret_val_0; #pragma cetus private(i) #pragma loop name main#0 #pragma cetus parallel #pragma omp parallel for private(i) for (i=0; i<len; ++ i) { c[i]=(i+1.01); o1[i]=(i+1.01); } foo1( & o1[1], & o1[0], 100); #pragma cetus private(i) #pragma loop name main#1 for (i=0; i<len; ++ i) { printf("%lf\n", o1[i]); } _ret_val_0=0; return _ret_val_0; }